types.ts
import { Point } from "../types";
import { FONT_FAMILY, THEME, VERTICAL_ALIGN } from "../constants";
export type ChartType = "bar" | "line";
export type FillStyle = "hachure" | "cross-hatch" | "solid";
export type FontFamilyKeys = keyof typeof FONT_FAMILY;
export type FontFamilyValues = typeof FONT_FAMILY[FontFamilyKeys];
export type Theme = typeof THEME[keyof typeof THEME];
export type FontString = string & { _brand: "fontString" };
export type GroupId = string;
export type PointerType = "mouse" | "pen" | "touch";
export type StrokeSharpness = "round" | "sharp";
export type StrokeStyle = "solid" | "dashed" | "dotted";
export type TextAlign = "left" | "center" | "right";
type VerticalAlignKeys = keyof typeof VERTICAL_ALIGN;
export type VerticalAlign = typeof VERTICAL_ALIGN[VerticalAlignKeys];
type _ExcalidrawElementBase = Readonly<{
  id: string;
  x: number;
  y: number;
  strokeColor: string;
  backgroundColor: string;
  fillStyle: FillStyle;
  strokeWidth: number;
  strokeStyle: StrokeStyle;
  strokeSharpness: StrokeSharpness;
  roughness: number;
  opacity: number;
  width: number;
  height: number;
  angle: number;
  /** Random integer used to seed shape generation so that the roughjs shape
      doesn't differ across renders. */
  seed: number;
  /** Integer that is sequentially incremented on each change. Used to reconcile
      elements during collaboration or when saving to server. */
  version: number;
  /** Random integer that is regenerated on each change.
      Used for deterministic reconciliation of updates during collaboration,
      in case the versions (see above) are identical. */
  versionNonce: number;
  isDeleted: boolean;
  /** List of groups the element belongs to.
      Ordered from deepest to shallowest. */
  groupIds: readonly GroupId[];
  /** other elements that are bound to this element */
  boundElements:
    | readonly Readonly<{
        id: ExcalidrawLinearElement["id"];
        type: "arrow" | "text";
      }>[]
    | null;
  /** epoch (ms) timestamp of last element update */
  updated: number;
  link: string | null;
}>;
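/* Illustrative sketch (not part of the original file): one way the
   `version`/`versionNonce` fields documented above might be maintained on
   each mutation. `randomInteger` and `bumpVersion` are hypothetical helpers,
   not Excalidraw exports. */
const randomInteger = () => Math.floor(Math.random() * 2 ** 31);
const bumpVersion = <T extends { version: number; versionNonce: number }>(
  element: T,
): T =>
  Object.assign({}, element, {
    version: element.version + 1, // sequential counter used to reconcile edits
    versionNonce: randomInteger(), // regenerated tie-breaker for equal versions
  });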
export type ExcalidrawSelectionElement = _ExcalidrawElementBase & {
type: "selection";
};
export type ExcalidrawRectangleElement = _ExcalidrawElementBase & {
type: "rectangle";
};
export type ExcalidrawDiamondElement = _ExcalidrawElementBase & {
type: "diamond";
};
export type ExcalidrawEllipseElement = _ExcalidrawElementBase & {
type: "ellipse";
};
export type ExcalidrawImageElement = _ExcalidrawElementBase &
  Readonly<{
    type: "image";
    fileId: FileId | null;
    /** whether respective file is persisted */
    status: "pending" | "saved" | "error";
    /** X and Y scale factors <-1, 1>, used for image axis flipping */
    scale: [number, number];
  }>;
export type InitializedExcalidrawImageElement = MarkNonNullable<
  ExcalidrawImageElement,
  "fileId"
>;
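/* `MarkNonNullable` is imported from elsewhere in the codebase and not shown
   in this excerpt. A minimal sketch, assuming it only needs to strip `null`
   from the listed keys, could look like this (hypothetical, for
   illustration): */
type _MarkNonNullableSketch<T, K extends keyof T> = Omit<T, K> & {
  readonly [P in K]-?: NonNullable<T[P]>;
};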
/**
* These are elements that don't have any additional properties.
*/
export type ExcalidrawGenericElement =
| ExcalidrawSelectionElement
| ExcalidrawRectangleElement
| ExcalidrawDiamondElement
| ExcalidrawEllipseElement;
/**
* ExcalidrawElement should be JSON serializable and (eventually) contain
* no computed data. The list of all ExcalidrawElements should be shareable
* between peers and contain no state local to the peer.
*/
export type ExcalidrawElement =
| ExcalidrawGenericElement
| ExcalidrawTextElement
| ExcalidrawLinearElement
| ExcalidrawFreeDrawElement
| ExcalidrawImageElement;
export type NonDeleted<TElement extends ExcalidrawElement> = TElement & {
  isDeleted: boolean;
};
export type NonDeletedExcalidrawElement = NonDeleted<ExcalidrawElement>;
export type ExcalidrawTextElement = _ExcalidrawElementBase &
  Readonly<{
    type: "text";
    fontSize: number;
    fontFamily: FontFamilyValues;
    text: string;
    baseline: number;
    textAlign: TextAlign;
    verticalAlign: VerticalAlign;
    containerId: ExcalidrawGenericElement["id"] | null;
    originalText: string;
  }>;
export type ExcalidrawBindableElement =
| ExcalidrawRectangleElement
| ExcalidrawDiamondElement
| ExcalidrawEllipseElement
| ExcalidrawTextElement
| ExcalidrawImageElement;
export type ExcalidrawTextContainer =
| ExcalidrawRectangleElement
| ExcalidrawDiamondElement
| ExcalidrawEllipseElement
| ExcalidrawImageElement;
export type ExcalidrawTextElementWithContainer = {
  containerId: ExcalidrawTextContainer["id"];
} & ExcalidrawTextElement;
export type PointBinding = {
  elementId: ExcalidrawBindableElement["id"];
  focus: number;
  gap: number;
};
export type Arrowhead = "arrow" | "bar" | "dot" | "triangle";
export type ExcalidrawLinearElement = _ExcalidrawElementBase &
  Readonly<{
    type: "line" | "arrow";
    points: readonly Point[];
    lastCommittedPoint: Point | null;
    startBinding: PointBinding | null;
    endBinding: PointBinding | null;
    startArrowhead: Arrowhead | null;
    endArrowhead: Arrowhead | null;
  }>;
export type ExcalidrawFreeDrawElement = _ExcalidrawElementBase &
  Readonly<{
    type: "freedraw";
    points: readonly Point[];
    pressures: readonly number[];
    simulatePressure: boolean;
    lastCommittedPoint: Point | null;
  }>;
export type FileId = string & { _brand: "FileId" };
travis_test.py
"""
The following code is intended to be run only by travis for continuius intengration and testing
purposes. For implementation examples see notebooks in the examples folder.
"""
from PIL import Image, ImageDraw
import torch
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
import numpy as np
import pandas as pd
from time import time
import sys, os
import glob
from models.mtcnn import MTCNN, fixed_image_standardization
from models.inception_resnet_v1 import InceptionResnetV1, get_torch_home
#### CLEAR ALL OUTPUT FILES ####
checkpoints = glob.glob(os.path.join(get_torch_home(), 'checkpoints/*'))
for c in checkpoints:
    print('Removing {}'.format(c))
    os.remove(c)
crop_files = glob.glob('data/test_images_aligned/**/*.png')
for c in crop_files:
    print('Removing {}'.format(c))
    os.remove(c)
#### TEST EXAMPLE IPYNB'S ####
os.system('jupyter nbconvert --to script --stdout examples/infer.ipynb examples/finetune.ipynb > examples/tmptest.py')
os.chdir('examples')
try:
    import examples.tmptest
except:
    import tmptest
os.chdir('..')
#### TEST MTCNN ####
def get_image(path, trans):
    img = Image.open(path)
    img = trans(img)
    return img
trans = transforms.Compose([
    transforms.Resize(512)
])
trans_cropped = transforms.Compose([
    np.float32,
    transforms.ToTensor(),
    fixed_image_standardization
])
dataset = datasets.ImageFolder('data/test_images', transform=trans)
dataset.idx_to_class = {k: v for v, k in dataset.class_to_idx.items()}
mtcnn_pt = MTCNN(device=torch.device('cpu'))
names = []
aligned = []
aligned_fromfile = []
for img, idx in dataset:
    name = dataset.idx_to_class[idx]
    start = time()
    img_align = mtcnn_pt(img, save_path='data/test_images_aligned/{}/1.png'.format(name))
    print('MTCNN time: {:6f} seconds'.format(time() - start))

    # Comparison between types
    img_box = mtcnn_pt.detect(img)[0]
    assert (img_box - mtcnn_pt.detect(np.array(img))[0]).sum() < 1e-2
    assert (img_box - mtcnn_pt.detect(torch.as_tensor(np.array(img)))[0]).sum() < 1e-2

    # Batching test
    assert (img_box - mtcnn_pt.detect([img, img])[0]).sum() < 1e-2
    assert (img_box - mtcnn_pt.detect(np.array([np.array(img), np.array(img)]))[0]).sum() < 1e-2
    assert (img_box - mtcnn_pt.detect(torch.as_tensor([np.array(img), np.array(img)]))[0]).sum() < 1e-2

    # Box selection
    mtcnn_pt.selection_method = 'probability'
    print('\nprobability - ', mtcnn_pt.detect(img))
    mtcnn_pt.selection_method = 'largest'
    print('largest - ', mtcnn_pt.detect(img))
    mtcnn_pt.selection_method = 'largest_over_threshold'
    print('largest_over_threshold - ', mtcnn_pt.detect(img))
    mtcnn_pt.selection_method = 'center_weighted_size'
    print('center_weighted_size - ', mtcnn_pt.detect(img))

    if img_align is not None:
        names.append(name)
        aligned.append(img_align)
        aligned_fromfile.append(get_image('data/test_images_aligned/{}/1.png'.format(name), trans_cropped))
aligned = torch.stack(aligned)
aligned_fromfile = torch.stack(aligned_fromfile)
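# The two stacks hold the same faces reached by different paths: `aligned`
# comes straight from MTCNN's output tensors, while `aligned_fromfile`
# re-reads the PNGs saved above, so the embedding test below also covers the
# save/load path.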
#### TEST EMBEDDINGS ####
expected = [
    [
        [0.000000, 1.482895, 0.886342, 1.438450, 1.437583],
        [1.482895, 0.000000, 1.345686, 1.029880, 1.061939],
        [0.886342, 1.345686, 0.000000, 1.363125, 1.338803],
        [1.438450, 1.029880, 1.363125, 0.000000, 1.066040],
        [1.437583, 1.061939, 1.338803, 1.066040, 0.000000]
    ],
    [
        [0.000000, 1.430769, 0.992931, 1.414197, 1.329544],
        [1.430769, 0.000000, 1.253911, 1.144899, 1.079755],
        [0.992931, 1.253911, 0.000000, 1.358875, 1.337322],
        [1.414197, 1.144899, 1.358875, 0.000000, 1.204118],
        [1.329544, 1.079755, 1.337322, 1.204118, 0.000000]
    ]
]
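# Each matrix above holds the expected pairwise Euclidean distances between
# the five test identities' embeddings, one matrix per pretrained weight set
# ('vggface2' first, 'casia-webface' second); the diagonal is zero by
# construction.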
for i, ds in enumerate(['vggface2', 'casia-webface']):
    resnet_pt = InceptionResnetV1(pretrained=ds).eval()
    start = time()
    embs = resnet_pt(aligned)
    print('\nResnet time: {:6f} seconds\n'.format(time() - start))
    embs_fromfile = resnet_pt(aligned_fromfile)
    dists = [[(emb - e).norm().item() for e in embs] for emb in embs]
    dists_fromfile = [[(emb - e).norm().item() for e in embs_fromfile] for emb in embs_fromfile]
    print('\nOutput:')
    print(pd.DataFrame(dists, columns=names, index=names))
    print('\nOutput (from file):')
    print(pd.DataFrame(dists_fromfile, columns=names, index=names))
    print('\nExpected:')
    print(pd.DataFrame(expected[i], columns=names, index=names))
    total_error = (torch.tensor(dists) - torch.tensor(expected[i])).norm()
    total_error_fromfile = (torch.tensor(dists_fromfile) - torch.tensor(expected[i])).norm()
    print('\nTotal error: {}, {}'.format(total_error, total_error_fromfile))

    if sys.platform != 'win32':
        assert total_error < 1e-4
        assert total_error_fromfile < 1e-4
#### TEST CLASSIFICATION ####
resnet_pt = InceptionResnetV1(pretrained=ds, classify=True).eval()
prob = resnet_pt(aligned)
#### MULTI-FACE TEST ####
mtcnn = MTCNN(keep_all=True)
img = Image.open('data/multiface.jpg')
boxes, probs = mtcnn.detect(img)
draw = ImageDraw.Draw(img)
for i, box in enumerate(boxes):
    draw.rectangle(box.tolist())
mtcnn(img, save_path='data/tmp.png')
#### MTCNN TYPES TEST ####
img = Image.open('data/multiface.jpg')
mtcnn = MTCNN(keep_all=True)
boxes_ref, _ = mtcnn.detect(img)
_ = mtcnn(img)
mtcnn = MTCNN(keep_all=True).double()
boxes_test, _ = mtcnn.detect(img)
_ = mtcnn(img)
box_diff = boxes_ref[np.argsort(boxes_ref[:,1])] - boxes_test[np.argsort(boxes_test[:,1])]
total_error = np.sum(np.abs(box_diff))
print('\nfp64 Total box error: {}'.format(total_error))
assert total_error < 1e-2
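# (boxes are sorted by their top y-coordinate before differencing so the fp32
# and fp64 detections pair up even if the detection order differs)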
# half precision is not supported on CPUs, only GPUs
if torch.cuda.is_available():
    mtcnn = MTCNN(keep_all=True, device='cuda').half()
    boxes_test, _ = mtcnn.detect(img)
    _ = mtcnn(img)
    box_diff = boxes_ref[np.argsort(boxes_ref[:,1])] - boxes_test[np.argsort(boxes_test[:,1])]
    print('fp16 Total box error: {}'.format(np.sum(np.abs(box_diff))))

    # compare against the newer automatic mixed precision (AMP) path
    if hasattr(torch.cuda, 'amp'):
        with torch.cuda.amp.autocast():
            mtcnn = MTCNN(keep_all=True, device='cuda')
            boxes_test, _ = mtcnn.detect(img)
            _ = mtcnn(img)
            box_diff = boxes_ref[np.argsort(boxes_ref[:,1])] - boxes_test[np.argsort(boxes_test[:,1])]
            print('AMP total box error: {}'.format(np.sum(np.abs(box_diff))))
#### MULTI-IMAGE TEST ####
mtcnn = MTCNN(keep_all=True)
img = [
    Image.open('data/multiface.jpg'),
    Image.open('data/multiface.jpg')
]
batch_boxes, batch_probs = mtcnn.detect(img)
mtcnn(img, save_path=['data/tmp1.png', 'data/tmp1.png'])
tmp_files = glob.glob('data/tmp*')
for f in tmp_files:
    os.remove(f)
#### NO-FACE TEST ####
img = Image.new('RGB', (512, 512))
mtcnn(img)
mtcnn(img, return_prob=True)
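# A hedged sanity print (an addition, not in the original script): on a blank
# image MTCNN is expected to detect no faces, so the forward pass should
# return None.
print('No-face output: {}'.format(mtcnn(img)))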
openapi_generated.go
// +build !ignore_autogenerated
// Copyright PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by openapi-gen. DO NOT EDIT.
// This file was autogenerated by openapi-gen. Do not edit it manually!
package v1alpha1
import (
spec "github.com/go-openapi/spec"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
common "k8s.io/kube-openapi/pkg/common"
)
func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
return map[string]common.OpenAPIDefinition{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BRConfig": schema_pkg_apis_pingcap_v1alpha1_BRConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Backup": schema_pkg_apis_pingcap_v1alpha1_Backup(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BackupList": schema_pkg_apis_pingcap_v1alpha1_BackupList(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BackupSchedule": schema_pkg_apis_pingcap_v1alpha1_BackupSchedule(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BackupScheduleList": schema_pkg_apis_pingcap_v1alpha1_BackupScheduleList(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BackupScheduleSpec": schema_pkg_apis_pingcap_v1alpha1_BackupScheduleSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BackupSpec": schema_pkg_apis_pingcap_v1alpha1_BackupSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BasicAutoScalerSpec": schema_pkg_apis_pingcap_v1alpha1_BasicAutoScalerSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Binlog": schema_pkg_apis_pingcap_v1alpha1_Binlog(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ComponentSpec": schema_pkg_apis_pingcap_v1alpha1_ComponentSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.FileLogConfig": schema_pkg_apis_pingcap_v1alpha1_FileLogConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.GcsStorageProvider": schema_pkg_apis_pingcap_v1alpha1_GcsStorageProvider(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.HelperSpec": schema_pkg_apis_pingcap_v1alpha1_HelperSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Log": schema_pkg_apis_pingcap_v1alpha1_Log(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.MonitorContainer": schema_pkg_apis_pingcap_v1alpha1_MonitorContainer(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.OpenTracing": schema_pkg_apis_pingcap_v1alpha1_OpenTracing(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.OpenTracingReporter": schema_pkg_apis_pingcap_v1alpha1_OpenTracingReporter(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.OpenTracingSampler": schema_pkg_apis_pingcap_v1alpha1_OpenTracingSampler(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDConfig": schema_pkg_apis_pingcap_v1alpha1_PDConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDLogConfig": schema_pkg_apis_pingcap_v1alpha1_PDLogConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDMetricConfig": schema_pkg_apis_pingcap_v1alpha1_PDMetricConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDNamespaceConfig": schema_pkg_apis_pingcap_v1alpha1_PDNamespaceConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDReplicationConfig": schema_pkg_apis_pingcap_v1alpha1_PDReplicationConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDScheduleConfig": schema_pkg_apis_pingcap_v1alpha1_PDScheduleConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDSchedulerConfig": schema_pkg_apis_pingcap_v1alpha1_PDSchedulerConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDSecurityConfig": schema_pkg_apis_pingcap_v1alpha1_PDSecurityConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDServerConfig": schema_pkg_apis_pingcap_v1alpha1_PDServerConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDSpec": schema_pkg_apis_pingcap_v1alpha1_PDSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDStoreLabel": schema_pkg_apis_pingcap_v1alpha1_PDStoreLabel(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Performance": schema_pkg_apis_pingcap_v1alpha1_Performance(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PessimisticTxn": schema_pkg_apis_pingcap_v1alpha1_PessimisticTxn(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PlanCache": schema_pkg_apis_pingcap_v1alpha1_PlanCache(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Plugin": schema_pkg_apis_pingcap_v1alpha1_Plugin(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PreparedPlanCache": schema_pkg_apis_pingcap_v1alpha1_PreparedPlanCache(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ProxyProtocol": schema_pkg_apis_pingcap_v1alpha1_ProxyProtocol(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PumpSpec": schema_pkg_apis_pingcap_v1alpha1_PumpSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Restore": schema_pkg_apis_pingcap_v1alpha1_Restore(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.RestoreList": schema_pkg_apis_pingcap_v1alpha1_RestoreList(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.RestoreSpec": schema_pkg_apis_pingcap_v1alpha1_RestoreSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.S3StorageProvider": schema_pkg_apis_pingcap_v1alpha1_S3StorageProvider(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Security": schema_pkg_apis_pingcap_v1alpha1_Security(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ServiceSpec": schema_pkg_apis_pingcap_v1alpha1_ServiceSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Status": schema_pkg_apis_pingcap_v1alpha1_Status(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.StmtSummary": schema_pkg_apis_pingcap_v1alpha1_StmtSummary(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.StorageProvider": schema_pkg_apis_pingcap_v1alpha1_StorageProvider(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBAccessConfig": schema_pkg_apis_pingcap_v1alpha1_TiDBAccessConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBConfig": schema_pkg_apis_pingcap_v1alpha1_TiDBConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBServiceSpec": schema_pkg_apis_pingcap_v1alpha1_TiDBServiceSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBSlowLogTailerSpec": schema_pkg_apis_pingcap_v1alpha1_TiDBSlowLogTailerSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBSpec": schema_pkg_apis_pingcap_v1alpha1_TiDBSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVBlockCacheConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVBlockCacheConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVCfConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVCfConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVClient": schema_pkg_apis_pingcap_v1alpha1_TiKVClient(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVCoprocessorConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVCoprocessorConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVCoprocessorReadPoolConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVCoprocessorReadPoolConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVDbConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVDbConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVGCConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVGCConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVImportConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVImportConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVPDConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVPDConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVRaftDBConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVRaftDBConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVRaftstoreConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVRaftstoreConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVReadPoolConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVReadPoolConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVSecurityConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVSecurityConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVServerConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVServerConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVSpec": schema_pkg_apis_pingcap_v1alpha1_TiKVSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVStorageConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVStorageConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVStorageReadPoolConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVStorageReadPoolConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVTitanCfConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVTitanCfConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVTitanDBConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVTitanDBConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbAutoScalerSpec": schema_pkg_apis_pingcap_v1alpha1_TidbAutoScalerSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbCluster": schema_pkg_apis_pingcap_v1alpha1_TidbCluster(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterAutoScaler": schema_pkg_apis_pingcap_v1alpha1_TidbClusterAutoScaler(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterAutoScalerList": schema_pkg_apis_pingcap_v1alpha1_TidbClusterAutoScalerList(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterAutoScalerSpec": schema_pkg_apis_pingcap_v1alpha1_TidbClusterAutoScalerSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterList": schema_pkg_apis_pingcap_v1alpha1_TidbClusterList(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterRef": schema_pkg_apis_pingcap_v1alpha1_TidbClusterRef(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterSpec": schema_pkg_apis_pingcap_v1alpha1_TidbClusterSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbInitializer": schema_pkg_apis_pingcap_v1alpha1_TidbInitializer(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbInitializerList": schema_pkg_apis_pingcap_v1alpha1_TidbInitializerList(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbInitializerSpec": schema_pkg_apis_pingcap_v1alpha1_TidbInitializerSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbInitializerStatus": schema_pkg_apis_pingcap_v1alpha1_TidbInitializerStatus(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbMonitor": schema_pkg_apis_pingcap_v1alpha1_TidbMonitor(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbMonitorList": schema_pkg_apis_pingcap_v1alpha1_TidbMonitorList(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbMonitorRef": schema_pkg_apis_pingcap_v1alpha1_TidbMonitorRef(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbMonitorSpec": schema_pkg_apis_pingcap_v1alpha1_TidbMonitorSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TikvAutoScalerSpec": schema_pkg_apis_pingcap_v1alpha1_TikvAutoScalerSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TxnLocalLatches": schema_pkg_apis_pingcap_v1alpha1_TxnLocalLatches(ref),
"k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource": schema_k8sio_api_core_v1_AWSElasticBlockStoreVolumeSource(ref),
"k8s.io/api/core/v1.Affinity": schema_k8sio_api_core_v1_Affinity(ref),
"k8s.io/api/core/v1.AttachedVolume": schema_k8sio_api_core_v1_AttachedVolume(ref),
"k8s.io/api/core/v1.AvoidPods": schema_k8sio_api_core_v1_AvoidPods(ref),
"k8s.io/api/core/v1.AzureDiskVolumeSource": schema_k8sio_api_core_v1_AzureDiskVolumeSource(ref),
"k8s.io/api/core/v1.AzureFilePersistentVolumeSource": schema_k8sio_api_core_v1_AzureFilePersistentVolumeSource(ref),
"k8s.io/api/core/v1.AzureFileVolumeSource": schema_k8sio_api_core_v1_AzureFileVolumeSource(ref),
"k8s.io/api/core/v1.Binding": schema_k8sio_api_core_v1_Binding(ref),
"k8s.io/api/core/v1.CSIPersistentVolumeSource": schema_k8sio_api_core_v1_CSIPersistentVolumeSource(ref),
"k8s.io/api/core/v1.CSIVolumeSource": schema_k8sio_api_core_v1_CSIVolumeSource(ref),
"k8s.io/api/core/v1.Capabilities": schema_k8sio_api_core_v1_Capabilities(ref),
"k8s.io/api/core/v1.CephFSPersistentVolumeSource": schema_k8sio_api_core_v1_CephFSPersistentVolumeSource(ref),
"k8s.io/api/core/v1.CephFSVolumeSource": schema_k8sio_api_core_v1_CephFSVolumeSource(ref),
"k8s.io/api/core/v1.CinderPersistentVolumeSource": schema_k8sio_api_core_v1_CinderPersistentVolumeSource(ref),
"k8s.io/api/core/v1.CinderVolumeSource": schema_k8sio_api_core_v1_CinderVolumeSource(ref),
"k8s.io/api/core/v1.ClientIPConfig": schema_k8sio_api_core_v1_ClientIPConfig(ref),
"k8s.io/api/core/v1.ComponentCondition": schema_k8sio_api_core_v1_ComponentCondition(ref),
"k8s.io/api/core/v1.ComponentStatus": schema_k8sio_api_core_v1_ComponentStatus(ref),
"k8s.io/api/core/v1.ComponentStatusList": schema_k8sio_api_core_v1_ComponentStatusList(ref),
"k8s.io/api/core/v1.ConfigMap": schema_k8sio_api_core_v1_ConfigMap(ref),
"k8s.io/api/core/v1.ConfigMapEnvSource": schema_k8sio_api_core_v1_ConfigMapEnvSource(ref),
"k8s.io/api/core/v1.ConfigMapKeySelector": schema_k8sio_api_core_v1_ConfigMapKeySelector(ref),
"k8s.io/api/core/v1.ConfigMapList": schema_k8sio_api_core_v1_ConfigMapList(ref),
"k8s.io/api/core/v1.ConfigMapNodeConfigSource": schema_k8sio_api_core_v1_ConfigMapNodeConfigSource(ref),
"k8s.io/api/core/v1.ConfigMapProjection": schema_k8sio_api_core_v1_ConfigMapProjection(ref),
"k8s.io/api/core/v1.ConfigMapVolumeSource": schema_k8sio_api_core_v1_ConfigMapVolumeSource(ref),
"k8s.io/api/core/v1.Container": schema_k8sio_api_core_v1_Container(ref),
"k8s.io/api/core/v1.ContainerImage": schema_k8sio_api_core_v1_ContainerImage(ref),
"k8s.io/api/core/v1.ContainerPort": schema_k8sio_api_core_v1_ContainerPort(ref),
"k8s.io/api/core/v1.ContainerState": schema_k8sio_api_core_v1_ContainerState(ref),
"k8s.io/api/core/v1.ContainerStateRunning": schema_k8sio_api_core_v1_ContainerStateRunning(ref),
"k8s.io/api/core/v1.ContainerStateTerminated": schema_k8sio_api_core_v1_ContainerStateTerminated(ref),
"k8s.io/api/core/v1.ContainerStateWaiting": schema_k8sio_api_core_v1_ContainerStateWaiting(ref),
"k8s.io/api/core/v1.ContainerStatus": schema_k8sio_api_core_v1_ContainerStatus(ref),
"k8s.io/api/core/v1.DaemonEndpoint": schema_k8sio_api_core_v1_DaemonEndpoint(ref),
"k8s.io/api/core/v1.DownwardAPIProjection": schema_k8sio_api_core_v1_DownwardAPIProjection(ref),
"k8s.io/api/core/v1.DownwardAPIVolumeFile": schema_k8sio_api_core_v1_DownwardAPIVolumeFile(ref),
"k8s.io/api/core/v1.DownwardAPIVolumeSource": schema_k8sio_api_core_v1_DownwardAPIVolumeSource(ref),
"k8s.io/api/core/v1.EmptyDirVolumeSource": schema_k8sio_api_core_v1_EmptyDirVolumeSource(ref),
"k8s.io/api/core/v1.EndpointAddress": schema_k8sio_api_core_v1_EndpointAddress(ref),
"k8s.io/api/core/v1.EndpointPort": schema_k8sio_api_core_v1_EndpointPort(ref),
"k8s.io/api/core/v1.EndpointSubset": schema_k8sio_api_core_v1_EndpointSubset(ref),
"k8s.io/api/core/v1.Endpoints": schema_k8sio_api_core_v1_Endpoints(ref),
"k8s.io/api/core/v1.EndpointsList": schema_k8sio_api_core_v1_EndpointsList(ref),
"k8s.io/api/core/v1.EnvFromSource": schema_k8sio_api_core_v1_EnvFromSource(ref),
"k8s.io/api/core/v1.EnvVar": schema_k8sio_api_core_v1_EnvVar(ref),
"k8s.io/api/core/v1.EnvVarSource": schema_k8sio_api_core_v1_EnvVarSource(ref),
"k8s.io/api/core/v1.EphemeralContainer": schema_k8sio_api_core_v1_EphemeralContainer(ref),
"k8s.io/api/core/v1.EphemeralContainerCommon": schema_k8sio_api_core_v1_EphemeralContainerCommon(ref),
"k8s.io/api/core/v1.EphemeralContainers": schema_k8sio_api_core_v1_EphemeralContainers(ref),
"k8s.io/api/core/v1.Event": schema_k8sio_api_core_v1_Event(ref),
"k8s.io/api/core/v1.EventList": schema_k8sio_api_core_v1_EventList(ref),
"k8s.io/api/core/v1.EventSeries": schema_k8sio_api_core_v1_EventSeries(ref),
"k8s.io/api/core/v1.EventSource": schema_k8sio_api_core_v1_EventSource(ref),
"k8s.io/api/core/v1.ExecAction": schema_k8sio_api_core_v1_ExecAction(ref),
"k8s.io/api/core/v1.FCVolumeSource": schema_k8sio_api_core_v1_FCVolumeSource(ref),
"k8s.io/api/core/v1.FlexPersistentVolumeSource": schema_k8sio_api_core_v1_FlexPersistentVolumeSource(ref),
"k8s.io/api/core/v1.FlexVolumeSource": schema_k8sio_api_core_v1_FlexVolumeSource(ref),
"k8s.io/api/core/v1.FlockerVolumeSource": schema_k8sio_api_core_v1_FlockerVolumeSource(ref),
"k8s.io/api/core/v1.GCEPersistentDiskVolumeSource": schema_k8sio_api_core_v1_GCEPersistentDiskVolumeSource(ref),
"k8s.io/api/core/v1.GitRepoVolumeSource": schema_k8sio_api_core_v1_GitRepoVolumeSource(ref),
"k8s.io/api/core/v1.GlusterfsPersistentVolumeSource": schema_k8sio_api_core_v1_GlusterfsPersistentVolumeSource(ref),
"k8s.io/api/core/v1.GlusterfsVolumeSource": schema_k8sio_api_core_v1_GlusterfsVolumeSource(ref),
"k8s.io/api/core/v1.HTTPGetAction": schema_k8sio_api_core_v1_HTTPGetAction(ref),
"k8s.io/api/core/v1.HTTPHeader": schema_k8sio_api_core_v1_HTTPHeader(ref),
"k8s.io/api/core/v1.Handler": schema_k8sio_api_core_v1_Handler(ref),
"k8s.io/api/core/v1.HostAlias": schema_k8sio_api_core_v1_HostAlias(ref),
"k8s.io/api/core/v1.HostPathVolumeSource": schema_k8sio_api_core_v1_HostPathVolumeSource(ref),
"k8s.io/api/core/v1.ISCSIPersistentVolumeSource": schema_k8sio_api_core_v1_ISCSIPersistentVolumeSource(ref),
"k8s.io/api/core/v1.ISCSIVolumeSource": schema_k8sio_api_core_v1_ISCSIVolumeSource(ref),
"k8s.io/api/core/v1.KeyToPath": schema_k8sio_api_core_v1_KeyToPath(ref),
"k8s.io/api/core/v1.Lifecycle": schema_k8sio_api_core_v1_Lifecycle(ref),
"k8s.io/api/core/v1.LimitRange": schema_k8sio_api_core_v1_LimitRange(ref),
"k8s.io/api/core/v1.LimitRangeItem": schema_k8sio_api_core_v1_LimitRangeItem(ref),
"k8s.io/api/core/v1.LimitRangeList": schema_k8sio_api_core_v1_LimitRangeList(ref),
"k8s.io/api/core/v1.LimitRangeSpec": schema_k8sio_api_core_v1_LimitRangeSpec(ref),
"k8s.io/api/core/v1.List": schema_k8sio_api_core_v1_List(ref),
"k8s.io/api/core/v1.LoadBalancerIngress": schema_k8sio_api_core_v1_LoadBalancerIngress(ref),
"k8s.io/api/core/v1.LoadBalancerStatus": schema_k8sio_api_core_v1_LoadBalancerStatus(ref),
"k8s.io/api/core/v1.LocalObjectReference": schema_k8sio_api_core_v1_LocalObjectReference(ref),
"k8s.io/api/core/v1.LocalVolumeSource": schema_k8sio_api_core_v1_LocalVolumeSource(ref),
"k8s.io/api/core/v1.NFSVolumeSource": schema_k8sio_api_core_v1_NFSVolumeSource(ref),
"k8s.io/api/core/v1.Namespace": schema_k8sio_api_core_v1_Namespace(ref),
"k8s.io/api/core/v1.NamespaceCondition": schema_k8sio_api_core_v1_NamespaceCondition(ref),
"k8s.io/api/core/v1.NamespaceList": schema_k8sio_api_core_v1_NamespaceList(ref),
"k8s.io/api/core/v1.NamespaceSpec": schema_k8sio_api_core_v1_NamespaceSpec(ref),
"k8s.io/api/core/v1.NamespaceStatus": schema_k8sio_api_core_v1_NamespaceStatus(ref),
"k8s.io/api/core/v1.Node": schema_k8sio_api_core_v1_Node(ref),
"k8s.io/api/core/v1.NodeAddress": schema_k8sio_api_core_v1_NodeAddress(ref),
"k8s.io/api/core/v1.NodeAffinity": schema_k8sio_api_core_v1_NodeAffinity(ref),
"k8s.io/api/core/v1.NodeCondition": schema_k8sio_api_core_v1_NodeCondition(ref),
"k8s.io/api/core/v1.NodeConfigSource": schema_k8sio_api_core_v1_NodeConfigSource(ref),
"k8s.io/api/core/v1.NodeConfigStatus": schema_k8sio_api_core_v1_NodeConfigStatus(ref),
"k8s.io/api/core/v1.NodeDaemonEndpoints": schema_k8sio_api_core_v1_NodeDaemonEndpoints(ref),
"k8s.io/api/core/v1.NodeList": schema_k8sio_api_core_v1_NodeList(ref),
"k8s.io/api/core/v1.NodeProxyOptions": schema_k8sio_api_core_v1_NodeProxyOptions(ref),
"k8s.io/api/core/v1.NodeResources": schema_k8sio_api_core_v1_NodeResources(ref),
"k8s.io/api/core/v1.NodeSelector": schema_k8sio_api_core_v1_NodeSelector(ref),
"k8s.io/api/core/v1.NodeSelectorRequirement": schema_k8sio_api_core_v1_NodeSelectorRequirement(ref),
"k8s.io/api/core/v1.NodeSelectorTerm": schema_k8sio_api_core_v1_NodeSelectorTerm(ref),
"k8s.io/api/core/v1.NodeSpec": schema_k8sio_api_core_v1_NodeSpec(ref),
"k8s.io/api/core/v1.NodeStatus": schema_k8sio_api_core_v1_NodeStatus(ref),
"k8s.io/api/core/v1.NodeSystemInfo": schema_k8sio_api_core_v1_NodeSystemInfo(ref),
"k8s.io/api/core/v1.ObjectFieldSelector": schema_k8sio_api_core_v1_ObjectFieldSelector(ref),
"k8s.io/api/core/v1.ObjectReference": schema_k8sio_api_core_v1_ObjectReference(ref),
"k8s.io/api/core/v1.PersistentVolume": schema_k8sio_api_core_v1_PersistentVolume(ref),
"k8s.io/api/core/v1.PersistentVolumeClaim": schema_k8sio_api_core_v1_PersistentVolumeClaim(ref),
"k8s.io/api/core/v1.PersistentVolumeClaimCondition": schema_k8sio_api_core_v1_PersistentVolumeClaimCondition(ref),
"k8s.io/api/core/v1.PersistentVolumeClaimList": schema_k8sio_api_core_v1_PersistentVolumeClaimList(ref),
"k8s.io/api/core/v1.PersistentVolumeClaimSpec": schema_k8sio_api_core_v1_PersistentVolumeClaimSpec(ref),
"k8s.io/api/core/v1.PersistentVolumeClaimStatus": schema_k8sio_api_core_v1_PersistentVolumeClaimStatus(ref),
"k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource": schema_k8sio_api_core_v1_PersistentVolumeClaimVolumeSource(ref),
"k8s.io/api/core/v1.PersistentVolumeList": schema_k8sio_api_core_v1_PersistentVolumeList(ref),
"k8s.io/api/core/v1.PersistentVolumeSource": schema_k8sio_api_core_v1_PersistentVolumeSource(ref),
"k8s.io/api/core/v1.PersistentVolumeSpec": schema_k8sio_api_core_v1_PersistentVolumeSpec(ref),
"k8s.io/api/core/v1.PersistentVolumeStatus": schema_k8sio_api_core_v1_PersistentVolumeStatus(ref),
"k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource": schema_k8sio_api_core_v1_PhotonPersistentDiskVolumeSource(ref),
"k8s.io/api/core/v1.Pod": schema_k8sio_api_core_v1_Pod(ref),
"k8s.io/api/core/v1.PodAffinity": schema_k8sio_api_core_v1_PodAffinity(ref),
"k8s.io/api/core/v1.PodAffinityTerm": schema_k8sio_api_core_v1_PodAffinityTerm(ref),
"k8s.io/api/core/v1.PodAntiAffinity": schema_k8sio_api_core_v1_PodAntiAffinity(ref),
"k8s.io/api/core/v1.PodAttachOptions": schema_k8sio_api_core_v1_PodAttachOptions(ref),
"k8s.io/api/core/v1.PodCondition": schema_k8sio_api_core_v1_PodCondition(ref),
"k8s.io/api/core/v1.PodDNSConfig": schema_k8sio_api_core_v1_PodDNSConfig(ref),
"k8s.io/api/core/v1.PodDNSConfigOption": schema_k8sio_api_core_v1_PodDNSConfigOption(ref),
"k8s.io/api/core/v1.PodExecOptions": schema_k8sio_api_core_v1_PodExecOptions(ref),
"k8s.io/api/core/v1.PodIP": schema_k8sio_api_core_v1_PodIP(ref),
"k8s.io/api/core/v1.PodList": schema_k8sio_api_core_v1_PodList(ref),
"k8s.io/api/core/v1.PodLogOptions": schema_k8sio_api_core_v1_PodLogOptions(ref),
"k8s.io/api/core/v1.PodPortForwardOptions": schema_k8sio_api_core_v1_PodPortForwardOptions(ref),
"k8s.io/api/core/v1.PodProxyOptions": schema_k8sio_api_core_v1_PodProxyOptions(ref),
"k8s.io/api/core/v1.PodReadinessGate": schema_k8sio_api_core_v1_PodReadinessGate(ref),
"k8s.io/api/core/v1.PodSecurityContext": schema_k8sio_api_core_v1_PodSecurityContext(ref),
"k8s.io/api/core/v1.PodSignature": schema_k8sio_api_core_v1_PodSignature(ref),
"k8s.io/api/core/v1.PodSpec": schema_k8sio_api_core_v1_PodSpec(ref),
"k8s.io/api/core/v1.PodStatus": schema_k8sio_api_core_v1_PodStatus(ref),
"k8s.io/api/core/v1.PodStatusResult": schema_k8sio_api_core_v1_PodStatusResult(ref),
"k8s.io/api/core/v1.PodTemplate": schema_k8sio_api_core_v1_PodTemplate(ref),
"k8s.io/api/core/v1.PodTemplateList": schema_k8sio_api_core_v1_PodTemplateList(ref),
"k8s.io/api/core/v1.PodTemplateSpec": schema_k8sio_api_core_v1_PodTemplateSpec(ref),
"k8s.io/api/core/v1.PortworxVolumeSource": schema_k8sio_api_core_v1_PortworxVolumeSource(ref),
"k8s.io/api/core/v1.PreferAvoidPodsEntry": schema_k8sio_api_core_v1_PreferAvoidPodsEntry(ref),
"k8s.io/api/core/v1.PreferredSchedulingTerm": schema_k8sio_api_core_v1_PreferredSchedulingTerm(ref),
"k8s.io/api/core/v1.Probe": schema_k8sio_api_core_v1_Probe(ref),
"k8s.io/api/core/v1.ProjectedVolumeSource": schema_k8sio_api_core_v1_ProjectedVolumeSource(ref),
"k8s.io/api/core/v1.QuobyteVolumeSource": schema_k8sio_api_core_v1_QuobyteVolumeSource(ref),
"k8s.io/api/core/v1.RBDPersistentVolumeSource": schema_k8sio_api_core_v1_RBDPersistentVolumeSource(ref),
"k8s.io/api/core/v1.RBDVolumeSource": schema_k8sio_api_core_v1_RBDVolumeSource(ref),
"k8s.io/api/core/v1.RangeAllocation": schema_k8sio_api_core_v1_RangeAllocation(ref),
"k8s.io/api/core/v1.ReplicationController": schema_k8sio_api_core_v1_ReplicationController(ref),
"k8s.io/api/core/v1.ReplicationControllerCondition": schema_k8sio_api_core_v1_ReplicationControllerCondition(ref),
"k8s.io/api/core/v1.ReplicationControllerList": schema_k8sio_api_core_v1_ReplicationControllerList(ref),
"k8s.io/api/core/v1.ReplicationControllerSpec": schema_k8sio_api_core_v1_ReplicationControllerSpec(ref),
"k8s.io/api/core/v1.ReplicationControllerStatus": schema_k8sio_api_core_v1_ReplicationControllerStatus(ref),
"k8s.io/api/core/v1.ResourceFieldSelector": schema_k8sio_api_core_v1_ResourceFieldSelector(ref),
"k8s.io/api/core/v1.ResourceQuota": schema_k8sio_api_core_v1_ResourceQuota(ref),
"k8s.io/api/core/v1.ResourceQuotaList": schema_k8sio_api_core_v1_ResourceQuotaList(ref),
"k8s.io/api/core/v1.ResourceQuotaSpec": schema_k8sio_api_core_v1_ResourceQuotaSpec(ref),
"k8s.io/api/core/v1.ResourceQuotaStatus": schema_k8sio_api_core_v1_ResourceQuotaStatus(ref),
"k8s.io/api/core/v1.ResourceRequirements": schema_k8sio_api_core_v1_ResourceRequirements(ref),
"k8s.io/api/core/v1.SELinuxOptions": schema_k8sio_api_core_v1_SELinuxOptions(ref),
"k8s.io/api/core/v1.ScaleIOPersistentVolumeSource": schema_k8sio_api_core_v1_ScaleIOPersistentVolumeSource(ref),
"k8s.io/api/core/v1.ScaleIOVolumeSource": schema_k8sio_api_core_v1_ScaleIOVolumeSource(ref),
"k8s.io/api/core/v1.ScopeSelector": schema_k8sio_api_core_v1_ScopeSelector(ref),
"k8s.io/api/core/v1.ScopedResourceSelectorRequirement": schema_k8sio_api_core_v1_ScopedResourceSelectorRequirement(ref),
"k8s.io/api/core/v1.Secret": schema_k8sio_api_core_v1_Secret(ref),
"k8s.io/api/core/v1.SecretEnvSource": schema_k8sio_api_core_v1_SecretEnvSource(ref),
"k8s.io/api/core/v1.SecretKeySelector": schema_k8sio_api_core_v1_SecretKeySelector(ref),
"k8s.io/api/core/v1.SecretList": schema_k8sio_api_core_v1_SecretList(ref),
"k8s.io/api/core/v1.SecretProjection": schema_k8sio_api_core_v1_SecretProjection(ref),
"k8s.io/api/core/v1.SecretReference": schema_k8sio_api_core_v1_SecretReference(ref),
"k8s.io/api/core/v1.SecretVolumeSource": schema_k8sio_api_core_v1_SecretVolumeSource(ref),
"k8s.io/api/core/v1.SecurityContext": schema_k8sio_api_core_v1_SecurityContext(ref),
"k8s.io/api/core/v1.SerializedReference": schema_k8sio_api_core_v1_SerializedReference(ref),
"k8s.io/api/core/v1.Service": schema_k8sio_api_core_v1_Service(ref),
"k8s.io/api/core/v1.ServiceAccount": schema_k8sio_api_core_v1_ServiceAccount(ref),
"k8s.io/api/core/v1.ServiceAccountList": schema_k8sio_api_core_v1_ServiceAccountList(ref),
"k8s.io/api/core/v1.ServiceAccountTokenProjection": schema_k8sio_api_core_v1_ServiceAccountTokenProjection(ref),
"k8s.io/api/core/v1.ServiceList": schema_k8sio_api_core_v1_ServiceList(ref),
"k8s.io/api/core/v1.ServicePort": schema_k8sio_api_core_v1_ServicePort(ref),
"k8s.io/api/core/v1.ServiceProxyOptions": schema_k8sio_api_core_v1_ServiceProxyOptions(ref),
"k8s.io/api/core/v1.ServiceSpec": schema_k8sio_api_core_v1_ServiceSpec(ref),
"k8s.io/api/core/v1.ServiceStatus": schema_k8sio_api_core_v1_ServiceStatus(ref),
"k8s.io/api/core/v1.SessionAffinityConfig": schema_k8sio_api_core_v1_SessionAffinityConfig(ref),
"k8s.io/api/core/v1.StorageOSPersistentVolumeSource": schema_k8sio_api_core_v1_StorageOSPersistentVolumeSource(ref),
"k8s.io/api/core/v1.StorageOSVolumeSource": schema_k8sio_api_core_v1_StorageOSVolumeSource(ref),
"k8s.io/api/core/v1.Sysctl": schema_k8sio_api_core_v1_Sysctl(ref),
"k8s.io/api/core/v1.TCPSocketAction": schema_k8sio_api_core_v1_TCPSocketAction(ref),
"k8s.io/api/core/v1.Taint": schema_k8sio_api_core_v1_Taint(ref),
"k8s.io/api/core/v1.Toleration": schema_k8sio_api_core_v1_Toleration(ref),
"k8s.io/api/core/v1.TopologySelectorLabelRequirement": schema_k8sio_api_core_v1_TopologySelectorLabelRequirement(ref),
"k8s.io/api/core/v1.TopologySelectorTerm": schema_k8sio_api_core_v1_TopologySelectorTerm(ref),
"k8s.io/api/core/v1.TopologySpreadConstraint": schema_k8sio_api_core_v1_TopologySpreadConstraint(ref),
"k8s.io/api/core/v1.TypedLocalObjectReference": schema_k8sio_api_core_v1_TypedLocalObjectReference(ref),
"k8s.io/api/core/v1.Volume": schema_k8sio_api_core_v1_Volume(ref),
"k8s.io/api/core/v1.VolumeDevice": schema_k8sio_api_core_v1_VolumeDevice(ref),
"k8s.io/api/core/v1.VolumeMount": schema_k8sio_api_core_v1_VolumeMount(ref),
"k8s.io/api/core/v1.VolumeNodeAffinity": schema_k8sio_api_core_v1_VolumeNodeAffinity(ref),
"k8s.io/api/core/v1.VolumeProjection": schema_k8sio_api_core_v1_VolumeProjection(ref),
"k8s.io/api/core/v1.VolumeSource": schema_k8sio_api_core_v1_VolumeSource(ref),
"k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource": schema_k8sio_api_core_v1_VsphereVirtualDiskVolumeSource(ref),
"k8s.io/api/core/v1.WeightedPodAffinityTerm": schema_k8sio_api_core_v1_WeightedPodAffinityTerm(ref),
"k8s.io/api/core/v1.WindowsSecurityContextOptions": schema_k8sio_api_core_v1_WindowsSecurityContextOptions(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.APIGroup": schema_pkg_apis_meta_v1_APIGroup(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.APIGroupList": schema_pkg_apis_meta_v1_APIGroupList(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.APIResource": schema_pkg_apis_meta_v1_APIResource(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.APIResourceList": schema_pkg_apis_meta_v1_APIResourceList(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.APIVersions": schema_pkg_apis_meta_v1_APIVersions(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.CreateOptions": schema_pkg_apis_meta_v1_CreateOptions(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.DeleteOptions": schema_pkg_apis_meta_v1_DeleteOptions(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.Duration": schema_pkg_apis_meta_v1_Duration(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.ExportOptions": schema_pkg_apis_meta_v1_ExportOptions(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.FieldsV1": schema_pkg_apis_meta_v1_FieldsV1(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.GetOptions": schema_pkg_apis_meta_v1_GetOptions(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.GroupKind": schema_pkg_apis_meta_v1_GroupKind(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.GroupResource": schema_pkg_apis_meta_v1_GroupResource(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersion": schema_pkg_apis_meta_v1_GroupVersion(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery": schema_pkg_apis_meta_v1_GroupVersionForDiscovery(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionKind": schema_pkg_apis_meta_v1_GroupVersionKind(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionResource": schema_pkg_apis_meta_v1_GroupVersionResource(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.InternalEvent": schema_pkg_apis_meta_v1_InternalEvent(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector": schema_pkg_apis_meta_v1_LabelSelector(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelectorRequirement": schema_pkg_apis_meta_v1_LabelSelectorRequirement(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.List": schema_pkg_apis_meta_v1_List(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta": schema_pkg_apis_meta_v1_ListMeta(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.ListOptions": schema_pkg_apis_meta_v1_ListOptions(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.ManagedFieldsEntry": schema_pkg_apis_meta_v1_ManagedFieldsEntry(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime": schema_pkg_apis_meta_v1_MicroTime(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta": schema_pkg_apis_meta_v1_ObjectMeta(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference": schema_pkg_apis_meta_v1_OwnerReference(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.PartialObjectMetadata": schema_pkg_apis_meta_v1_PartialObjectMetadata(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.PartialObjectMetadataList": schema_pkg_apis_meta_v1_PartialObjectMetadataList(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.Patch": schema_pkg_apis_meta_v1_Patch(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.PatchOptions": schema_pkg_apis_meta_v1_PatchOptions(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.Preconditions": schema_pkg_apis_meta_v1_Preconditions(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.RootPaths": schema_pkg_apis_meta_v1_RootPaths(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR": schema_pkg_apis_meta_v1_ServerAddressByClientCIDR(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.Status": schema_pkg_apis_meta_v1_Status(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.StatusCause": schema_pkg_apis_meta_v1_StatusCause(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.StatusDetails": schema_pkg_apis_meta_v1_StatusDetails(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.Table": schema_pkg_apis_meta_v1_Table(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.TableColumnDefinition": schema_pkg_apis_meta_v1_TableColumnDefinition(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.TableOptions": schema_pkg_apis_meta_v1_TableOptions(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.TableRow": schema_pkg_apis_meta_v1_TableRow(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.TableRowCondition": schema_pkg_apis_meta_v1_TableRowCondition(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.Time": schema_pkg_apis_meta_v1_Time(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.Timestamp": schema_pkg_apis_meta_v1_Timestamp(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.TypeMeta": schema_pkg_apis_meta_v1_TypeMeta(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.UpdateOptions": schema_pkg_apis_meta_v1_UpdateOptions(ref),
"k8s.io/apimachinery/pkg/apis/meta/v1.WatchEvent": schema_pkg_apis_meta_v1_WatchEvent(ref),
}
}
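// Illustrative usage sketch (not part of the generated file): callers
// typically hand GetOpenAPIDefinitions to an OpenAPI spec builder together
// with a ReferenceCallback that maps a type path to a $ref. A minimal,
// hypothetical invocation (fmt import assumed) might look like:
//
//	defs := GetOpenAPIDefinitions(func(path string) spec.Ref {
//		return spec.MustCreateRef("#/definitions/" + path) // naive ref mapping
//	})
//	fmt.Printf("loaded %d OpenAPI definitions\n", len(defs))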
func schema_pkg_apis_pingcap_v1alpha1_BRConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "BRConfig contains config for BR",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"pd": {
SchemaProps: spec.SchemaProps{
Description: "PDAddress is the PD address of the tidb cluster",
Type: []string{"string"},
Format: "",
},
},
"db": {
SchemaProps: spec.SchemaProps{
Description: "DB is the specific DB which will be backed-up or restored",
Type: []string{"string"},
Format: "",
},
},
"table": {
SchemaProps: spec.SchemaProps{
Description: "Table is the specific table which will be backed-up or restored",
Type: []string{"string"},
Format: "",
},
},
"ca": {
SchemaProps: spec.SchemaProps{
Description: "CA is the CA certificate path for TLS connection",
Type: []string{"string"},
Format: "",
},
},
"cert": {
SchemaProps: spec.SchemaProps{
Description: "Cert is the certificate path for TLS connection",
Type: []string{"string"},
Format: "",
},
},
"key": {
SchemaProps: spec.SchemaProps{
Description: "Key is the private key path for TLS connection",
Type: []string{"string"},
Format: "",
},
},
"logLevel": {
SchemaProps: spec.SchemaProps{
Description: "LogLevel is the log level",
Type: []string{"string"},
Format: "",
},
},
"statusAddr": {
SchemaProps: spec.SchemaProps{
Description: "StatusAddr is the HTTP listening address for the status report service. Set to empty string to disable",
Type: []string{"string"},
Format: "",
},
},
"concurrency": {
SchemaProps: spec.SchemaProps{
Description: "Concurrency is the size of thread pool on each node that execute the backup task",
Type: []string{"integer"},
Format: "int64",
},
},
"rateLimit": {
SchemaProps: spec.SchemaProps{
Description: "RateLimit is the rate limit of the backup task, MB/s per node",
Type: []string{"integer"},
Format: "int32",
},
},
"timeAgo": {
SchemaProps: spec.SchemaProps{
Description: "TimeAgo is the history version of the backup task, e.g. 1m, 1h",
Type: []string{"string"},
Format: "",
},
},
"checksum": {
SchemaProps: spec.SchemaProps{
Description: "Checksum specifies whether to run checksum after backup",
Type: []string{"boolean"},
Format: "",
},
},
"sendCredToTikv": {
SchemaProps: spec.SchemaProps{
Description: "SendCredToTikv specifies whether to send credentials to TiKV",
Type: []string{"boolean"},
Format: "",
},
},
"onLine": {
SchemaProps: spec.SchemaProps{
Description: "OnLine specifies whether online during restore",
Type: []string{"boolean"},
Format: "",
},
},
},
Required: []string{"pd"},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_Backup(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Backup is a backup of tidb cluster.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BackupSpec"),
},
},
},
Required: []string{"spec"},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BackupSpec"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_BackupList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "BackupList contains a list of Backup.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"items": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Backup"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Backup"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_BackupSchedule(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "BackupSchedule is a backup schedule of tidb cluster.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BackupScheduleSpec"),
},
},
},
Required: []string{"spec"},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BackupScheduleSpec"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_BackupScheduleList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "BackupScheduleList contains a list of BackupSchedule.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BackupSchedule"),
},
},
},
},
},
},
Required: []string{"metadata", "items"},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BackupSchedule", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_BackupScheduleSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "BackupScheduleSpec contains the backup schedule specification for a tidb cluster.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"schedule": {
SchemaProps: spec.SchemaProps{
Description: "Schedule specifies the cron string used for backup scheduling.",
Type: []string{"string"},
Format: "",
},
},
"pause": {
SchemaProps: spec.SchemaProps{
Description: "Pause means paused backupSchedule",
Type: []string{"boolean"},
Format: "",
},
},
"maxBackups": {
SchemaProps: spec.SchemaProps{
Description: "MaxBackups is to specify how many backups we want to keep 0 is magic number to indicate un-limited backups.",
Type: []string{"integer"},
Format: "int32",
},
},
"maxReservedTime": {
SchemaProps: spec.SchemaProps{
Description: "MaxReservedTime is to specify how long backups we want to keep.",
Type: []string{"string"},
Format: "",
},
},
"backupTemplate": {
SchemaProps: spec.SchemaProps{
Description: "BackupTemplate is the specification of the backup structure to get scheduled.",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BackupSpec"),
},
},
"storageClassName": {
SchemaProps: spec.SchemaProps{
Description: "The storageClassName of the persistent volume for Backup data storage if not storage class name set in BackupSpec. Defaults to Kubernetes default storage class.",
Type: []string{"string"},
Format: "",
},
},
"storageSize": {
SchemaProps: spec.SchemaProps{
Description: "StorageSize is the request storage size for backup job",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"schedule", "backupTemplate"},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BackupSpec"},
}
}
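// Illustrative example (not generated output): a manifest that satisfies the
// BackupScheduleSpec schema above — "schedule" and "backupTemplate" are the
// only required fields; the values below are hypothetical:
//
//	apiVersion: pingcap.com/v1alpha1
//	kind: BackupSchedule
//	metadata:
//	  name: demo-backup-schedule
//	spec:
//	  schedule: "0 2 * * *"   # cron string, daily at 02:00
//	  maxBackups: 5           # 0 would mean unlimited backups
//	  storageClassName: standard
//	  backupTemplate:
//	    backupType: full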
func schema_pkg_apis_pingcap_v1alpha1_BackupSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "BackupSpec contains the backup specification for a tidb cluster.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"from": {
SchemaProps: spec.SchemaProps{
Description: "From is the tidb cluster that needs to backup.",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBAccessConfig"),
},
},
"backupType": {
SchemaProps: spec.SchemaProps{
Description: "Type is the backup type for tidb cluster.",
Type: []string{"string"},
Format: "",
},
},
"s3": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.S3StorageProvider"),
},
},
"gcs": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.GcsStorageProvider"),
},
},
"storageClassName": {
SchemaProps: spec.SchemaProps{
Description: "The storageClassName of the persistent volume for Backup data storage. Defaults to Kubernetes default storage class.",
Type: []string{"string"},
Format: "",
},
},
"storageSize": {
SchemaProps: spec.SchemaProps{
Description: "StorageSize is the request storage size for backup job",
Type: []string{"string"},
Format: "",
},
},
"br": {
SchemaProps: spec.SchemaProps{
Description: "BRConfig is the configs for BR",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BRConfig"),
},
},
"tolerations": {
SchemaProps: spec.SchemaProps{
Description: "Base tolerations of backup Pods, components may add more tolerations upon this respectively",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.Toleration"),
},
},
},
},
},
"affinity": {
SchemaProps: spec.SchemaProps{
Description: "Affinity of backup Pods",
Ref: ref("k8s.io/api/core/v1.Affinity"),
},
},
},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BRConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.GcsStorageProvider", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.S3StorageProvider", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBAccessConfig", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Toleration"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_BasicAutoScalerSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "BasicAutoScalerSpec describes the basic spec for auto-scaling",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"maxReplicas": {
SchemaProps: spec.SchemaProps{
Description: "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale out. It cannot be less than minReplicas.",
Type: []string{"integer"},
Format: "int32",
},
},
"minReplicas": {
SchemaProps: spec.SchemaProps{
Description: "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. Scaling is active as long as at least one metric value is available.",
Type: []string{"integer"},
Format: "int32",
},
},
"scaleInIntervalSeconds": {
SchemaProps: spec.SchemaProps{
Description: "ScaleInIntervalSeconds represents the duration seconds between each auto-scaling-in If not set, the default ScaleInIntervalSeconds will be set to 500",
Type: []string{"integer"},
Format: "int32",
},
},
"scaleOutIntervalSeconds": {
SchemaProps: spec.SchemaProps{
Description: "ScaleOutIntervalSeconds represents the duration seconds between each auto-scaling-out If not set, the default ScaleOutIntervalSeconds will be set to 300",
Type: []string{"integer"},
Format: "int32",
},
},
"metrics": {
SchemaProps: spec.SchemaProps{
Description: "metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond. If not set, the default metric will be set to 80% average CPU utilization.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/autoscaling/v2beta2.MetricSpec"),
},
},
},
},
},
"metricsTimeDuration": {
SchemaProps: spec.SchemaProps{
Description: "MetricsTimeDuration describe the Time duration to be queried in the Prometheus",
Type: []string{"string"},
Format: "",
},
},
"scaleOutThreshold": {
SchemaProps: spec.SchemaProps{
Description: "ScaleOutThreshold describe the consecutive threshold for the auto-scaling, if the consecutive counts of the scale-out result in auto-scaling reach this number, the auto-scaling would be performed. If not set, the default value is 3.",
Type: []string{"integer"},
Format: "int32",
},
},
"scaleInThreshold": {
SchemaProps: spec.SchemaProps{
Description: "ScaleInThreshold describe the consecutive threshold for the auto-scaling, if the consecutive counts of the scale-in result in auto-scaling reach this number, the auto-scaling would be performed. If not set, the default value is 5.",
Type: []string{"integer"},
Format: "int32",
},
},
},
Required: []string{"maxReplicas"},
},
},
Dependencies: []string{
"k8s.io/api/autoscaling/v2beta2.MetricSpec"},
}
}
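// Illustrative example (not generated output): BasicAutoScalerSpec in YAML
// form — only "maxReplicas" is required; unset fields fall back to the
// defaults documented above (minReplicas 1, scaleInIntervalSeconds 500,
// scaleOutIntervalSeconds 300, thresholds 5/3, 80% average CPU utilization).
// Values are hypothetical:
//
//	maxReplicas: 8
//	minReplicas: 2
//	metricsTimeDuration: "3m"
//	scaleOutThreshold: 3
//	scaleInThreshold: 5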
func schema_pkg_apis_pingcap_v1alpha1_Binlog(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Binlog is the config for binlog.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"write-timeout": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 15s",
Type: []string{"string"},
Format: "",
},
},
"ignore-error": {
SchemaProps: spec.SchemaProps{
Description: "If IgnoreError is true, when writing binlog meets error, TiDB would ignore the error.",
Type: []string{"boolean"},
Format: "",
},
},
"binlog-socket": {
SchemaProps: spec.SchemaProps{
Description: "Use socket file to write binlog, for compatible with kafka version tidb-binlog.",
Type: []string{"string"},
Format: "",
},
},
"strategy": {
SchemaProps: spec.SchemaProps{
Description: "The strategy for sending binlog to pump, value can be \"range,omitempty\" or \"hash,omitempty\" now. Optional: Defaults to range",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_ComponentSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ComponentSpec is the base spec of each component, the fields should always accessed by the Basic<Component>Spec() method to respect the cluster-level properties",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"version": {
SchemaProps: spec.SchemaProps{
Description: "Version of the component. Override the cluster-level version if non-empty Optional: Defaults to cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
"imagePullPolicy": {
SchemaProps: spec.SchemaProps{
Description: "ImagePullPolicy of the component. Override the cluster-level imagePullPolicy if present Optional: Defaults to cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
"hostNetwork": {
SchemaProps: spec.SchemaProps{
Description: "Whether Hostnetwork of the component is enabled. Override the cluster-level setting if present Optional: Defaults to cluster-level setting",
Type: []string{"boolean"},
Format: "",
},
},
"affinity": {
SchemaProps: spec.SchemaProps{
Description: "Affinity of the component. Override the cluster-level one if present Optional: Defaults to cluster-level setting",
Ref: ref("k8s.io/api/core/v1.Affinity"),
},
},
"priorityClassName": {
SchemaProps: spec.SchemaProps{
Description: "PriorityClassName of the component. Override the cluster-level one if present Optional: Defaults to cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
"schedulerName": {
SchemaProps: spec.SchemaProps{
Description: "SchedulerName of the component. Override the cluster-level one if present Optional: Defaults to cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
"nodeSelector": {
SchemaProps: spec.SchemaProps{
Description: "NodeSelector of the component. Merged into the cluster-level nodeSelector if non-empty Optional: Defaults to cluster-level setting",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"annotations": {
SchemaProps: spec.SchemaProps{
Description: "Annotations of the component. Merged into the cluster-level annotations if non-empty Optional: Defaults to cluster-level setting",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"tolerations": {
SchemaProps: spec.SchemaProps{
Description: "Tolerations of the component. Override the cluster-level tolerations if non-empty Optional: Defaults to cluster-level setting",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.Toleration"),
},
},
},
},
},
"podSecurityContext": {
SchemaProps: spec.SchemaProps{
Description: "PodSecurityContext of the component",
Ref: ref("k8s.io/api/core/v1.PodSecurityContext"),
},
},
"configUpdateStrategy": {
SchemaProps: spec.SchemaProps{
Description: "ConfigUpdateStrategy of the component. Override the cluster-level updateStrategy if present Optional: Defaults to cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_FileLogConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"filename": {
SchemaProps: spec.SchemaProps{
Description: "Log filename, leave empty to disable file log.",
Type: []string{"string"},
Format: "",
},
},
"log-rotate": {
SchemaProps: spec.SchemaProps{
Description: "Is log rotate enabled.",
Type: []string{"boolean"},
Format: "",
},
},
"max-size": {
SchemaProps: spec.SchemaProps{
Description: "Max size for a single file, in MB.",
Type: []string{"integer"},
Format: "int32",
},
},
"max-days": {
SchemaProps: spec.SchemaProps{
Description: "Max log keep days, default is never deleting.",
Type: []string{"integer"},
Format: "int32",
},
},
"max-backups": {
SchemaProps: spec.SchemaProps{
Description: "Maximum number of old log files to retain.",
Type: []string{"integer"},
Format: "int32",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_GcsStorageProvider(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "GcsStorageProvider represents the google cloud storage for storing backups.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"projectId": {
SchemaProps: spec.SchemaProps{
Description: "ProjectId represents the project that organizes all your Google Cloud Platform resources",
Type: []string{"string"},
Format: "",
},
},
"location": {
SchemaProps: spec.SchemaProps{
Description: "Location in which the gcs bucket is located.",
Type: []string{"string"},
Format: "",
},
},
"path": {
SchemaProps: spec.SchemaProps{
Description: "Path is the full path where the backup is saved. The format of the path must be: \"<bucket-name>/<path-to-backup-file>\"",
Type: []string{"string"},
Format: "",
},
},
"bucket": {
SchemaProps: spec.SchemaProps{
Description: "Bucket in which to store the backup data.",
Type: []string{"string"},
Format: "",
},
},
"storageClass": {
SchemaProps: spec.SchemaProps{
Description: "StorageClass represents the storage class",
Type: []string{"string"},
Format: "",
},
},
"objectAcl": {
SchemaProps: spec.SchemaProps{
Description: "ObjectAcl represents the access control list for new objects",
Type: []string{"string"},
Format: "",
},
},
"bucketAcl": {
SchemaProps: spec.SchemaProps{
Description: "BucketAcl represents the access control list for new buckets",
Type: []string{"string"},
Format: "",
},
},
"secretName": {
SchemaProps: spec.SchemaProps{
Description: "SecretName is the name of secret which stores the gcs service account credentials JSON .",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"projectId", "secretName"},
},
},
}
}
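// Illustrative example (not generated output): a gcs storage block matching
// the schema above — "projectId" and "secretName" are required, and "path"
// (when used) must take the "<bucket-name>/<path-to-backup-file>" form.
// Names below are hypothetical:
//
//	gcs:
//	  projectId: my-gcp-project
//	  location: us-west1
//	  bucket: my-backup-bucket
//	  storageClass: STANDARD
//	  secretName: gcs-service-account-secret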
func schema_pkg_apis_pingcap_v1alpha1_HelperSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "HelperSpec contains details of helper component",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"image": {
SchemaProps: spec.SchemaProps{
Description: "Image used to tail slow log and set kernel parameters if necessary, must have `tail` and `sysctl` installed Optional: Defaults to busybox:1.26.2",
Type: []string{"string"},
Format: "",
},
},
"imagePullPolicy": {
SchemaProps: spec.SchemaProps{
Description: "ImagePullPolicy of the component. Override the cluster-level imagePullPolicy if present Optional: Defaults to the cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_Log(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Log is the log section of config.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"level": {
SchemaProps: spec.SchemaProps{
Description: "Log level. Optional: Defaults to info",
Type: []string{"string"},
Format: "",
},
},
"format": {
SchemaProps: spec.SchemaProps{
Description: "Log format. one of json, text, or console. Optional: Defaults to text",
Type: []string{"string"},
Format: "",
},
},
"disable-timestamp": {
SchemaProps: spec.SchemaProps{
Description: "Disable automatic timestamps in output.",
Type: []string{"boolean"},
Format: "",
},
},
"file": {
SchemaProps: spec.SchemaProps{
Description: "File log config.",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.FileLogConfig"),
},
},
"slow-threshold": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 300",
Type: []string{"integer"},
Format: "int64",
},
},
"expensive-threshold": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 10000",
Type: []string{"integer"},
Format: "int32",
},
},
"query-log-max-len": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 2048",
Type: []string{"integer"},
Format: "int64",
},
},
"record-plan-in-slow-log": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 1",
Type: []string{"integer"},
Format: "int64",
},
},
},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.FileLogConfig"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_MonitorContainer(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "MonitorContainer is the common attributes of the container of monitoring",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"limits": {
SchemaProps: spec.SchemaProps{
Description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
"requests": {
SchemaProps: spec.SchemaProps{
Description: "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
"baseImage": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"version": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"imagePullPolicy": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/api/resource.Quantity"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_OpenTracing(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "OpenTracing is the opentracing section of the config.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"enable": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to false",
Type: []string{"boolean"},
Format: "",
},
},
"sampler": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.OpenTracingSampler"),
},
},
"reporter": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.OpenTracingReporter"),
},
},
"rpc-metrics": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.OpenTracingReporter", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.OpenTracingSampler"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_OpenTracingReporter(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "OpenTracingReporter is the config for opentracing reporter. See https://godoc.org/github.com/uber/jaeger-client-go/config#ReporterConfig",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"queue-size": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int32",
},
},
"buffer-flush-interval": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"log-spans": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"local-agent-host-port": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_OpenTracingSampler(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "OpenTracingSampler is the config for opentracing sampler. See https://godoc.org/github.com/uber/jaeger-client-go/config#SamplerConfig",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"type": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"param": {
SchemaProps: spec.SchemaProps{
Type: []string{"number"},
Format: "double",
},
},
"sampling-server-url": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"max-operations": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int32",
},
},
"sampling-refresh-interval": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_PDConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PDConfig is the configuration of pd-server",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"force-new-cluster": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"enable-grpc-gateway": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to true",
Type: []string{"boolean"},
Format: "",
},
},
"lease": {
SchemaProps: spec.SchemaProps{
Description: "LeaderLease time, if leader doesn't update its TTL in etcd after lease time, etcd will expire the leader key and other servers can campaign the leader again. Etcd only supports seconds TTL, so here is second too. Optional: Defaults to 3",
Type: []string{"integer"},
Format: "int64",
},
},
"log": {
SchemaProps: spec.SchemaProps{
Description: "Log related config.",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDLogConfig"),
},
},
"log-file": {
SchemaProps: spec.SchemaProps{
Description: "Backward compatibility.",
Type: []string{"string"},
Format: "",
},
},
"log-level": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"tso-save-interval": {
SchemaProps: spec.SchemaProps{
Description: "TsoSaveInterval is the interval to save timestamp. Optional: Defaults to 3s",
Type: []string{"string"},
Format: "",
},
},
"metric": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDMetricConfig"),
},
},
"schedule": {
SchemaProps: spec.SchemaProps{
Description: "Immutable, change should be made through pd-ctl after cluster creation",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDScheduleConfig"),
},
},
"replication": {
SchemaProps: spec.SchemaProps{
Description: "Immutable, change should be made through pd-ctl after cluster creation",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDReplicationConfig"),
},
},
"namespace": {
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDNamespaceConfig"),
},
},
},
},
},
"pd-server": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDServerConfig"),
},
},
"cluster-version": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"quota-backend-bytes": {
SchemaProps: spec.SchemaProps{
Description: "QuotaBackendBytes Raise alarms when backend size exceeds the given quota. 0 means use the default quota. the default size is 2GB, the maximum is 8GB.",
Type: []string{"string"},
Format: "",
},
},
"auto-compaction-mode": {
SchemaProps: spec.SchemaProps{
Description: "AutoCompactionMode is either 'periodic' or 'revision'. The default value is 'periodic'.",
Type: []string{"string"},
Format: "",
},
},
"auto-compaction-retention-v2": {
SchemaProps: spec.SchemaProps{
Description: "AutoCompactionRetention is either duration string with time unit (e.g. '5m' for 5-minute), or revision unit (e.g. '5000'). If no time unit is provided and compaction mode is 'periodic', the unit defaults to hour. For example, '5' translates into 5-hour. The default retention is 1 hour. Before etcd v3.3.x, the type of retention is int. We add 'v2' suffix to make it backward compatible.",
Type: []string{"string"},
Format: "",
},
},
"tikv-interval": {
SchemaProps: spec.SchemaProps{
Description: "TickInterval is the interval for etcd Raft tick.",
Type: []string{"string"},
Format: "",
},
},
"election-interval": {
SchemaProps: spec.SchemaProps{
Description: "ElectionInterval is the interval for etcd Raft election.",
Type: []string{"string"},
Format: "",
},
},
"enable-prevote": {
SchemaProps: spec.SchemaProps{
Description: "Prevote is true to enable Raft Pre-Vote. If enabled, Raft runs an additional election phase to check whether it would get enough votes to win an election, thus minimizing disruptions. Optional: Defaults to true",
Type: []string{"boolean"},
Format: "",
},
},
"security": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDSecurityConfig"),
},
},
"label-property": {
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDStoreLabel"),
},
},
},
},
},
},
},
},
"namespace-classifier": {
SchemaProps: spec.SchemaProps{
Description: "NamespaceClassifier is for classifying stores/regions into different namespaces. Optional: Defaults to true",
Type: []string{"string"},
Format: "",
},
},
},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDLogConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDMetricConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDNamespaceConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDReplicationConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDScheduleConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDSecurityConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDServerConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDStoreLabel"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_PDLogConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PDLogConfig serializes log related config in toml/json.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"level": {
SchemaProps: spec.SchemaProps{
Description: "Log level. Optional: Defaults to info",
Type: []string{"string"},
Format: "",
},
},
"format": {
SchemaProps: spec.SchemaProps{
Description: "Log format. one of json, text, or console.",
Type: []string{"string"},
Format: "",
},
},
"disable-timestamp": {
SchemaProps: spec.SchemaProps{
Description: "Disable automatic timestamps in output.",
Type: []string{"boolean"},
Format: "",
},
},
"file": {
SchemaProps: spec.SchemaProps{
Description: "File log config.",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.FileLogConfig"),
},
},
"development": {
SchemaProps: spec.SchemaProps{
Description: "Development puts the logger in development mode, which changes the behavior of DPanicLevel and takes stacktraces more liberally.",
Type: []string{"boolean"},
Format: "",
},
},
"disable-caller": {
SchemaProps: spec.SchemaProps{
Description: "DisableCaller stops annotating logs with the calling function's file name and line number. By default, all logs are annotated.",
Type: []string{"boolean"},
Format: "",
},
},
"disable-stacktrace": {
SchemaProps: spec.SchemaProps{
Description: "DisableStacktrace completely disables automatic stacktrace capturing. By default, stacktraces are captured for WarnLevel and above logs in development and ErrorLevel and above in production.",
Type: []string{"boolean"},
Format: "",
},
},
"disable-error-verbose": {
SchemaProps: spec.SchemaProps{
Description: "DisableErrorVerbose stops annotating logs with the full verbose error message.",
Type: []string{"boolean"},
Format: "",
},
},
},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.FileLogConfig"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_PDMetricConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"job": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"address": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"interval": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_PDNamespaceConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PDNamespaceConfig is to overwrite the global setting for specific namespace",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"leader-schedule-limit": {
SchemaProps: spec.SchemaProps{
Description: "LeaderScheduleLimit is the max coexist leader schedules.",
Type: []string{"integer"},
Format: "int64",
},
},
"region-schedule-limit": {
SchemaProps: spec.SchemaProps{
Description: "RegionScheduleLimit is the max coexist region schedules.",
Type: []string{"integer"},
Format: "int64",
},
},
"replica-schedule-limit": {
SchemaProps: spec.SchemaProps{
Description: "ReplicaScheduleLimit is the max coexist replica schedules.",
Type: []string{"integer"},
Format: "int64",
},
},
"merge-schedule-limit": {
SchemaProps: spec.SchemaProps{
Description: "MergeScheduleLimit is the max coexist merge schedules.",
Type: []string{"integer"},
Format: "int64",
},
},
"hot-region-schedule-limit": {
SchemaProps: spec.SchemaProps{
Description: "HotRegionScheduleLimit is the max coexist hot region schedules.",
Type: []string{"integer"},
Format: "int64",
},
},
"max-replicas": {
SchemaProps: spec.SchemaProps{
Description: "MaxReplicas is the number of replicas for each region.",
Type: []string{"integer"},
Format: "int64",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_PDReplicationConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PDReplicationConfig is the replication configuration.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"max-replicas": {
SchemaProps: spec.SchemaProps{
Description: "MaxReplicas is the number of replicas for each region. Immutable, change should be made through pd-ctl after cluster creation Optional: Defaults to 3",
Type: []string{"integer"},
Format: "int64",
},
},
"strictly-match-label": {
SchemaProps: spec.SchemaProps{
Description: "StrictlyMatchLabel strictly checks if the label of TiKV is matched with LocaltionLabels. Immutable, change should be made through pd-ctl after cluster creation",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_PDScheduleConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ScheduleConfig is the schedule configuration.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"max-snapshot-count": {
SchemaProps: spec.SchemaProps{
Description: "If the snapshot count of one store is greater than this value, it will never be used as a source or target store. Immutable, change should be made through pd-ctl after cluster creation Optional: Defaults to 3",
Type: []string{"integer"},
Format: "int64",
},
},
"max-pending-peer-count": {
SchemaProps: spec.SchemaProps{
Description: "Immutable, change should be made through pd-ctl after cluster creation Optional: Defaults to 16",
Type: []string{"integer"},
Format: "int64",
},
},
"max-merge-region-size": {
SchemaProps: spec.SchemaProps{
Description: "If both the size of region is smaller than MaxMergeRegionSize and the number of rows in region is smaller than MaxMergeRegionKeys, it will try to merge with adjacent regions. Immutable, change should be made through pd-ctl after cluster creation Optional: Defaults to 20",
Type: []string{"integer"},
Format: "int64",
},
},
"max-merge-region-keys": {
SchemaProps: spec.SchemaProps{
Description: "Immutable, change should be made through pd-ctl after cluster creation Optional: Defaults to 200000",
Type: []string{"integer"},
Format: "int64",
},
},
"split-merge-interval": {
SchemaProps: spec.SchemaProps{
Description: "SplitMergeInterval is the minimum interval time to permit merge after split. Immutable, change should be made through pd-ctl after cluster creation Optional: Defaults to 1h",
Type: []string{"string"},
Format: "",
},
},
"patrol-region-interval": {
SchemaProps: spec.SchemaProps{
Description: "PatrolRegionInterval is the interval for scanning region during patrol. Immutable, change should be made through pd-ctl after cluster creation",
Type: []string{"string"},
Format: "",
},
},
"max-store-down-time": {
SchemaProps: spec.SchemaProps{
Description: "MaxStoreDownTime is the max duration after which a store will be considered to be down if it hasn't reported heartbeats. Immutable, change should be made through pd-ctl after cluster creation Optional: Defaults to 30m",
Type: []string{"string"},
Format: "",
},
},
"leader-schedule-limit": {
SchemaProps: spec.SchemaProps{
Description: "LeaderScheduleLimit is the max coexist leader schedules. Immutable, change should be made through pd-ctl after cluster creation Optional: Defaults to 4",
Type: []string{"integer"},
Format: "int64",
},
},
"region-schedule-limit": {
SchemaProps: spec.SchemaProps{
Description: "RegionScheduleLimit is the max coexist region schedules. Immutable, change should be made through pd-ctl after cluster creation Optional: Defaults to 2048",
Type: []string{"integer"},
Format: "int64",
},
},
"replica-schedule-limit": {
SchemaProps: spec.SchemaProps{
Description: "ReplicaScheduleLimit is the max coexist replica schedules. Immutable, change should be made through pd-ctl after cluster creation Optional: Defaults to 64",
Type: []string{"integer"},
Format: "int64",
},
},
"merge-schedule-limit": {
SchemaProps: spec.SchemaProps{
Description: "MergeScheduleLimit is the max coexist merge schedules. Immutable, change should be made through pd-ctl after cluster creation Optional: Defaults to 8",
Type: []string{"integer"},
Format: "int64",
},
},
"hot-region-schedule-limit": {
SchemaProps: spec.SchemaProps{
Description: "HotRegionScheduleLimit is the max coexist hot region schedules. Immutable, change should be made through pd-ctl after cluster creation Optional: Defaults to 4",
Type: []string{"integer"},
Format: "int64",
},
},
"hot-region-cache-hits-threshold": {
SchemaProps: spec.SchemaProps{
Description: "HotRegionCacheHitThreshold is the cache hits threshold of the hot region. If the number of times a region hits the hot cache is greater than this threshold, it is considered a hot region. Immutable, change should be made through pd-ctl after cluster creation",
Type: []string{"integer"},
Format: "int64",
},
},
"tolerant-size-ratio": {
SchemaProps: spec.SchemaProps{
Description: "TolerantSizeRatio is the ratio of buffer size for balance scheduler. Immutable, change should be made through pd-ctl after cluster creation",
Type: []string{"number"},
Format: "double",
},
},
"low-space-ratio": {
SchemaProps: spec.SchemaProps{
Description: "\n high space stage transition stage low space stage\n |--------------------|-----------------------------|-------------------------|\n ^ ^ ^ ^\n 0 HighSpaceRatio * capacity LowSpaceRatio * capacity capacity\n\nLowSpaceRatio is the lowest usage ratio of store which regraded as low space. When in low space, store region score increases to very large and varies inversely with available size. Immutable, change should be made through pd-ctl after cluster creation",
Type: []string{"number"},
Format: "double",
},
},
"high-space-ratio": {
SchemaProps: spec.SchemaProps{
Description: "HighSpaceRatio is the highest usage ratio of store which regraded as high space. High space means there is a lot of spare capacity, and store region score varies directly with used size. Immutable, change should be made through pd-ctl after cluster creation",
Type: []string{"number"},
Format: "double",
},
},
"disable-raft-learner": {
SchemaProps: spec.SchemaProps{
Description: "DisableLearner is the option to disable using AddLearnerNode instead of AddNode Immutable, change should be made through pd-ctl after cluster creation",
Type: []string{"string"},
Format: "",
},
},
"disable-remove-down-replica": {
SchemaProps: spec.SchemaProps{
Description: "DisableRemoveDownReplica is the option to prevent replica checker from removing down replicas. Immutable, change should be made through pd-ctl after cluster creation",
Type: []string{"string"},
Format: "",
},
},
"disable-replace-offline-replica": {
SchemaProps: spec.SchemaProps{
Description: "DisableReplaceOfflineReplica is the option to prevent replica checker from repalcing offline replicas. Immutable, change should be made through pd-ctl after cluster creation",
Type: []string{"string"},
Format: "",
},
},
"disable-make-up-replica": {
SchemaProps: spec.SchemaProps{
Description: "DisableMakeUpReplica is the option to prevent replica checker from making up replicas when replica count is less than expected. Immutable, change should be made through pd-ctl after cluster creation",
Type: []string{"string"},
Format: "",
},
},
"disable-remove-extra-replica": {
SchemaProps: spec.SchemaProps{
Description: "DisableRemoveExtraReplica is the option to prevent replica checker from removing extra replicas. Immutable, change should be made through pd-ctl after cluster creation",
Type: []string{"string"},
Format: "",
},
},
"disable-location-replacement": {
SchemaProps: spec.SchemaProps{
Description: "DisableLocationReplacement is the option to prevent replica checker from moving replica to a better location. Immutable, change should be made through pd-ctl after cluster creation",
Type: []string{"string"},
Format: "",
},
},
"disable-namespace-relocation": {
SchemaProps: spec.SchemaProps{
Description: "DisableNamespaceRelocation is the option to prevent namespace checker from moving replica to the target namespace. Immutable, change should be made through pd-ctl after cluster creation",
Type: []string{"string"},
Format: "",
},
},
"schedulers-v2": {
SchemaProps: spec.SchemaProps{
Description: "Schedulers support for loding customized schedulers Immutable, change should be made through pd-ctl after cluster creation",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDSchedulerConfig"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDSchedulerConfig"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_PDSchedulerConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PDSchedulerConfig is customized scheduler configuration",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"type": {
SchemaProps: spec.SchemaProps{
Description: "Immutable, change should be made through pd-ctl after cluster creation",
Type: []string{"string"},
Format: "",
},
},
"args": {
SchemaProps: spec.SchemaProps{
Description: "Immutable, change should be made through pd-ctl after cluster creation",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"disable": {
SchemaProps: spec.SchemaProps{
Description: "Immutable, change should be made through pd-ctl after cluster creation",
Type: []string{"boolean"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_PDSecurityConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PDSecurityConfig is the configuration for supporting tls.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"cacert-path": {
SchemaProps: spec.SchemaProps{
Description: "CAPath is the path of file that contains list of trusted SSL CAs. if set, following four settings shouldn't be empty",
Type: []string{"string"},
Format: "",
},
},
"cert-path": {
SchemaProps: spec.SchemaProps{
Description: "CertPath is the path of file that contains X509 certificate in PEM format.",
Type: []string{"string"},
Format: "",
},
},
"key-path": {
SchemaProps: spec.SchemaProps{
Description: "KeyPath is the path of file that contains X509 key in PEM format.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_PDServerConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PDServerConfig is the configuration for pd server.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"use-region-storage": {
SchemaProps: spec.SchemaProps{
Description: "UseRegionStorage enables the independent region storage.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_PDSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PDSpec contains details of PD members",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"version": {
SchemaProps: spec.SchemaProps{
Description: "Version of the component. Override the cluster-level version if non-empty Optional: Defaults to cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
"imagePullPolicy": {
SchemaProps: spec.SchemaProps{
Description: "ImagePullPolicy of the component. Override the cluster-level imagePullPolicy if present Optional: Defaults to cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
"hostNetwork": {
SchemaProps: spec.SchemaProps{
Description: "Whether Hostnetwork of the component is enabled. Override the cluster-level setting if present Optional: Defaults to cluster-level setting",
Type: []string{"boolean"},
Format: "",
},
},
"affinity": {
SchemaProps: spec.SchemaProps{
Description: "Affinity of the component. Override the cluster-level one if present Optional: Defaults to cluster-level setting",
Ref: ref("k8s.io/api/core/v1.Affinity"),
},
},
"priorityClassName": {
SchemaProps: spec.SchemaProps{
Description: "PriorityClassName of the component. Override the cluster-level one if present Optional: Defaults to cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
"schedulerName": {
SchemaProps: spec.SchemaProps{
Description: "SchedulerName of the component. Override the cluster-level one if present Optional: Defaults to cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
"nodeSelector": {
SchemaProps: spec.SchemaProps{
Description: "NodeSelector of the component. Merged into the cluster-level nodeSelector if non-empty Optional: Defaults to cluster-level setting",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"annotations": {
SchemaProps: spec.SchemaProps{
Description: "Annotations of the component. Merged into the cluster-level annotations if non-empty Optional: Defaults to cluster-level setting",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"tolerations": {
SchemaProps: spec.SchemaProps{
Description: "Tolerations of the component. Override the cluster-level tolerations if non-empty Optional: Defaults to cluster-level setting",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.Toleration"),
},
},
},
},
},
"podSecurityContext": {
SchemaProps: spec.SchemaProps{
Description: "PodSecurityContext of the component",
Ref: ref("k8s.io/api/core/v1.PodSecurityContext"),
},
},
"configUpdateStrategy": {
SchemaProps: spec.SchemaProps{
Description: "ConfigUpdateStrategy of the component. Override the cluster-level updateStrategy if present Optional: Defaults to cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
"limits": {
SchemaProps: spec.SchemaProps{
Description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
"requests": {
SchemaProps: spec.SchemaProps{
Description: "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
"replicas": {
SchemaProps: spec.SchemaProps{
Description: "The desired ready replicas",
Type: []string{"integer"},
Format: "int32",
},
},
"baseImage": {
SchemaProps: spec.SchemaProps{
Description: "Base image of the component, image tag is now allowed during validation",
Type: []string{"string"},
Format: "",
},
},
"service": {
SchemaProps: spec.SchemaProps{
Description: "Service defines a Kubernetes service of PD cluster. Optional: Defaults to `.spec.services` in favor of backward compatibility",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ServiceSpec"),
},
},
"storageClassName": {
SchemaProps: spec.SchemaProps{
Description: "The storageClassName of the persistent volume for PD data storage. Defaults to Kubernetes default storage class.",
Type: []string{"string"},
Format: "",
},
},
"config": {
SchemaProps: spec.SchemaProps{
Description: "Config is the Configuration of pd-servers",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDConfig"),
},
},
},
Required: []string{"replicas"},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ServiceSpec", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/apimachinery/pkg/api/resource.Quantity"},
}
}
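// Illustrative example (not generated output): a PD section conforming to
// PDSpec above — "replicas" is the only required field, "baseImage" is
// tag-less because the image tag is derived from "version", and "config"
// nests the typed PDConfig schema. Values are hypothetical:
//
//	pd:
//	  replicas: 3
//	  baseImage: pingcap/pd
//	  version: v3.0.8
//	  storageClassName: local-storage
//	  config:
//	    log:
//	      level: info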
func schema_pkg_apis_pingcap_v1alpha1_PDStoreLabel(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PDStoreLabel is the config item of LabelPropertyConfig.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"key": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"value": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_Performance(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Performance is the performance section of the config.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"max-procs": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int32",
},
},
"max-memory": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 0",
Type: []string{"integer"},
Format: "int64",
},
},
"tcp-keep-alive": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to true",
Type: []string{"boolean"},
Format: "",
},
},
"cross-join": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to true",
Type: []string{"boolean"},
Format: "",
},
},
"stats-lease": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 3s",
Type: []string{"string"},
Format: "",
},
},
"run-auto-analyze": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to true",
Type: []string{"boolean"},
Format: "",
},
},
"stmt-count-limit": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 5000",
Type: []string{"integer"},
Format: "int32",
},
},
"feedback-probability": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 0.05",
Type: []string{"number"},
Format: "double",
},
},
"query-feedback-limit": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 1024",
Type: []string{"integer"},
Format: "int32",
},
},
"pseudo-estimate-ratio": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 0.8",
Type: []string{"number"},
Format: "double",
},
},
"force-priority": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to NO_PRIORITY",
Type: []string{"string"},
Format: "",
},
},
"bind-info-lease": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 3s",
Type: []string{"string"},
Format: "",
},
},
"txn-entry-count-limit": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 300000",
Type: []string{"integer"},
Format: "int64",
},
},
"txn-total-size-limit": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 104857600",
Type: []string{"integer"},
Format: "int64",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_PessimisticTxn(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PessimisticTxn is the config for pessimistic transaction.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"enable": {
SchemaProps: spec.SchemaProps{
Description: "Enable must be true for 'begin lock' or session variable to start a pessimistic transaction. Optional: Defaults to true",
Type: []string{"boolean"},
Format: "",
},
},
"max-retry-count": {
SchemaProps: spec.SchemaProps{
Description: "The max count of retry for a single statement in a pessimistic transaction. Optional: Defaults to 256",
Type: []string{"integer"},
Format: "int32",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_PlanCache(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PlanCache is the PlanCache section of the config.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"enabled": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"capacity": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int32",
},
},
"shards": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int32",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_Plugin(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Plugin is the config for plugin",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"dir": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"load": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_PreparedPlanCache(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PreparedPlanCache is the PreparedPlanCache section of the config.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"enabled": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to false",
Type: []string{"boolean"},
Format: "",
},
},
"capacity": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 100",
Type: []string{"integer"},
Format: "int32",
},
},
"memory-guard-ratio": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 0.1",
Type: []string{"number"},
Format: "double",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_ProxyProtocol(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ProxyProtocol is the PROXY protocol section of the config.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"networks": {
SchemaProps: spec.SchemaProps{
Description: "PROXY protocol acceptable client networks. Empty *string means disable PROXY protocol, * means all networks.",
Type: []string{"string"},
Format: "",
},
},
"header-timeout": {
SchemaProps: spec.SchemaProps{
Description: "PROXY protocol header read timeout, Unit is second.",
Type: []string{"integer"},
Format: "int32",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_PumpSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PumpSpec contains details of Pump members",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"version": {
SchemaProps: spec.SchemaProps{
Description: "Version of the component. Override the cluster-level version if non-empty Optional: Defaults to cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
"imagePullPolicy": {
SchemaProps: spec.SchemaProps{
Description: "ImagePullPolicy of the component. Override the cluster-level imagePullPolicy if present Optional: Defaults to cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
"hostNetwork": {
SchemaProps: spec.SchemaProps{
Description: "Whether Hostnetwork of the component is enabled. Override the cluster-level setting if present Optional: Defaults to cluster-level setting",
Type: []string{"boolean"},
Format: "",
},
},
"affinity": {
SchemaProps: spec.SchemaProps{
Description: "Affinity of the component. Override the cluster-level one if present Optional: Defaults to cluster-level setting",
Ref: ref("k8s.io/api/core/v1.Affinity"),
},
},
"priorityClassName": {
SchemaProps: spec.SchemaProps{
Description: "PriorityClassName of the component. Override the cluster-level one if present Optional: Defaults to cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
"schedulerName": {
SchemaProps: spec.SchemaProps{
Description: "SchedulerName of the component. Override the cluster-level one if present Optional: Defaults to cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
"nodeSelector": {
SchemaProps: spec.SchemaProps{
Description: "NodeSelector of the component. Merged into the cluster-level nodeSelector if non-empty Optional: Defaults to cluster-level setting",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"annotations": {
SchemaProps: spec.SchemaProps{
Description: "Annotations of the component. Merged into the cluster-level annotations if non-empty Optional: Defaults to cluster-level setting",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"tolerations": {
SchemaProps: spec.SchemaProps{
Description: "Tolerations of the component. Override the cluster-level tolerations if non-empty Optional: Defaults to cluster-level setting",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.Toleration"),
},
},
},
},
},
"podSecurityContext": {
SchemaProps: spec.SchemaProps{
Description: "PodSecurityContext of the component",
Ref: ref("k8s.io/api/core/v1.PodSecurityContext"),
},
},
"configUpdateStrategy": {
SchemaProps: spec.SchemaProps{
Description: "ConfigUpdateStrategy of the component. Override the cluster-level updateStrategy if present Optional: Defaults to cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
"limits": {
SchemaProps: spec.SchemaProps{
Description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
"requests": {
SchemaProps: spec.SchemaProps{
Description: "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
"replicas": {
SchemaProps: spec.SchemaProps{
Description: "The desired ready replicas",
Type: []string{"integer"},
Format: "int32",
},
},
"baseImage": {
SchemaProps: spec.SchemaProps{
Description: "Base image of the component, image tag is now allowed during validation",
Type: []string{"string"},
Format: "",
},
},
"storageClassName": {
SchemaProps: spec.SchemaProps{
Description: "The storageClassName of the persistent volume for Pump data storage. Defaults to Kubernetes default storage class.",
Type: []string{"string"},
Format: "",
},
},
"config": {
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Format: "",
},
},
},
},
},
},
Required: []string{"replicas"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/apimachinery/pkg/api/resource.Quantity"},
}
}
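// An illustrative Restore manifest skeleton matching the schema below
// ("pingcap.com/v1alpha1" is assumed from the package path; "spec" is the
// only property this schema marks as required, and all field values here
// are placeholders):
//
//	apiVersion: pingcap.com/v1alpha1
//	kind: Restore
//	metadata:
//	  name: demo-restore
//	spec:
//	  to:
//	    host: basic-tidb.default.svc
//	    secretName: restore-secret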
func schema_pkg_apis_pingcap_v1alpha1_Restore(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Restore represents the restoration of backup of a tidb cluster.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.RestoreSpec"),
},
},
},
Required: []string{"spec"},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.RestoreSpec"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_RestoreList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "RestoreList contains a list of Restore.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"items": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Restore"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Restore"},
}
}
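// A hypothetical RestoreSpec fragment exercising the common fields defined
// below (all values are placeholders; the schema marks no field as required):
//
//	to:
//	  host: basic-tidb.default.svc
//	  port: 4000
//	  secretName: restore-secret
//	s3:
//	  provider: aws
//	  bucket: my-backup-bucket
//	  secretName: s3-credentials
//	storageClassName: standard
//	storageSize: 10Gi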
func schema_pkg_apis_pingcap_v1alpha1_RestoreSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "RestoreSpec contains the specification for a restore of a tidb cluster backup.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"to": {
SchemaProps: spec.SchemaProps{
Description: "To is the tidb cluster that needs to restore.",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBAccessConfig"),
},
},
"backupType": {
SchemaProps: spec.SchemaProps{
Description: "Type is the backup type for tidb cluster.",
Type: []string{"string"},
Format: "",
},
},
"s3": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.S3StorageProvider"),
},
},
"gcs": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.GcsStorageProvider"),
},
},
"storageClassName": {
SchemaProps: spec.SchemaProps{
Description: "The storageClassName of the persistent volume for Restore data storage. Defaults to Kubernetes default storage class.",
Type: []string{"string"},
Format: "",
},
},
"storageSize": {
SchemaProps: spec.SchemaProps{
Description: "StorageSize is the request storage size for backup job",
Type: []string{"string"},
Format: "",
},
},
"br": {
SchemaProps: spec.SchemaProps{
Description: "BR is the configs for BR.",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BRConfig"),
},
},
"tolerations": {
SchemaProps: spec.SchemaProps{
Description: "Base tolerations of restore Pods, components may add more tolerations upon this respectively",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.Toleration"),
},
},
},
},
},
"affinity": {
SchemaProps: spec.SchemaProps{
Description: "Affinity of restore Pods",
Ref: ref("k8s.io/api/core/v1.Affinity"),
},
},
},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BRConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.GcsStorageProvider", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.S3StorageProvider", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBAccessConfig", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Toleration"},
}
}
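// The schema below maps to the "s3" section of a backup or restore spec.
// A minimal illustrative fragment (values are placeholders; only "provider"
// and "secretName" are required):
//
//	s3:
//	  provider: aws
//	  region: us-west-2
//	  bucket: my-backup-bucket
//	  secretName: s3-credentials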
func schema_pkg_apis_pingcap_v1alpha1_S3StorageProvider(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "S3StorageProvider represents a S3 compliant storage for storing backups.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"provider": {
SchemaProps: spec.SchemaProps{
Description: "Provider represents the specific storage provider that implements the S3 interface",
Type: []string{"string"},
Format: "",
},
},
"region": {
SchemaProps: spec.SchemaProps{
Description: "Region in which the S3 compatible bucket is located.",
Type: []string{"string"},
Format: "",
},
},
"path": {
SchemaProps: spec.SchemaProps{
Description: "Path is the full path where the backup is saved. The format of the path must be: \"<bucket-name>/<path-to-backup-file>\"",
Type: []string{"string"},
Format: "",
},
},
"bucket": {
SchemaProps: spec.SchemaProps{
Description: "Bucket in which to store the backup data.",
Type: []string{"string"},
Format: "",
},
},
"endpoint": {
SchemaProps: spec.SchemaProps{
Description: "Endpoint of S3 compatible storage service",
Type: []string{"string"},
Format: "",
},
},
"storageClass": {
SchemaProps: spec.SchemaProps{
Description: "StorageClass represents the storage class",
Type: []string{"string"},
Format: "",
},
},
"acl": {
SchemaProps: spec.SchemaProps{
Description: "Acl represents access control permissions for this bucket",
Type: []string{"string"},
Format: "",
},
},
"secretName": {
SchemaProps: spec.SchemaProps{
Description: "SecretName is the name of secret which stores S3 compliant storage access key and secret key.",
Type: []string{"string"},
Format: "",
},
},
"prefix": {
SchemaProps: spec.SchemaProps{
Description: "Prefix for the keys.",
Type: []string{"string"},
Format: "",
},
},
"sse": {
SchemaProps: spec.SchemaProps{
Description: "SSE Sever-Side Encryption.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"provider", "secretName"},
},
},
}
}
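// Security below models the security section of the tidb-server config file,
// which is why its keys are kebab-case rather than camelCase. A sketch with
// placeholder paths:
//
//	security:
//	  ssl-ca: /var/lib/tidb-tls/ca.crt
//	  ssl-cert: /var/lib/tidb-tls/tls.crt
//	  ssl-key: /var/lib/tidb-tls/tls.key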
func schema_pkg_apis_pingcap_v1alpha1_Security(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Security is the security section of the config.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"skip-grant-table": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"ssl-ca": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"ssl-cert": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"ssl-key": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"cluster-ssl-ca": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"cluster-ssl-cert": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"cluster-ssl-key": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
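// A hypothetical fragment matching the ServiceSpec schema below (all values
// are examples only):
//
//	service:
//	  type: LoadBalancer
//	  portName: mysql-client
//	  annotations:
//	    example.com/owner: dba-team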
func schema_pkg_apis_pingcap_v1alpha1_ServiceSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"type": {
SchemaProps: spec.SchemaProps{
Description: "Type of the real kubernetes service",
Type: []string{"string"},
Format: "",
},
},
"annotations": {
SchemaProps: spec.SchemaProps{
Description: "Additional annotations of the kubernetes service object",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"loadBalancerIP": {
SchemaProps: spec.SchemaProps{
Description: "LoadBalancerIP is the loadBalancerIP of service Optional: Defaults to omitted",
Type: []string{"string"},
Format: "",
},
},
"clusterIP": {
SchemaProps: spec.SchemaProps{
Description: "ClusterIP is the clusterIP of service",
Type: []string{"string"},
Format: "",
},
},
"portName": {
SchemaProps: spec.SchemaProps{
Description: "PortName is the name of service port",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_Status(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Status is the status section of the config.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"report-status": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to true",
Type: []string{"boolean"},
Format: "",
},
},
"metrics-addr": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"metrics-interval": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 15",
Type: []string{"integer"},
Format: "int32",
},
},
"record-db-qps": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to false",
Type: []string{"boolean"},
Format: "",
},
},
},
},
},
}
}
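// StmtSummary below corresponds to the statement-summary section of the tidb
// config; the example values echo the documented defaults:
//
//	stmt-summary:
//	  max-stmt-count: 100
//	  max-sql-length: 4096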
func schema_pkg_apis_pingcap_v1alpha1_StmtSummary(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "StmtSummary is the config for statement summary.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"max-stmt-count": {
SchemaProps: spec.SchemaProps{
Description: "The maximum number of statements kept in memory. Optional: Defaults to 100",
Type: []string{"integer"},
Format: "int32",
},
},
"max-sql-length": {
SchemaProps: spec.SchemaProps{
Description: "The maximum length of displayed normalized SQL and sample SQL. Optional: Defaults to 4096",
Type: []string{"integer"},
Format: "int32",
},
},
},
},
},
}
}
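// StorageProvider below acts as a loose union: callers presumably set exactly
// one of "s3" or "gcs", although the schema itself does not enforce mutual
// exclusion. An S3-backed example would populate only:
//
//	s3:
//	  provider: ceph
//	  endpoint: http://ceph-gateway.local:8080
//	  secretName: ceph-credentials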
func schema_pkg_apis_pingcap_v1alpha1_StorageProvider(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "StorageProvider defines the configuration for storing a backup in backend storage.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"s3": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.S3StorageProvider"),
},
},
"gcs": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.GcsStorageProvider"),
},
},
},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.GcsStorageProvider", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.S3StorageProvider"},
}
}
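// TiDBAccessConfig below tells backup and restore jobs how to reach the
// target cluster. A hypothetical example ("host" and "secretName" are the
// only required properties; 4000 is the conventional tidb port):
//
//	to:
//	  host: basic-tidb.default.svc
//	  port: 4000
//	  user: root
//	  secretName: tidb-password-secret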
func schema_pkg_apis_pingcap_v1alpha1_TiDBAccessConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TiDBAccessConfig defines the configuration for access tidb cluster",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"host": {
SchemaProps: spec.SchemaProps{
Description: "Host is the tidb cluster access address",
Type: []string{"string"},
Format: "",
},
},
"port": {
SchemaProps: spec.SchemaProps{
Description: "Port is the port number to use for connecting tidb cluster",
Type: []string{"integer"},
Format: "int32",
},
},
"user": {
SchemaProps: spec.SchemaProps{
Description: "User is the user for login tidb cluster",
Type: []string{"string"},
Format: "",
},
},
"secretName": {
SchemaProps: spec.SchemaProps{
Description: "SecretName is the name of secret which stores tidb cluster's password.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"host", "secretName"},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TiDBConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TiDBConfig is the configuration of tidb-server",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"cors": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"socket": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"lease": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 45s",
Type: []string{"string"},
Format: "",
},
},
"run-ddl": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to true",
Type: []string{"boolean"},
Format: "",
},
},
"split-table": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to true",
Type: []string{"boolean"},
Format: "",
},
},
"token-limit": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 1000",
Type: []string{"integer"},
Format: "int32",
},
},
"oom-action": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to log",
Type: []string{"string"},
Format: "",
},
},
"mem-quota-query": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 34359738368",
Type: []string{"integer"},
Format: "int64",
},
},
"enable-streaming": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to false",
Type: []string{"boolean"},
Format: "",
},
},
"enable-batch-dml": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to false",
Type: []string{"boolean"},
Format: "",
},
},
"txn-local-latches": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TxnLocalLatches"),
},
},
"lower-case-table-names": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int32",
},
},
"log": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Log"),
},
},
"security": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Security"),
},
},
"status": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Status"),
},
},
"performance": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Performance"),
},
},
"prepared-plan-cache": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PreparedPlanCache"),
},
},
"opentracing": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.OpenTracing"),
},
},
"proxy-protocol": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ProxyProtocol"),
},
},
"tikv-client": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVClient"),
},
},
"binlog": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Binlog"),
},
},
"compatible-kill-query": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"plugin": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Plugin"),
},
},
"pessimistic-txn": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PessimisticTxn"),
},
},
"check-mb4-value-in-utf8": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to true",
Type: []string{"boolean"},
Format: "",
},
},
"alter-primary-key": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to false",
Type: []string{"boolean"},
Format: "",
},
},
"treat-old-version-utf8-as-utf8mb4": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to true",
Type: []string{"boolean"},
Format: "",
},
},
"split-region-max-num": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 1000",
Type: []string{"integer"},
Format: "int64",
},
},
"stmt-summary": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.StmtSummary"),
},
},
},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Binlog", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Log", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.OpenTracing", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Performance", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PessimisticTxn", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Plugin", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PreparedPlanCache", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ProxyProtocol", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Security", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Status", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.StmtSummary", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVClient", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TxnLocalLatches"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TiDBServiceSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"externalTrafficPolicy": {
SchemaProps: spec.SchemaProps{
Description: "ExternalTrafficPolicy of the service Optional: Defaults to omitted",
Type: []string{"string"},
Format: "",
},
},
"exposeStatus": {
SchemaProps: spec.SchemaProps{
Description: "Whether expose the status port Optional: Defaults to true",
Type: []string{"boolean"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TiDBSlowLogTailerSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TiDBSlowLogTailerSpec represents an optional log tailer sidecar with TiDB",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"limits": {
SchemaProps: spec.SchemaProps{
Description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
"requests": {
SchemaProps: spec.SchemaProps{
Description: "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/api/resource.Quantity"},
}
}
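// TiDBSpec below layers component-level overrides on top of cluster-level
// settings. A minimal illustrative fragment ("replicas" is the only required
// property; the other values are placeholders):
//
//	tidb:
//	  replicas: 3
//	  baseImage: pingcap/tidb
//	  separateSlowLog: true
//	  service:
//	    type: NodePort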
func schema_pkg_apis_pingcap_v1alpha1_TiDBSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TiDBSpec contains details of TiDB members",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"version": {
SchemaProps: spec.SchemaProps{
Description: "Version of the component. Override the cluster-level version if non-empty Optional: Defaults to cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
"imagePullPolicy": {
SchemaProps: spec.SchemaProps{
Description: "ImagePullPolicy of the component. Override the cluster-level imagePullPolicy if present Optional: Defaults to cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
"hostNetwork": {
SchemaProps: spec.SchemaProps{
Description: "Whether Hostnetwork of the component is enabled. Override the cluster-level setting if present Optional: Defaults to cluster-level setting",
Type: []string{"boolean"},
Format: "",
},
},
"affinity": {
SchemaProps: spec.SchemaProps{
Description: "Affinity of the component. Override the cluster-level one if present Optional: Defaults to cluster-level setting",
Ref: ref("k8s.io/api/core/v1.Affinity"),
},
},
"priorityClassName": {
SchemaProps: spec.SchemaProps{
Description: "PriorityClassName of the component. Override the cluster-level one if present Optional: Defaults to cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
"schedulerName": {
SchemaProps: spec.SchemaProps{
Description: "SchedulerName of the component. Override the cluster-level one if present Optional: Defaults to cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
"nodeSelector": {
SchemaProps: spec.SchemaProps{
Description: "NodeSelector of the component. Merged into the cluster-level nodeSelector if non-empty Optional: Defaults to cluster-level setting",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"annotations": {
SchemaProps: spec.SchemaProps{
Description: "Annotations of the component. Merged into the cluster-level annotations if non-empty Optional: Defaults to cluster-level setting",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"tolerations": {
SchemaProps: spec.SchemaProps{
Description: "Tolerations of the component. Override the cluster-level tolerations if non-empty Optional: Defaults to cluster-level setting",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.Toleration"),
},
},
},
},
},
"podSecurityContext": {
SchemaProps: spec.SchemaProps{
Description: "PodSecurityContext of the component",
Ref: ref("k8s.io/api/core/v1.PodSecurityContext"),
},
},
"configUpdateStrategy": {
SchemaProps: spec.SchemaProps{
Description: "ConfigUpdateStrategy of the component. Override the cluster-level updateStrategy if present Optional: Defaults to cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
"limits": {
SchemaProps: spec.SchemaProps{
Description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
"requests": {
SchemaProps: spec.SchemaProps{
Description: "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
"replicas": {
SchemaProps: spec.SchemaProps{
Description: "The desired ready replicas",
Type: []string{"integer"},
Format: "int32",
},
},
"baseImage": {
SchemaProps: spec.SchemaProps{
Description: "Base image of the component, image tag is now allowed during validation",
Type: []string{"string"},
Format: "",
},
},
"service": {
SchemaProps: spec.SchemaProps{
Description: "Service defines a Kubernetes service of TiDB cluster. Optional: No kubernetes service will be created by default.",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBServiceSpec"),
},
},
"binlogEnabled": {
SchemaProps: spec.SchemaProps{
Description: "Whether enable TiDB Binlog, it is encouraged to not set this field and rely on the default behavior Optional: Defaults to true if PumpSpec is non-nil, otherwise false",
Type: []string{"boolean"},
Format: "",
},
},
"maxFailoverCount": {
SchemaProps: spec.SchemaProps{
Description: "MaxFailoverCount limit the max replicas could be added in failover, 0 means unlimited Optional: Defaults to 0",
Type: []string{"integer"},
Format: "int32",
},
},
"separateSlowLog": {
SchemaProps: spec.SchemaProps{
Description: "Whether output the slow log in an separate sidecar container Optional: Defaults to true",
Type: []string{"boolean"},
Format: "",
},
},
"tlsClient": {
SchemaProps: spec.SchemaProps{
Description: "Whether enable the TLS connection between the SQL client and TiDB server Optional: Defaults to nil",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBTLSClient"),
},
},
"slowLogTailer": {
SchemaProps: spec.SchemaProps{
Description: "The spec of the slow log tailer sidecar",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBSlowLogTailerSpec"),
},
},
"plugins": {
SchemaProps: spec.SchemaProps{
Description: "Plugins is a list of plugins that are loaded by TiDB server, empty means plugin disabled",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"config": {
SchemaProps: spec.SchemaProps{
Description: "Config is the Configuration of tidb-servers",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBConfig"),
},
},
},
Required: []string{"replicas"},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBServiceSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBSlowLogTailerSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBTLSClient", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/apimachinery/pkg/api/resource.Quantity"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TiKVBlockCacheConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TiKVBlockCacheConfig is the config of a block cache",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"shared": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to true",
Type: []string{"boolean"},
Format: "",
},
},
"capacity": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"num-shard-bits": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"strict-capacity-limit": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"high-pri-pool-ratio": {
SchemaProps: spec.SchemaProps{
Type: []string{"number"},
Format: "double",
},
},
"memory-allocator": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TiKVCfConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TiKVCfConfig is the config of a cf",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"block-size": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"block-cache-size": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"disable-block-cache": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"cache-index-and-filter-blocks": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"pin-l0-filter-and-index-blocks": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"use-bloom-filter": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"optimize-filters-for-hits": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"whole-key-filtering": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"bloom-filter-bits-per-key": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"block-based-bloom-filter": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"read-amp-bytes-per-bit": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"compression-per-level": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"write-buffer-size": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"max-write-buffer-number": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"min-write-buffer-number-to-merge": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"max-bytes-for-level-base": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"target-file-size-base": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"level0-file-num-compaction-trigger": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"level0-slowdown-writes-trigger": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"level0-stop-writes-trigger": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"max-compaction-bytes": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"compaction-pri": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"dynamic-level-bytes": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"num-levels": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"max-bytes-for-level-multiplier": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"compaction-style": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"disable-auto-compactions": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"soft-pending-compaction-bytes-limit": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"hard-pending-compaction-bytes-limit": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"force-consistency-checks": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"prop-size-index-distance": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"prop-keys-index-distance": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"enable-doubly-skiplist": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"titan": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVTitanCfConfig"),
},
},
},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVTitanCfConfig"},
}
}
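// TiKVClient below maps to the tikv-client section of the tidb config; the
// example values echo the documented defaults:
//
//	tikv-client:
//	  grpc-connection-count: 16
//	  commit-timeout: 41s
//	  max-batch-size: 128
//	  region-cache-ttl: 600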
func schema_pkg_apis_pingcap_v1alpha1_TiKVClient(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TiKVClient is the config for tikv client.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"grpc-connection-count": {
SchemaProps: spec.SchemaProps{
Description: "GrpcConnectionCount is the max gRPC connections that will be established with each tikv-server. Optional: Defaults to 16",
Type: []string{"integer"},
Format: "int32",
},
},
"grpc-keepalive-time": {
SchemaProps: spec.SchemaProps{
Description: "After a duration of this time in seconds if the client doesn't see any activity it pings the server to see if the transport is still alive. Optional: Defaults to 10",
Type: []string{"integer"},
Format: "int32",
},
},
"grpc-keepalive-timeout": {
SchemaProps: spec.SchemaProps{
Description: "After having pinged for keepalive check, the client waits for a duration of Timeout in seconds and if no activity is seen even after that the connection is closed. Optional: Defaults to 3",
Type: []string{"integer"},
Format: "int32",
},
},
"commit-timeout": {
SchemaProps: spec.SchemaProps{
Description: "CommitTimeout is the max time which command 'commit' will wait. Optional: Defaults to 41s",
Type: []string{"string"},
Format: "",
},
},
"max-txn-time-use": {
SchemaProps: spec.SchemaProps{
Description: "MaxTxnTimeUse is the max time a Txn may use (in seconds) from its startTS to commitTS. Optional: Defaults to 590",
Type: []string{"integer"},
Format: "int32",
},
},
"max-batch-size": {
SchemaProps: spec.SchemaProps{
Description: "MaxBatchSize is the max batch size when calling batch commands API. Optional: Defaults to 128",
Type: []string{"integer"},
Format: "int32",
},
},
"overload-threshold": {
SchemaProps: spec.SchemaProps{
Description: "If TiKV load is greater than this, TiDB will wait for a while to avoid little batch. Optional: Defaults to 200",
Type: []string{"integer"},
Format: "int32",
},
},
"max-batch-wait-time": {
SchemaProps: spec.SchemaProps{
Description: "MaxBatchWaitTime in nanosecond is the max wait time for batch. Optional: Defaults to 0",
Type: []string{"integer"},
Format: "int64",
},
},
"batch-wait-size": {
SchemaProps: spec.SchemaProps{
Description: "BatchWaitSize is the max wait size for batch. Optional: Defaults to 8",
Type: []string{"integer"},
Format: "int32",
},
},
"region-cache-ttl": {
SchemaProps: spec.SchemaProps{
Description: "If a Region has not been accessed for more than the given duration (in seconds), it will be reloaded from the PD. Optional: Defaults to 600",
Type: []string{"integer"},
Format: "int32",
},
},
"store-limit": {
SchemaProps: spec.SchemaProps{
Description: "If a store has been up to the limit, it will return error for successive request to prevent the store occupying too much token in dispatching level. Optional: Defaults to 0",
Type: []string{"integer"},
Format: "int64",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TiKVConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TiKVConfig is the configuration of TiKV.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"log-level": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to info",
Type: []string{"string"},
Format: "",
},
},
"log-file": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"log-rotation-timespan": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 24h",
Type: []string{"string"},
Format: "",
},
},
"panic-when-unexpected-key-or-data": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"server": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVServerConfig"),
},
},
"storage": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVStorageConfig"),
},
},
"raftstore": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVRaftstoreConfig"),
},
},
"rocksdb": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVDbConfig"),
},
},
"coprocessor": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVCoprocessorConfig"),
},
},
"readpool": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVReadPoolConfig"),
},
},
"raftdb": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVRaftDBConfig"),
},
},
"import": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVImportConfig"),
},
},
"gc": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVGCConfig"),
},
},
"pd": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVPDConfig"),
},
},
"security": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVSecurityConfig"),
},
},
},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVCoprocessorConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVDbConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVGCConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVImportConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVPDConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVRaftDBConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVRaftstoreConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVReadPoolConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVSecurityConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVServerConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVStorageConfig"},
}
}
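// TiKVCoprocessorConfig below governs region splitting; the example values
// echo the documented defaults:
//
//	coprocessor:
//	  region-max-size: 144MB
//	  region-split-size: 96MB
//	  region-max-keys: 1440000
//	  region-split-keys: 960000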
func schema_pkg_apis_pingcap_v1alpha1_TiKVCoprocessorConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TiKVCoprocessorConfig is the configuration of TiKV Coprocessor component.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"split-region-on-table": {
SchemaProps: spec.SchemaProps{
Description: "When it is set to `true`, TiKV will try to split a Region with table prefix if that Region crosses tables. It is recommended to turn off this option if there will be a large number of tables created. Optional: Defaults to false optional",
Type: []string{"boolean"},
Format: "",
},
},
"batch-split-limit": {
SchemaProps: spec.SchemaProps{
Description: "One split check produces several split keys in batch. This config limits the number of produced split keys in one batch. optional",
Type: []string{"integer"},
Format: "int64",
},
},
"region-max-size": {
SchemaProps: spec.SchemaProps{
Description: "When Region [a,e) size exceeds `region_max_size`, it will be split into several Regions [a,b), [b,c), [c,d), [d,e) and the size of [a,b), [b,c), [c,d) will be `region_split_size` (or a little larger). See also: region-split-size Optional: Defaults to 144MB optional",
Type: []string{"string"},
Format: "",
},
},
"region-split-size": {
SchemaProps: spec.SchemaProps{
Description: "When Region [a,e) size exceeds `region_max_size`, it will be split into several Regions [a,b), [b,c), [c,d), [d,e) and the size of [a,b), [b,c), [c,d) will be `region_split_size` (or a little larger). See also: region-max-size Optional: Defaults to 96MB optional",
Type: []string{"string"},
Format: "",
},
},
"region-max-keys": {
SchemaProps: spec.SchemaProps{
Description: "When the number of keys in Region [a,e) exceeds the `region_max_keys`, it will be split into several Regions [a,b), [b,c), [c,d), [d,e) and the number of keys in [a,b), [b,c), [c,d) will be `region_split_keys`. See also: region-split-keys Optional: Defaults to 1440000 optional",
Type: []string{"integer"},
Format: "int64",
},
},
"region-split-keys": {
SchemaProps: spec.SchemaProps{
Description: "When the number of keys in Region [a,e) exceeds the `region_max_keys`, it will be split into several Regions [a,b), [b,c), [c,d), [d,e) and the number of keys in [a,b), [b,c), [c,d) will be `region_split_keys`. See also: region-max-keys Optional: Defaults to 960000 optional",
Type: []string{"integer"},
Format: "int64",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TiKVCoprocessorReadPoolConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"high_concurrency": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 8",
Type: []string{"integer"},
Format: "int64",
},
},
"normal_concurrency": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 8",
Type: []string{"integer"},
Format: "int64",
},
},
"low_concurrency": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 8",
Type: []string{"integer"},
Format: "int64",
},
},
"max_tasks_per_worker_high": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 2000",
Type: []string{"integer"},
Format: "int64",
},
},
"max_tasks_per_worker_normal": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 2000",
Type: []string{"integer"},
Format: "int64",
},
},
"max_tasks_per_worker_low": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 2000",
Type: []string{"integer"},
Format: "int64",
},
},
"stack_size": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 10MB",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TiKVDbConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TiKVDbConfig is the rocksdb config.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"wal-recovery-mode": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 2",
Type: []string{"integer"},
Format: "int64",
},
},
"wal-ttl-seconds": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"wal-size-limit": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"max-total-wal-size": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 4GB",
Type: []string{"string"},
Format: "",
},
},
"max-background-jobs": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 8",
Type: []string{"integer"},
Format: "int64",
},
},
"max-manifest-file-size": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 128MB",
Type: []string{"string"},
Format: "",
},
},
"create-if-missing": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to true",
Type: []string{"boolean"},
Format: "",
},
},
"max-open-files": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 40960",
Type: []string{"integer"},
Format: "int64",
},
},
"enable-statistics": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to true",
Type: []string{"boolean"},
Format: "",
},
},
"stats-dump-period": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 10m",
Type: []string{"string"},
Format: "",
},
},
"compaction-readahead-size": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 0",
Type: []string{"string"},
Format: "",
},
},
"info-log-max-size": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"info-log-roll-time": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"info-log-keep-log-file-num": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"info-log-dir": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"rate-bytes-per-sec": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"rate-limiter-mode": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"auto-tuned": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"bytes-per-sync": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"wal-bytes-per-sync": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"max-sub-compactions": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 3",
Type: []string{"integer"},
Format: "int64",
},
},
"writable-file-max-buffer-size": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"use-direct-io-for-flush-and-compaction": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"enable-pipelined-write": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"defaultcf": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVCfConfig"),
},
},
"writecf": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVCfConfig"),
},
},
"lockcf": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVCfConfig"),
},
},
"raftcf": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVCfConfig"),
},
},
"titan": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVTitanDBConfig"),
},
},
},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVCfConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVTitanDBConfig"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TiKVGCConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
" batch_keys": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 512",
Type: []string{"integer"},
Format: "int64",
},
},
" max_write_bytes_per_sec": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TiKVImportConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"import_dir": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"num_threads": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"num_import_jobs": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"num_import_sst_jobs": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"max_prepare_duration": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"region_split_size": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"stream_channel_window": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"max_open_engines": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"upload_speed_limit": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
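// Note that the PD client section below uses snake_case property names,
// unlike most kebab-case TiKV sections. Illustrative values (the endpoint
// address is hypothetical; 300ms and -1 are the documented defaults):
//
//	pd:
//	  endpoints:
//	  - basic-pd.default.svc:2379
//	  retry_interval: 300ms
//	  retry_max_count: -1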
func schema_pkg_apis_pingcap_v1alpha1_TiKVPDConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"endpoints": {
SchemaProps: spec.SchemaProps{
Description: "The PD endpoints for the client.\n\nDefault is empty.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"retry_interval": {
SchemaProps: spec.SchemaProps{
Description: "The interval at which to retry a PD connection initialization.\n\nDefault is 300ms. Optional: Defaults to 300ms",
Type: []string{"string"},
Format: "",
},
},
"retry_max_count": {
SchemaProps: spec.SchemaProps{
Description: "The maximum number of times to retry a PD connection initialization.\n\nDefault is isize::MAX, represented by -1. Optional: Defaults to -1",
Type: []string{"integer"},
Format: "int64",
},
},
"retry_log_every": {
SchemaProps: spec.SchemaProps{
Description: "If the client observes the same error message on retry, it can repeat the message only every `n` times.\n\nDefault is 10. Set to 1 to disable this feature. Optional: Defaults to 10",
Type: []string{"integer"},
Format: "int64",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TiKVRaftDBConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"wal_recovery_mode": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"wal_dir": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"wal_ttl_seconds": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"wal_size_limit": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"max_total_wal_size": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"max_background_jobs": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"max_manifest_file_size": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"create_if_missing": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"max_open_files": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"enable_statistics": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"stats_dump_period": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"compaction_readahead_size": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"info_log_max_size": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"info_log_roll_time": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"info_log_keep_log_file_num": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"info_log_dir": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"max_sub_compactions": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"writable_file_max_buffer_size": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"use_direct_io_for_flush_and_compaction": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"enable_pipelined_write": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"allow_concurrent_memtable_write": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"bytes_per_sync": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"wal_bytes_per_sync": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"defaultcf": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVCfConfig"),
},
},
},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVCfConfig"},
}
}
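// TiKVRaftstoreConfig below tunes the raftstore component; the example values
// echo a few of the documented defaults:
//
//	raftstore:
//	  sync-log: true
//	  raft-log-gc-count-limit: 72000
//	  pd-heartbeat-tick-interval: 60s
//	  max-peer-down-duration: 5m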
func schema_pkg_apis_pingcap_v1alpha1_TiKVRaftstoreConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TiKVRaftstoreConfig is the configuration of TiKV raftstore component.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"sync-log": {
SchemaProps: spec.SchemaProps{
Description: "true for high reliability, prevent data loss when power failure. Optional: Defaults to true",
Type: []string{"boolean"},
Format: "",
},
},
"prevote": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to true",
Type: []string{"boolean"},
Format: "",
},
},
"raft-base-tick-interval": {
SchemaProps: spec.SchemaProps{
Description: "raft-base-tick-interval is a base tick interval (ms).",
Type: []string{"string"},
Format: "",
},
},
"raft-heartbeat-ticks": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"raft-election-timeout-ticks": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"raft-entry-max-size": {
SchemaProps: spec.SchemaProps{
Description: "When the entry exceed the max size, reject to propose it. Optional: Defaults to 8MB",
Type: []string{"string"},
Format: "",
},
},
"raft-log-gc-tick-interval": {
SchemaProps: spec.SchemaProps{
Description: "Interval to gc unnecessary raft log (ms). Optional: Defaults to 10s",
Type: []string{"string"},
Format: "",
},
},
"raft-log-gc-threshold": {
SchemaProps: spec.SchemaProps{
Description: "A threshold to gc stale raft log, must >= 1. Optional: Defaults to 50",
Type: []string{"integer"},
Format: "int64",
},
},
"raft-log-gc-count-limit": {
SchemaProps: spec.SchemaProps{
Description: "When entry count exceed this value, gc will be forced trigger. Optional: Defaults to 72000",
Type: []string{"integer"},
Format: "int64",
},
},
"raft-log-gc-size-limit": {
SchemaProps: spec.SchemaProps{
Description: "When the approximate size of raft log entries exceed this value gc will be forced trigger. Optional: Defaults to 72MB",
Type: []string{"string"},
Format: "",
},
},
"raft-entry-cache-life-time": {
SchemaProps: spec.SchemaProps{
Description: "When a peer is not responding for this time, leader will not keep entry cache for it.",
Type: []string{"string"},
Format: "",
},
},
"raft-reject-transfer-leader-duration": {
SchemaProps: spec.SchemaProps{
Description: "When a peer is newly added, reject transferring leader to the peer for a while.",
Type: []string{"string"},
Format: "",
},
},
"split-region-check-tick-interval": {
SchemaProps: spec.SchemaProps{
Description: "Interval (ms) to check region whether need to be split or not. Optional: Defaults to 10s",
Type: []string{"string"},
Format: "",
},
},
"region-split-check-diff": {
SchemaProps: spec.SchemaProps{
Description: "/ When size change of region exceed the diff since last check, it / will be checked again whether it should be split. Optional: Defaults to 6MB",
Type: []string{"string"},
Format: "",
},
},
"region-compact-check-interval": {
SchemaProps: spec.SchemaProps{
Description: "/ Interval (ms) to check whether start compaction for a region. Optional: Defaults to 5m",
Type: []string{"string"},
Format: "",
},
},
"clean-stale-peer-delay": {
SchemaProps: spec.SchemaProps{
Description: "delay time before deleting a stale peer Optional: Defaults to 10m",
Type: []string{"string"},
Format: "",
},
},
"region-compact-check-step": {
SchemaProps: spec.SchemaProps{
Description: "/ Number of regions for each time checking. Optional: Defaults to 100",
Type: []string{"integer"},
Format: "int64",
},
},
"region-compact-min-tombstones": {
SchemaProps: spec.SchemaProps{
Description: "/ Minimum number of tombstones to trigger manual compaction. Optional: Defaults to 10000",
Type: []string{"integer"},
Format: "int64",
},
},
"region-compact-tombstones-percent": {
SchemaProps: spec.SchemaProps{
Description: "/ Minimum percentage of tombstones to trigger manual compaction. / Should between 1 and 100. Optional: Defaults to 30",
Type: []string{"integer"},
Format: "int64",
},
},
"pd-heartbeat-tick-interval": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 60s",
Type: []string{"string"},
Format: "",
},
},
"pd-store-heartbeat-tick-interval": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 10s",
Type: []string{"string"},
Format: "",
},
},
"snap-mgr-gc-tick-interval": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"snap-gc-timeout": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"lock-cf-compact-interval": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 10m",
Type: []string{"string"},
Format: "",
},
},
"lock-cf-compact-bytes-threshold": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 256MB",
Type: []string{"string"},
Format: "",
},
},
"notify-capacity": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"messages-per-tick": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"max-peer-down-duration": {
SchemaProps: spec.SchemaProps{
Description: "/ When a peer is not active for max-peer-down-duration / the peer is considered to be down and is reported to PD. Optional: Defaults to 5m",
Type: []string{"string"},
Format: "",
},
},
"max-leader-missing-duration": {
SchemaProps: spec.SchemaProps{
Description: "/ If the leader of a peer is missing for longer than max-leader-missing-duration / the peer would ask pd to confirm whether it is valid in any region. / If the peer is stale and is not valid in any region, it will destroy itself.",
Type: []string{"string"},
Format: "",
},
},
"abnormal-leader-missing-duration": {
SchemaProps: spec.SchemaProps{
Description: "/ Similar to the max-leader-missing-duration, instead it will log warnings and / try to alert monitoring systems, if there is any.",
Type: []string{"string"},
Format: "",
},
},
"peer-stale-state-check-interval": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"leader-transfer-max-log-lag": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"snap-apply-batch-size": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"consistency-check-interval": {
SchemaProps: spec.SchemaProps{
Description: "Interval (ms) to check region whether the data is consistent. Optional: Defaults to 0",
Type: []string{"string"},
Format: "",
},
},
"report-region-flow-interval": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"raft-store-max-leader-lease": {
SchemaProps: spec.SchemaProps{
Description: "The lease provided by a successfully proposed and applied entry.",
Type: []string{"string"},
Format: "",
},
},
"right-derive-when-split": {
SchemaProps: spec.SchemaProps{
Description: "Right region derive origin region id when split.",
Type: []string{"boolean"},
Format: "",
},
},
"allow-remove-leader": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"merge-max-log-gap": {
SchemaProps: spec.SchemaProps{
Description: "/ Max log gap allowed to propose merge.",
Type: []string{"integer"},
Format: "int64",
},
},
"merge-check-tick-interval": {
SchemaProps: spec.SchemaProps{
Description: "/ Interval to re-propose merge.",
Type: []string{"string"},
Format: "",
},
},
"use-delete-range": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"cleanup-import-sst-interval": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 10m",
Type: []string{"string"},
Format: "",
},
},
"apply-max-batch-size": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"apply-pool-size": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 2",
Type: []string{"integer"},
Format: "int64",
},
},
"store-max-batch-size": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"store-pool-size": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 2",
Type: []string{"integer"},
Format: "int64",
},
},
"hibernate-regions": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
},
},
},
}
}
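
// The raftstore schema above is generated from the corresponding v1alpha1 Go
// struct. A minimal sketch of how such properties map back to struct fields
// (the field names follow the schema keys; the pointer types and tags below
// are assumptions for illustration, not the exact upstream definitions):
//
//	type raftstoreSketch struct {
//		// "sync-log": boolean, defaults to true for high reliability.
//		SyncLog *bool `json:"sync-log,omitempty"`
//		// "raft-base-tick-interval": a duration string such as "1s".
//		RaftBaseTickInterval *string `json:"raft-base-tick-interval,omitempty"`
//	}
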
func schema_pkg_apis_pingcap_v1alpha1_TiKVReadPoolConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"coprocessor": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVCoprocessorReadPoolConfig"),
},
},
"storage": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVStorageReadPoolConfig"),
},
},
},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVCoprocessorReadPoolConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVStorageReadPoolConfig"},
}
}
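
// Ref-valued properties such as "coprocessor" above are resolved through the
// common.ReferenceCallback passed into each schema function, and every
// referenced type must also appear in Dependencies so that callers can
// register it recursively. A sketch of how an aggregator might wire this up
// (the map literal and function name are illustrative, not part of this file):
//
//	func getDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
//		return map[string]common.OpenAPIDefinition{
//			"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVReadPoolConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVReadPoolConfig(ref),
//		}
//	}
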
func schema_pkg_apis_pingcap_v1alpha1_TiKVSecurityConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"ca-path": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"cert-path": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"key-path": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"override_ssl_target": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"cipher_file": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TiKVServerConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TiKVServerConfig is the configuration of TiKV server.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"status-thread-pool-size": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 1",
Type: []string{"string"},
Format: "",
},
},
"grpc-compression-type": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to none",
Type: []string{"string"},
Format: "",
},
},
"grpc-concurrency": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 4",
Type: []string{"integer"},
Format: "int32",
},
},
"grpc-concurrent-stream": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 1024",
Type: []string{"integer"},
Format: "int32",
},
},
"grpc_memory_pool_quota": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 32G",
Type: []string{"string"},
Format: "",
},
},
"grpc-raft-conn-num": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 10",
Type: []string{"integer"},
Format: "int32",
},
},
"grpc-stream-initial-window-size": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 2MB",
Type: []string{"string"},
Format: "",
},
},
"grpc-keepalive-time": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 10s",
Type: []string{"string"},
Format: "",
},
},
"grpc-keepalive-timeout": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 3s",
Type: []string{"string"},
Format: "",
},
},
"concurrent-send-snap-limit": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 32",
Type: []string{"integer"},
Format: "int32",
},
},
"concurrent-recv-snap-limit": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 32",
Type: []string{"integer"},
Format: "int32",
},
},
"end-point-recursion-limit": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 1000",
Type: []string{"integer"},
Format: "int32",
},
},
"end-point-stream-channel-size": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int32",
},
},
"end-point-batch-row-limit": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int32",
},
},
"end-point-stream-batch-row-limit": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int32",
},
},
"end-point-enable-batch-if-possible": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int32",
},
},
"end-point-request-max-handle-duration": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"snap-max-write-bytes-per-sec": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 100MB",
Type: []string{"string"},
Format: "",
},
},
"snap-max-total-size": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"stats-concurrency": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int32",
},
},
"heavy-load-threshold": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int32",
},
},
"heavy-load-wait-duration": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 60s",
Type: []string{"string"},
Format: "",
},
},
"labels": {
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
},
},
}
}
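
// "labels" above is declared as an object whose AdditionalProperties are
// strings, i.e. a free-form string-to-string map. In Go terms this would be
// (the tag is assumed for illustration):
//
//	Labels map[string]string `json:"labels,omitempty"`
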
func schema_pkg_apis_pingcap_v1alpha1_TiKVSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TiKVSpec contains details of TiKV members",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"version": {
SchemaProps: spec.SchemaProps{
Description: "Version of the component. Override the cluster-level version if non-empty Optional: Defaults to cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
"imagePullPolicy": {
SchemaProps: spec.SchemaProps{
Description: "ImagePullPolicy of the component. Override the cluster-level imagePullPolicy if present Optional: Defaults to cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
"hostNetwork": {
SchemaProps: spec.SchemaProps{
Description: "Whether Hostnetwork of the component is enabled. Override the cluster-level setting if present Optional: Defaults to cluster-level setting",
Type: []string{"boolean"},
Format: "",
},
},
"affinity": {
SchemaProps: spec.SchemaProps{
Description: "Affinity of the component. Override the cluster-level one if present Optional: Defaults to cluster-level setting",
Ref: ref("k8s.io/api/core/v1.Affinity"),
},
},
"priorityClassName": {
SchemaProps: spec.SchemaProps{
Description: "PriorityClassName of the component. Override the cluster-level one if present Optional: Defaults to cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
"schedulerName": {
SchemaProps: spec.SchemaProps{
Description: "SchedulerName of the component. Override the cluster-level one if present Optional: Defaults to cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
"nodeSelector": {
SchemaProps: spec.SchemaProps{
Description: "NodeSelector of the component. Merged into the cluster-level nodeSelector if non-empty Optional: Defaults to cluster-level setting",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"annotations": {
SchemaProps: spec.SchemaProps{
Description: "Annotations of the component. Merged into the cluster-level annotations if non-empty Optional: Defaults to cluster-level setting",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"tolerations": {
SchemaProps: spec.SchemaProps{
Description: "Tolerations of the component. Override the cluster-level tolerations if non-empty Optional: Defaults to cluster-level setting",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.Toleration"),
},
},
},
},
},
"podSecurityContext": {
SchemaProps: spec.SchemaProps{
Description: "PodSecurityContext of the component",
Ref: ref("k8s.io/api/core/v1.PodSecurityContext"),
},
},
"configUpdateStrategy": {
SchemaProps: spec.SchemaProps{
Description: "ConfigUpdateStrategy of the component. Override the cluster-level updateStrategy if present Optional: Defaults to cluster-level setting",
Type: []string{"string"},
Format: "",
},
},
"limits": {
SchemaProps: spec.SchemaProps{
Description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
"requests": {
SchemaProps: spec.SchemaProps{
Description: "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
"replicas": {
SchemaProps: spec.SchemaProps{
Description: "The desired ready replicas",
Type: []string{"integer"},
Format: "int32",
},
},
"baseImage": {
SchemaProps: spec.SchemaProps{
Description: "Base image of the component, image tag is now allowed during validation",
Type: []string{"string"},
Format: "",
},
},
"privileged": {
SchemaProps: spec.SchemaProps{
Description: "Whether create the TiKV container in privileged mode, it is highly discouraged to enable this in critical environment. Optional: defaults to false",
Type: []string{"boolean"},
Format: "",
},
},
"maxFailoverCount": {
SchemaProps: spec.SchemaProps{
Description: "MaxFailoverCount limit the max replicas could be added in failover, 0 means unlimited Optional: Defaults to 0",
Type: []string{"integer"},
Format: "int32",
},
},
"storageClassName": {
SchemaProps: spec.SchemaProps{
Description: "The storageClassName of the persistent volume for TiKV data storage. Defaults to Kubernetes default storage class.",
Type: []string{"string"},
Format: "",
},
},
"config": {
SchemaProps: spec.SchemaProps{
Description: "Config is the Configuration of tikv-servers",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVConfig"),
},
},
},
Required: []string{"replicas"},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVConfig", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/apimachinery/pkg/api/resource.Quantity"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TiKVStorageConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TiKVStorageConfig is the config of storage",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"max-key-size": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"scheduler-notify-capacity": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"scheduler-concurrency": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 2048000",
Type: []string{"integer"},
Format: "int64",
},
},
"scheduler-worker-pool-size": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 4",
Type: []string{"integer"},
Format: "int64",
},
},
"scheduler-pending-write-threshold": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 100MB",
Type: []string{"string"},
Format: "",
},
},
"block-cache": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVBlockCacheConfig"),
},
},
},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVBlockCacheConfig"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TiKVStorageReadPoolConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"high_concurrency": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 4",
Type: []string{"integer"},
Format: "int64",
},
},
"normal_concurrency": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 4",
Type: []string{"integer"},
Format: "int64",
},
},
"low_concurrency": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 4",
Type: []string{"integer"},
Format: "int64",
},
},
"max_tasks_per_worker_high": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 2000",
Type: []string{"integer"},
Format: "int64",
},
},
"max_tasks_per_worker_normal": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 2000",
Type: []string{"integer"},
Format: "int64",
},
},
"max_tasks_per_worker_low": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 2000",
Type: []string{"integer"},
Format: "int64",
},
},
"stack_size": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 10MB",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TiKVTitanCfConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TiKVTitanCfConfig is the titian config.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"min-blob-size": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"blob-file-compression": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"blob-cache-size": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"min-gc-batch-size": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"max-gc-batch-size": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"discardable-ratio": {
SchemaProps: spec.SchemaProps{
Type: []string{"number"},
Format: "double",
},
},
"sample-ratio": {
SchemaProps: spec.SchemaProps{
Type: []string{"number"},
Format: "double",
},
},
"merge-small-file-threshold": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"blob-run-mode": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TiKVTitanDBConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TiKVTitanDBConfig is the config a titian db.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"enabled": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"dirname": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"disable-gc": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"max-background-gc": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
"purge-obsolete-files-period": {
SchemaProps: spec.SchemaProps{
Description: "The value of this field will be truncated to seconds.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
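
// "purge-obsolete-files-period" above is documented as truncated to seconds.
// A sketch of such truncation with the standard library, assuming the value
// parses as a Go duration string (the real TiKV parser may differ):
//
//	if d, err := time.ParseDuration("90500ms"); err == nil {
//		d = d.Truncate(time.Second) // 1m30s
//	}
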
func schema_pkg_apis_pingcap_v1alpha1_TidbAutoScalerSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TidbAutoScalerSpec describes the spec for tidb auto-scaling",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"maxReplicas": {
SchemaProps: spec.SchemaProps{
Description: "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale out. It cannot be less than minReplicas.",
Type: []string{"integer"},
Format: "int32",
},
},
"minReplicas": {
SchemaProps: spec.SchemaProps{
Description: "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. Scaling is active as long as at least one metric value is available.",
Type: []string{"integer"},
Format: "int32",
},
},
"scaleInIntervalSeconds": {
SchemaProps: spec.SchemaProps{
Description: "ScaleInIntervalSeconds represents the duration seconds between each auto-scaling-in If not set, the default ScaleInIntervalSeconds will be set to 500",
Type: []string{"integer"},
Format: "int32",
},
},
"scaleOutIntervalSeconds": {
SchemaProps: spec.SchemaProps{
Description: "ScaleOutIntervalSeconds represents the duration seconds between each auto-scaling-out If not set, the default ScaleOutIntervalSeconds will be set to 300",
Type: []string{"integer"},
Format: "int32",
},
},
"metrics": {
SchemaProps: spec.SchemaProps{
Description: "metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond. If not set, the default metric will be set to 80% average CPU utilization.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/autoscaling/v2beta2.MetricSpec"),
},
},
},
},
},
"metricsTimeDuration": {
SchemaProps: spec.SchemaProps{
Description: "MetricsTimeDuration describe the Time duration to be queried in the Prometheus",
Type: []string{"string"},
Format: "",
},
},
"scaleOutThreshold": {
SchemaProps: spec.SchemaProps{
Description: "ScaleOutThreshold describe the consecutive threshold for the auto-scaling, if the consecutive counts of the scale-out result in auto-scaling reach this number, the auto-scaling would be performed. If not set, the default value is 3.",
Type: []string{"integer"},
Format: "int32",
},
},
"scaleInThreshold": {
SchemaProps: spec.SchemaProps{
Description: "ScaleInThreshold describe the consecutive threshold for the auto-scaling, if the consecutive counts of the scale-in result in auto-scaling reach this number, the auto-scaling would be performed. If not set, the default value is 5.",
Type: []string{"integer"},
Format: "int32",
},
},
},
Required: []string{"maxReplicas"},
},
},
Dependencies: []string{
"k8s.io/api/autoscaling/v2beta2.MetricSpec"},
}
}
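
// The "metrics" description above states the scaling rule: desired replicas
// equal the current pod count multiplied by the ratio of the current metric
// value to the target value. A sketch of that arithmetic (the function name
// is illustrative, not an operator API; math.Ceil is from the standard library):
//
//	func desiredReplicas(current int32, currentValue, targetValue float64) int32 {
//		return int32(math.Ceil(float64(current) * currentValue / targetValue))
//	}
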
func schema_pkg_apis_pingcap_v1alpha1_TidbCluster(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TidbCluster is the control script's spec",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec defines the behavior of a tidb cluster",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterSpec"),
},
},
},
Required: []string{"spec"},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterSpec"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TidbClusterAutoScaler(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TidbClusterAutoScaler is the control script's spec",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec describes the state of the TidbClusterAutoScaler",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterAutoScalerSpec"),
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Status describe the status of the TidbClusterAutoScaler",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterAutoSclaerStatus"),
},
},
},
Required: []string{"spec", "status"},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterAutoScalerSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterAutoSclaerStatus"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TidbClusterAutoScalerList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TidbClusterAutoScalerList is TidbClusterAutoScaler list",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"items": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterAutoScaler"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterAutoScaler"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TidbClusterAutoScalerSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TidbAutoScalerSpec describes the state of the TidbClusterAutoScaler",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"cluster": {
SchemaProps: spec.SchemaProps{
Description: "TidbClusterRef describe the target TidbCluster",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterRef"),
},
},
"metricsUrl": {
SchemaProps: spec.SchemaProps{
Description: "We used prometheus to fetch the metrics resources until the pd could provide it. MetricsUrl represents the url to fetch the metrics info",
Type: []string{"string"},
Format: "",
},
},
"monitor": {
SchemaProps: spec.SchemaProps{
Description: "TidbMonitorRef describe the target TidbMonitor, when MetricsUrl and Monitor are both set, Operator will use MetricsUrl",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbMonitorRef"),
},
},
"tikv": {
SchemaProps: spec.SchemaProps{
Description: "TiKV represents the auto-scaling spec for tikv",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TikvAutoScalerSpec"),
},
},
"tidb": {
SchemaProps: spec.SchemaProps{
Description: "TiDB represents the auto-scaling spec for tidb",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbAutoScalerSpec"),
},
},
},
Required: []string{"cluster"},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbAutoScalerSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterRef", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbMonitorRef", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TikvAutoScalerSpec"},
}
}
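
// Per the "monitor" description above, MetricsUrl takes precedence when both
// MetricsUrl and Monitor are set. A sketch of that precedence (the helper and
// the derived URL format are illustrative assumptions, not operator APIs):
//
//	func metricsEndpoint(metricsURL string, monitor *TidbMonitorRef) string {
//		if metricsURL != "" {
//			return metricsURL
//		}
//		if monitor != nil {
//			// Assumed naming convention for the monitor's Prometheus service.
//			return fmt.Sprintf("http://%s-prometheus.%s.svc:9090", monitor.Name, monitor.Namespace)
//		}
//		return ""
//	}
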
func schema_pkg_apis_pingcap_v1alpha1_TidbClusterList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TidbClusterList is TidbCluster list",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"items": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbCluster"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbCluster"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TidbClusterRef(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TidbClusterRef reference to a TidbCluster",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"namespace": {
SchemaProps: spec.SchemaProps{
Description: "Namespace is the namespace that TidbCluster object locates, default to the same namespace with TidbMonitor",
Type: []string{"string"},
Format: "",
},
},
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name is the name of TidbCluster object",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"name"},
},
},
}
}
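
// The "namespace" description above defaults the reference to the referrer's
// own namespace. A sketch of that defaulting (an illustrative helper, not an
// operator API):
//
//	func resolveNamespace(ref TidbClusterRef, referrerNamespace string) string {
//		if ref.Namespace != "" {
//			return ref.Namespace
//		}
//		return referrerNamespace
//	}
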
func schema_pkg_apis_pingcap_v1alpha1_TidbClusterSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TidbClusterSpec describes the attributes that a user creates on a tidb cluster",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"pd": {
SchemaProps: spec.SchemaProps{
Description: "PD cluster spec",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDSpec"),
},
},
"tidb": {
SchemaProps: spec.SchemaProps{
Description: "TiDB cluster spec",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBSpec"),
},
},
"tikv": {
SchemaProps: spec.SchemaProps{
Description: "TiKV cluster spec",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVSpec"),
},
},
"pump": {
SchemaProps: spec.SchemaProps{
Description: "Pump cluster spec",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PumpSpec"),
},
},
"helper": {
SchemaProps: spec.SchemaProps{
Description: "Helper spec",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.HelperSpec"),
},
},
"version": {
SchemaProps: spec.SchemaProps{
Description: "TiDB cluster version",
Type: []string{"string"},
Format: "",
},
},
"schedulerName": {
SchemaProps: spec.SchemaProps{
Description: "SchedulerName of TiDB cluster Pods",
Type: []string{"string"},
Format: "",
},
},
"pvReclaimPolicy": {
SchemaProps: spec.SchemaProps{
Description: "Persistent volume reclaim policy applied to the PVs that consumed by TiDB cluster",
Type: []string{"string"},
Format: "",
},
},
"imagePullPolicy": {
SchemaProps: spec.SchemaProps{
Description: "ImagePullPolicy of TiDB cluster Pods",
Type: []string{"string"},
Format: "",
},
},
"configUpdateStrategy": {
SchemaProps: spec.SchemaProps{
Description: "ConfigUpdateStrategy determines how the configuration change is applied to the cluster. UpdateStrategyInPlace will update the ConfigMap of configuration in-place and an extra rolling-update of the cluster component is needed to reload the configuration change. UpdateStrategyRollingUpdate will create a new ConfigMap with the new configuration and rolling-update the related components to use the new ConfigMap, that is, the new configuration will be applied automatically.",
Type: []string{"string"},
Format: "",
},
},
"enablePVReclaim": {
SchemaProps: spec.SchemaProps{
Description: "Whether enable PVC reclaim for orphan PVC left by statefulset scale-in Optional: Defaults to false",
Type: []string{"boolean"},
Format: "",
},
},
"enableTLSCluster": {
SchemaProps: spec.SchemaProps{
Description: "Enable TLS connection between TiDB server components Optional: Defaults to false",
Type: []string{"boolean"},
Format: "",
},
},
"hostNetwork": {
SchemaProps: spec.SchemaProps{
Description: "Whether Hostnetwork is enabled for TiDB cluster Pods Optional: Defaults to false",
Type: []string{"boolean"},
Format: "",
},
},
"affinity": {
SchemaProps: spec.SchemaProps{
Description: "Affinity of TiDB cluster Pods",
Ref: ref("k8s.io/api/core/v1.Affinity"),
},
},
"priorityClassName": {
SchemaProps: spec.SchemaProps{
Description: "PriorityClassName of TiDB cluster Pods Optional: Defaults to omitted",
Type: []string{"string"},
Format: "",
},
},
"nodeSelector": {
SchemaProps: spec.SchemaProps{
Description: "Base node selectors of TiDB cluster Pods, components may add or override selectors upon this respectively",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"annotations": {
SchemaProps: spec.SchemaProps{
Description: "Base annotations of TiDB cluster Pods, components may add or override selectors upon this respectively",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"tolerations": {
SchemaProps: spec.SchemaProps{
Description: "Base tolerations of TiDB cluster Pods, components may add more tolerations upon this respectively",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.Toleration"),
},
},
},
},
},
"timezone": {
SchemaProps: spec.SchemaProps{
Description: "Time zone of TiDB cluster Pods Optional: Defaults to UTC",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"pd", "tidb", "tikv"},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.HelperSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PumpSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVSpec", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Toleration"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TidbInitializer(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TidbInitializer is a TiDB cluster initializing job",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec defines the desired state of TidbInitializer",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbInitializerSpec"),
},
},
},
Required: []string{"spec"},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbInitializerSpec"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TidbInitializerList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"items": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbInitializer"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbInitializer"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TidbInitializerSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TidbInitializer spec encode the desired state of tidb initializer Job",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"image": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"cluster": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterRef"),
},
},
"imagePullPolicy": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"permitHost": {
SchemaProps: spec.SchemaProps{
Description: "permitHost is the host which will only be allowed to connect to the TiDB.",
Type: []string{"string"},
Format: "",
},
},
"initSql": {
SchemaProps: spec.SchemaProps{
Description: "InitSql is the SQL statements executed after the TiDB cluster is bootstrapped.",
Type: []string{"string"},
Format: "",
},
},
"initSqlConfigMap": {
SchemaProps: spec.SchemaProps{
Description: "InitSqlConfigMapName reference a configmap that provide init-sql, take high precedence than initSql if set",
Type: []string{"string"},
Format: "",
},
},
"passwordSecret": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"resources": {
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.ResourceRequirements"),
},
},
"timezone": {
SchemaProps: spec.SchemaProps{
Description: "Time zone of TiDB initializer Pods",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"image", "cluster"},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterRef", "k8s.io/api/core/v1.ResourceRequirements"},
}
}
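
// Per the descriptions above, initSqlConfigMap takes precedence over initSql
// when both are set. A sketch of that selection (names are illustrative only):
//
//	func initSQLSource(initSql, initSqlConfigMap string) string {
//		if initSqlConfigMap != "" {
//			return "configmap:" + initSqlConfigMap
//		}
//		return initSql
//	}
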
func schema_pkg_apis_pingcap_v1alpha1_TidbInitializerStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"conditions": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "type",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "The latest available observations of an object's current state. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/batch/v1.JobCondition"),
},
},
},
},
},
"startTime": {
SchemaProps: spec.SchemaProps{
Description: "Represents time when the job was acknowledged by the job controller. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"completionTime": {
SchemaProps: spec.SchemaProps{
Description: "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"active": {
SchemaProps: spec.SchemaProps{
Description: "The number of actively running pods.",
Type: []string{"integer"},
Format: "int32",
},
},
"succeeded": {
SchemaProps: spec.SchemaProps{
Description: "The number of pods which reached phase Succeeded.",
Type: []string{"integer"},
Format: "int32",
},
},
"failed": {
SchemaProps: spec.SchemaProps{
Description: "The number of pods which reached phase Failed.",
Type: []string{"integer"},
Format: "int32",
},
},
"phase": {
SchemaProps: spec.SchemaProps{
Description: "Phase is a user readable state inferred from the underlying Job status and TidbCluster status",
Type: []string{"string"},
Format: "",
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/batch/v1.JobCondition", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
}
}
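
// The x-kubernetes-patch-merge-key extension on "conditions" above tells
// strategic merge patch to merge entries by their "type" field instead of
// replacing the whole array. A sketch of that merge rule (illustrative, not
// the apimachinery implementation; batchv1 is k8s.io/api/batch/v1):
//
//	func mergeByType(existing, patch []batchv1.JobCondition) []batchv1.JobCondition {
//		index := map[batchv1.JobConditionType]int{}
//		for i, c := range existing {
//			index[c.Type] = i
//		}
//		for _, p := range patch {
//			if i, ok := index[p.Type]; ok {
//				existing[i] = p
//			} else {
//				existing = append(existing, p)
//			}
//		}
//		return existing
//	}
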
func schema_pkg_apis_pingcap_v1alpha1_TidbMonitor(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TidbMonitor encode the spec and status of the monitoring component of a TiDB cluster",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec defines the desired state of TidbMonitor",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbMonitorSpec"),
},
},
},
Required: []string{"spec"},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbMonitorSpec"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TidbMonitorList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TidbMonitorList is TidbMonitor list",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"items": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbMonitor"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbMonitor"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TidbMonitorRef(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TidbMonitorRef reference to a TidbMonitor",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"namespace": {
SchemaProps: spec.SchemaProps{
Description: "Namespace is the namespace that TidbMonitor object locates, default to the same namespace with TidbClusterAutoScaler",
Type: []string{"string"},
Format: "",
},
},
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name is the name of TidbMonitor object",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"name"},
},
},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TidbMonitorSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TidbMonitor spec encode the desired state of tidb monitoring component",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"clusters": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterRef"),
},
},
},
},
},
"prometheus": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PrometheusSpec"),
},
},
"grafana": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.GrafanaSpec"),
},
},
"reloader": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ReloaderSpec"),
},
},
"initializer": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.InitializerSpec"),
},
},
"imagePullPolicy": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"persistent": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"storageClassName": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"storage": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"nodeSelector": {
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"annotations": {
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"tolerations": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.Toleration"),
},
},
},
},
},
"kubePrometheusURL": {
SchemaProps: spec.SchemaProps{
Description: "kubePrometheusURL is where tidb-monitoring get the common metrics of kube-prometheus. Ref: https://github.com/coreos/kube-prometheus",
Type: []string{"string"},
Format: "",
},
},
"alertmanagerURL": {
SchemaProps: spec.SchemaProps{
Description: "alertmanagerURL is where tidb-monitoring push alerts to. Ref: https://prometheus.io/docs/alerting/alertmanager/",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"clusters", "prometheus", "reloader", "initializer"},
},
},
Dependencies: []string{
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.GrafanaSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.InitializerSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PrometheusSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ReloaderSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterRef", "k8s.io/api/core/v1.Toleration"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TikvAutoScalerSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TikvAutoScalerSpec describes the spec for tikv auto-scaling",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"maxReplicas": {
SchemaProps: spec.SchemaProps{
Description: "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale out. It cannot be less than minReplicas.",
Type: []string{"integer"},
Format: "int32",
},
},
"minReplicas": {
SchemaProps: spec.SchemaProps{
Description: "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. Scaling is active as long as at least one metric value is available.",
Type: []string{"integer"},
Format: "int32",
},
},
"scaleInIntervalSeconds": {
SchemaProps: spec.SchemaProps{
Description: "ScaleInIntervalSeconds represents the duration seconds between each auto-scaling-in If not set, the default ScaleInIntervalSeconds will be set to 500",
Type: []string{"integer"},
Format: "int32",
},
},
"scaleOutIntervalSeconds": {
SchemaProps: spec.SchemaProps{
Description: "ScaleOutIntervalSeconds represents the duration seconds between each auto-scaling-out If not set, the default ScaleOutIntervalSeconds will be set to 300",
Type: []string{"integer"},
Format: "int32",
},
},
"metrics": {
SchemaProps: spec.SchemaProps{
Description: "metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond. If not set, the default metric will be set to 80% average CPU utilization.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/autoscaling/v2beta2.MetricSpec"),
},
},
},
},
},
"metricsTimeDuration": {
SchemaProps: spec.SchemaProps{
Description: "MetricsTimeDuration describe the Time duration to be queried in the Prometheus",
Type: []string{"string"},
Format: "",
},
},
"scaleOutThreshold": {
SchemaProps: spec.SchemaProps{
Description: "ScaleOutThreshold describe the consecutive threshold for the auto-scaling, if the consecutive counts of the scale-out result in auto-scaling reach this number, the auto-scaling would be performed. If not set, the default value is 3.",
Type: []string{"integer"},
Format: "int32",
},
},
"scaleInThreshold": {
SchemaProps: spec.SchemaProps{
Description: "ScaleInThreshold describe the consecutive threshold for the auto-scaling, if the consecutive counts of the scale-in result in auto-scaling reach this number, the auto-scaling would be performed. If not set, the default value is 5.",
Type: []string{"integer"},
Format: "int32",
},
},
},
Required: []string{"maxReplicas"},
},
},
Dependencies: []string{
"k8s.io/api/autoscaling/v2beta2.MetricSpec"},
}
}
func schema_pkg_apis_pingcap_v1alpha1_TxnLocalLatches(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TxnLocalLatches is the TxnLocalLatches section of the config.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"enabled": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
"capacity": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int32",
},
},
},
},
},
}
}
func schema_k8sio_api_core_v1_AWSElasticBlockStoreVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"volumeID": {
SchemaProps: spec.SchemaProps{
Description: "Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
Type: []string{"string"},
Format: "",
},
},
"fsType": {
SchemaProps: spec.SchemaProps{
Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
Type: []string{"string"},
Format: "",
},
},
"partition": {
SchemaProps: spec.SchemaProps{
Description: "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).",
Type: []string{"integer"},
Format: "int32",
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
Type: []string{"boolean"},
Format: "",
},
},
},
Required: []string{"volumeID"},
},
},
}
}
func schema_k8sio_api_core_v1_Affinity(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Affinity is a group of affinity scheduling rules.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"nodeAffinity": {
SchemaProps: spec.SchemaProps{
Description: "Describes node affinity scheduling rules for the pod.",
Ref: ref("k8s.io/api/core/v1.NodeAffinity"),
},
},
"podAffinity": {
SchemaProps: spec.SchemaProps{
Description: "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).",
Ref: ref("k8s.io/api/core/v1.PodAffinity"),
},
},
"podAntiAffinity": {
SchemaProps: spec.SchemaProps{
Description: "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).",
Ref: ref("k8s.io/api/core/v1.PodAntiAffinity"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.NodeAffinity", "k8s.io/api/core/v1.PodAffinity", "k8s.io/api/core/v1.PodAntiAffinity"},
}
}
func schema_k8sio_api_core_v1_AttachedVolume(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "AttachedVolume describes a volume attached to a node",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name of the attached volume",
Type: []string{"string"},
Format: "",
},
},
"devicePath": {
SchemaProps: spec.SchemaProps{
Description: "DevicePath represents the device path where the volume should be available",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"name", "devicePath"},
},
},
}
}
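// Slice-typed fields are generated as Type "array" with an Items wrapper,
// as in AvoidPods below: Items holds a *spec.SchemaOrArray whose Schema
// describes the element type. Rendered as JSON Schema this becomes,
// illustratively:
//
//	{"type": "array", "items": {"$ref": "#/definitions/..."}}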
func schema_k8sio_api_core_v1_AvoidPods(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "AvoidPods describes pods that should avoid this node. This is the value for a Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and will eventually become a field of NodeStatus.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"preferAvoidPods": {
SchemaProps: spec.SchemaProps{
Description: "Bounded-sized list of signatures of pods that should avoid this node, sorted in timestamp order from oldest to newest. Size of the slice is unspecified.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.PreferAvoidPodsEntry"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.PreferAvoidPodsEntry"},
}
}
func schema_k8sio_api_core_v1_AzureDiskVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"diskName": {
SchemaProps: spec.SchemaProps{
Description: "The Name of the data disk in the blob storage",
Type: []string{"string"},
Format: "",
},
},
"diskURI": {
SchemaProps: spec.SchemaProps{
Description: "The URI the data disk in the blob storage",
Type: []string{"string"},
Format: "",
},
},
"cachingMode": {
SchemaProps: spec.SchemaProps{
Description: "Host Caching mode: None, Read Only, Read Write.",
Type: []string{"string"},
Format: "",
},
},
"fsType": {
SchemaProps: spec.SchemaProps{
Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
Type: []string{"string"},
Format: "",
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
Type: []string{"boolean"},
Format: "",
},
},
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"diskName", "diskURI"},
},
},
}
}
func schema_k8sio_api_core_v1_AzureFilePersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"secretName": {
SchemaProps: spec.SchemaProps{
Description: "the name of secret that contains Azure Storage Account Name and Key",
Type: []string{"string"},
Format: "",
},
},
"shareName": {
SchemaProps: spec.SchemaProps{
Description: "Share Name",
Type: []string{"string"},
Format: "",
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
Type: []string{"boolean"},
Format: "",
},
},
"secretNamespace": {
SchemaProps: spec.SchemaProps{
Description: "the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"secretName", "shareName"},
},
},
}
}
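// AzureFileVolumeSource below is the pod-scoped counterpart of
// AzureFilePersistentVolumeSource above: the only difference is that it
// drops secretNamespace, because a pod-level source always reads the
// secret from the pod's own namespace.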
func schema_k8sio_api_core_v1_AzureFileVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"secretName": {
SchemaProps: spec.SchemaProps{
Description: "the name of secret that contains Azure Storage Account Name and Key",
Type: []string{"string"},
Format: "",
},
},
"shareName": {
SchemaProps: spec.SchemaProps{
Description: "Share Name",
Type: []string{"string"},
Format: "",
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
Type: []string{"boolean"},
Format: "",
},
},
},
Required: []string{"secretName", "shareName"},
},
},
}
}
func schema_k8sio_api_core_v1_Binding(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Binding ties one object to another; for example, a pod is bound to a node by a scheduler. Deprecated in 1.7, please use the bindings subresource of pods instead.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"target": {
SchemaProps: spec.SchemaProps{
Description: "The target object that you want to bind to the standard object.",
Ref: ref("k8s.io/api/core/v1.ObjectReference"),
},
},
},
Required: []string{"target"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
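// Map-typed fields such as volumeAttributes below are generated as Type
// "object" with AdditionalProperties describing the value schema. Rendered
// as JSON Schema this becomes, illustratively:
//
//	{"type": "object", "additionalProperties": {"type": "string"}}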
func schema_k8sio_api_core_v1_CSIPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents storage that is managed by an external CSI volume driver (Beta feature)",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"driver": {
SchemaProps: spec.SchemaProps{
Description: "Driver is the name of the driver to use for this volume. Required.",
Type: []string{"string"},
Format: "",
},
},
"volumeHandle": {
SchemaProps: spec.SchemaProps{
Description: "VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required.",
Type: []string{"string"},
Format: "",
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).",
Type: []string{"boolean"},
Format: "",
},
},
"fsType": {
SchemaProps: spec.SchemaProps{
Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\".",
Type: []string{"string"},
Format: "",
},
},
"volumeAttributes": {
SchemaProps: spec.SchemaProps{
Description: "Attributes of the volume to publish.",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"controllerPublishSecretRef": {
SchemaProps: spec.SchemaProps{
Description: "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
Ref: ref("k8s.io/api/core/v1.SecretReference"),
},
},
"nodeStageSecretRef": {
SchemaProps: spec.SchemaProps{
Description: "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
Ref: ref("k8s.io/api/core/v1.SecretReference"),
},
},
"nodePublishSecretRef": {
SchemaProps: spec.SchemaProps{
Description: "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
Ref: ref("k8s.io/api/core/v1.SecretReference"),
},
},
"controllerExpandSecretRef": {
SchemaProps: spec.SchemaProps{
Description: "ControllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This is an alpha field and requires enabling ExpandCSIVolumes feature gate. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
Ref: ref("k8s.io/api/core/v1.SecretReference"),
},
},
},
Required: []string{"driver", "volumeHandle"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.SecretReference"},
}
}
func schema_k8sio_api_core_v1_CSIVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents a source location of a volume to mount, managed by an external CSI driver",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"driver": {
SchemaProps: spec.SchemaProps{
Description: "Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.",
Type: []string{"string"},
Format: "",
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "Specifies a read-only configuration for the volume. Defaults to false (read/write).",
Type: []string{"boolean"},
Format: "",
},
},
"fsType": {
SchemaProps: spec.SchemaProps{
Description: "Filesystem type to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.",
Type: []string{"string"},
Format: "",
},
},
"volumeAttributes": {
SchemaProps: spec.SchemaProps{
Description: "VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"nodePublishSecretRef": {
SchemaProps: spec.SchemaProps{
Description: "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.",
Ref: ref("k8s.io/api/core/v1.LocalObjectReference"),
},
},
},
Required: []string{"driver"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.LocalObjectReference"},
}
}
func schema_k8sio_api_core_v1_Capabilities(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Adds and removes POSIX capabilities from running containers.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"add": {
SchemaProps: spec.SchemaProps{
Description: "Added capabilities",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"drop": {
SchemaProps: spec.SchemaProps{
Description: "Removed capabilities",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
},
},
}
}
func schema_k8sio_api_core_v1_CephFSPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"monitors": {
SchemaProps: spec.SchemaProps{
Description: "Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"path": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Used as the mounted root, rather than the full Ceph tree, default is /",
Type: []string{"string"},
Format: "",
},
},
"user": {
SchemaProps: spec.SchemaProps{
Description: "Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it",
Type: []string{"string"},
Format: "",
},
},
"secretFile": {
SchemaProps: spec.SchemaProps{
Description: "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it",
Type: []string{"string"},
Format: "",
},
},
"secretRef": {
SchemaProps: spec.SchemaProps{
Description: "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it",
Ref: ref("k8s.io/api/core/v1.SecretReference"),
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it",
Type: []string{"boolean"},
Format: "",
},
},
},
Required: []string{"monitors"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.SecretReference"},
}
}
func schema_k8sio_api_core_v1_CephFSVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"monitors": {
SchemaProps: spec.SchemaProps{
Description: "Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"path": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Used as the mounted root, rather than the full Ceph tree, default is /",
Type: []string{"string"},
Format: "",
},
},
"user": {
SchemaProps: spec.SchemaProps{
Description: "Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it",
Type: []string{"string"},
Format: "",
},
},
"secretFile": {
SchemaProps: spec.SchemaProps{
Description: "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it",
Type: []string{"string"},
Format: "",
},
},
"secretRef": {
SchemaProps: spec.SchemaProps{
Description: "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it",
Ref: ref("k8s.io/api/core/v1.LocalObjectReference"),
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it",
Type: []string{"boolean"},
Format: "",
},
},
},
Required: []string{"monitors"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.LocalObjectReference"},
}
}
func schema_k8sio_api_core_v1_CinderPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"volumeID": {
SchemaProps: spec.SchemaProps{
Description: "volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
Type: []string{"string"},
Format: "",
},
},
"fsType": {
SchemaProps: spec.SchemaProps{
Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
Type: []string{"string"},
Format: "",
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
Type: []string{"boolean"},
Format: "",
},
},
"secretRef": {
SchemaProps: spec.SchemaProps{
Description: "Optional: points to a secret object containing parameters used to connect to OpenStack.",
Ref: ref("k8s.io/api/core/v1.SecretReference"),
},
},
},
Required: []string{"volumeID"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.SecretReference"},
}
}
func schema_k8sio_api_core_v1_CinderVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"volumeID": {
SchemaProps: spec.SchemaProps{
Description: "volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
Type: []string{"string"},
Format: "",
},
},
"fsType": {
SchemaProps: spec.SchemaProps{
Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
Type: []string{"string"},
Format: "",
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
Type: []string{"boolean"},
Format: "",
},
},
"secretRef": {
SchemaProps: spec.SchemaProps{
Description: "Optional: points to a secret object containing parameters used to connect to OpenStack.",
Ref: ref("k8s.io/api/core/v1.LocalObjectReference"),
},
},
},
Required: []string{"volumeID"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.LocalObjectReference"},
}
}
func schema_k8sio_api_core_v1_ClientIPConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ClientIPConfig represents the configurations of Client IP based session affinity.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"timeoutSeconds": {
SchemaProps: spec.SchemaProps{
Description: "timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity == \"ClientIP\". Default value is 10800(for 3 hours).",
Type: []string{"integer"},
Format: "int32",
},
},
},
},
},
}
}
func schema_k8sio_api_core_v1_ComponentCondition(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Information about the condition of a component.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"type": {
SchemaProps: spec.SchemaProps{
Description: "Type of condition for a component. Valid value: \"Healthy\"",
Type: []string{"string"},
Format: "",
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Status of the condition for a component. Valid values for \"Healthy\": \"True\", \"False\", or \"Unknown\".",
Type: []string{"string"},
Format: "",
},
},
"message": {
SchemaProps: spec.SchemaProps{
Description: "Message about the condition for a component. For example, information about a health check.",
Type: []string{"string"},
Format: "",
},
},
"error": {
SchemaProps: spec.SchemaProps{
Description: "Condition error code for a component. For example, a health check error code.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"type", "status"},
},
},
}
}
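// Fields that support strategic merge patch carry a VendorExtensible block
// next to their SchemaProps, as on the conditions list below:
// "x-kubernetes-patch-merge-key" names the key used to match list elements
// and "x-kubernetes-patch-strategy" selects the merge behavior.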
func schema_k8sio_api_core_v1_ComponentStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ComponentStatus (and ComponentStatusList) holds the cluster validation info.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"conditions": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "type",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "List of component conditions observed",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.ComponentCondition"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ComponentCondition", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
func schema_k8sio_api_core_v1_ComponentStatusList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Status of all the conditions for the component as a list of ComponentStatus objects.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "List of ComponentStatus objects.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.ComponentStatus"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ComponentStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
func schema_k8sio_api_core_v1_ConfigMap(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ConfigMap holds configuration data for pods to consume.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"data": {
SchemaProps: spec.SchemaProps{
Description: "Data contains the configuration data. Each key must consist of alphanumeric characters, '-', '_' or '.'. Values with non-UTF-8 byte sequences must use the BinaryData field. The keys stored in Data must not overlap with the keys in the BinaryData field, this is enforced during validation process.",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"binaryData": {
SchemaProps: spec.SchemaProps{
Description: "BinaryData contains the binary data. Each key must consist of alphanumeric characters, '-', '_' or '.'. BinaryData can contain byte sequences that are not in the UTF-8 range. The keys stored in BinaryData must not overlap with the ones in the Data field, this is enforced during validation process. Using this field will require 1.10+ apiserver and kubelet.",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "byte",
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
func schema_k8sio_api_core_v1_ConfigMapEnvSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.\n\nThe contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
Type: []string{"string"},
Format: "",
},
},
"optional": {
SchemaProps: spec.SchemaProps{
Description: "Specify whether the ConfigMap must be defined",
Type: []string{"boolean"},
Format: "",
},
},
},
},
},
}
}
func schema_k8sio_api_core_v1_ConfigMapKeySelector(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Selects a key from a ConfigMap.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
Type: []string{"string"},
Format: "",
},
},
"key": {
SchemaProps: spec.SchemaProps{
Description: "The key to select.",
Type: []string{"string"},
Format: "",
},
},
"optional": {
SchemaProps: spec.SchemaProps{
Description: "Specify whether the ConfigMap or its key must be defined",
Type: []string{"boolean"},
Format: "",
},
},
},
Required: []string{"key"},
},
},
}
}
func schema_k8sio_api_core_v1_ConfigMapList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ConfigMapList is a resource containing a list of ConfigMap objects.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "Items is the list of ConfigMaps.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.ConfigMap"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ConfigMap", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
func schema_k8sio_api_core_v1_ConfigMapNodeConfigSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"namespace": {
SchemaProps: spec.SchemaProps{
Description: "Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases.",
Type: []string{"string"},
Format: "",
},
},
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name is the metadata.name of the referenced ConfigMap. This field is required in all cases.",
Type: []string{"string"},
Format: "",
},
},
"uid": {
SchemaProps: spec.SchemaProps{
Description: "UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.",
Type: []string{"string"},
Format: "",
},
},
"resourceVersion": {
SchemaProps: spec.SchemaProps{
Description: "ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.",
Type: []string{"string"},
Format: "",
},
},
"kubeletConfigKey": {
SchemaProps: spec.SchemaProps{
Description: "KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"namespace", "name", "kubeletConfigKey"},
},
},
}
}
func schema_k8sio_api_core_v1_ConfigMapProjection(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
Type: []string{"string"},
Format: "",
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.KeyToPath"),
},
},
},
},
},
"optional": {
SchemaProps: spec.SchemaProps{
Description: "Specify whether the ConfigMap or its keys must be defined",
Type: []string{"boolean"},
Format: "",
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.KeyToPath"},
}
}
func schema_k8sio_api_core_v1_ConfigMapVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
Type: []string{"string"},
Format: "",
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.KeyToPath"),
},
},
},
},
},
"defaultMode": {
SchemaProps: spec.SchemaProps{
Description: "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
Type: []string{"integer"},
Format: "int32",
},
},
"optional": {
SchemaProps: spec.SchemaProps{
Description: "Specify whether the ConfigMap or its keys must be defined",
Type: []string{"boolean"},
Format: "",
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.KeyToPath"},
}
}
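// The Container schema below also emits "x-kubernetes-list-type": "map"
// together with "x-kubernetes-list-map-keys" on its ports field, marking
// the list as a map keyed by (containerPort, protocol) so that server-side
// apply can merge entries by key rather than by position.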
func schema_k8sio_api_core_v1_Container(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "A single application container that you want to run within a pod.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.",
Type: []string{"string"},
Format: "",
},
},
"image": {
SchemaProps: spec.SchemaProps{
Description: "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.",
Type: []string{"string"},
Format: "",
},
},
"command": {
SchemaProps: spec.SchemaProps{
Description: "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"args": {
SchemaProps: spec.SchemaProps{
Description: "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"workingDir": {
SchemaProps: spec.SchemaProps{
Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
Type: []string{"string"},
Format: "",
},
},
"ports": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-map-keys": []interface{}{
"containerPort",
"protocol",
},
"x-kubernetes-list-type": "map",
"x-kubernetes-patch-merge-key": "containerPort",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.ContainerPort"),
},
},
},
},
},
"envFrom": {
SchemaProps: spec.SchemaProps{
Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.EnvFromSource"),
},
},
},
},
},
"env": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "name",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "List of environment variables to set in the container. Cannot be updated.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.EnvVar"),
},
},
},
},
},
"resources": {
SchemaProps: spec.SchemaProps{
Description: "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
Ref: ref("k8s.io/api/core/v1.ResourceRequirements"),
},
},
"volumeMounts": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "mountPath",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "Pod volumes to mount into the container's filesystem. Cannot be updated.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.VolumeMount"),
},
},
},
},
},
"volumeDevices": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "devicePath",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "volumeDevices is the list of block devices to be used by the container. This is a beta feature.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.VolumeDevice"),
},
},
},
},
},
"livenessProbe": {
SchemaProps: spec.SchemaProps{
Description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
Ref: ref("k8s.io/api/core/v1.Probe"),
},
},
"readinessProbe": {
SchemaProps: spec.SchemaProps{
Description: "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
Ref: ref("k8s.io/api/core/v1.Probe"),
},
},
"startupProbe": {
SchemaProps: spec.SchemaProps{
Description: "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is an alpha feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
Ref: ref("k8s.io/api/core/v1.Probe"),
},
},
"lifecycle": {
SchemaProps: spec.SchemaProps{
Description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated.",
Ref: ref("k8s.io/api/core/v1.Lifecycle"),
},
},
"terminationMessagePath": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.",
Type: []string{"string"},
Format: "",
},
},
"terminationMessagePolicy": {
SchemaProps: spec.SchemaProps{
Description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.",
Type: []string{"string"},
Format: "",
},
},
"imagePullPolicy": {
SchemaProps: spec.SchemaProps{
Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images",
Type: []string{"string"},
Format: "",
},
},
"securityContext": {
SchemaProps: spec.SchemaProps{
Description: "Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
Ref: ref("k8s.io/api/core/v1.SecurityContext"),
},
},
"stdin": {
SchemaProps: spec.SchemaProps{
Description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.",
Type: []string{"boolean"},
Format: "",
},
},
"stdinOnce": {
SchemaProps: spec.SchemaProps{
Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false",
Type: []string{"boolean"},
Format: "",
},
},
"tty": {
SchemaProps: spec.SchemaProps{
Description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.",
Type: []string{"boolean"},
Format: "",
},
},
},
Required: []string{"name"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"},
}
}
func schema_k8sio_api_core_v1_ContainerImage(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Describe a container image",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"names": {
SchemaProps: spec.SchemaProps{
Description: "Names by which this image is known. e.g. [\"k8s.gcr.io/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"sizeBytes": {
SchemaProps: spec.SchemaProps{
Description: "The size of the image in bytes.",
Type: []string{"integer"},
Format: "int64",
},
},
},
Required: []string{"names"},
},
},
}
}
func schema_k8sio_api_core_v1_ContainerPort(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ContainerPort represents a network port in a single container.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.",
Type: []string{"string"},
Format: "",
},
},
"hostPort": {
SchemaProps: spec.SchemaProps{
Description: "Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.",
Type: []string{"integer"},
Format: "int32",
},
},
"containerPort": {
SchemaProps: spec.SchemaProps{
Description: "Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.",
Type: []string{"integer"},
Format: "int32",
},
},
"protocol": {
SchemaProps: spec.SchemaProps{
Description: "Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\".",
Type: []string{"string"},
Format: "",
},
},
"hostIP": {
SchemaProps: spec.SchemaProps{
Description: "What host IP to bind the external port to.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"containerPort"},
},
},
}
}
func schema_k8sio_api_core_v1_ContainerState(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"waiting": {
SchemaProps: spec.SchemaProps{
Description: "Details about a waiting container",
Ref: ref("k8s.io/api/core/v1.ContainerStateWaiting"),
},
},
"running": {
SchemaProps: spec.SchemaProps{
Description: "Details about a running container",
Ref: ref("k8s.io/api/core/v1.ContainerStateRunning"),
},
},
"terminated": {
SchemaProps: spec.SchemaProps{
Description: "Details about a terminated container",
Ref: ref("k8s.io/api/core/v1.ContainerStateTerminated"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ContainerStateRunning", "k8s.io/api/core/v1.ContainerStateTerminated", "k8s.io/api/core/v1.ContainerStateWaiting"},
}
}
func schema_k8sio_api_core_v1_ContainerStateRunning(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ContainerStateRunning is a running state of a container.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"startedAt": {
SchemaProps: spec.SchemaProps{
Description: "Time at which the container was last (re-)started",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
}
}
func schema_k8sio_api_core_v1_ContainerStateTerminated(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ContainerStateTerminated is a terminated state of a container.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"exitCode": {
SchemaProps: spec.SchemaProps{
Description: "Exit status from the last termination of the container",
Type: []string{"integer"},
Format: "int32",
},
},
"signal": {
SchemaProps: spec.SchemaProps{
Description: "Signal from the last termination of the container",
Type: []string{"integer"},
Format: "int32",
},
},
"reason": {
SchemaProps: spec.SchemaProps{
Description: "(brief) reason from the last termination of the container",
Type: []string{"string"},
Format: "",
},
},
"message": {
SchemaProps: spec.SchemaProps{
Description: "Message regarding the last termination of the container",
Type: []string{"string"},
Format: "",
},
},
"startedAt": {
SchemaProps: spec.SchemaProps{
Description: "Time at which previous execution of the container started",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"finishedAt": {
SchemaProps: spec.SchemaProps{
Description: "Time at which the container last terminated",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"containerID": {
SchemaProps: spec.SchemaProps{
Description: "Container's ID in the format 'docker://<container_id>'",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"exitCode"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
}
}
func schema_k8sio_api_core_v1_ContainerStateWaiting(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ContainerStateWaiting is a waiting state of a container.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"reason": {
SchemaProps: spec.SchemaProps{
Description: "(brief) reason the container is not yet running.",
Type: []string{"string"},
Format: "",
},
},
"message": {
SchemaProps: spec.SchemaProps{
Description: "Message regarding why the container is not yet running.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_k8sio_api_core_v1_ContainerStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ContainerStatus contains details for the current status of this container.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated.",
Type: []string{"string"},
Format: "",
},
},
"state": {
SchemaProps: spec.SchemaProps{
Description: "Details about the container's current condition.",
Ref: ref("k8s.io/api/core/v1.ContainerState"),
},
},
"lastState": {
SchemaProps: spec.SchemaProps{
Description: "Details about the container's last termination condition.",
Ref: ref("k8s.io/api/core/v1.ContainerState"),
},
},
"ready": {
SchemaProps: spec.SchemaProps{
Description: "Specifies whether the container has passed its readiness probe.",
Type: []string{"boolean"},
Format: "",
},
},
"restartCount": {
SchemaProps: spec.SchemaProps{
Description: "The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC.",
Type: []string{"integer"},
Format: "int32",
},
},
"image": {
SchemaProps: spec.SchemaProps{
Description: "The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images",
Type: []string{"string"},
Format: "",
},
},
"imageID": {
SchemaProps: spec.SchemaProps{
Description: "ImageID of the container's image.",
Type: []string{"string"},
Format: "",
},
},
"containerID": {
SchemaProps: spec.SchemaProps{
Description: "Container's ID in the format 'docker://<container_id>'.",
Type: []string{"string"},
Format: "",
},
},
"started": {
SchemaProps: spec.SchemaProps{
Description: "Specifies whether the container has passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. Is always true when no startupProbe is defined.",
Type: []string{"boolean"},
Format: "",
},
},
},
Required: []string{"name", "ready", "restartCount", "image", "imageID"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ContainerState"},
}
}
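// Note that "state" and "lastState" above both reference
// k8s.io/api/core/v1.ContainerState, yet Dependencies lists it only once: the
// generator de-duplicates. A hypothetical check of that invariant:
func dependenciesAreUnique(def common.OpenAPIDefinition) bool {
	seen := make(map[string]bool, len(def.Dependencies))
	for _, dep := range def.Dependencies {
		if seen[dep] {
			return false
		}
		seen[dep] = true
	}
	return true
}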
func schema_k8sio_api_core_v1_DaemonEndpoint(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "DaemonEndpoint contains information about a single Daemon endpoint.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"Port": {
SchemaProps: spec.SchemaProps{
Description: "Port number of the given endpoint.",
Type: []string{"integer"},
Format: "int32",
},
},
},
Required: []string{"Port"},
},
},
}
}
func schema_k8sio_api_core_v1_DownwardAPIProjection(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"items": {
SchemaProps: spec.SchemaProps{
Description: "Items is a list of DownwardAPIVolume file",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeFile"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.DownwardAPIVolumeFile"},
}
}
func schema_k8sio_api_core_v1_DownwardAPIVolumeFile(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "DownwardAPIVolumeFile represents information to create the file containing the pod field",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"path": {
SchemaProps: spec.SchemaProps{
Description: "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'",
Type: []string{"string"},
Format: "",
},
},
"fieldRef": {
SchemaProps: spec.SchemaProps{
Description: "Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.",
Ref: ref("k8s.io/api/core/v1.ObjectFieldSelector"),
},
},
"resourceFieldRef": {
SchemaProps: spec.SchemaProps{
Description: "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.",
Ref: ref("k8s.io/api/core/v1.ResourceFieldSelector"),
},
},
"mode": {
SchemaProps: spec.SchemaProps{
Description: "Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
Type: []string{"integer"},
Format: "int32",
},
},
},
Required: []string{"path"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ObjectFieldSelector", "k8s.io/api/core/v1.ResourceFieldSelector"},
}
}
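// The "mode" property above is an int32 carrying POSIX permission bits, so
// the familiar 0644 appears as decimal 420 in JSON manifests. A tiny
// illustration (modeBitsExample is hypothetical):
func modeBitsExample() int32 {
	const mode int32 = 0644 // serialized as 420; within the allowed 0..0777 range
	return mode
}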
func schema_k8sio_api_core_v1_DownwardAPIVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"items": {
SchemaProps: spec.SchemaProps{
Description: "Items is a list of downward API volume file",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeFile"),
},
},
},
},
},
"defaultMode": {
SchemaProps: spec.SchemaProps{
Description: "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
Type: []string{"integer"},
Format: "int32",
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.DownwardAPIVolumeFile"},
}
}
func schema_k8sio_api_core_v1_EmptyDirVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"medium": {
SchemaProps: spec.SchemaProps{
Description: "What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir",
Type: []string{"string"},
Format: "",
},
},
"sizeLimit": {
SchemaProps: spec.SchemaProps{
Description: "Total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir",
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/api/resource.Quantity"},
}
}
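// Per the sizeLimit description above, the effective cap for a memory-medium
// EmptyDir is the smaller of the sizeLimit and the pod's summed memory
// limits. A minimal sketch over plain byte counts (effectiveMemoryCap is
// hypothetical; the kubelet works with resource.Quantity values instead):
func effectiveMemoryCap(sizeLimitBytes, podMemoryLimitBytes int64) int64 {
	if sizeLimitBytes < podMemoryLimitBytes {
		return sizeLimitBytes
	}
	return podMemoryLimitBytes
}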
func schema_k8sio_api_core_v1_EndpointAddress(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "EndpointAddress is a tuple that describes single IP address.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"ip": {
SchemaProps: spec.SchemaProps{
Description: "The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready.",
Type: []string{"string"},
Format: "",
},
},
"hostname": {
SchemaProps: spec.SchemaProps{
Description: "The Hostname of this endpoint",
Type: []string{"string"},
Format: "",
},
},
"nodeName": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.",
Type: []string{"string"},
Format: "",
},
},
"targetRef": {
SchemaProps: spec.SchemaProps{
Description: "Reference to object providing the endpoint.",
Ref: ref("k8s.io/api/core/v1.ObjectReference"),
},
},
},
Required: []string{"ip"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ObjectReference"},
}
}
func schema_k8sio_api_core_v1_EndpointPort(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "EndpointPort is a tuple that describes a single port.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.",
Type: []string{"string"},
Format: "",
},
},
"port": {
SchemaProps: spec.SchemaProps{
Description: "The port number of the endpoint.",
Type: []string{"integer"},
Format: "int32",
},
},
"protocol": {
SchemaProps: spec.SchemaProps{
Description: "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"port"},
},
},
}
}
func schema_k8sio_api_core_v1_EndpointSubset(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n }\nThe resulting set of endpoints can be viewed as:\n a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n b: [ 10.10.1.1:309, 10.10.2.2:309 ]",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"addresses": {
SchemaProps: spec.SchemaProps{
Description: "IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.EndpointAddress"),
},
},
},
},
},
"notReadyAddresses": {
SchemaProps: spec.SchemaProps{
Description: "IP addresses which offer the related ports but are not currently marked as ready because they have not yet finished starting, have recently failed a readiness check, or have recently failed a liveness check.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.EndpointAddress"),
},
},
},
},
},
"ports": {
SchemaProps: spec.SchemaProps{
Description: "Port numbers available on the related IP addresses.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.EndpointPort"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.EndpointAddress", "k8s.io/api/core/v1.EndpointPort"},
}
}
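// The description above defines the expanded endpoint set as the Cartesian
// product Addresses x Ports. A small sketch of that expansion over plain
// strings (expandSubset is hypothetical and ignores the ready/not-ready
// address buckets):
func expandSubset(ips, ports []string) []string {
	var endpoints []string
	for _, ip := range ips {
		for _, port := range ports {
			endpoints = append(endpoints, ip+":"+port)
		}
	}
	return endpoints
}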
func schema_k8sio_api_core_v1_Endpoints(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Endpoints is a collection of endpoints that implement the actual service. Example:\n Name: \"mysvc\",\n Subsets: [\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n },\n {\n Addresses: [{\"ip\": \"10.10.3.3\"}],\n Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n },\n ]",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"subsets": {
SchemaProps: spec.SchemaProps{
Description: "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.EndpointSubset"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.EndpointSubset", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
func schema_k8sio_api_core_v1_EndpointsList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "EndpointsList is a list of endpoints.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "List of endpoints.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.Endpoints"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.Endpoints", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
func schema_k8sio_api_core_v1_EnvFromSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "EnvFromSource represents the source of a set of ConfigMaps",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"prefix": {
SchemaProps: spec.SchemaProps{
Description: "An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.",
Type: []string{"string"},
Format: "",
},
},
"configMapRef": {
SchemaProps: spec.SchemaProps{
Description: "The ConfigMap to select from",
Ref: ref("k8s.io/api/core/v1.ConfigMapEnvSource"),
},
},
"secretRef": {
SchemaProps: spec.SchemaProps{
Description: "The Secret to select from",
Ref: ref("k8s.io/api/core/v1.SecretEnvSource"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ConfigMapEnvSource", "k8s.io/api/core/v1.SecretEnvSource"},
}
}
func schema_k8sio_api_core_v1_EnvVar(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "EnvVar represents an environment variable present in a Container.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name of the environment variable. Must be a C_IDENTIFIER.",
Type: []string{"string"},
Format: "",
},
},
"value": {
SchemaProps: spec.SchemaProps{
Description: "Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".",
Type: []string{"string"},
Format: "",
},
},
"valueFrom": {
SchemaProps: spec.SchemaProps{
Description: "Source for the environment variable's value. Cannot be used if value is not empty.",
Ref: ref("k8s.io/api/core/v1.EnvVarSource"),
},
},
},
Required: []string{"name"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.EnvVarSource"},
}
}
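// The "value" description above specifies $(VAR_NAME) expansion with $$ as
// the escape and unresolvable references left unchanged. A rough sketch of
// those semantics (expandEnvValue and indexByteFrom are hypothetical; the
// real logic lives in Kubernetes' expansion package and covers more edge
// cases):
func expandEnvValue(s string, vars map[string]string) string {
	var out []byte
	for i := 0; i < len(s); i++ {
		if s[i] == '$' && i+1 < len(s) && s[i+1] == '$' {
			// "$$" escapes: emit one literal '$' and never expand what follows.
			out = append(out, '$')
			i++
			continue
		}
		if s[i] == '$' && i+1 < len(s) && s[i+1] == '(' {
			if end := indexByteFrom(s, i+2, ')'); end >= 0 {
				if v, ok := vars[s[i+2:end]]; ok {
					out = append(out, v...)
					i = end
					continue
				}
				// Unresolvable reference: fall through and keep it unchanged.
			}
		}
		out = append(out, s[i])
	}
	return string(out)
}
func indexByteFrom(s string, from int, c byte) int {
	for j := from; j < len(s); j++ {
		if s[j] == c {
			return j
		}
	}
	return -1
}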
func schema_k8sio_api_core_v1_EnvVarSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "EnvVarSource represents a source for the value of an EnvVar.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"fieldRef": {
SchemaProps: spec.SchemaProps{
Description: "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.",
Ref: ref("k8s.io/api/core/v1.ObjectFieldSelector"),
},
},
"resourceFieldRef": {
SchemaProps: spec.SchemaProps{
Description: "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.",
Ref: ref("k8s.io/api/core/v1.ResourceFieldSelector"),
},
},
"configMapKeyRef": {
SchemaProps: spec.SchemaProps{
Description: "Selects a key of a ConfigMap.",
Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"),
},
},
"secretKeyRef": {
SchemaProps: spec.SchemaProps{
Description: "Selects a key of a secret in the pod's namespace",
Ref: ref("k8s.io/api/core/v1.SecretKeySelector"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ConfigMapKeySelector", "k8s.io/api/core/v1.ObjectFieldSelector", "k8s.io/api/core/v1.ResourceFieldSelector", "k8s.io/api/core/v1.SecretKeySelector"},
}
}
func schema_k8sio_api_core_v1_EphemeralContainer(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers.",
Type: []string{"string"},
Format: "",
},
},
"image": {
SchemaProps: spec.SchemaProps{
Description: "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images",
Type: []string{"string"},
Format: "",
},
},
"command": {
SchemaProps: spec.SchemaProps{
Description: "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"args": {
SchemaProps: spec.SchemaProps{
Description: "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"workingDir": {
SchemaProps: spec.SchemaProps{
Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
Type: []string{"string"},
Format: "",
},
},
"ports": {
SchemaProps: spec.SchemaProps{
Description: "Ports are not allowed for ephemeral containers.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.ContainerPort"),
},
},
},
},
},
"envFrom": {
SchemaProps: spec.SchemaProps{
Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.EnvFromSource"),
},
},
},
},
},
"env": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "name",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "List of environment variables to set in the container. Cannot be updated.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.EnvVar"),
},
},
},
},
},
"resources": {
SchemaProps: spec.SchemaProps{
Description: "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.",
Ref: ref("k8s.io/api/core/v1.ResourceRequirements"),
},
},
"volumeMounts": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "mountPath",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "Pod volumes to mount into the container's filesystem. Cannot be updated.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.VolumeMount"),
},
},
},
},
},
"volumeDevices": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "devicePath",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "volumeDevices is the list of block devices to be used by the container. This is a beta feature.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.VolumeDevice"),
},
},
},
},
},
"livenessProbe": {
SchemaProps: spec.SchemaProps{
Description: "Probes are not allowed for ephemeral containers.",
Ref: ref("k8s.io/api/core/v1.Probe"),
},
},
"readinessProbe": {
SchemaProps: spec.SchemaProps{
Description: "Probes are not allowed for ephemeral containers.",
Ref: ref("k8s.io/api/core/v1.Probe"),
},
},
"startupProbe": {
SchemaProps: spec.SchemaProps{
Description: "Probes are not allowed for ephemeral containers.",
Ref: ref("k8s.io/api/core/v1.Probe"),
},
},
"lifecycle": {
SchemaProps: spec.SchemaProps{
Description: "Lifecycle is not allowed for ephemeral containers.",
Ref: ref("k8s.io/api/core/v1.Lifecycle"),
},
},
"terminationMessagePath": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.",
Type: []string{"string"},
Format: "",
},
},
"terminationMessagePolicy": {
SchemaProps: spec.SchemaProps{
Description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.",
Type: []string{"string"},
Format: "",
},
},
"imagePullPolicy": {
SchemaProps: spec.SchemaProps{
Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images",
Type: []string{"string"},
Format: "",
},
},
"securityContext": {
SchemaProps: spec.SchemaProps{
Description: "SecurityContext is not allowed for ephemeral containers.",
Ref: ref("k8s.io/api/core/v1.SecurityContext"),
},
},
"stdin": {
SchemaProps: spec.SchemaProps{
Description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.",
Type: []string{"boolean"},
Format: "",
},
},
"stdinOnce": {
SchemaProps: spec.SchemaProps{
Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false",
Type: []string{"boolean"},
Format: "",
},
},
"tty": {
SchemaProps: spec.SchemaProps{
Description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.",
Type: []string{"boolean"},
Format: "",
},
},
"targetContainerName": {
SchemaProps: spec.SchemaProps{
Description: "If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container is run in whatever namespaces are shared for the pod. Note that the container runtime must support this feature.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"name"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"},
}
}
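// The "env", "volumeMounts", and "volumeDevices" properties above carry
// x-kubernetes-patch-merge-key / x-kubernetes-patch-strategy vendor
// extensions that drive strategic-merge patching. A hypothetical lookup of a
// property's merge key from a built definition:
func patchMergeKey(def common.OpenAPIDefinition, property string) (string, bool) {
	prop, ok := def.Schema.SchemaProps.Properties[property]
	if !ok {
		return "", false
	}
	v, ok := prop.VendorExtensible.Extensions["x-kubernetes-patch-merge-key"]
	if !ok {
		return "", false
	}
	s, ok := v.(string)
	return s, ok
}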
func schema_k8sio_api_core_v1_EphemeralContainerCommon(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "EphemeralContainerCommon is a copy of all fields in Container to be inlined in EphemeralContainer. This separate type allows easy conversion from EphemeralContainer to Container and allows separate documentation for the fields of EphemeralContainer. When a new field is added to Container it must be added here as well.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers.",
Type: []string{"string"},
Format: "",
},
},
"image": {
SchemaProps: spec.SchemaProps{
Description: "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images",
Type: []string{"string"},
Format: "",
},
},
"command": {
SchemaProps: spec.SchemaProps{
Description: "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"args": {
SchemaProps: spec.SchemaProps{
Description: "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"workingDir": {
SchemaProps: spec.SchemaProps{
Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
Type: []string{"string"},
Format: "",
},
},
"ports": {
SchemaProps: spec.SchemaProps{
Description: "Ports are not allowed for ephemeral containers.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.ContainerPort"),
},
},
},
},
},
"envFrom": {
SchemaProps: spec.SchemaProps{
Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.EnvFromSource"),
},
},
},
},
},
"env": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "name",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "List of environment variables to set in the container. Cannot be updated.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.EnvVar"),
},
},
},
},
},
"resources": {
SchemaProps: spec.SchemaProps{
Description: "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.",
Ref: ref("k8s.io/api/core/v1.ResourceRequirements"),
},
},
"volumeMounts": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "mountPath",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "Pod volumes to mount into the container's filesystem. Cannot be updated.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.VolumeMount"),
},
},
},
},
},
"volumeDevices": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "devicePath",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "volumeDevices is the list of block devices to be used by the container. This is a beta feature.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.VolumeDevice"),
},
},
},
},
},
"livenessProbe": {
SchemaProps: spec.SchemaProps{
Description: "Probes are not allowed for ephemeral containers.",
Ref: ref("k8s.io/api/core/v1.Probe"),
},
},
"readinessProbe": {
SchemaProps: spec.SchemaProps{
Description: "Probes are not allowed for ephemeral containers.",
Ref: ref("k8s.io/api/core/v1.Probe"),
},
},
"startupProbe": {
SchemaProps: spec.SchemaProps{
Description: "Probes are not allowed for ephemeral containers.",
Ref: ref("k8s.io/api/core/v1.Probe"),
},
},
"lifecycle": {
SchemaProps: spec.SchemaProps{
Description: "Lifecycle is not allowed for ephemeral containers.",
Ref: ref("k8s.io/api/core/v1.Lifecycle"),
},
},
"terminationMessagePath": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.",
Type: []string{"string"},
Format: "",
},
},
"terminationMessagePolicy": {
SchemaProps: spec.SchemaProps{
Description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.",
Type: []string{"string"},
Format: "",
},
},
"imagePullPolicy": {
SchemaProps: spec.SchemaProps{
Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images",
Type: []string{"string"},
Format: "",
},
},
"securityContext": {
SchemaProps: spec.SchemaProps{
Description: "SecurityContext is not allowed for ephemeral containers.",
Ref: ref("k8s.io/api/core/v1.SecurityContext"),
},
},
"stdin": {
SchemaProps: spec.SchemaProps{
Description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.",
Type: []string{"boolean"},
Format: "",
},
},
"stdinOnce": {
SchemaProps: spec.SchemaProps{
Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false",
Type: []string{"boolean"},
Format: "",
},
},
"tty": {
SchemaProps: spec.SchemaProps{
Description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.",
Type: []string{"boolean"},
Format: "",
},
},
},
Required: []string{"name"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"},
}
}
func schema_k8sio_api_core_v1_EphemeralContainers(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "A list of ephemeral containers used with the Pod ephemeralcontainers subresource.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"ephemeralContainers": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "name",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "A list of ephemeral containers associated with this pod. New ephemeral containers may be appended to this list, but existing ephemeral containers may not be removed or modified.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.EphemeralContainer"),
},
},
},
},
},
},
Required: []string{"ephemeralContainers"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.EphemeralContainer", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
func schema_k8sio_api_core_v1_Event(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Event is a report of an event somewhere in the cluster.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"involvedObject": {
SchemaProps: spec.SchemaProps{
Description: "The object that this event is about.",
Ref: ref("k8s.io/api/core/v1.ObjectReference"),
},
},
"reason": {
SchemaProps: spec.SchemaProps{
Description: "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.",
Type: []string{"string"},
Format: "",
},
},
"message": {
SchemaProps: spec.SchemaProps{
Description: "A human-readable description of the status of this operation.",
Type: []string{"string"},
Format: "",
},
},
"source": {
SchemaProps: spec.SchemaProps{
Description: "The component reporting this event. Should be a short machine understandable string.",
Ref: ref("k8s.io/api/core/v1.EventSource"),
},
},
"firstTimestamp": {
SchemaProps: spec.SchemaProps{
Description: "The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"lastTimestamp": {
SchemaProps: spec.SchemaProps{
Description: "The time at which the most recent occurrence of this event was recorded.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"count": {
SchemaProps: spec.SchemaProps{
Description: "The number of times this event has occurred.",
Type: []string{"integer"},
Format: "int32",
},
},
"type": {
SchemaProps: spec.SchemaProps{
Description: "Type of this event (Normal, Warning), new types could be added in the future",
Type: []string{"string"},
Format: "",
},
},
"eventTime": {
SchemaProps: spec.SchemaProps{
Description: "Time when this Event was first observed.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"),
},
},
"series": {
SchemaProps: spec.SchemaProps{
Description: "Data about the Event series this event represents or nil if it's a singleton Event.",
Ref: ref("k8s.io/api/core/v1.EventSeries"),
},
},
"action": {
SchemaProps: spec.SchemaProps{
Description: "What action was taken/failed regarding to the Regarding object.",
Type: []string{"string"},
Format: "",
},
},
"related": {
SchemaProps: spec.SchemaProps{
Description: "Optional secondary object for more complex actions.",
Ref: ref("k8s.io/api/core/v1.ObjectReference"),
},
},
"reportingComponent": {
SchemaProps: spec.SchemaProps{
Description: "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.",
Type: []string{"string"},
Format: "",
},
},
"reportingInstance": {
SchemaProps: spec.SchemaProps{
Description: "ID of the controller instance, e.g. `kubelet-xyzf`.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"metadata", "involvedObject"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.EventSeries", "k8s.io/api/core/v1.EventSource", "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
}
}
func schema_k8sio_api_core_v1_EventList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "EventList is a list of events.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "List of events",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.Event"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.Event", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
func schema_k8sio_api_core_v1_EventSeries(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"count": {
SchemaProps: spec.SchemaProps{
Description: "Number of occurrences in this series up to the last heartbeat time",
Type: []string{"integer"},
Format: "int32",
},
},
"lastObservedTime": {
SchemaProps: spec.SchemaProps{
Description: "Time of the last occurrence observed",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"),
},
},
"state": {
SchemaProps: spec.SchemaProps{
Description: "State of this Series: Ongoing or Finished Deprecated. Planned removal for 1.18",
Type: []string{"string"},
Format: "",
},
},
},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"},
}
}
func schema_k8sio_api_core_v1_EventSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "EventSource contains information for an event.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"component": {
SchemaProps: spec.SchemaProps{
Description: "Component from which the event is generated.",
Type: []string{"string"},
Format: "",
},
},
"host": {
SchemaProps: spec.SchemaProps{
Description: "Node name on which the event is generated.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_k8sio_api_core_v1_ExecAction(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ExecAction describes a \"run in container\" action.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"command": {
SchemaProps: spec.SchemaProps{
Description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
},
},
}
}
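// Per the ExecAction description, probe health derives purely from the exit
// status: zero is live/healthy, anything else is unhealthy. Trivial sketch
// (execHealthy is hypothetical):
func execHealthy(exitCode int) bool {
	return exitCode == 0
}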
func schema_k8sio_api_core_v1_FCVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"targetWWNs": {
SchemaProps: spec.SchemaProps{
Description: "Optional: FC target worldwide names (WWNs)",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"lun": {
SchemaProps: spec.SchemaProps{
Description: "Optional: FC target lun number",
Type: []string{"integer"},
Format: "int32",
},
},
"fsType": {
SchemaProps: spec.SchemaProps{
Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
Type: []string{"string"},
Format: "",
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
Type: []string{"boolean"},
Format: "",
},
},
"wwids": {
SchemaProps: spec.SchemaProps{
Description: "Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
},
},
}
}
func schema_k8sio_api_core_v1_FlexPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"driver": {
SchemaProps: spec.SchemaProps{
Description: "Driver is the name of the driver to use for this volume.",
Type: []string{"string"},
Format: "",
},
},
"fsType": {
SchemaProps: spec.SchemaProps{
Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.",
Type: []string{"string"},
Format: "",
},
},
"secretRef": {
SchemaProps: spec.SchemaProps{
Description: "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.",
Ref: ref("k8s.io/api/core/v1.SecretReference"),
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
Type: []string{"boolean"},
Format: "",
},
},
"options": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Extra command options if any.",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
Required: []string{"driver"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.SecretReference"},
}
}
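// The "options" property above is how a Go map[string]string renders in
// OpenAPI: type "object" with string-typed AdditionalProperties. A
// hypothetical shape check:
func isStringMap(def common.OpenAPIDefinition, property string) bool {
	prop, ok := def.Schema.SchemaProps.Properties[property]
	if !ok || prop.SchemaProps.AdditionalProperties == nil {
		return false
	}
	s := prop.SchemaProps.AdditionalProperties.Schema
	return s != nil && len(s.SchemaProps.Type) == 1 && s.SchemaProps.Type[0] == "string"
}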
func schema_k8sio_api_core_v1_FlexVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"driver": {
SchemaProps: spec.SchemaProps{
Description: "Driver is the name of the driver to use for this volume.",
Type: []string{"string"},
Format: "",
},
},
"fsType": {
SchemaProps: spec.SchemaProps{
Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.",
Type: []string{"string"},
Format: "",
},
},
"secretRef": {
SchemaProps: spec.SchemaProps{
Description: "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.",
Ref: ref("k8s.io/api/core/v1.LocalObjectReference"),
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
Type: []string{"boolean"},
Format: "",
},
},
"options": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Extra command options if any.",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
Required: []string{"driver"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.LocalObjectReference"},
}
}
func schema_k8sio_api_core_v1_FlockerVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"datasetName": {
SchemaProps: spec.SchemaProps{
Description: "Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated",
Type: []string{"string"},
Format: "",
},
},
"datasetUUID": {
SchemaProps: spec.SchemaProps{
Description: "UUID of the dataset. This is unique identifier of a Flocker dataset",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_k8sio_api_core_v1_GCEPersistentDiskVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"pdName": {
SchemaProps: spec.SchemaProps{
Description: "Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
Type: []string{"string"},
Format: "",
},
},
"fsType": {
SchemaProps: spec.SchemaProps{
Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
Type: []string{"string"},
Format: "",
},
},
"partition": {
SchemaProps: spec.SchemaProps{
Description: "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
Type: []string{"integer"},
Format: "int32",
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
Type: []string{"boolean"},
Format: "",
},
},
},
Required: []string{"pdName"},
},
},
}
}
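// Note: gitRepo volumes are deprecated; the illustrative fragment below
// (hypothetical repository URL and revision) shows the stanza that the
// GitRepoVolumeSource schema below still validates for backward compatibility.
//
//	volumes:
//	- name: source
//	  gitRepo:
//	    repository: "https://example.com/repo.git"
//	    revision: "abc123"
//	    directory: "."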
func schema_k8sio_api_core_v1_GitRepoVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"repository": {
SchemaProps: spec.SchemaProps{
Description: "Repository URL",
Type: []string{"string"},
Format: "",
},
},
"revision": {
SchemaProps: spec.SchemaProps{
Description: "Commit hash for the specified revision.",
Type: []string{"string"},
Format: "",
},
},
"directory": {
SchemaProps: spec.SchemaProps{
Description: "Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"repository"},
},
},
}
}
func schema_k8sio_api_core_v1_GlusterfsPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"endpoints": {
SchemaProps: spec.SchemaProps{
Description: "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
Type: []string{"string"},
Format: "",
},
},
"path": {
SchemaProps: spec.SchemaProps{
Description: "Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
Type: []string{"string"},
Format: "",
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
Type: []string{"boolean"},
Format: "",
},
},
"endpointsNamespace": {
SchemaProps: spec.SchemaProps{
Description: "EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"endpoints", "path"},
},
},
}
}
func schema_k8sio_api_core_v1_GlusterfsVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"endpoints": {
SchemaProps: spec.SchemaProps{
Description: "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
Type: []string{"string"},
Format: "",
},
},
"path": {
SchemaProps: spec.SchemaProps{
Description: "Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
Type: []string{"string"},
Format: "",
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
Type: []string{"boolean"},
Format: "",
},
},
},
Required: []string{"endpoints", "path"},
},
},
}
}
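// Note: an illustrative liveness probe (hypothetical path and port) matching
// the HTTPGetAction schema below; port may be an integer or an IANA_SVC_NAME
// string, hence the IntOrString reference.
//
//	livenessProbe:
//	  httpGet:
//	    path: /healthz
//	    port: 8080
//	    scheme: HTTP
//	    httpHeaders:
//	    - name: X-Probe
//	      value: "1"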
func schema_k8sio_api_core_v1_HTTPGetAction(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "HTTPGetAction describes an action based on HTTP Get requests.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"path": {
SchemaProps: spec.SchemaProps{
Description: "Path to access on the HTTP server.",
Type: []string{"string"},
Format: "",
},
},
"port": {
SchemaProps: spec.SchemaProps{
Description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.",
Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"),
},
},
"host": {
SchemaProps: spec.SchemaProps{
Description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.",
Type: []string{"string"},
Format: "",
},
},
"scheme": {
SchemaProps: spec.SchemaProps{
Description: "Scheme to use for connecting to the host. Defaults to HTTP.",
Type: []string{"string"},
Format: "",
},
},
"httpHeaders": {
SchemaProps: spec.SchemaProps{
Description: "Custom headers to set in the request. HTTP allows repeated headers.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.HTTPHeader"),
},
},
},
},
},
},
Required: []string{"port"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.HTTPHeader", "k8s.io/apimachinery/pkg/util/intstr.IntOrString"},
}
}
func schema_k8sio_api_core_v1_HTTPHeader(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "HTTPHeader describes a custom header to be used in HTTP probes",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "The header field name",
Type: []string{"string"},
Format: "",
},
},
"value": {
SchemaProps: spec.SchemaProps{
Description: "The header field value",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"name", "value"},
},
},
}
}
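// Note: Handler is effectively a one-of: exactly one of exec, httpGet, or
// tcpSocket should be set. Illustrative exec form (hypothetical command):
//
//	exec:
//	  command: ["sh", "-c", "rm -f /tmp/ready"]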
func schema_k8sio_api_core_v1_Handler(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Handler defines a specific action that should be taken",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"exec": {
SchemaProps: spec.SchemaProps{
Description: "One and only one of the following should be specified. Exec specifies the action to take.",
Ref: ref("k8s.io/api/core/v1.ExecAction"),
},
},
"httpGet": {
SchemaProps: spec.SchemaProps{
Description: "HTTPGet specifies the http request to perform.",
Ref: ref("k8s.io/api/core/v1.HTTPGetAction"),
},
},
"tcpSocket": {
SchemaProps: spec.SchemaProps{
Description: "TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported",
Ref: ref("k8s.io/api/core/v1.TCPSocketAction"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ExecAction", "k8s.io/api/core/v1.HTTPGetAction", "k8s.io/api/core/v1.TCPSocketAction"},
}
}
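// Note: illustrative pod-level hostAliases entry (hypothetical IP and
// hostnames) matching the HostAlias schema below; each entry becomes a line
// in the pod's /etc/hosts file.
//
//	spec:
//	  hostAliases:
//	  - ip: "10.0.0.10"
//	    hostnames: ["db.local", "cache.local"]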
func schema_k8sio_api_core_v1_HostAlias(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"ip": {
SchemaProps: spec.SchemaProps{
Description: "IP address of the host file entry.",
Type: []string{"string"},
Format: "",
},
},
"hostnames": {
SchemaProps: spec.SchemaProps{
Description: "Hostnames for the above IP address.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
},
},
}
}
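// Note: illustrative hostPath volume stanza (hypothetical path) for the
// HostPathVolumeSource schema below; the default type "" means no checks are
// performed before mounting.
//
//	volumes:
//	- name: host-logs
//	  hostPath:
//	    path: /var/log
//	    type: Directory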
func schema_k8sio_api_core_v1_HostPathVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"path": {
SchemaProps: spec.SchemaProps{
Description: "Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
Type: []string{"string"},
Format: "",
},
},
"type": {
SchemaProps: spec.SchemaProps{
Description: "Type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"path"},
},
},
}
}
func schema_k8sio_api_core_v1_ISCSIPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"targetPortal": {
SchemaProps: spec.SchemaProps{
Description: "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).",
Type: []string{"string"},
Format: "",
},
},
"iqn": {
SchemaProps: spec.SchemaProps{
Description: "Target iSCSI Qualified Name.",
Type: []string{"string"},
Format: "",
},
},
"lun": {
SchemaProps: spec.SchemaProps{
Description: "iSCSI Target Lun number.",
Type: []string{"integer"},
Format: "int32",
},
},
"iscsiInterface": {
SchemaProps: spec.SchemaProps{
Description: "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).",
Type: []string{"string"},
Format: "",
},
},
"fsType": {
SchemaProps: spec.SchemaProps{
Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi",
Type: []string{"string"},
Format: "",
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.",
Type: []string{"boolean"},
Format: "",
},
},
"portals": {
SchemaProps: spec.SchemaProps{
Description: "iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"chapAuthDiscovery": {
SchemaProps: spec.SchemaProps{
Description: "whether support iSCSI Discovery CHAP authentication",
Type: []string{"boolean"},
Format: "",
},
},
"chapAuthSession": {
SchemaProps: spec.SchemaProps{
Description: "whether support iSCSI Session CHAP authentication",
Type: []string{"boolean"},
Format: "",
},
},
"secretRef": {
SchemaProps: spec.SchemaProps{
Description: "CHAP Secret for iSCSI target and initiator authentication",
Ref: ref("k8s.io/api/core/v1.SecretReference"),
},
},
"initiatorName": {
SchemaProps: spec.SchemaProps{
Description: "Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface <target portal>:<volume name> will be created for the connection.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"targetPortal", "iqn", "lun"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.SecretReference"},
}
}
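// Note: illustrative iscsi volume stanza (hypothetical portal and IQN) for the
// pod-scoped ISCSIVolumeSource schema below; unlike the persistent-volume
// variant above, its secretRef is a namespace-local LocalObjectReference.
//
//	volumes:
//	- name: iscsi-vol
//	  iscsi:
//	    targetPortal: "10.0.0.1:3260"
//	    iqn: "iqn.2001-04.com.example:storage.disk1"
//	    lun: 0
//	    fsType: ext4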
func schema_k8sio_api_core_v1_ISCSIVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"targetPortal": {
SchemaProps: spec.SchemaProps{
Description: "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).",
Type: []string{"string"},
Format: "",
},
},
"iqn": {
SchemaProps: spec.SchemaProps{
Description: "Target iSCSI Qualified Name.",
Type: []string{"string"},
Format: "",
},
},
"lun": {
SchemaProps: spec.SchemaProps{
Description: "iSCSI Target Lun number.",
Type: []string{"integer"},
Format: "int32",
},
},
"iscsiInterface": {
SchemaProps: spec.SchemaProps{
Description: "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).",
Type: []string{"string"},
Format: "",
},
},
"fsType": {
SchemaProps: spec.SchemaProps{
Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi",
Type: []string{"string"},
Format: "",
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.",
Type: []string{"boolean"},
Format: "",
},
},
"portals": {
SchemaProps: spec.SchemaProps{
Description: "iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"chapAuthDiscovery": {
SchemaProps: spec.SchemaProps{
Description: "whether support iSCSI Discovery CHAP authentication",
Type: []string{"boolean"},
Format: "",
},
},
"chapAuthSession": {
SchemaProps: spec.SchemaProps{
Description: "whether support iSCSI Session CHAP authentication",
Type: []string{"boolean"},
Format: "",
},
},
"secretRef": {
SchemaProps: spec.SchemaProps{
Description: "CHAP Secret for iSCSI target and initiator authentication",
Ref: ref("k8s.io/api/core/v1.LocalObjectReference"),
},
},
"initiatorName": {
SchemaProps: spec.SchemaProps{
Description: "Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface <target portal>:<volume name> will be created for the connection.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"targetPortal", "iqn", "lun"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.LocalObjectReference"},
}
}
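// Note: illustrative use of KeyToPath items inside a configMap volume
// (hypothetical key and path), matching the schema below; mode is octal.
//
//	volumes:
//	- name: config
//	  configMap:
//	    name: app-config
//	    items:
//	    - key: app.properties
//	      path: conf/app.properties
//	      mode: 0644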
func schema_k8sio_api_core_v1_KeyToPath(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Maps a string key to a path within a volume.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"key": {
SchemaProps: spec.SchemaProps{
Description: "The key to project.",
Type: []string{"string"},
Format: "",
},
},
"path": {
SchemaProps: spec.SchemaProps{
Description: "The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.",
Type: []string{"string"},
Format: "",
},
},
"mode": {
SchemaProps: spec.SchemaProps{
Description: "Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
Type: []string{"integer"},
Format: "int32",
},
},
},
Required: []string{"key", "path"},
},
},
}
}
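// Note: illustrative container lifecycle block (hypothetical commands) for the
// Lifecycle schema below; both hooks reuse the Handler one-of shape.
//
//	lifecycle:
//	  postStart:
//	    exec:
//	      command: ["sh", "-c", "echo started"]
//	  preStop:
//	    httpGet:
//	      path: /drain
//	      port: 8080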
func schema_k8sio_api_core_v1_Lifecycle(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"postStart": {
SchemaProps: spec.SchemaProps{
Description: "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks",
Ref: ref("k8s.io/api/core/v1.Handler"),
},
},
"preStop": {
SchemaProps: spec.SchemaProps{
Description: "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks",
Ref: ref("k8s.io/api/core/v1.Handler"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.Handler"},
}
}
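// Note: illustrative LimitRange manifest (hypothetical values) for the
// LimitRange/LimitRangeItem schemas below; quantities are resource.Quantity
// strings such as "500m" or "1Gi".
//
//	apiVersion: v1
//	kind: LimitRange
//	metadata:
//	  name: cpu-mem-limits
//	spec:
//	  limits:
//	  - type: Container
//	    max: {cpu: "2", memory: "2Gi"}
//	    min: {cpu: "100m", memory: "64Mi"}
//	    defaultRequest: {cpu: "250m", memory: "128Mi"}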
func schema_k8sio_api_core_v1_LimitRange(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "LimitRange sets resource usage limits for each kind of resource in a Namespace.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec defines the limits enforced. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
Ref: ref("k8s.io/api/core/v1.LimitRangeSpec"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.LimitRangeSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
func schema_k8sio_api_core_v1_LimitRangeItem(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "LimitRangeItem defines a min/max usage limit for any resource that matches on kind.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"type": {
SchemaProps: spec.SchemaProps{
Description: "Type of resource that this limit applies to.",
Type: []string{"string"},
Format: "",
},
},
"max": {
SchemaProps: spec.SchemaProps{
Description: "Max usage constraints on this kind by resource name.",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
"min": {
SchemaProps: spec.SchemaProps{
Description: "Min usage constraints on this kind by resource name.",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
"default": {
SchemaProps: spec.SchemaProps{
Description: "Default resource requirement limit value by resource name if resource limit is omitted.",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
"defaultRequest": {
SchemaProps: spec.SchemaProps{
Description: "DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
"maxLimitRequestRatio": {
SchemaProps: spec.SchemaProps{
Description: "MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/api/resource.Quantity"},
}
}
func schema_k8sio_api_core_v1_LimitRangeList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "LimitRangeList is a list of LimitRange items.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "Items is a list of LimitRange objects. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.LimitRange"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.LimitRange", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
func schema_k8sio_api_core_v1_LimitRangeSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "LimitRangeSpec defines a min/max usage limit for resources that match on kind.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"limits": {
SchemaProps: spec.SchemaProps{
Description: "Limits is the list of LimitRangeItem objects that are enforced.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.LimitRangeItem"),
},
},
},
},
},
},
Required: []string{"limits"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.LimitRangeItem"},
}
}
func schema_k8sio_api_core_v1_List(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "List holds a list of objects, which may not be known by the server.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "List of objects",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "k8s.io/apimachinery/pkg/runtime.RawExtension"},
}
}
func schema_k8sio_api_core_v1_LoadBalancerIngress(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"ip": {
SchemaProps: spec.SchemaProps{
Description: "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)",
Type: []string{"string"},
Format: "",
},
},
"hostname": {
SchemaProps: spec.SchemaProps{
Description: "Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_k8sio_api_core_v1_LoadBalancerStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "LoadBalancerStatus represents the status of a load-balancer.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"ingress": {
SchemaProps: spec.SchemaProps{
Description: "Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.LoadBalancerIngress"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.LoadBalancerIngress"},
}
}
func schema_k8sio_api_core_v1_LocalObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_k8sio_api_core_v1_LocalVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Local represents directly-attached storage with node affinity (Beta feature)",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"path": {
SchemaProps: spec.SchemaProps{
Description: "The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...).",
Type: []string{"string"},
Format: "",
},
},
"fsType": {
SchemaProps: spec.SchemaProps{
Description: "Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default value is to auto-select a fileystem if unspecified.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"path"},
},
},
}
}
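// Note: illustrative nfs volume stanza (hypothetical server and export path)
// for the NFSVolumeSource schema below.
//
//	volumes:
//	- name: shared
//	  nfs:
//	    server: nfs.example.com
//	    path: /exports/data
//	    readOnly: true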
func schema_k8sio_api_core_v1_NFSVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"server": {
SchemaProps: spec.SchemaProps{
Description: "Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
Type: []string{"string"},
Format: "",
},
},
"path": {
SchemaProps: spec.SchemaProps{
Description: "Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
Type: []string{"string"},
Format: "",
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
Type: []string{"boolean"},
Format: "",
},
},
},
Required: []string{"server", "path"},
},
},
}
}
func schema_k8sio_api_core_v1_Namespace(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Namespace provides a scope for Names. Use of multiple namespaces is optional.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec defines the behavior of the Namespace. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
Ref: ref("k8s.io/api/core/v1.NamespaceSpec"),
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Status describes the current status of a Namespace. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
Ref: ref("k8s.io/api/core/v1.NamespaceStatus"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.NamespaceSpec", "k8s.io/api/core/v1.NamespaceStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
func schema_k8sio_api_core_v1_NamespaceCondition(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "NamespaceCondition contains details about state of namespace.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"type": {
SchemaProps: spec.SchemaProps{
Description: "Type of namespace controller condition.",
Type: []string{"string"},
Format: "",
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Status of the condition, one of True, False, Unknown.",
Type: []string{"string"},
Format: "",
},
},
"lastTransitionTime": {
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"reason": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"message": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"type", "status"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
}
}
func schema_k8sio_api_core_v1_NamespaceList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "NamespaceList is a list of Namespaces.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "Items is the list of Namespace objects in the list. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.Namespace"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.Namespace", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
func schema_k8sio_api_core_v1_NamespaceSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "NamespaceSpec describes the attributes on a Namespace.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"finalizers": {
SchemaProps: spec.SchemaProps{
Description: "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
},
},
}
}
func schema_k8sio_api_core_v1_NamespaceStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "NamespaceStatus is information about the current status of a Namespace.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"phase": {
SchemaProps: spec.SchemaProps{
Description: "Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/",
Type: []string{"string"},
Format: "",
},
},
"conditions": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "type",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "Represents the latest available observations of a namespace's current state.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.NamespaceCondition"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.NamespaceCondition"},
}
}
func schema_k8sio_api_core_v1_Node(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Node is a worker node in Kubernetes. Each node will have a unique identifier in the cache (i.e. in etcd).",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec defines the behavior of a node. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
Ref: ref("k8s.io/api/core/v1.NodeSpec"),
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Most recently observed status of the node. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
Ref: ref("k8s.io/api/core/v1.NodeStatus"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.NodeSpec", "k8s.io/api/core/v1.NodeStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
func schema_k8sio_api_core_v1_NodeAddress(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "NodeAddress contains information for the node's address.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"type": {
SchemaProps: spec.SchemaProps{
Description: "Node address type, one of Hostname, ExternalIP or InternalIP.",
Type: []string{"string"},
Format: "",
},
},
"address": {
SchemaProps: spec.SchemaProps{
Description: "The node address.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"type", "address"},
},
},
}
}
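// Note: illustrative nodeAffinity block (hypothetical labels) for the
// NodeAffinity/NodeSelector schemas below; required terms are ORed, while the
// expressions inside a single term are ANDed.
//
//	affinity:
//	  nodeAffinity:
//	    requiredDuringSchedulingIgnoredDuringExecution:
//	      nodeSelectorTerms:
//	      - matchExpressions:
//	        - key: disktype
//	          operator: In
//	          values: ["ssd"]
//	    preferredDuringSchedulingIgnoredDuringExecution:
//	    - weight: 10
//	      preference:
//	        matchExpressions:
//	        - key: zone
//	          operator: In
//	          values: ["us-east-1a"]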
func schema_k8sio_api_core_v1_NodeAffinity(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Node affinity is a group of node affinity scheduling rules.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"requiredDuringSchedulingIgnoredDuringExecution": {
SchemaProps: spec.SchemaProps{
Description: "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.",
Ref: ref("k8s.io/api/core/v1.NodeSelector"),
},
},
"preferredDuringSchedulingIgnoredDuringExecution": {
SchemaProps: spec.SchemaProps{
Description: "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.PreferredSchedulingTerm"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.NodeSelector", "k8s.io/api/core/v1.PreferredSchedulingTerm"},
}
}
func schema_k8sio_api_core_v1_NodeCondition(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "NodeCondition contains condition information for a node.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"type": {
SchemaProps: spec.SchemaProps{
Description: "Type of node condition.",
Type: []string{"string"},
Format: "",
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Status of the condition, one of True, False, Unknown.",
Type: []string{"string"},
Format: "",
},
},
"lastHeartbeatTime": {
SchemaProps: spec.SchemaProps{
Description: "Last time we got an update on a given condition.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"lastTransitionTime": {
SchemaProps: spec.SchemaProps{
Description: "Last time the condition transit from one status to another.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"reason": {
SchemaProps: spec.SchemaProps{
Description: "(brief) reason for the condition's last transition.",
Type: []string{"string"},
Format: "",
},
},
"message": {
SchemaProps: spec.SchemaProps{
Description: "Human readable message indicating details about last transition.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"type", "status"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
}
}
func schema_k8sio_api_core_v1_NodeConfigSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"configMap": {
SchemaProps: spec.SchemaProps{
Description: "ConfigMap is a reference to a Node's ConfigMap",
Ref: ref("k8s.io/api/core/v1.ConfigMapNodeConfigSource"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ConfigMapNodeConfigSource"},
}
}
func schema_k8sio_api_core_v1_NodeConfigStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"assigned": {
SchemaProps: spec.SchemaProps{
Description: "Assigned reports the checkpointed config the node will try to use. When Node.Spec.ConfigSource is updated, the node checkpoints the associated config payload to local disk, along with a record indicating intended config. The node refers to this record to choose its config checkpoint, and reports this record in Assigned. Assigned only updates in the status after the record has been checkpointed to disk. When the Kubelet is restarted, it tries to make the Assigned config the Active config by loading and validating the checkpointed payload identified by Assigned.",
Ref: ref("k8s.io/api/core/v1.NodeConfigSource"),
},
},
"active": {
SchemaProps: spec.SchemaProps{
Description: "Active reports the checkpointed config the node is actively using. Active will represent either the current version of the Assigned config, or the current LastKnownGood config, depending on whether attempting to use the Assigned config results in an error.",
Ref: ref("k8s.io/api/core/v1.NodeConfigSource"),
},
},
"lastKnownGood": {
SchemaProps: spec.SchemaProps{
Description: "LastKnownGood reports the checkpointed config the node will fall back to when it encounters an error attempting to use the Assigned config. The Assigned config becomes the LastKnownGood config when the node determines that the Assigned config is stable and correct. This is currently implemented as a 10-minute soak period starting when the local record of Assigned config is updated. If the Assigned config is Active at the end of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, because the local default config is always assumed good. You should not make assumptions about the node's method of determining config stability and correctness, as this may change or become configurable in the future.",
Ref: ref("k8s.io/api/core/v1.NodeConfigSource"),
},
},
"error": {
SchemaProps: spec.SchemaProps{
Description: "Error describes any problems reconciling the Spec.ConfigSource to the Active config. Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting to load or validate the Assigned config, etc. Errors may occur at different points while syncing config. Earlier errors (e.g. download or checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error by fixing the config assigned in Spec.ConfigSource. You can find additional information for debugging by searching the error message in the Kubelet log. Error is a human-readable description of the error state; machines can check whether or not Error is empty, but should not rely on the stability of the Error text across Kubelet versions.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.NodeConfigSource"},
}
}
func schema_k8sio_api_core_v1_NodeDaemonEndpoints(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "NodeDaemonEndpoints lists ports opened by daemons running on the Node.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kubeletEndpoint": {
SchemaProps: spec.SchemaProps{
Description: "Endpoint on which Kubelet is listening.",
Ref: ref("k8s.io/api/core/v1.DaemonEndpoint"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.DaemonEndpoint"},
}
}
func schema_k8sio_api_core_v1_NodeList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "NodeList is the whole list of all Nodes which have been registered with master.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "List of nodes",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.Node"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.Node", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
func schema_k8sio_api_core_v1_NodeProxyOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "NodeProxyOptions is the query options to a Node's proxy call.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"path": {
SchemaProps: spec.SchemaProps{
Description: "Path is the URL path to use for the current proxy request to node.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_k8sio_api_core_v1_NodeResources(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "NodeResources is an object for conveying resource information about a node. see http://releases.k8s.io/HEAD/docs/design/resources.md for more details.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"Capacity": {
SchemaProps: spec.SchemaProps{
Description: "Capacity represents the available resources of a node",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
},
Required: []string{"Capacity"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/api/resource.Quantity"},
}
}
func schema_k8sio_api_core_v1_NodeSelector(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"nodeSelectorTerms": {
SchemaProps: spec.SchemaProps{
Description: "Required. A list of node selector terms. The terms are ORed.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.NodeSelectorTerm"),
},
},
},
},
},
},
Required: []string{"nodeSelectorTerms"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.NodeSelectorTerm"},
}
}
func schema_k8sio_api_core_v1_NodeSelectorRequirement(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"key": {
SchemaProps: spec.SchemaProps{
Description: "The label key that the selector applies to.",
Type: []string{"string"},
Format: "",
},
},
"operator": {
SchemaProps: spec.SchemaProps{
Description: "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.",
Type: []string{"string"},
Format: "",
},
},
"values": {
SchemaProps: spec.SchemaProps{
Description: "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
Required: []string{"key", "operator"},
},
},
}
}
func schema_k8sio_api_core_v1_NodeSelectorTerm(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"matchExpressions": {
SchemaProps: spec.SchemaProps{
Description: "A list of node selector requirements by node's labels.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.NodeSelectorRequirement"),
},
},
},
},
},
"matchFields": {
SchemaProps: spec.SchemaProps{
Description: "A list of node selector requirements by node's fields.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.NodeSelectorRequirement"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.NodeSelectorRequirement"},
}
}
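// Note: illustrative NodeSpec fragment (hypothetical CIDR and taint) for the
// schema below; when podCIDRs is set, its 0th entry must equal podCIDR.
//
//	spec:
//	  podCIDR: 10.244.1.0/24
//	  podCIDRs: ["10.244.1.0/24"]
//	  taints:
//	  - key: dedicated
//	    value: gpu
//	    effect: NoSchedule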
func schema_k8sio_api_core_v1_NodeSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "NodeSpec describes the attributes that a node is created with.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"podCIDR": {
SchemaProps: spec.SchemaProps{
Description: "PodCIDR represents the pod IP range assigned to the node.",
Type: []string{"string"},
Format: "",
},
},
"podCIDRs": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for each of IPv4 and IPv6.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"providerID": {
SchemaProps: spec.SchemaProps{
Description: "ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>",
Type: []string{"string"},
Format: "",
},
},
"unschedulable": {
SchemaProps: spec.SchemaProps{
Description: "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration",
Type: []string{"boolean"},
Format: "",
},
},
"taints": {
SchemaProps: spec.SchemaProps{
Description: "If specified, the node's taints.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.Taint"),
},
},
},
},
},
"configSource": {
SchemaProps: spec.SchemaProps{
Description: "If specified, the source to get node configuration from The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field",
Ref: ref("k8s.io/api/core/v1.NodeConfigSource"),
},
},
"externalID": {
SchemaProps: spec.SchemaProps{
Description: "Deprecated. Not all kubelets will set this field. Remove field after 1.13. see: https://issues.k8s.io/61966",
Type: []string{"string"},
Format: "",
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.NodeConfigSource", "k8s.io/api/core/v1.Taint"},
}
}
func schema_k8sio_api_core_v1_NodeStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "NodeStatus is information about the current status of a node.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"capacity": {
SchemaProps: spec.SchemaProps{
Description: "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
"allocatable": {
SchemaProps: spec.SchemaProps{
Description: "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
"phase": {
SchemaProps: spec.SchemaProps{
Description: "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.",
Type: []string{"string"},
Format: "",
},
},
"conditions": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "type",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.NodeCondition"),
},
},
},
},
},
"addresses": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "type",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See http://pr.k8s.io/79391 for an example.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.NodeAddress"),
},
},
},
},
},
"daemonEndpoints": {
SchemaProps: spec.SchemaProps{
Description: "Endpoints of daemons running on the Node.",
Ref: ref("k8s.io/api/core/v1.NodeDaemonEndpoints"),
},
},
"nodeInfo": {
SchemaProps: spec.SchemaProps{
Description: "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info",
Ref: ref("k8s.io/api/core/v1.NodeSystemInfo"),
},
},
"images": {
SchemaProps: spec.SchemaProps{
Description: "List of container images on this node",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.ContainerImage"),
},
},
},
},
},
"volumesInUse": {
SchemaProps: spec.SchemaProps{
Description: "List of attachable volumes in use (mounted) by the node.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"volumesAttached": {
SchemaProps: spec.SchemaProps{
Description: "List of volumes that are attached to the node.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.AttachedVolume"),
},
},
},
},
},
"config": {
SchemaProps: spec.SchemaProps{
Description: "Status of the config assigned to the node via the dynamic Kubelet config feature.",
Ref: ref("k8s.io/api/core/v1.NodeConfigStatus"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.AttachedVolume", "k8s.io/api/core/v1.ContainerImage", "k8s.io/api/core/v1.NodeAddress", "k8s.io/api/core/v1.NodeCondition", "k8s.io/api/core/v1.NodeConfigStatus", "k8s.io/api/core/v1.NodeDaemonEndpoints", "k8s.io/api/core/v1.NodeSystemInfo", "k8s.io/apimachinery/pkg/api/resource.Quantity"},
}
}
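// The "capacity" and "allocatable" properties above are open maps
// (AdditionalProperties with a Quantity ref), i.e. arbitrary resource names
// mapped to quantities. An illustrative, hand-written instance:
//
//	capacity:
//	  cpu: "4"
//	  memory: 16Gi
//	  pods: "110"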
func schema_k8sio_api_core_v1_NodeSystemInfo(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "NodeSystemInfo is a set of ids/uuids to uniquely identify the node.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"machineID": {
SchemaProps: spec.SchemaProps{
Description: "MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html",
Type: []string{"string"},
Format: "",
},
},
"systemUUID": {
SchemaProps: spec.SchemaProps{
Description: "SystemUUID reported by the node. For unique machine identification MachineID is preferred. This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html",
Type: []string{"string"},
Format: "",
},
},
"bootID": {
SchemaProps: spec.SchemaProps{
Description: "Boot ID reported by the node.",
Type: []string{"string"},
Format: "",
},
},
"kernelVersion": {
SchemaProps: spec.SchemaProps{
Description: "Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).",
Type: []string{"string"},
Format: "",
},
},
"osImage": {
SchemaProps: spec.SchemaProps{
Description: "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).",
Type: []string{"string"},
Format: "",
},
},
"containerRuntimeVersion": {
SchemaProps: spec.SchemaProps{
Description: "ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).",
Type: []string{"string"},
Format: "",
},
},
"kubeletVersion": {
SchemaProps: spec.SchemaProps{
Description: "Kubelet Version reported by the node.",
Type: []string{"string"},
Format: "",
},
},
"kubeProxyVersion": {
SchemaProps: spec.SchemaProps{
Description: "KubeProxy Version reported by the node.",
Type: []string{"string"},
Format: "",
},
},
"operatingSystem": {
SchemaProps: spec.SchemaProps{
Description: "The Operating System reported by the node",
Type: []string{"string"},
Format: "",
},
},
"architecture": {
SchemaProps: spec.SchemaProps{
Description: "The Architecture reported by the node",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"machineID", "systemUUID", "bootID", "kernelVersion", "osImage", "containerRuntimeVersion", "kubeletVersion", "kubeProxyVersion", "operatingSystem", "architecture"},
},
},
}
}
func schema_k8sio_api_core_v1_ObjectFieldSelector(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ObjectFieldSelector selects an APIVersioned field of an object.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "Version of the schema the FieldPath is written in terms of, defaults to \"v1\".",
Type: []string{"string"},
Format: "",
},
},
"fieldPath": {
SchemaProps: spec.SchemaProps{
Description: "Path of the field to select in the specified API version.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"fieldPath"},
},
},
}
}
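// ObjectFieldSelector is most commonly used via the downward API, e.g. to
// expose pod metadata as an environment variable (illustrative, hand-written
// YAML):
//
//	env:
//	  - name: POD_NAME
//	    valueFrom:
//	      fieldRef:
//	        fieldPath: metadata.name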
func schema_k8sio_api_core_v1_ObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ObjectReference contains enough information to let you inspect or modify the referred object.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"namespace": {
SchemaProps: spec.SchemaProps{
Description: "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/",
Type: []string{"string"},
Format: "",
},
},
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
Type: []string{"string"},
Format: "",
},
},
"uid": {
SchemaProps: spec.SchemaProps{
Description: "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "API version of the referent.",
Type: []string{"string"},
Format: "",
},
},
"resourceVersion": {
SchemaProps: spec.SchemaProps{
Description: "Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency",
Type: []string{"string"},
Format: "",
},
},
"fieldPath": {
SchemaProps: spec.SchemaProps{
Description: "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
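// An illustrative, hand-written ObjectReference instance (names are
// placeholders):
//
//	apiVersion: v1
//	kind: Pod
//	namespace: default
//	name: web-0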
func schema_k8sio_api_core_v1_PersistentVolume(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes",
Ref: ref("k8s.io/api/core/v1.PersistentVolumeSpec"),
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes",
Ref: ref("k8s.io/api/core/v1.PersistentVolumeStatus"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.PersistentVolumeSpec", "k8s.io/api/core/v1.PersistentVolumeStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
func schema_k8sio_api_core_v1_PersistentVolumeClaim(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PersistentVolumeClaim is a user's request for and claim to a persistent volume",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimSpec"),
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimStatus"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.PersistentVolumeClaimSpec", "k8s.io/api/core/v1.PersistentVolumeClaimStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
func schema_k8sio_api_core_v1_PersistentVolumeClaimCondition(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PersistentVolumeClaimCondition contails details about state of pvc",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"type": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"status": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"lastProbeTime": {
SchemaProps: spec.SchemaProps{
Description: "Last time we probed the condition.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"lastTransitionTime": {
SchemaProps: spec.SchemaProps{
Description: "Last time the condition transitioned from one status to another.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"reason": {
SchemaProps: spec.SchemaProps{
Description: "Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.",
Type: []string{"string"},
Format: "",
},
},
"message": {
SchemaProps: spec.SchemaProps{
Description: "Human-readable message indicating details about last transition.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"type", "status"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
}
}
func schema_k8sio_api_core_v1_PersistentVolumeClaimList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "A list of persistent volume claims. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaim"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
func schema_k8sio_api_core_v1_PersistentVolumeClaimSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"accessModes": {
SchemaProps: spec.SchemaProps{
Description: "AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"selector": {
SchemaProps: spec.SchemaProps{
Description: "A label query over volumes to consider for binding.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"),
},
},
"resources": {
SchemaProps: spec.SchemaProps{
Description: "Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources",
Ref: ref("k8s.io/api/core/v1.ResourceRequirements"),
},
},
"volumeName": {
SchemaProps: spec.SchemaProps{
Description: "VolumeName is the binding reference to the PersistentVolume backing this claim.",
Type: []string{"string"},
Format: "",
},
},
"storageClassName": {
SchemaProps: spec.SchemaProps{
Description: "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1",
Type: []string{"string"},
Format: "",
},
},
"volumeMode": {
SchemaProps: spec.SchemaProps{
Description: "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is a beta feature.",
Type: []string{"string"},
Format: "",
},
},
"dataSource": {
SchemaProps: spec.SchemaProps{
Description: "This field requires the VolumeSnapshotDataSource alpha feature gate to be enabled and currently VolumeSnapshot is the only supported data source. If the provisioner can support VolumeSnapshot data source, it will create a new volume and data will be restored to the volume at the same time. If the provisioner does not support VolumeSnapshot data source, volume will not be created and the failure will be reported as an event. In the future, we plan to support more data source types and the behavior of the provisioner may change.",
Ref: ref("k8s.io/api/core/v1.TypedLocalObjectReference"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.TypedLocalObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"},
}
}
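// An illustrative, hand-written PersistentVolumeClaimSpec instance (the
// storage class name is a placeholder):
//
//	accessModes:
//	  - ReadWriteOnce
//	resources:
//	  requests:
//	    storage: 8Gi
//	storageClassName: standard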
func schema_k8sio_api_core_v1_PersistentVolumeClaimStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PersistentVolumeClaimStatus is the current status of a persistent volume claim.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"phase": {
SchemaProps: spec.SchemaProps{
Description: "Phase represents the current phase of PersistentVolumeClaim.",
Type: []string{"string"},
Format: "",
},
},
"accessModes": {
SchemaProps: spec.SchemaProps{
Description: "AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"capacity": {
SchemaProps: spec.SchemaProps{
Description: "Represents the actual resources of the underlying volume.",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
"conditions": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "type",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimCondition"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.PersistentVolumeClaimCondition", "k8s.io/apimachinery/pkg/api/resource.Quantity"},
}
}
func schema_k8sio_api_core_v1_PersistentVolumeClaimVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"claimName": {
SchemaProps: spec.SchemaProps{
Description: "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
Type: []string{"string"},
Format: "",
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "Will force the ReadOnly setting in VolumeMounts. Default false.",
Type: []string{"boolean"},
Format: "",
},
},
},
Required: []string{"claimName"},
},
},
}
}
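// An illustrative, hand-written use of PersistentVolumeClaimVolumeSource in a
// pod's volume list (names are placeholders):
//
//	volumes:
//	  - name: data
//	    persistentVolumeClaim:
//	      claimName: my-claim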
func schema_k8sio_api_core_v1_PersistentVolumeList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PersistentVolumeList is a list of PersistentVolume items.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "List of persistent volumes. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.PersistentVolume"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.PersistentVolume", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
func schema_k8sio_api_core_v1_PersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs. Exactly one of its members must be set.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"gcePersistentDisk": {
SchemaProps: spec.SchemaProps{
Description: "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"),
},
},
"awsElasticBlockStore": {
SchemaProps: spec.SchemaProps{
Description: "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"),
},
},
"hostPath": {
SchemaProps: spec.SchemaProps{
Description: "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"),
},
},
"glusterfs": {
SchemaProps: spec.SchemaProps{
Description: "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://examples.k8s.io/volumes/glusterfs/README.md",
Ref: ref("k8s.io/api/core/v1.GlusterfsPersistentVolumeSource"),
},
},
"nfs": {
SchemaProps: spec.SchemaProps{
Description: "NFS represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"),
},
},
"rbd": {
SchemaProps: spec.SchemaProps{
Description: "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md",
Ref: ref("k8s.io/api/core/v1.RBDPersistentVolumeSource"),
},
},
"iscsi": {
SchemaProps: spec.SchemaProps{
Description: "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.",
Ref: ref("k8s.io/api/core/v1.ISCSIPersistentVolumeSource"),
},
},
"cinder": {
SchemaProps: spec.SchemaProps{
Description: "Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
Ref: ref("k8s.io/api/core/v1.CinderPersistentVolumeSource"),
},
},
"cephfs": {
SchemaProps: spec.SchemaProps{
Description: "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
Ref: ref("k8s.io/api/core/v1.CephFSPersistentVolumeSource"),
},
},
"fc": {
SchemaProps: spec.SchemaProps{
Description: "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
Ref: ref("k8s.io/api/core/v1.FCVolumeSource"),
},
},
"flocker": {
SchemaProps: spec.SchemaProps{
Description: "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running",
Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"),
},
},
"flexVolume": {
SchemaProps: spec.SchemaProps{
Description: "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
Ref: ref("k8s.io/api/core/v1.FlexPersistentVolumeSource"),
},
},
"azureFile": {
SchemaProps: spec.SchemaProps{
Description: "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
Ref: ref("k8s.io/api/core/v1.AzureFilePersistentVolumeSource"),
},
},
"vsphereVolume": {
SchemaProps: spec.SchemaProps{
Description: "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"),
},
},
"quobyte": {
SchemaProps: spec.SchemaProps{
Description: "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"),
},
},
"azureDisk": {
SchemaProps: spec.SchemaProps{
Description: "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"),
},
},
"photonPersistentDisk": {
SchemaProps: spec.SchemaProps{
Description: "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"),
},
},
"portworxVolume": {
SchemaProps: spec.SchemaProps{
Description: "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine",
Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"),
},
},
"scaleIO": {
SchemaProps: spec.SchemaProps{
Description: "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
Ref: ref("k8s.io/api/core/v1.ScaleIOPersistentVolumeSource"),
},
},
"local": {
SchemaProps: spec.SchemaProps{
Description: "Local represents directly-attached storage with node affinity",
Ref: ref("k8s.io/api/core/v1.LocalVolumeSource"),
},
},
"storageos": {
SchemaProps: spec.SchemaProps{
Description: "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://examples.k8s.io/volumes/storageos/README.md",
Ref: ref("k8s.io/api/core/v1.StorageOSPersistentVolumeSource"),
},
},
"csi": {
SchemaProps: spec.SchemaProps{
Description: "CSI represents storage that is handled by an external CSI driver (Beta feature).",
Ref: ref("k8s.io/api/core/v1.CSIPersistentVolumeSource"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFilePersistentVolumeSource", "k8s.io/api/core/v1.CSIPersistentVolumeSource", "k8s.io/api/core/v1.CephFSPersistentVolumeSource", "k8s.io/api/core/v1.CinderPersistentVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexPersistentVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsPersistentVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIPersistentVolumeSource", "k8s.io/api/core/v1.LocalVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDPersistentVolumeSource", "k8s.io/api/core/v1.ScaleIOPersistentVolumeSource", "k8s.io/api/core/v1.StorageOSPersistentVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"},
}
}
func schema_k8sio_api_core_v1_PersistentVolumeSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PersistentVolumeSpec is the specification of a persistent volume.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"capacity": {
SchemaProps: spec.SchemaProps{
Description: "A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
"gcePersistentDisk": {
SchemaProps: spec.SchemaProps{
Description: "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"),
},
},
"awsElasticBlockStore": {
SchemaProps: spec.SchemaProps{
Description: "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"),
},
},
"hostPath": {
SchemaProps: spec.SchemaProps{
Description: "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"),
},
},
"glusterfs": {
SchemaProps: spec.SchemaProps{
Description: "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://examples.k8s.io/volumes/glusterfs/README.md",
Ref: ref("k8s.io/api/core/v1.GlusterfsPersistentVolumeSource"),
},
},
"nfs": {
SchemaProps: spec.SchemaProps{
Description: "NFS represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"),
},
},
"rbd": {
SchemaProps: spec.SchemaProps{
Description: "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md",
Ref: ref("k8s.io/api/core/v1.RBDPersistentVolumeSource"),
},
},
"iscsi": {
SchemaProps: spec.SchemaProps{
Description: "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.",
Ref: ref("k8s.io/api/core/v1.ISCSIPersistentVolumeSource"),
},
},
"cinder": {
SchemaProps: spec.SchemaProps{
Description: "Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
Ref: ref("k8s.io/api/core/v1.CinderPersistentVolumeSource"),
},
},
"cephfs": {
SchemaProps: spec.SchemaProps{
Description: "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
Ref: ref("k8s.io/api/core/v1.CephFSPersistentVolumeSource"),
},
},
"fc": {
SchemaProps: spec.SchemaProps{
Description: "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
Ref: ref("k8s.io/api/core/v1.FCVolumeSource"),
},
},
"flocker": {
SchemaProps: spec.SchemaProps{
Description: "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running",
Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"),
},
},
"flexVolume": {
SchemaProps: spec.SchemaProps{
Description: "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
Ref: ref("k8s.io/api/core/v1.FlexPersistentVolumeSource"),
},
},
"azureFile": {
SchemaProps: spec.SchemaProps{
Description: "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
Ref: ref("k8s.io/api/core/v1.AzureFilePersistentVolumeSource"),
},
},
"vsphereVolume": {
SchemaProps: spec.SchemaProps{
Description: "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"),
},
},
"quobyte": {
SchemaProps: spec.SchemaProps{
Description: "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"),
},
},
"azureDisk": {
SchemaProps: spec.SchemaProps{
Description: "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"),
},
},
"photonPersistentDisk": {
SchemaProps: spec.SchemaProps{
Description: "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"),
},
},
"portworxVolume": {
SchemaProps: spec.SchemaProps{
Description: "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine",
Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"),
},
},
"scaleIO": {
SchemaProps: spec.SchemaProps{
Description: "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
Ref: ref("k8s.io/api/core/v1.ScaleIOPersistentVolumeSource"),
},
},
"local": {
SchemaProps: spec.SchemaProps{
Description: "Local represents directly-attached storage with node affinity",
Ref: ref("k8s.io/api/core/v1.LocalVolumeSource"),
},
},
"storageos": {
SchemaProps: spec.SchemaProps{
Description: "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://examples.k8s.io/volumes/storageos/README.md",
Ref: ref("k8s.io/api/core/v1.StorageOSPersistentVolumeSource"),
},
},
"csi": {
SchemaProps: spec.SchemaProps{
Description: "CSI represents storage that is handled by an external CSI driver (Beta feature).",
Ref: ref("k8s.io/api/core/v1.CSIPersistentVolumeSource"),
},
},
"accessModes": {
SchemaProps: spec.SchemaProps{
Description: "AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"claimRef": {
SchemaProps: spec.SchemaProps{
Description: "ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding",
Ref: ref("k8s.io/api/core/v1.ObjectReference"),
},
},
"persistentVolumeReclaimPolicy": {
SchemaProps: spec.SchemaProps{
Description: "What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming",
Type: []string{"string"},
Format: "",
},
},
"storageClassName": {
SchemaProps: spec.SchemaProps{
Description: "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.",
Type: []string{"string"},
Format: "",
},
},
"mountOptions": {
SchemaProps: spec.SchemaProps{
Description: "A list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"volumeMode": {
SchemaProps: spec.SchemaProps{
Description: "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is a beta feature.",
Type: []string{"string"},
Format: "",
},
},
"nodeAffinity": {
SchemaProps: spec.SchemaProps{
Description: "NodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.",
Ref: ref("k8s.io/api/core/v1.VolumeNodeAffinity"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFilePersistentVolumeSource", "k8s.io/api/core/v1.CSIPersistentVolumeSource", "k8s.io/api/core/v1.CephFSPersistentVolumeSource", "k8s.io/api/core/v1.CinderPersistentVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexPersistentVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsPersistentVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIPersistentVolumeSource", "k8s.io/api/core/v1.LocalVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.ObjectReference", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDPersistentVolumeSource", "k8s.io/api/core/v1.ScaleIOPersistentVolumeSource", "k8s.io/api/core/v1.StorageOSPersistentVolumeSource", "k8s.io/api/core/v1.VolumeNodeAffinity", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource", "k8s.io/apimachinery/pkg/api/resource.Quantity"},
}
}
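// An illustrative, hand-written PersistentVolumeSpec instance combining the
// generic fields with exactly one volume source (the path is a placeholder):
//
//	capacity:
//	  storage: 10Gi
//	accessModes:
//	  - ReadWriteOnce
//	persistentVolumeReclaimPolicy: Retain
//	hostPath:
//	  path: /data/pv0001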
func schema_k8sio_api_core_v1_PersistentVolumeStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PersistentVolumeStatus is the current status of a persistent volume.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"phase": {
SchemaProps: spec.SchemaProps{
Description: "Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase",
Type: []string{"string"},
Format: "",
},
},
"message": {
SchemaProps: spec.SchemaProps{
Description: "A human-readable message indicating details about why the volume is in this state.",
Type: []string{"string"},
Format: "",
},
},
"reason": {
SchemaProps: spec.SchemaProps{
Description: "Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_k8sio_api_core_v1_PhotonPersistentDiskVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents a Photon Controller persistent disk resource.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"pdID": {
SchemaProps: spec.SchemaProps{
Description: "ID that identifies Photon Controller persistent disk",
Type: []string{"string"},
Format: "",
},
},
"fsType": {
SchemaProps: spec.SchemaProps{
Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"pdID"},
},
},
}
}
func schema_k8sio_api_core_v1_Pod(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
Ref: ref("k8s.io/api/core/v1.PodSpec"),
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
Ref: ref("k8s.io/api/core/v1.PodStatus"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.PodSpec", "k8s.io/api/core/v1.PodStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
func schema_k8sio_api_core_v1_PodAffinity(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Pod affinity is a group of inter pod affinity scheduling rules.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"requiredDuringSchedulingIgnoredDuringExecution": {
SchemaProps: spec.SchemaProps{
Description: "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.PodAffinityTerm"),
},
},
},
},
},
"preferredDuringSchedulingIgnoredDuringExecution": {
SchemaProps: spec.SchemaProps{
Description: "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.WeightedPodAffinityTerm"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.PodAffinityTerm", "k8s.io/api/core/v1.WeightedPodAffinityTerm"},
}
}
func schema_k8sio_api_core_v1_PodAffinityTerm(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"labelSelector": {
SchemaProps: spec.SchemaProps{
Description: "A label query over a set of resources, in this case pods.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"),
},
},
"namespaces": {
SchemaProps: spec.SchemaProps{
Description: "namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means \"this pod's namespace\"",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"topologyKey": {
SchemaProps: spec.SchemaProps{
Description: "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"topologyKey"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"},
}
}
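// An illustrative, hand-written PodAffinityTerm instance: co-locate with pods
// labeled app=web on the same node (label and key values are placeholders):
//
//	labelSelector:
//	  matchLabels:
//	    app: web
//	topologyKey: kubernetes.io/hostname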
func schema_k8sio_api_core_v1_PodAntiAffinity(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Pod anti affinity is a group of inter pod anti affinity scheduling rules.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"requiredDuringSchedulingIgnoredDuringExecution": {
SchemaProps: spec.SchemaProps{
Description: "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.PodAffinityTerm"),
},
},
},
},
},
"preferredDuringSchedulingIgnoredDuringExecution": {
SchemaProps: spec.SchemaProps{
Description: "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.WeightedPodAffinityTerm"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.PodAffinityTerm", "k8s.io/api/core/v1.WeightedPodAffinityTerm"},
}
}
func schema_k8sio_api_core_v1_PodAttachOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PodAttachOptions is the query options to a Pod's remote attach call.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"stdin": {
SchemaProps: spec.SchemaProps{
Description: "Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false.",
Type: []string{"boolean"},
Format: "",
},
},
"stdout": {
SchemaProps: spec.SchemaProps{
Description: "Stdout if true indicates that stdout is to be redirected for the attach call. Defaults to true.",
Type: []string{"boolean"},
Format: "",
},
},
"stderr": {
SchemaProps: spec.SchemaProps{
Description: "Stderr if true indicates that stderr is to be redirected for the attach call. Defaults to true.",
Type: []string{"boolean"},
Format: "",
},
},
"tty": {
SchemaProps: spec.SchemaProps{
Description: "TTY if true indicates that a tty will be allocated for the attach call. This is passed through the container runtime so the tty is allocated on the worker node by the container runtime. Defaults to false.",
Type: []string{"boolean"},
Format: "",
},
},
"container": {
SchemaProps: spec.SchemaProps{
Description: "The container in which to execute the command. Defaults to only container if there is only one container in the pod.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_k8sio_api_core_v1_PodCondition(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PodCondition contains details for the current condition of this pod.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"type": {
SchemaProps: spec.SchemaProps{
Description: "Type is the type of the condition. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
Type: []string{"string"},
Format: "",
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Status is the status of the condition. Can be True, False, Unknown. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
Type: []string{"string"},
Format: "",
},
},
"lastProbeTime": {
SchemaProps: spec.SchemaProps{
Description: "Last time we probed the condition.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"lastTransitionTime": {
SchemaProps: spec.SchemaProps{
Description: "Last time the condition transitioned from one status to another.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"reason": {
SchemaProps: spec.SchemaProps{
Description: "Unique, one-word, CamelCase reason for the condition's last transition.",
Type: []string{"string"},
Format: "",
},
},
"message": {
SchemaProps: spec.SchemaProps{
Description: "Human-readable message indicating details about last transition.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"type", "status"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
}
}
func schema_k8sio_api_core_v1_PodDNSConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"nameservers": {
SchemaProps: spec.SchemaProps{
Description: "A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"searches": {
SchemaProps: spec.SchemaProps{
Description: "A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"options": {
SchemaProps: spec.SchemaProps{
Description: "A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.PodDNSConfigOption"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.PodDNSConfigOption"},
}
}
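// Illustrative sketch (not part of the generated definitions): generated
// packages conventionally expose a GetOpenAPIDefinitions(ref) entry point
// that aggregates every schema_* function into one map keyed by type path.
// The two entries below are a hand-picked subset, shown only to illustrate
// the shape of that aggregation.
func exampleDNSConfigDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
	return map[string]common.OpenAPIDefinition{
		"k8s.io/api/core/v1.PodDNSConfig":       schema_k8sio_api_core_v1_PodDNSConfig(ref),
		"k8s.io/api/core/v1.PodDNSConfigOption": schema_k8sio_api_core_v1_PodDNSConfigOption(ref),
	}
}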
func schema_k8sio_api_core_v1_PodDNSConfigOption(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PodDNSConfigOption defines DNS resolver options of a pod.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Required.",
Type: []string{"string"},
Format: "",
},
},
"value": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_k8sio_api_core_v1_PodExecOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PodExecOptions is the query options to a Pod's remote exec call.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"stdin": {
SchemaProps: spec.SchemaProps{
Description: "Redirect the standard input stream of the pod for this call. Defaults to false.",
Type: []string{"boolean"},
Format: "",
},
},
"stdout": {
SchemaProps: spec.SchemaProps{
Description: "Redirect the standard output stream of the pod for this call. Defaults to true.",
Type: []string{"boolean"},
Format: "",
},
},
"stderr": {
SchemaProps: spec.SchemaProps{
Description: "Redirect the standard error stream of the pod for this call. Defaults to true.",
Type: []string{"boolean"},
Format: "",
},
},
"tty": {
SchemaProps: spec.SchemaProps{
Description: "TTY if true indicates that a tty will be allocated for the exec call. Defaults to false.",
Type: []string{"boolean"},
Format: "",
},
},
"container": {
SchemaProps: spec.SchemaProps{
Description: "Container in which to execute the command. Defaults to only container if there is only one container in the pod.",
Type: []string{"string"},
Format: "",
},
},
"command": {
SchemaProps: spec.SchemaProps{
Description: "Command is the remote command to execute. argv array. Not executed within a shell.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
Required: []string{"command"},
},
},
}
}
func schema_k8sio_api_core_v1_PodIP(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "IP address information for entries in the (plural) PodIPs field. Each entry includes:\n IP: An IP address allocated to the pod. Routable at least within the cluster.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"ip": {
SchemaProps: spec.SchemaProps{
Description: "ip is an IP address (IPv4 or IPv6) assigned to the pod",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_k8sio_api_core_v1_PodList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PodList is a list of Pods.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "List of pods. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.Pod"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.Pod", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
func schema_k8sio_api_core_v1_PodLogOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PodLogOptions is the query options for a Pod's logs REST call.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"container": {
SchemaProps: spec.SchemaProps{
Description: "The container for which to stream logs. Defaults to only container if there is one container in the pod.",
Type: []string{"string"},
Format: "",
},
},
"follow": {
SchemaProps: spec.SchemaProps{
Description: "Follow the log stream of the pod. Defaults to false.",
Type: []string{"boolean"},
Format: "",
},
},
"previous": {
SchemaProps: spec.SchemaProps{
Description: "Return previous terminated container logs. Defaults to false.",
Type: []string{"boolean"},
Format: "",
},
},
"sinceSeconds": {
SchemaProps: spec.SchemaProps{
Description: "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
Type: []string{"integer"},
Format: "int64",
},
},
"sinceTime": {
SchemaProps: spec.SchemaProps{
Description: "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"timestamps": {
SchemaProps: spec.SchemaProps{
Description: "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.",
Type: []string{"boolean"},
Format: "",
},
},
"tailLines": {
SchemaProps: spec.SchemaProps{
Description: "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime",
Type: []string{"integer"},
Format: "int64",
},
},
"limitBytes": {
SchemaProps: spec.SchemaProps{
Description: "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.",
Type: []string{"integer"},
Format: "int64",
},
},
},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
}
}
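// Illustrative sketch (not part of the generated definitions): the
// Dependencies list on each definition (e.g. PodLogOptions depends on
// meta/v1.Time) tells a spec builder which other definitions to pull in
// transitively. The defs parameter is an assumed registry of all generated
// definitions, such as the aggregation sketched earlier in this file.
func exampleCollectDependencies(defs map[string]common.OpenAPIDefinition, root string, seen map[string]bool) {
	if seen[root] {
		return // already visited; avoids cycles and duplicate work
	}
	seen[root] = true
	for _, dep := range defs[root].Dependencies {
		exampleCollectDependencies(defs, dep, seen)
	}
}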
func schema_k8sio_api_core_v1_PodPortForwardOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PodPortForwardOptions is the query options to a Pod's port forward call when using WebSockets. The `port` query parameter must specify the port or ports (comma separated) to forward over. Port forwarding over SPDY does not use these options. It requires the port to be passed in the `port` header as part of request.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"ports": {
SchemaProps: spec.SchemaProps{
Description: "List of ports to forward Required when using WebSockets",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int32",
},
},
},
},
},
},
},
},
}
}
func schema_k8sio_api_core_v1_PodProxyOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PodProxyOptions is the query options to a Pod's proxy call.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"path": {
SchemaProps: spec.SchemaProps{
Description: "Path is the URL path to use for the current proxy request to pod.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_k8sio_api_core_v1_PodReadinessGate(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PodReadinessGate contains the reference to a pod condition",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"conditionType": {
SchemaProps: spec.SchemaProps{
Description: "ConditionType refers to a condition in the pod's condition list with matching type.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"conditionType"},
},
},
}
}
func schema_k8sio_api_core_v1_PodSecurityContext(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"seLinuxOptions": {
SchemaProps: spec.SchemaProps{
Description: "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.",
Ref: ref("k8s.io/api/core/v1.SELinuxOptions"),
},
},
"windowsOptions": {
SchemaProps: spec.SchemaProps{
Description: "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
Ref: ref("k8s.io/api/core/v1.WindowsSecurityContextOptions"),
},
},
"runAsUser": {
SchemaProps: spec.SchemaProps{
Description: "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.",
Type: []string{"integer"},
Format: "int64",
},
},
"runAsGroup": {
SchemaProps: spec.SchemaProps{
Description: "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.",
Type: []string{"integer"},
Format: "int64",
},
},
"runAsNonRoot": {
SchemaProps: spec.SchemaProps{
Description: "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
Type: []string{"boolean"},
Format: "",
},
},
"supplementalGroups": {
SchemaProps: spec.SchemaProps{
Description: "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
},
},
},
"fsGroup": {
SchemaProps: spec.SchemaProps{
Description: "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\n\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.",
Type: []string{"integer"},
Format: "int64",
},
},
"sysctls": {
SchemaProps: spec.SchemaProps{
Description: "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.Sysctl"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.SELinuxOptions", "k8s.io/api/core/v1.Sysctl", "k8s.io/api/core/v1.WindowsSecurityContextOptions"},
}
}
func schema_k8sio_api_core_v1_PodSignature(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Describes the class of pods that should avoid this node. Exactly one field should be set.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"podController": {
SchemaProps: spec.SchemaProps{
Description: "Reference to controller whose pods should avoid this node.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference"},
}
}
func schema_k8sio_api_core_v1_PodSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PodSpec is a description of a pod.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"volumes": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "name",
"x-kubernetes-patch-strategy": "merge,retainKeys",
},
},
SchemaProps: spec.SchemaProps{
Description: "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.Volume"),
},
},
},
},
},
"initContainers": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "name",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.Container"),
},
},
},
},
},
"containers": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "name",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.Container"),
},
},
},
},
},
"ephemeralContainers": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "name",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.EphemeralContainer"),
},
},
},
},
},
"restartPolicy": {
SchemaProps: spec.SchemaProps{
Description: "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy",
Type: []string{"string"},
Format: "",
},
},
"terminationGracePeriodSeconds": {
SchemaProps: spec.SchemaProps{
Description: "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.",
Type: []string{"integer"},
Format: "int64",
},
},
"activeDeadlineSeconds": {
SchemaProps: spec.SchemaProps{
Description: "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.",
Type: []string{"integer"},
Format: "int64",
},
},
"dnsPolicy": {
SchemaProps: spec.SchemaProps{
Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.",
Type: []string{"string"},
Format: "",
},
},
"nodeSelector": {
SchemaProps: spec.SchemaProps{
Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"serviceAccountName": {
SchemaProps: spec.SchemaProps{
Description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/",
Type: []string{"string"},
Format: "",
},
},
"serviceAccount": {
SchemaProps: spec.SchemaProps{
Description: "DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.",
Type: []string{"string"},
Format: "",
},
},
"automountServiceAccountToken": {
SchemaProps: spec.SchemaProps{
Description: "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.",
Type: []string{"boolean"},
Format: "",
},
},
"nodeName": {
SchemaProps: spec.SchemaProps{
Description: "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.",
Type: []string{"string"},
Format: "",
},
},
"hostNetwork": {
SchemaProps: spec.SchemaProps{
Description: "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.",
Type: []string{"boolean"},
Format: "",
},
},
"hostPID": {
SchemaProps: spec.SchemaProps{
Description: "Use the host's pid namespace. Optional: Default to false.",
Type: []string{"boolean"},
Format: "",
},
},
"hostIPC": {
SchemaProps: spec.SchemaProps{
Description: "Use the host's ipc namespace. Optional: Default to false.",
Type: []string{"boolean"},
Format: "",
},
},
"shareProcessNamespace": {
SchemaProps: spec.SchemaProps{
Description: "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false. This field is beta-level and may be disabled with the PodShareProcessNamespace feature.",
Type: []string{"boolean"},
Format: "",
},
},
"securityContext": {
SchemaProps: spec.SchemaProps{
Description: "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.",
Ref: ref("k8s.io/api/core/v1.PodSecurityContext"),
},
},
"imagePullSecrets": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "name",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.LocalObjectReference"),
},
},
},
},
},
"hostname": {
SchemaProps: spec.SchemaProps{
Description: "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.",
Type: []string{"string"},
Format: "",
},
},
"subdomain": {
SchemaProps: spec.SchemaProps{
Description: "If specified, the fully qualified Pod hostname will be \"<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>\". If not specified, the pod will not have a domainname at all.",
Type: []string{"string"},
Format: "",
},
},
"affinity": {
SchemaProps: spec.SchemaProps{
Description: "If specified, the pod's scheduling constraints",
Ref: ref("k8s.io/api/core/v1.Affinity"),
},
},
"schedulerName": {
SchemaProps: spec.SchemaProps{
Description: "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.",
Type: []string{"string"},
Format: "",
},
},
"tolerations": {
SchemaProps: spec.SchemaProps{
Description: "If specified, the pod's tolerations.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.Toleration"),
},
},
},
},
},
"hostAliases": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "ip",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.HostAlias"),
},
},
},
},
},
"priorityClassName": {
SchemaProps: spec.SchemaProps{
Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
Type: []string{"string"},
Format: "",
},
},
"priority": {
SchemaProps: spec.SchemaProps{
Description: "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.",
Type: []string{"integer"},
Format: "int32",
},
},
"dnsConfig": {
SchemaProps: spec.SchemaProps{
Description: "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.",
Ref: ref("k8s.io/api/core/v1.PodDNSConfig"),
},
},
"readinessGates": {
SchemaProps: spec.SchemaProps{
Description: "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.PodReadinessGate"),
},
},
},
},
},
"runtimeClassName": {
SchemaProps: spec.SchemaProps{
Description: "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14.",
Type: []string{"string"},
Format: "",
},
},
"enableServiceLinks": {
SchemaProps: spec.SchemaProps{
Description: "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.",
Type: []string{"boolean"},
Format: "",
},
},
"preemptionPolicy": {
SchemaProps: spec.SchemaProps{
Description: "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.",
Type: []string{"string"},
Format: "",
},
},
"overhead": {
SchemaProps: spec.SchemaProps{
Description: "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
"topologySpreadConstraints": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-map-keys": []interface{}{
"topologyKey",
"whenUnsatisfiable",
},
"x-kubernetes-list-type": "map",
"x-kubernetes-patch-merge-key": "topologyKey",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. This field is alpha-level and is only honored by clusters that enables the EvenPodsSpread feature. All topologySpreadConstraints are ANDed.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.TopologySpreadConstraint"),
},
},
},
},
},
},
Required: []string{"containers"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.EphemeralContainer", "k8s.io/api/core/v1.HostAlias", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodReadinessGate", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint", "k8s.io/api/core/v1.Volume", "k8s.io/apimachinery/pkg/api/resource.Quantity"},
}
}
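// Illustrative sketch (not part of the generated definitions): the
// list-typed PodSpec properties above carry x-kubernetes-patch-* vendor
// extensions that drive strategic merge patch. A consumer can read them
// back from the built schema like this; "containers" and the expected
// values ("name", "merge") come straight from the definition above, while
// exampleRefCallback is the assumed callback sketched earlier.
func examplePatchStrategy() (mergeKey, strategy string) {
	def := schema_k8sio_api_core_v1_PodSpec(exampleRefCallback)
	prop := def.Schema.Properties["containers"]
	mergeKey, _ = prop.Extensions.GetString("x-kubernetes-patch-merge-key")
	strategy, _ = prop.Extensions.GetString("x-kubernetes-patch-strategy")
	return mergeKey, strategy
}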
func schema_k8sio_api_core_v1_PodStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"phase": {
SchemaProps: spec.SchemaProps{
Description: "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase",
Type: []string{"string"},
Format: "",
},
},
"conditions": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "type",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.PodCondition"),
},
},
},
},
},
"message": {
SchemaProps: spec.SchemaProps{
Description: "A human readable message indicating details about why the pod is in this condition.",
Type: []string{"string"},
Format: "",
},
},
"reason": {
SchemaProps: spec.SchemaProps{
Description: "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'",
Type: []string{"string"},
Format: "",
},
},
"nominatedNodeName": {
SchemaProps: spec.SchemaProps{
Description: "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.",
Type: []string{"string"},
Format: "",
},
},
"hostIP": {
SchemaProps: spec.SchemaProps{
Description: "IP address of the host to which the pod is assigned. Empty if not yet scheduled.",
Type: []string{"string"},
Format: "",
},
},
"podIP": {
SchemaProps: spec.SchemaProps{
Description: "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.",
Type: []string{"string"},
Format: "",
},
},
"podIPs": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "ip",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.PodIP"),
},
},
},
},
},
"startTime": {
SchemaProps: spec.SchemaProps{
Description: "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"initContainerStatuses": {
SchemaProps: spec.SchemaProps{
Description: "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.ContainerStatus"),
},
},
},
},
},
"containerStatuses": {
SchemaProps: spec.SchemaProps{
Description: "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.ContainerStatus"),
},
},
},
},
},
"qosClass": {
SchemaProps: spec.SchemaProps{
Description: "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md",
Type: []string{"string"},
Format: "",
},
},
"ephemeralContainerStatuses": {
SchemaProps: spec.SchemaProps{
Description: "Status for any ephemeral containers that have run in this pod. This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.ContainerStatus"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ContainerStatus", "k8s.io/api/core/v1.PodCondition", "k8s.io/api/core/v1.PodIP", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
}
}
func schema_k8sio_api_core_v1_PodStatusResult(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
Ref: ref("k8s.io/api/core/v1.PodStatus"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.PodStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
func schema_k8sio_api_core_v1_PodTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PodTemplate describes a template for creating copies of a predefined pod.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"template": {
SchemaProps: spec.SchemaProps{
Description: "Template defines the pods that will be created from this pod template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
Ref: ref("k8s.io/api/core/v1.PodTemplateSpec"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.PodTemplateSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
func schema_k8sio_api_core_v1_PodTemplateList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PodTemplateList is a list of PodTemplates.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "List of pod templates",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.PodTemplate"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.PodTemplate", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
func schema_k8sio_api_core_v1_PodTemplateSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PodTemplateSpec describes the data a pod should have when created from a template",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
Ref: ref("k8s.io/api/core/v1.PodSpec"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.PodSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
func schema_k8sio_api_core_v1_PortworxVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PortworxVolumeSource represents a Portworx volume resource.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"volumeID": {
SchemaProps: spec.SchemaProps{
Description: "VolumeID uniquely identifies a Portworx volume",
Type: []string{"string"},
Format: "",
},
},
"fsType": {
SchemaProps: spec.SchemaProps{
Description: "FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.",
Type: []string{"string"},
Format: "",
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
Type: []string{"boolean"},
Format: "",
},
},
},
Required: []string{"volumeID"},
},
},
}
}
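// Illustrative sketch (not part of the generated definitions): the
// Required list on a schema names properties that must be present, e.g.
// "volumeID" for PortworxVolumeSource above. An assumed generic consumer
// could check a decoded object against it like this.
func exampleMissingRequired(def common.OpenAPIDefinition, obj map[string]interface{}) []string {
	var missing []string
	for _, name := range def.Schema.Required {
		if _, ok := obj[name]; !ok {
			missing = append(missing, name)
		}
	}
	return missing
}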
func schema_k8sio_api_core_v1_PreferAvoidPodsEntry(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Describes a class of pods that should avoid this node.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"podSignature": {
SchemaProps: spec.SchemaProps{
Description: "The class of pods.",
Ref: ref("k8s.io/api/core/v1.PodSignature"),
},
},
"evictionTime": {
SchemaProps: spec.SchemaProps{
Description: "Time at which this entry was added to the list.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"reason": {
SchemaProps: spec.SchemaProps{
Description: "(brief) reason why this entry was added to the list.",
Type: []string{"string"},
Format: "",
},
},
"message": {
SchemaProps: spec.SchemaProps{
Description: "Human readable message indicating why this entry was added to the list.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"podSignature"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.PodSignature", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
}
}
func schema_k8sio_api_core_v1_PreferredSchedulingTerm(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"weight": {
SchemaProps: spec.SchemaProps{
Description: "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.",
Type: []string{"integer"},
Format: "int32",
},
},
"preference": {
SchemaProps: spec.SchemaProps{
Description: "A node selector term, associated with the corresponding weight.",
Ref: ref("k8s.io/api/core/v1.NodeSelectorTerm"),
},
},
},
Required: []string{"weight", "preference"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.NodeSelectorTerm"},
}
}
func schema_k8sio_api_core_v1_Probe(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"exec": {
SchemaProps: spec.SchemaProps{
Description: "One and only one of the following should be specified. Exec specifies the action to take.",
Ref: ref("k8s.io/api/core/v1.ExecAction"),
},
},
"httpGet": {
SchemaProps: spec.SchemaProps{
Description: "HTTPGet specifies the http request to perform.",
Ref: ref("k8s.io/api/core/v1.HTTPGetAction"),
},
},
"tcpSocket": {
SchemaProps: spec.SchemaProps{
Description: "TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported",
Ref: ref("k8s.io/api/core/v1.TCPSocketAction"),
},
},
"initialDelaySeconds": {
SchemaProps: spec.SchemaProps{
Description: "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
Type: []string{"integer"},
Format: "int32",
},
},
"timeoutSeconds": {
SchemaProps: spec.SchemaProps{
Description: "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
Type: []string{"integer"},
Format: "int32",
},
},
"periodSeconds": {
SchemaProps: spec.SchemaProps{
Description: "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.",
Type: []string{"integer"},
Format: "int32",
},
},
"successThreshold": {
SchemaProps: spec.SchemaProps{
Description: "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.",
Type: []string{"integer"},
Format: "int32",
},
},
"failureThreshold": {
SchemaProps: spec.SchemaProps{
Description: "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.",
Type: []string{"integer"},
Format: "int32",
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ExecAction", "k8s.io/api/core/v1.HTTPGetAction", "k8s.io/api/core/v1.TCPSocketAction"},
}
}
func schema_k8sio_api_core_v1_ProjectedVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents a projected volume source",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"sources": {
SchemaProps: spec.SchemaProps{
Description: "list of volume projections",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.VolumeProjection"),
},
},
},
},
},
"defaultMode": {
SchemaProps: spec.SchemaProps{
Description: "Mode bits to use on created files by default. Must be a value between 0 and 0777. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
Type: []string{"integer"},
Format: "int32",
},
},
},
Required: []string{"sources"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.VolumeProjection"},
}
}
func schema_k8sio_api_core_v1_QuobyteVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"registry": {
SchemaProps: spec.SchemaProps{
Description: "Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes",
Type: []string{"string"},
Format: "",
},
},
"volume": {
SchemaProps: spec.SchemaProps{
Description: "Volume is a string that references an already created Quobyte volume by name.",
Type: []string{"string"},
Format: "",
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.",
Type: []string{"boolean"},
Format: "",
},
},
"user": {
SchemaProps: spec.SchemaProps{
Description: "User to map volume access to Defaults to serivceaccount user",
Type: []string{"string"},
Format: "",
},
},
"group": {
SchemaProps: spec.SchemaProps{
Description: "Group to map volume access to Default is no group",
Type: []string{"string"},
Format: "",
},
},
"tenant": {
SchemaProps: spec.SchemaProps{
Description: "Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"registry", "volume"},
},
},
}
}
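// Example (illustrative sketch): a Quobyte pod volume matching the schema
// above. The registry is a comma-separated list of host:port pairs and
// "testVolume" names an already-created Quobyte volume; all values are
// hypothetical.
//
//   volumes:
//   - name: quobyte-vol
//     quobyte:
//       registry: "registry1:7861,registry2:7861"
//       volume: testVolume
//       readOnly: false
//       user: root
//       group: root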
func schema_k8sio_api_core_v1_RBDPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"monitors": {
SchemaProps: spec.SchemaProps{
Description: "A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"image": {
SchemaProps: spec.SchemaProps{
Description: "The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
Type: []string{"string"},
Format: "",
},
},
"fsType": {
SchemaProps: spec.SchemaProps{
Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd",
Type: []string{"string"},
Format: "",
},
},
"pool": {
SchemaProps: spec.SchemaProps{
Description: "The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
Type: []string{"string"},
Format: "",
},
},
"user": {
SchemaProps: spec.SchemaProps{
Description: "The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
Type: []string{"string"},
Format: "",
},
},
"keyring": {
SchemaProps: spec.SchemaProps{
Description: "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
Type: []string{"string"},
Format: "",
},
},
"secretRef": {
SchemaProps: spec.SchemaProps{
Description: "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
Ref: ref("k8s.io/api/core/v1.SecretReference"),
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
Type: []string{"boolean"},
Format: "",
},
},
},
Required: []string{"monitors", "image"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.SecretReference"},
}
}
func schema_k8sio_api_core_v1_RBDVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"monitors": {
SchemaProps: spec.SchemaProps{
Description: "A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"image": {
SchemaProps: spec.SchemaProps{
Description: "The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
Type: []string{"string"},
Format: "",
},
},
"fsType": {
SchemaProps: spec.SchemaProps{
Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd",
Type: []string{"string"},
Format: "",
},
},
"pool": {
SchemaProps: spec.SchemaProps{
Description: "The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
Type: []string{"string"},
Format: "",
},
},
"user": {
SchemaProps: spec.SchemaProps{
Description: "The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
Type: []string{"string"},
Format: "",
},
},
"keyring": {
SchemaProps: spec.SchemaProps{
Description: "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
Type: []string{"string"},
Format: "",
},
},
"secretRef": {
SchemaProps: spec.SchemaProps{
Description: "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
Ref: ref("k8s.io/api/core/v1.LocalObjectReference"),
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
Type: []string{"boolean"},
Format: "",
},
},
},
Required: []string{"monitors", "image"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.LocalObjectReference"},
}
}
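// Example (illustrative sketch): a pod-level RBD volume matching the
// RBDVolumeSource schema above. Note that secretRef here is a
// LocalObjectReference (name only, resolved in the pod's namespace), whereas
// RBDPersistentVolumeSource uses a SecretReference that also carries a
// namespace. Monitor addresses and names are hypothetical.
//
//   volumes:
//   - name: rbd-vol
//     rbd:
//       monitors:
//       - "10.16.154.78:6789"
//       pool: rbd
//       image: foo
//       user: admin
//       keyring: /etc/ceph/keyring
//       secretRef:
//         name: ceph-secret
//       fsType: ext4
//       readOnly: true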
func schema_k8sio_api_core_v1_RangeAllocation(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "RangeAllocation is not a public type.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"range": {
SchemaProps: spec.SchemaProps{
Description: "Range is string that identifies the range represented by 'data'.",
Type: []string{"string"},
Format: "",
},
},
"data": {
SchemaProps: spec.SchemaProps{
Description: "Data is a bit array containing all allocated addresses in the previous segment.",
Type: []string{"string"},
Format: "byte",
},
},
},
Required: []string{"range", "data"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
func schema_k8sio_api_core_v1_ReplicationController(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ReplicationController represents the configuration of a replication controller.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec defines the specification of the desired behavior of the replication controller. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
Ref: ref("k8s.io/api/core/v1.ReplicationControllerSpec"),
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
Ref: ref("k8s.io/api/core/v1.ReplicationControllerStatus"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ReplicationControllerSpec", "k8s.io/api/core/v1.ReplicationControllerStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
func schema_k8sio_api_core_v1_ReplicationControllerCondition(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ReplicationControllerCondition describes the state of a replication controller at a certain point.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"type": {
SchemaProps: spec.SchemaProps{
Description: "Type of replication controller condition.",
Type: []string{"string"},
Format: "",
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Status of the condition, one of True, False, Unknown.",
Type: []string{"string"},
Format: "",
},
},
"lastTransitionTime": {
SchemaProps: spec.SchemaProps{
Description: "The last time the condition transitioned from one status to another.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"reason": {
SchemaProps: spec.SchemaProps{
Description: "The reason for the condition's last transition.",
Type: []string{"string"},
Format: "",
},
},
"message": {
SchemaProps: spec.SchemaProps{
Description: "A human readable message indicating details about the transition.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"type", "status"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
}
}
func schema_k8sio_api_core_v1_ReplicationControllerList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ReplicationControllerList is a collection of replication controllers.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "List of replication controllers. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.ReplicationController"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ReplicationController", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
func schema_k8sio_api_core_v1_ReplicationControllerSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ReplicationControllerSpec is the specification of a replication controller.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"replicas": {
SchemaProps: spec.SchemaProps{
Description: "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller",
Type: []string{"integer"},
Format: "int32",
},
},
"minReadySeconds": {
SchemaProps: spec.SchemaProps{
Description: "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
Type: []string{"integer"},
Format: "int32",
},
},
"selector": {
SchemaProps: spec.SchemaProps{
Description: "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"template": {
SchemaProps: spec.SchemaProps{
Description: "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
Ref: ref("k8s.io/api/core/v1.PodTemplateSpec"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.PodTemplateSpec"},
}
}
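// Example (illustrative sketch): a ReplicationController whose spec uses the
// replicas, selector, and template fields described above. If selector were
// omitted it would default to the labels on the pod template. All names are
// hypothetical.
//
//   apiVersion: v1
//   kind: ReplicationController
//   metadata:
//     name: nginx
//   spec:
//     replicas: 3
//     selector:
//       app: nginx
//     template:
//       metadata:
//         labels:
//           app: nginx
//       spec:
//         containers:
//         - name: nginx
//           image: nginx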
func schema_k8sio_api_core_v1_ReplicationControllerStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ReplicationControllerStatus represents the current status of a replication controller.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"replicas": {
SchemaProps: spec.SchemaProps{
Description: "Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller",
Type: []string{"integer"},
Format: "int32",
},
},
"fullyLabeledReplicas": {
SchemaProps: spec.SchemaProps{
Description: "The number of pods that have labels matching the labels of the pod template of the replication controller.",
Type: []string{"integer"},
Format: "int32",
},
},
"readyReplicas": {
SchemaProps: spec.SchemaProps{
Description: "The number of ready replicas for this replication controller.",
Type: []string{"integer"},
Format: "int32",
},
},
"availableReplicas": {
SchemaProps: spec.SchemaProps{
Description: "The number of available replicas (ready for at least minReadySeconds) for this replication controller.",
Type: []string{"integer"},
Format: "int32",
},
},
"observedGeneration": {
SchemaProps: spec.SchemaProps{
Description: "ObservedGeneration reflects the generation of the most recently observed replication controller.",
Type: []string{"integer"},
Format: "int64",
},
},
"conditions": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "type",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "Represents the latest available observations of a replication controller's current state.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.ReplicationControllerCondition"),
},
},
},
},
},
},
Required: []string{"replicas"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ReplicationControllerCondition"},
}
}
func schema_k8sio_api_core_v1_ResourceFieldSelector(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ResourceFieldSelector represents container resources (cpu, memory) and their output format",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"containerName": {
SchemaProps: spec.SchemaProps{
Description: "Container name: required for volumes, optional for env vars",
Type: []string{"string"},
Format: "",
},
},
"resource": {
SchemaProps: spec.SchemaProps{
Description: "Required: resource to select",
Type: []string{"string"},
Format: "",
},
},
"divisor": {
SchemaProps: spec.SchemaProps{
Description: "Specifies the output format of the exposed resources, defaults to \"1\"",
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
Required: []string{"resource"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/api/resource.Quantity"},
}
}
func schema_k8sio_api_core_v1_ResourceQuota(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ResourceQuota sets aggregate quota restrictions enforced per namespace",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec defines the desired quota. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
Ref: ref("k8s.io/api/core/v1.ResourceQuotaSpec"),
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Status defines the actual enforced quota and its current usage. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
Ref: ref("k8s.io/api/core/v1.ResourceQuotaStatus"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ResourceQuotaSpec", "k8s.io/api/core/v1.ResourceQuotaStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
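// Example (illustrative sketch): a ResourceQuota enforcing hard limits per
// namespace, per the spec/status split above. The quantities are
// resource.Quantity strings; names and values are hypothetical.
//
//   apiVersion: v1
//   kind: ResourceQuota
//   metadata:
//     name: compute-quota
//     namespace: dev
//   spec:
//     hard:
//       requests.cpu: "4"
//       requests.memory: 8Gi
//       limits.cpu: "8"
//       limits.memory: 16Gi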
func schema_k8sio_api_core_v1_ResourceQuotaList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ResourceQuotaList is a list of ResourceQuota items.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "Items is a list of ResourceQuota objects. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.ResourceQuota"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ResourceQuota", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
func schema_k8sio_api_core_v1_ResourceQuotaSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ResourceQuotaSpec defines the desired hard limits to enforce for Quota.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"hard": {
SchemaProps: spec.SchemaProps{
Description: "hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
"scopes": {
SchemaProps: spec.SchemaProps{
Description: "A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"scopeSelector": {
SchemaProps: spec.SchemaProps{
Description: "scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota but expressed using ScopeSelectorOperator in combination with possible values. For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.",
Ref: ref("k8s.io/api/core/v1.ScopeSelector"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ScopeSelector", "k8s.io/apimachinery/pkg/api/resource.Quantity"},
}
}
func schema_k8sio_api_core_v1_ResourceQuotaStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ResourceQuotaStatus defines the enforced hard limits and observed use.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"hard": {
SchemaProps: spec.SchemaProps{
Description: "Hard is the set of enforced hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
"used": {
SchemaProps: spec.SchemaProps{
Description: "Used is the current observed total usage of the resource in the namespace.",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/api/resource.Quantity"},
}
}
func schema_k8sio_api_core_v1_ResourceRequirements(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ResourceRequirements describes the compute resource requirements.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"limits": {
SchemaProps: spec.SchemaProps{
Description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
"requests": {
SchemaProps: spec.SchemaProps{
Description: "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/api/resource.Quantity"},
}
}
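// Example (illustrative sketch): container resource requirements using the
// limits/requests maps above. Each value is a resource.Quantity string; if
// requests were omitted they would default to limits. The image name is
// hypothetical.
//
//   containers:
//   - name: app
//     image: example/app:v1
//     resources:
//       requests:
//         cpu: 250m
//         memory: 64Mi
//       limits:
//         cpu: 500m
//         memory: 128Mi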
func schema_k8sio_api_core_v1_SELinuxOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "SELinuxOptions are the labels to be applied to the container",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"user": {
SchemaProps: spec.SchemaProps{
Description: "User is a SELinux user label that applies to the container.",
Type: []string{"string"},
Format: "",
},
},
"role": {
SchemaProps: spec.SchemaProps{
Description: "Role is a SELinux role label that applies to the container.",
Type: []string{"string"},
Format: "",
},
},
"type": {
SchemaProps: spec.SchemaProps{
Description: "Type is a SELinux type label that applies to the container.",
Type: []string{"string"},
Format: "",
},
},
"level": {
SchemaProps: spec.SchemaProps{
Description: "Level is SELinux level label that applies to the container.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_k8sio_api_core_v1_ScaleIOPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"gateway": {
SchemaProps: spec.SchemaProps{
Description: "The host address of the ScaleIO API Gateway.",
Type: []string{"string"},
Format: "",
},
},
"system": {
SchemaProps: spec.SchemaProps{
Description: "The name of the storage system as configured in ScaleIO.",
Type: []string{"string"},
Format: "",
},
},
"secretRef": {
SchemaProps: spec.SchemaProps{
Description: "SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.",
Ref: ref("k8s.io/api/core/v1.SecretReference"),
},
},
"sslEnabled": {
SchemaProps: spec.SchemaProps{
Description: "Flag to enable/disable SSL communication with Gateway, default false",
Type: []string{"boolean"},
Format: "",
},
},
"protectionDomain": {
SchemaProps: spec.SchemaProps{
Description: "The name of the ScaleIO Protection Domain for the configured storage.",
Type: []string{"string"},
Format: "",
},
},
"storagePool": {
SchemaProps: spec.SchemaProps{
Description: "The ScaleIO Storage Pool associated with the protection domain.",
Type: []string{"string"},
Format: "",
},
},
"storageMode": {
SchemaProps: spec.SchemaProps{
Description: "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.",
Type: []string{"string"},
Format: "",
},
},
"volumeName": {
SchemaProps: spec.SchemaProps{
Description: "The name of a volume already created in the ScaleIO system that is associated with this volume source.",
Type: []string{"string"},
Format: "",
},
},
"fsType": {
SchemaProps: spec.SchemaProps{
Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\"",
Type: []string{"string"},
Format: "",
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
Type: []string{"boolean"},
Format: "",
},
},
},
Required: []string{"gateway", "system", "secretRef"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.SecretReference"},
}
}
func schema_k8sio_api_core_v1_ScaleIOVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ScaleIOVolumeSource represents a persistent ScaleIO volume",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"gateway": {
SchemaProps: spec.SchemaProps{
Description: "The host address of the ScaleIO API Gateway.",
Type: []string{"string"},
Format: "",
},
},
"system": {
SchemaProps: spec.SchemaProps{
Description: "The name of the storage system as configured in ScaleIO.",
Type: []string{"string"},
Format: "",
},
},
"secretRef": {
SchemaProps: spec.SchemaProps{
Description: "SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.",
Ref: ref("k8s.io/api/core/v1.LocalObjectReference"),
},
},
"sslEnabled": {
SchemaProps: spec.SchemaProps{
Description: "Flag to enable/disable SSL communication with Gateway, default false",
Type: []string{"boolean"},
Format: "",
},
},
"protectionDomain": {
SchemaProps: spec.SchemaProps{
Description: "The name of the ScaleIO Protection Domain for the configured storage.",
Type: []string{"string"},
Format: "",
},
},
"storagePool": {
SchemaProps: spec.SchemaProps{
Description: "The ScaleIO Storage Pool associated with the protection domain.",
Type: []string{"string"},
Format: "",
},
},
"storageMode": {
SchemaProps: spec.SchemaProps{
Description: "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.",
Type: []string{"string"},
Format: "",
},
},
"volumeName": {
SchemaProps: spec.SchemaProps{
Description: "The name of a volume already created in the ScaleIO system that is associated with this volume source.",
Type: []string{"string"},
Format: "",
},
},
"fsType": {
SchemaProps: spec.SchemaProps{
Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".",
Type: []string{"string"},
Format: "",
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
Type: []string{"boolean"},
Format: "",
},
},
},
Required: []string{"gateway", "system", "secretRef"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.LocalObjectReference"},
}
}
func schema_k8sio_api_core_v1_ScopeSelector(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "A scope selector represents the AND of the selectors represented by the scoped-resource selector requirements.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"matchExpressions": {
SchemaProps: spec.SchemaProps{
Description: "A list of scope selector requirements by scope of the resources.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.ScopedResourceSelectorRequirement"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ScopedResourceSelectorRequirement"},
}
}
func schema_k8sio_api_core_v1_ScopedResourceSelectorRequirement(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator that relates the scope name and values.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"scopeName": {
SchemaProps: spec.SchemaProps{
Description: "The name of the scope that the selector applies to.",
Type: []string{"string"},
Format: "",
},
},
"operator": {
SchemaProps: spec.SchemaProps{
Description: "Represents a scope's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist.",
Type: []string{"string"},
Format: "",
},
},
"values": {
SchemaProps: spec.SchemaProps{
Description: "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
Required: []string{"scopeName", "operator"},
},
},
}
}
func schema_k8sio_api_core_v1_Secret(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"data": {
SchemaProps: spec.SchemaProps{
Description: "Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "byte",
},
},
},
},
},
"stringData": {
SchemaProps: spec.SchemaProps{
Description: "stringData allows specifying non-binary secret data in string form. It is provided as a write-only convenience method. All keys and values are merged into the data field on write, overwriting any existing values. It is never output when reading from the API.",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"type": {
SchemaProps: spec.SchemaProps{
Description: "Used to facilitate programmatic handling of secret data.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
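// Example (illustrative sketch): a Secret using both data (base64-encoded,
// matching the "byte" format above) and the write-only stringData
// convenience field, which is merged into data on write. Names and values
// are hypothetical.
//
//   apiVersion: v1
//   kind: Secret
//   metadata:
//     name: mysecret
//   type: Opaque
//   data:
//     password: cGFzc3dvcmQ=
//   stringData:
//     username: admin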
func schema_k8sio_api_core_v1_SecretEnvSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "SecretEnvSource selects a Secret to populate the environment variables with.\n\nThe contents of the target Secret's Data field will represent the key-value pairs as environment variables.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
Type: []string{"string"},
Format: "",
},
},
"optional": {
SchemaProps: spec.SchemaProps{
Description: "Specify whether the Secret must be defined",
Type: []string{"boolean"},
Format: "",
},
},
},
},
},
}
}
func schema_k8sio_api_core_v1_SecretKeySelector(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "SecretKeySelector selects a key of a Secret.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
Type: []string{"string"},
Format: "",
},
},
"key": {
SchemaProps: spec.SchemaProps{
Description: "The key of the secret to select from. Must be a valid secret key.",
Type: []string{"string"},
Format: "",
},
},
"optional": {
SchemaProps: spec.SchemaProps{
Description: "Specify whether the Secret or its key must be defined",
Type: []string{"boolean"},
Format: "",
},
},
},
Required: []string{"key"},
},
},
}
}
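// Example (illustrative sketch): consuming a single Secret key as an
// environment variable via the SecretKeySelector above. "key" is required;
// "optional" controls whether a missing Secret or key is an error. Names are
// hypothetical.
//
//   env:
//   - name: DB_PASSWORD
//     valueFrom:
//       secretKeyRef:
//         name: mysecret
//         key: password
//         optional: false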
func schema_k8sio_api_core_v1_SecretList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "SecretList is a list of Secret.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "Items is a list of secret objects. More info: https://kubernetes.io/docs/concepts/configuration/secret",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.Secret"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.Secret", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
func schema_k8sio_api_core_v1_SecretProjection(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
Type: []string{"string"},
Format: "",
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.KeyToPath"),
},
},
},
},
},
"optional": {
SchemaProps: spec.SchemaProps{
Description: "Specify whether the Secret or its key must be defined",
Type: []string{"boolean"},
Format: "",
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.KeyToPath"},
}
}
func schema_k8sio_api_core_v1_SecretReference(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name is unique within a namespace to reference a secret resource.",
Type: []string{"string"},
Format: "",
},
},
"namespace": {
SchemaProps: spec.SchemaProps{
Description: "Namespace defines the space within which the secret name must be unique.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_k8sio_api_core_v1_SecretVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"secretName": {
SchemaProps: spec.SchemaProps{
Description: "Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret",
Type: []string{"string"},
Format: "",
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.KeyToPath"),
},
},
},
},
},
"defaultMode": {
SchemaProps: spec.SchemaProps{
Description: "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
Type: []string{"integer"},
Format: "int32",
},
},
"optional": {
SchemaProps: spec.SchemaProps{
Description: "Specify whether the Secret or its keys must be defined",
Type: []string{"boolean"},
Format: "",
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.KeyToPath"},
}
}
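// Example (illustrative sketch): a Secret mounted as a volume using the
// secretName, items, and defaultMode fields above. Only the listed key is
// projected, at the given relative path. Names are hypothetical.
//
//   volumes:
//   - name: secret-vol
//     secret:
//       secretName: mysecret
//       defaultMode: 0400
//       items:
//       - key: password
//         path: db/password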
func schema_k8sio_api_core_v1_SecurityContext(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"capabilities": {
SchemaProps: spec.SchemaProps{
Description: "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.",
Ref: ref("k8s.io/api/core/v1.Capabilities"),
},
},
"privileged": {
SchemaProps: spec.SchemaProps{
Description: "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.",
Type: []string{"boolean"},
Format: "",
},
},
"seLinuxOptions": {
SchemaProps: spec.SchemaProps{
Description: "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
Ref: ref("k8s.io/api/core/v1.SELinuxOptions"),
},
},
"windowsOptions": {
SchemaProps: spec.SchemaProps{
Description: "The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
Ref: ref("k8s.io/api/core/v1.WindowsSecurityContextOptions"),
},
},
"runAsUser": {
SchemaProps: spec.SchemaProps{
Description: "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
Type: []string{"integer"},
Format: "int64",
},
},
"runAsGroup": {
SchemaProps: spec.SchemaProps{
Description: "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
Type: []string{"integer"},
Format: "int64",
},
},
"runAsNonRoot": {
SchemaProps: spec.SchemaProps{
Description: "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
Type: []string{"boolean"},
Format: "",
},
},
"readOnlyRootFilesystem": {
SchemaProps: spec.SchemaProps{
Description: "Whether this container has a read-only root filesystem. Default is false.",
Type: []string{"boolean"},
Format: "",
},
},
"allowPrivilegeEscalation": {
SchemaProps: spec.SchemaProps{
Description: "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN",
Type: []string{"boolean"},
Format: "",
},
},
"procMount": {
SchemaProps: spec.SchemaProps{
Description: "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.Capabilities", "k8s.io/api/core/v1.SELinuxOptions", "k8s.io/api/core/v1.WindowsSecurityContextOptions"},
}
}
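// Example (illustrative sketch): a container-level SecurityContext combining
// several of the fields above. Values set here take precedence over the same
// fields in PodSecurityContext; the values shown are hypothetical.
//
//   containers:
//   - name: app
//     securityContext:
//       runAsNonRoot: true
//       runAsUser: 1000
//       allowPrivilegeEscalation: false
//       readOnlyRootFilesystem: true
//       capabilities:
//         drop: ["ALL"]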
func schema_k8sio_api_core_v1_SerializedReference(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "SerializedReference is a reference to serialized object.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"reference": {
SchemaProps: spec.SchemaProps{
Description: "The reference to an object in the system.",
Ref: ref("k8s.io/api/core/v1.ObjectReference"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ObjectReference"},
}
}
func schema_k8sio_api_core_v1_Service(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
Ref: ref("k8s.io/api/core/v1.ServiceSpec"),
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Most recently observed status of the service. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
Ref: ref("k8s.io/api/core/v1.ServiceStatus"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ServiceSpec", "k8s.io/api/core/v1.ServiceStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
func schema_k8sio_api_core_v1_ServiceAccount(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"secrets": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "name",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: https://kubernetes.io/docs/concepts/configuration/secret",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.ObjectReference"),
},
},
},
},
},
"imagePullSecrets": {
SchemaProps: spec.SchemaProps{
Description: "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.LocalObjectReference"),
},
},
},
},
},
"automountServiceAccountToken": {
SchemaProps: spec.SchemaProps{
Description: "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.",
Type: []string{"boolean"},
Format: "",
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
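// Example (illustrative sketch): a ServiceAccount using the imagePullSecrets
// and automountServiceAccountToken fields above. imagePullSecrets are
// consumed only by the kubelet for image pulls, unlike mountable secrets.
// Names are hypothetical.
//
//   apiVersion: v1
//   kind: ServiceAccount
//   metadata:
//     name: build-robot
//   imagePullSecrets:
//   - name: registry-cred
//   automountServiceAccountToken: false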
func schema_k8sio_api_core_v1_ServiceAccountList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ServiceAccountList is a list of ServiceAccount objects",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "List of ServiceAccounts. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.ServiceAccount"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ServiceAccount", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
func schema_k8sio_api_core_v1_ServiceAccountTokenProjection(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise).",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"audience": {
SchemaProps: spec.SchemaProps{
Description: "Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.",
Type: []string{"string"},
Format: "",
},
},
"expirationSeconds": {
SchemaProps: spec.SchemaProps{
Description: "ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.",
Type: []string{"integer"},
Format: "int64",
},
},
"path": {
SchemaProps: spec.SchemaProps{
Description: "Path is the path relative to the mount point of the file to project the token into.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"path"},
},
},
}
}
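// Illustrative usage (comment only): a projected service-account-token
// volume as it would appear under pod.spec.volumes. The audience, path, and
// volume name are hypothetical; per the description above,
// expirationSeconds must be at least 10 minutes (600 seconds).
//
//     - name: token
//       projected:
//         sources:
//           - serviceAccountToken:
//               audience: vault
//               expirationSeconds: 3600
//               path: token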
func schema_k8sio_api_core_v1_ServiceList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ServiceList holds a list of services.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "List of services",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.Service"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.Service", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
func schema_k8sio_api_core_v1_ServicePort(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ServicePort contains information on service's port.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.",
Type: []string{"string"},
Format: "",
},
},
"protocol": {
SchemaProps: spec.SchemaProps{
Description: "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.",
Type: []string{"string"},
Format: "",
},
},
"port": {
SchemaProps: spec.SchemaProps{
Description: "The port that will be exposed by this service.",
Type: []string{"integer"},
Format: "int32",
},
},
"targetPort": {
SchemaProps: spec.SchemaProps{
Description: "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service",
Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"),
},
},
"nodePort": {
SchemaProps: spec.SchemaProps{
Description: "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport",
Type: []string{"integer"},
Format: "int32",
},
},
},
Required: []string{"port"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/util/intstr.IntOrString"},
}
}
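// Illustrative usage (comment only): a ServicePort entry mapping port 80 on
// the Service to the named container port "http" on the backing pods. The
// names are hypothetical; targetPort may be a number or an IANA_SVC_NAME.
//
//     - name: web
//       protocol: TCP
//       port: 80
//       targetPort: http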
func schema_k8sio_api_core_v1_ServiceProxyOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ServiceProxyOptions is the query options to a Service's proxy call.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"path": {
SchemaProps: spec.SchemaProps{
Description: "Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_k8sio_api_core_v1_ServiceSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ServiceSpec describes the attributes that a user creates on a service.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"ports": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-map-keys": []interface{}{
"port",
"protocol",
},
"x-kubernetes-list-type": "map",
"x-kubernetes-patch-merge-key": "port",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.ServicePort"),
},
},
},
},
},
"selector": {
SchemaProps: spec.SchemaProps{
Description: "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"clusterIP": {
SchemaProps: spec.SchemaProps{
Description: "clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
Type: []string{"string"},
Format: "",
},
},
"type": {
SchemaProps: spec.SchemaProps{
Description: "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types",
Type: []string{"string"},
Format: "",
},
},
"externalIPs": {
SchemaProps: spec.SchemaProps{
Description: "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"sessionAffinity": {
SchemaProps: spec.SchemaProps{
Description: "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
Type: []string{"string"},
Format: "",
},
},
"loadBalancerIP": {
SchemaProps: spec.SchemaProps{
Description: "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.",
Type: []string{"string"},
Format: "",
},
},
"loadBalancerSourceRanges": {
SchemaProps: spec.SchemaProps{
Description: "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"externalName": {
SchemaProps: spec.SchemaProps{
Description: "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires Type to be ExternalName.",
Type: []string{"string"},
Format: "",
},
},
"externalTrafficPolicy": {
SchemaProps: spec.SchemaProps{
Description: "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.",
Type: []string{"string"},
Format: "",
},
},
"healthCheckNodePort": {
SchemaProps: spec.SchemaProps{
Description: "healthCheckNodePort specifies the healthcheck nodePort for the service. If not specified, HealthCheckNodePort is created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. Only effects when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.",
Type: []string{"integer"},
Format: "int32",
},
},
"publishNotReadyAddresses": {
SchemaProps: spec.SchemaProps{
Description: "publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false. The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery.",
Type: []string{"boolean"},
Format: "",
},
},
"sessionAffinityConfig": {
SchemaProps: spec.SchemaProps{
Description: "sessionAffinityConfig contains the configurations of session affinity.",
Ref: ref("k8s.io/api/core/v1.SessionAffinityConfig"),
},
},
"ipFamily": {
SchemaProps: spec.SchemaProps{
Description: "ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which allocate external load-balancers should use the same IP family. Endpoints for this Service will be of this family. This field is immutable after creation. Assigning a ServiceIPFamily not available in the cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ServicePort", "k8s.io/api/core/v1.SessionAffinityConfig"},
}
}
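// Illustrative usage (comment only): a ServiceSpec combining the common
// fields above. The selector labels, port numbers, and NodePort value are
// hypothetical; nodePort only applies because type is NodePort.
//
//     type: NodePort
//     selector:
//       app: web
//     ports:
//       - port: 80
//         targetPort: 8080
//         nodePort: 30080
//     sessionAffinity: ClientIP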
func schema_k8sio_api_core_v1_ServiceStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ServiceStatus represents the current status of a service.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"loadBalancer": {
SchemaProps: spec.SchemaProps{
Description: "LoadBalancer contains the current status of the load-balancer, if one is present.",
Ref: ref("k8s.io/api/core/v1.LoadBalancerStatus"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.LoadBalancerStatus"},
}
}
func schema_k8sio_api_core_v1_SessionAffinityConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "SessionAffinityConfig represents the configurations of session affinity.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"clientIP": {
SchemaProps: spec.SchemaProps{
Description: "clientIP contains the configurations of Client IP based session affinity.",
Ref: ref("k8s.io/api/core/v1.ClientIPConfig"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ClientIPConfig"},
}
}
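// Illustrative usage (comment only): client-IP session affinity with a
// sticky-session timeout, as it would appear in a ServiceSpec. The timeout
// value is hypothetical.
//
//     sessionAffinity: ClientIP
//     sessionAffinityConfig:
//       clientIP:
//         timeoutSeconds: 10800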
func schema_k8sio_api_core_v1_StorageOSPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents a StorageOS persistent volume resource.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"volumeName": {
SchemaProps: spec.SchemaProps{
Description: "VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.",
Type: []string{"string"},
Format: "",
},
},
"volumeNamespace": {
SchemaProps: spec.SchemaProps{
Description: "VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.",
Type: []string{"string"},
Format: "",
},
},
"fsType": {
SchemaProps: spec.SchemaProps{
Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
Type: []string{"string"},
Format: "",
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
Type: []string{"boolean"},
Format: "",
},
},
"secretRef": {
SchemaProps: spec.SchemaProps{
Description: "SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.",
Ref: ref("k8s.io/api/core/v1.ObjectReference"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ObjectReference"},
}
}
func schema_k8sio_api_core_v1_StorageOSVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents a StorageOS persistent volume resource.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"volumeName": {
SchemaProps: spec.SchemaProps{
Description: "VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.",
Type: []string{"string"},
Format: "",
},
},
"volumeNamespace": {
SchemaProps: spec.SchemaProps{
Description: "VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.",
Type: []string{"string"},
Format: "",
},
},
"fsType": {
SchemaProps: spec.SchemaProps{
Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
Type: []string{"string"},
Format: "",
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
Type: []string{"boolean"},
Format: "",
},
},
"secretRef": {
SchemaProps: spec.SchemaProps{
Description: "SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.",
Ref: ref("k8s.io/api/core/v1.LocalObjectReference"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.LocalObjectReference"},
}
}
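// Illustrative usage (comment only): a StorageOS volume under
// pod.spec.volumes (names hypothetical).
//
//     - name: data
//       storageos:
//         volumeName: redis-vol01
//         fsType: ext4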
func schema_k8sio_api_core_v1_Sysctl(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Sysctl defines a kernel parameter to be set",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name of a property to set",
Type: []string{"string"},
Format: "",
},
},
"value": {
SchemaProps: spec.SchemaProps{
Description: "Value of a property to set",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"name", "value"},
},
},
}
}
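// Illustrative usage (comment only): sysctls under
// pod.spec.securityContext (parameter and value hypothetical).
//
//     securityContext:
//       sysctls:
//         - name: net.core.somaxconn
//           value: "1024"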
func schema_k8sio_api_core_v1_TCPSocketAction(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TCPSocketAction describes an action based on opening a socket",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"port": {
SchemaProps: spec.SchemaProps{
Description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.",
Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"),
},
},
"host": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Host name to connect to, defaults to the pod IP.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"port"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/util/intstr.IntOrString"},
}
}
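// Illustrative usage (comment only): a TCPSocketAction as a container
// liveness probe; the port and surrounding probe settings are hypothetical.
//
//     livenessProbe:
//       tcpSocket:
//         port: 8080
//       initialDelaySeconds: 15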
func schema_k8sio_api_core_v1_Taint(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "The node this Taint is attached to has the \"effect\" on any pod that does not tolerate the Taint.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"key": {
SchemaProps: spec.SchemaProps{
Description: "Required. The taint key to be applied to a node.",
Type: []string{"string"},
Format: "",
},
},
"value": {
SchemaProps: spec.SchemaProps{
Description: "Required. The taint value corresponding to the taint key.",
Type: []string{"string"},
Format: "",
},
},
"effect": {
SchemaProps: spec.SchemaProps{
Description: "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.",
Type: []string{"string"},
Format: "",
},
},
"timeAdded": {
SchemaProps: spec.SchemaProps{
Description: "TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
},
Required: []string{"key", "effect"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
}
}
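// Illustrative usage (comment only): taints appear under node.spec.taints;
// this one repels pods that do not tolerate it (key and value hypothetical).
//
//     - key: dedicated
//       value: gpu
//       effect: NoSchedule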
func schema_k8sio_api_core_v1_Toleration(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"key": {
SchemaProps: spec.SchemaProps{
Description: "Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.",
Type: []string{"string"},
Format: "",
},
},
"operator": {
SchemaProps: spec.SchemaProps{
Description: "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.",
Type: []string{"string"},
Format: "",
},
},
"value": {
SchemaProps: spec.SchemaProps{
Description: "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.",
Type: []string{"string"},
Format: "",
},
},
"effect": {
SchemaProps: spec.SchemaProps{
Description: "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.",
Type: []string{"string"},
Format: "",
},
},
"tolerationSeconds": {
SchemaProps: spec.SchemaProps{
Description: "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.",
Type: []string{"integer"},
Format: "int64",
},
},
},
},
},
}
}
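// Illustrative usage (comment only): the matching toleration under
// pod.spec.tolerations for the taint sketched above. tolerationSeconds is
// omitted since it only applies to the NoExecute effect.
//
//     - key: dedicated
//       operator: Equal
//       value: gpu
//       effect: NoSchedule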
func schema_k8sio_api_core_v1_TopologySelectorLabelRequirement(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "A topology selector requirement is a selector that matches given label. This is an alpha feature and may change in the future.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"key": {
SchemaProps: spec.SchemaProps{
Description: "The label key that the selector applies to.",
Type: []string{"string"},
Format: "",
},
},
"values": {
SchemaProps: spec.SchemaProps{
Description: "An array of string values. One value must match the label to be selected. Each entry in Values is ORed.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
Required: []string{"key", "values"},
},
},
}
}
func schema_k8sio_api_core_v1_TopologySelectorTerm(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "A topology selector term represents the result of label queries. A null or empty topology selector term matches no objects. The requirements of them are ANDed. It provides a subset of functionality as NodeSelectorTerm. This is an alpha feature and may change in the future.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"matchLabelExpressions": {
SchemaProps: spec.SchemaProps{
Description: "A list of topology selector requirements by labels.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/api/core/v1.TopologySelectorLabelRequirement"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.TopologySelectorLabelRequirement"},
}
}
func schema_k8sio_api_core_v1_TopologySpreadConstraint(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TopologySpreadConstraint specifies how to spread matching pods among the given topology.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"maxSkew": {
SchemaProps: spec.SchemaProps{
Description: "MaxSkew describes the degree to which pods may be unevenly distributed. It's the maximum permitted difference between the number of matching pods in any two topology domains of a given topology type. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. It's a required field. Default value is 1 and 0 is not allowed.",
Type: []string{"integer"},
Format: "int32",
},
},
"topologyKey": {
SchemaProps: spec.SchemaProps{
Description: "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. It's a required field.",
Type: []string{"string"},
Format: "",
},
},
"whenUnsatisfiable": {
SchemaProps: spec.SchemaProps{
Description: "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it - ScheduleAnyway tells the scheduler to still schedule it It's considered as \"Unsatisfiable\" if and only if placing incoming pod on any topology violates \"MaxSkew\". For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field.",
Type: []string{"string"},
Format: "",
},
},
"labelSelector": {
SchemaProps: spec.SchemaProps{
Description: "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"),
},
},
},
Required: []string{"maxSkew", "topologyKey", "whenUnsatisfiable"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"},
}
}
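// Illustrative usage (comment only): spread pods labeled app=web evenly
// across zones, allowing a skew of at most one pod between any two zones
// (labels hypothetical).
//
//     topologySpreadConstraints:
//       - maxSkew: 1
//         topologyKey: topology.kubernetes.io/zone
//         whenUnsatisfiable: DoNotSchedule
//         labelSelector:
//           matchLabels:
//             app: web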
func schema_k8sio_api_core_v1_TypedLocalObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"apiGroup": {
SchemaProps: spec.SchemaProps{
Description: "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.",
Type: []string{"string"},
Format: "",
},
},
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is the type of resource being referenced",
Type: []string{"string"},
Format: "",
},
},
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name is the name of resource being referenced",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"kind", "name"},
},
},
}
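// Illustrative usage (comment only): a TypedLocalObjectReference as it might
// appear in a PersistentVolumeClaim dataSource pointing at a VolumeSnapshot
// (names hypothetical).
//
//     dataSource:
//       apiGroup: snapshot.storage.k8s.io
//       kind: VolumeSnapshot
//       name: nightly-snap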
}
func schema_k8sio_api_core_v1_Volume(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Volume represents a named volume in a pod that may be accessed by any container in the pod.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Volume's name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
Type: []string{"string"},
Format: "",
},
},
"hostPath": {
SchemaProps: spec.SchemaProps{
Description: "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"),
},
},
"emptyDir": {
SchemaProps: spec.SchemaProps{
Description: "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir",
Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"),
},
},
"gcePersistentDisk": {
SchemaProps: spec.SchemaProps{
Description: "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"),
},
},
"awsElasticBlockStore": {
SchemaProps: spec.SchemaProps{
Description: "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"),
},
},
"gitRepo": {
SchemaProps: spec.SchemaProps{
Description: "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.",
Ref: ref("k8s.io/api/core/v1.GitRepoVolumeSource"),
},
},
"secret": {
SchemaProps: spec.SchemaProps{
Description: "Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret",
Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"),
},
},
"nfs": {
SchemaProps: spec.SchemaProps{
Description: "NFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"),
},
},
"iscsi": {
SchemaProps: spec.SchemaProps{
Description: "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md",
Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"),
},
},
"glusterfs": {
SchemaProps: spec.SchemaProps{
Description: "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md",
Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"),
},
},
"persistentVolumeClaim": {
SchemaProps: spec.SchemaProps{
Description: "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"),
},
},
"rbd": {
SchemaProps: spec.SchemaProps{
Description: "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md",
Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"),
},
},
"flexVolume": {
SchemaProps: spec.SchemaProps{
Description: "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"),
},
},
"cinder": {
SchemaProps: spec.SchemaProps{
Description: "Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"),
},
},
"cephfs": {
SchemaProps: spec.SchemaProps{
Description: "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"),
},
},
"flocker": {
SchemaProps: spec.SchemaProps{
Description: "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running",
Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"),
},
},
"downwardAPI": {
SchemaProps: spec.SchemaProps{
Description: "DownwardAPI represents downward API about the pod that should populate this volume",
Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"),
},
},
"fc": {
SchemaProps: spec.SchemaProps{
Description: "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
Ref: ref("k8s.io/api/core/v1.FCVolumeSource"),
},
},
"azureFile": {
SchemaProps: spec.SchemaProps{
Description: "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"),
},
},
"configMap": {
SchemaProps: spec.SchemaProps{
Description: "ConfigMap represents a configMap that should populate this volume",
Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"),
},
},
"vsphereVolume": {
SchemaProps: spec.SchemaProps{
Description: "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"),
},
},
"quobyte": {
SchemaProps: spec.SchemaProps{
Description: "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"),
},
},
"azureDisk": {
SchemaProps: spec.SchemaProps{
Description: "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"),
},
},
"photonPersistentDisk": {
SchemaProps: spec.SchemaProps{
Description: "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"),
},
},
"projected": {
SchemaProps: spec.SchemaProps{
Description: "Items for all in one resources secrets, configmaps, and downward API",
Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"),
},
},
"portworxVolume": {
SchemaProps: spec.SchemaProps{
Description: "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine",
Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"),
},
},
"scaleIO": {
SchemaProps: spec.SchemaProps{
Description: "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"),
},
},
"storageos": {
SchemaProps: spec.SchemaProps{
Description: "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.",
Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"),
},
},
"csi": {
SchemaProps: spec.SchemaProps{
Description: "CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature).",
Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"),
},
},
},
Required: []string{"name"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GitRepoVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"},
}
}
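// Illustrative usage (comment only): two pod volumes showing that exactly
// one source field is set per Volume entry (names hypothetical).
//
//     volumes:
//       - name: scratch
//         emptyDir: {}
//       - name: config
//         configMap:
//           name: app-config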
func schema_k8sio_api_core_v1_VolumeDevice(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "volumeDevice describes a mapping of a raw block device within a container.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "name must match the name of a persistentVolumeClaim in the pod",
Type: []string{"string"},
Format: "",
},
},
"devicePath": {
SchemaProps: spec.SchemaProps{
Description: "devicePath is the path inside of the container that the device will be mapped to.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"name", "devicePath"},
},
},
}
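// Illustrative usage (comment only): a raw block device mapping under
// container.volumeDevices; the name must match a PVC-backed pod volume
// (names and device path hypothetical).
//
//     volumeDevices:
//       - name: block-pvc
//         devicePath: /dev/xvda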
}
func schema_k8sio_api_core_v1_VolumeMount(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "VolumeMount describes a mounting of a Volume within a container.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "This must match the Name of a Volume.",
Type: []string{"string"},
Format: "",
},
},
"readOnly": {
SchemaProps: spec.SchemaProps{
Description: "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.",
Type: []string{"boolean"},
Format: "",
},
},
"mountPath": {
SchemaProps: spec.SchemaProps{
Description: "Path within the container at which the volume should be mounted. Must not contain ':'.",
Type: []string{"string"},
Format: "",
},
},
"subPath": {
SchemaProps: spec.SchemaProps{
Description: "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).",
Type: []string{"string"},
Format: "",
},
},
"mountPropagation": {
SchemaProps: spec.SchemaProps{
Description: "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.",
Type: []string{"string"},
Format: "",
},
},
"subPathExpr": {
SchemaProps: spec.SchemaProps{
Description: "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"name", "mountPath"},
},
},
}
}
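// Illustrative usage (comment only): mounting the "config" volume read-only
// and projecting a single file from it via subPath (paths hypothetical).
//
//     volumeMounts:
//       - name: config
//         mountPath: /etc/app/app.yaml
//         subPath: app.yaml
//         readOnly: true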
func schema_k8sio_api_core_v1_VolumeNodeAffinity(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"required": {
SchemaProps: spec.SchemaProps{
Description: "Required specifies hard node constraints that must be met.",
Ref: ref("k8s.io/api/core/v1.NodeSelector"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.NodeSelector"},
}
}
func schema_k8sio_api_core_v1_VolumeProjection(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Projection that may be projected along with other supported volume types",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"secret": {
SchemaProps: spec.SchemaProps{
Description: "information about the secret data to project",
Ref: ref("k8s.io/api/core/v1.SecretProjection"),
},
},
"downwardAPI": {
SchemaProps: spec.SchemaProps{
Description: "information about the downwardAPI data to project",
Ref: ref("k8s.io/api/core/v1.DownwardAPIProjection"),
},
},
"configMap": {
SchemaProps: spec.SchemaProps{
Description: "information about the configMap data to project",
Ref: ref("k8s.io/api/core/v1.ConfigMapProjection"),
},
},
"serviceAccountToken": {
SchemaProps: spec.SchemaProps{
Description: "information about the serviceAccountToken data to project",
Ref: ref("k8s.io/api/core/v1.ServiceAccountTokenProjection"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.ConfigMapProjection", "k8s.io/api/core/v1.DownwardAPIProjection", "k8s.io/api/core/v1.SecretProjection", "k8s.io/api/core/v1.ServiceAccountTokenProjection"},
}
}
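// Illustrative usage (comment only): a projected volume combining several
// projection sources into one directory (names hypothetical).
//
//     - name: bundle
//       projected:
//         sources:
//           - configMap:
//               name: app-config
//           - secret:
//               name: app-tls
//           - downwardAPI:
//               items:
//                 - path: labels
//                   fieldRef:
//                     fieldPath: metadata.labels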
func schema_k8sio_api_core_v1_VolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents the source of a volume to mount. Only one of its members may be specified.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"hostPath": {
SchemaProps: spec.SchemaProps{
Description: "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"),
},
},
"emptyDir": {
SchemaProps: spec.SchemaProps{
Description: "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir",
Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"),
},
},
"gcePersistentDisk": {
SchemaProps: spec.SchemaProps{
Description: "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"),
},
},
"awsElasticBlockStore": {
SchemaProps: spec.SchemaProps{
Description: "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"),
},
},
"gitRepo": {
SchemaProps: spec.SchemaProps{
Description: "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.",
Ref: ref("k8s.io/api/core/v1.GitRepoVolumeSource"),
},
},
"secret": {
SchemaProps: spec.SchemaProps{
Description: "Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret",
Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"),
},
},
"nfs": {
SchemaProps: spec.SchemaProps{
Description: "NFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"),
},
},
"iscsi": {
SchemaProps: spec.SchemaProps{
Description: "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md",
Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"),
},
},
"glusterfs": {
SchemaProps: spec.SchemaProps{
Description: "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md",
Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"),
},
},
"persistentVolumeClaim": {
SchemaProps: spec.SchemaProps{
Description: "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"),
},
},
"rbd": {
SchemaProps: spec.SchemaProps{
Description: "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md",
Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"),
},
},
"flexVolume": {
SchemaProps: spec.SchemaProps{
Description: "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"),
},
},
"cinder": {
SchemaProps: spec.SchemaProps{
Description: "Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"),
},
},
"cephfs": {
SchemaProps: spec.SchemaProps{
Description: "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"),
},
},
"flocker": {
SchemaProps: spec.SchemaProps{
Description: "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running",
Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"),
},
},
"downwardAPI": {
SchemaProps: spec.SchemaProps{
Description: "DownwardAPI represents downward API about the pod that should populate this volume",
Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"),
},
},
"fc": {
SchemaProps: spec.SchemaProps{
Description: "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
Ref: ref("k8s.io/api/core/v1.FCVolumeSource"),
},
},
"azureFile": {
SchemaProps: spec.SchemaProps{
Description: "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"),
},
},
"configMap": {
SchemaProps: spec.SchemaProps{
Description: "ConfigMap represents a configMap that should populate this volume",
Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"),
},
},
"vsphereVolume": {
SchemaProps: spec.SchemaProps{
Description: "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"),
},
},
"quobyte": {
SchemaProps: spec.SchemaProps{
Description: "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"),
},
},
"azureDisk": {
SchemaProps: spec.SchemaProps{
Description: "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"),
},
},
"photonPersistentDisk": {
SchemaProps: spec.SchemaProps{
Description: "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"),
},
},
"projected": {
SchemaProps: spec.SchemaProps{
Description: "Items for all in one resources secrets, configmaps, and downward API",
Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"),
},
},
"portworxVolume": {
SchemaProps: spec.SchemaProps{
Description: "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine",
Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"),
},
},
"scaleIO": {
SchemaProps: spec.SchemaProps{
Description: "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"),
},
},
"storageos": {
SchemaProps: spec.SchemaProps{
Description: "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.",
Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"),
},
},
"csi": {
SchemaProps: spec.SchemaProps{
Description: "CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature).",
Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GitRepoVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"},
}
}
func schema_k8sio_api_core_v1_VsphereVirtualDiskVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Represents a vSphere volume resource.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"volumePath": {
SchemaProps: spec.SchemaProps{
Description: "Path that identifies vSphere volume vmdk",
Type: []string{"string"},
Format: "",
},
},
"fsType": {
SchemaProps: spec.SchemaProps{
Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
Type: []string{"string"},
Format: "",
},
},
"storagePolicyName": {
SchemaProps: spec.SchemaProps{
Description: "Storage Policy Based Management (SPBM) profile name.",
Type: []string{"string"},
Format: "",
},
},
"storagePolicyID": {
SchemaProps: spec.SchemaProps{
Description: "Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"volumePath"},
},
},
}
}
func schema_k8sio_api_core_v1_WeightedPodAffinityTerm(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"weight": {
SchemaProps: spec.SchemaProps{
Description: "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.",
Type: []string{"integer"},
Format: "int32",
},
},
"podAffinityTerm": {
SchemaProps: spec.SchemaProps{
Description: "Required. A pod affinity term, associated with the corresponding weight.",
Ref: ref("k8s.io/api/core/v1.PodAffinityTerm"),
},
},
},
Required: []string{"weight", "podAffinityTerm"},
},
},
Dependencies: []string{
"k8s.io/api/core/v1.PodAffinityTerm"},
}
}
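// Illustrative usage (comment only): a weighted term under
// podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution, spreading
// app=web pods across hostnames (labels hypothetical).
//
//     - weight: 100
//       podAffinityTerm:
//         topologyKey: kubernetes.io/hostname
//         labelSelector:
//           matchLabels:
//             app: web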
func schema_k8sio_api_core_v1_WindowsSecurityContextOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "WindowsSecurityContextOptions contain Windows-specific options and credentials.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"gmsaCredentialSpecName": {
SchemaProps: spec.SchemaProps{
Description: "GMSACredentialSpecName is the name of the GMSA credential spec to use. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.",
Type: []string{"string"},
Format: "",
},
},
"gmsaCredentialSpec": {
SchemaProps: spec.SchemaProps{
Description: "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.",
Type: []string{"string"},
Format: "",
},
},
"runAsUserName": {
SchemaProps: spec.SchemaProps{
Description: "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. This field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_meta_v1_APIGroup(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "APIGroup contains the name, the supported versions, and the preferred version of a group.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"name": {
SchemaProps: spec.SchemaProps{
Description: "name is the name of the group.",
Type: []string{"string"},
Format: "",
},
},
"versions": {
SchemaProps: spec.SchemaProps{
Description: "versions are the versions supported in this group.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery"),
},
},
},
},
},
"preferredVersion": {
SchemaProps: spec.SchemaProps{
Description: "preferredVersion is the version preferred by the API server, which probably is the storage version.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery"),
},
},
"serverAddressByClientCIDRs": {
SchemaProps: spec.SchemaProps{
Description: "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR"),
},
},
},
},
},
},
Required: []string{"name", "versions"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery", "k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR"},
}
}
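// examplePreferredVersion is an illustrative sketch, not part of the
// generated output: it shows how a discovery client might consume the
// APIGroup schema above, preferring the server-advertised preferredVersion
// and falling back to the first entry of versions.
func examplePreferredVersion(g v1.APIGroup) string {
	if g.PreferredVersion.GroupVersion != "" {
		return g.PreferredVersion.GroupVersion
	}
	if len(g.Versions) > 0 {
		return g.Versions[0].GroupVersion
	}
	return ""
}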
func schema_pkg_apis_meta_v1_APIGroupList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "APIGroupList is a list of APIGroup, to allow clients to discover the API at /apis.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"groups": {
SchemaProps: spec.SchemaProps{
Description: "groups is a list of APIGroup.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.APIGroup"),
},
},
},
},
},
},
Required: []string{"groups"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.APIGroup"},
}
}
func schema_pkg_apis_meta_v1_APIResource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "APIResource specifies the name of a resource and whether it is namespaced.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "name is the plural name of the resource.",
Type: []string{"string"},
Format: "",
},
},
"singularName": {
SchemaProps: spec.SchemaProps{
Description: "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.",
Type: []string{"string"},
Format: "",
},
},
"namespaced": {
SchemaProps: spec.SchemaProps{
Description: "namespaced indicates if a resource is namespaced or not.",
Type: []string{"boolean"},
Format: "",
},
},
"group": {
SchemaProps: spec.SchemaProps{
Description: "group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".",
Type: []string{"string"},
Format: "",
},
},
"version": {
SchemaProps: spec.SchemaProps{
Description: "version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".",
Type: []string{"string"},
Format: "",
},
},
"kind": {
SchemaProps: spec.SchemaProps{
Description: "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')",
Type: []string{"string"},
Format: "",
},
},
"verbs": {
SchemaProps: spec.SchemaProps{
Description: "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"shortNames": {
SchemaProps: spec.SchemaProps{
Description: "shortNames is a list of suggested short names of the resource.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"categories": {
SchemaProps: spec.SchemaProps{
Description: "categories is a list of the grouped resources this resource belongs to (e.g. 'all')",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"storageVersionHash": {
SchemaProps: spec.SchemaProps{
Description: "The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"name", "singularName", "namespaced", "kind", "verbs"},
},
},
}
}
func schema_pkg_apis_meta_v1_APIResourceList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"groupVersion": {
SchemaProps: spec.SchemaProps{
Description: "groupVersion is the group and version this APIResourceList is for.",
Type: []string{"string"},
Format: "",
},
},
"resources": {
SchemaProps: spec.SchemaProps{
Description: "resources contains the name of the resources and if they are namespaced.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.APIResource"),
},
},
},
},
},
},
Required: []string{"groupVersion", "resources"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.APIResource"},
}
}
func schema_pkg_apis_meta_v1_APIVersions(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "APIVersions lists the versions that are available, to allow clients to discover the API at /api, which is the root path of the legacy v1 API.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"versions": {
SchemaProps: spec.SchemaProps{
Description: "versions are the api versions that are available.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"serverAddressByClientCIDRs": {
SchemaProps: spec.SchemaProps{
Description: "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR"),
},
},
},
},
},
},
Required: []string{"versions", "serverAddressByClientCIDRs"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR"},
}
}
func schema_pkg_apis_meta_v1_CreateOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "CreateOptions may be provided when creating an API object.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"dryRun": {
SchemaProps: spec.SchemaProps{
Description: "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"fieldManager": {
SchemaProps: spec.SchemaProps{
Description: "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
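// exampleCreateOptions is an illustrative sketch, not part of the generated
// output: a value conforming to the CreateOptions schema above that requests
// a full server-side dry run while still recording a field manager. The
// manager name "example-controller" is made up.
func exampleCreateOptions() v1.CreateOptions {
	return v1.CreateOptions{
		DryRun:       []string{v1.DryRunAll}, // "All": process every dry run stage
		FieldManager: "example-controller",
	}
}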
func schema_pkg_apis_meta_v1_DeleteOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "DeleteOptions may be provided when deleting an API object.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"gracePeriodSeconds": {
SchemaProps: spec.SchemaProps{
Description: "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
Type: []string{"integer"},
Format: "int64",
},
},
"preconditions": {
SchemaProps: spec.SchemaProps{
Description: "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Preconditions"),
},
},
"orphanDependents": {
SchemaProps: spec.SchemaProps{
Description: "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
Type: []string{"boolean"},
Format: "",
},
},
"propagationPolicy": {
SchemaProps: spec.SchemaProps{
Description: "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
Type: []string{"string"},
Format: "",
},
},
"dryRun": {
SchemaProps: spec.SchemaProps{
Description: "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.Preconditions"},
}
}
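// exampleDeleteOptions is an illustrative sketch, not part of the generated
// output: a value conforming to the DeleteOptions schema above requesting a
// 30-second graceful, foreground-cascading delete. Pointer fields are used so
// "unset" can be distinguished from an explicit zero value.
func exampleDeleteOptions() v1.DeleteOptions {
	grace := int64(30)
	policy := v1.DeletePropagationForeground
	return v1.DeleteOptions{
		GracePeriodSeconds: &grace,
		PropagationPolicy:  &policy,
	}
}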
func schema_pkg_apis_meta_v1_Duration(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Duration is a wrapper around time.Duration which supports correct marshaling to YAML and JSON. In particular, it marshals into strings, which can be used as map keys in json.",
Type: v1.Duration{}.OpenAPISchemaType(),
Format: v1.Duration{}.OpenAPISchemaFormat(),
},
},
}
}
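// The Duration schema above delegates to the type's own OpenAPISchemaType and
// OpenAPISchemaFormat methods because Duration marshals to a string (e.g.
// "1h30m") rather than an object. exampleDurationSchema is an illustrative
// sketch, not part of the generated output, showing what those methods report.
func exampleDurationSchema() ([]string, string) {
	// For v1.Duration this is []string{"string"} and the empty format.
	return v1.Duration{}.OpenAPISchemaType(), v1.Duration{}.OpenAPISchemaFormat()
}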
func schema_pkg_apis_meta_v1_ExportOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ExportOptions is the query options to the standard REST get call. Deprecated. Planned for removal in 1.18.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"export": {
SchemaProps: spec.SchemaProps{
Description: "Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.",
Type: []string{"boolean"},
Format: "",
},
},
"exact": {
SchemaProps: spec.SchemaProps{
Description: "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.",
Type: []string{"boolean"},
Format: "",
},
},
},
Required: []string{"export", "exact"},
},
},
}
}
func schema_pkg_apis_meta_v1_FieldsV1(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:<name>', where <name> is the name of a field in a struct, or key in a map 'v:<value>', where <value> is the exact json formatted value of a list item 'i:<index>', where <index> is position of a item in a list 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff",
Type: []string{"object"},
},
},
}
}
func schema_pkg_apis_meta_v1_GetOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "GetOptions is the standard query options to the standard REST get call.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"resourceVersion": {
SchemaProps: spec.SchemaProps{
Description: "When specified: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
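// exampleCachedGet is an illustrative sketch, not part of the generated
// output, of the resourceVersion semantics above: "0" serves the read from
// cache with no freshness guarantee, while the empty string forces a
// quorum read from remote storage.
func exampleCachedGet() v1.GetOptions {
	return v1.GetOptions{ResourceVersion: "0"}
}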
func schema_pkg_apis_meta_v1_GroupKind(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying concepts during lookup stages without having partially valid types",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"group": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"kind": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"group", "kind"},
},
},
}
}
func schema_pkg_apis_meta_v1_GroupResource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying concepts during lookup stages without having partially valid types",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"group": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"resource": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"group", "resource"},
},
},
}
}
func schema_pkg_apis_meta_v1_GroupVersion(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "GroupVersion contains the \"group\" and the \"version\", which uniquely identifies the API.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"group": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"version": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"group", "version"},
},
},
}
}
func schema_pkg_apis_meta_v1_GroupVersionForDiscovery(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"groupVersion": {
SchemaProps: spec.SchemaProps{
Description: "groupVersion specifies the API group and version in the form \"group/version\"",
Type: []string{"string"},
Format: "",
},
},
"version": {
SchemaProps: spec.SchemaProps{
Description: "version specifies the version in the form of \"version\". This is to save the clients the trouble of splitting the GroupVersion.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"groupVersion", "version"},
},
},
}
}
func schema_pkg_apis_meta_v1_GroupVersionKind(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"group": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"version": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"kind": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"group", "version", "kind"},
},
},
}
}
func schema_pkg_apis_meta_v1_GroupVersionResource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"group": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"version": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"resource": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"group", "version", "resource"},
},
},
}
}
func schema_pkg_apis_meta_v1_InternalEvent(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "InternalEvent makes watch.Event versioned",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"Type": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"Object": {
SchemaProps: spec.SchemaProps{
Description: "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Bookmark: the object (instance of a type being watched) where\n only ResourceVersion field is set. On successful restart of watch from a\n bookmark resourceVersion, client is guaranteed to not get repeat event\n nor miss any events.\n * If Type is Error: *api.Status is recommended; other types may make sense\n depending on context.",
Ref: ref("k8s.io/apimachinery/pkg/runtime.Object"),
},
},
},
Required: []string{"Type", "Object"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/runtime.Object"},
}
}
func schema_pkg_apis_meta_v1_LabelSelector(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"matchLabels": {
SchemaProps: spec.SchemaProps{
Description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"matchExpressions": {
SchemaProps: spec.SchemaProps{
Description: "matchExpressions is a list of label selector requirements. The requirements are ANDed.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelectorRequirement"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelectorRequirement"},
}
}
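// exampleLabelSelector is an illustrative sketch, not part of the generated
// output: a value conforming to the LabelSelector schema above. matchLabels
// and matchExpressions are ANDed, so this selector matches objects labeled
// app=web AND tier in (frontend, edge). The label values are made up.
func exampleLabelSelector() *v1.LabelSelector {
	return &v1.LabelSelector{
		MatchLabels: map[string]string{"app": "web"},
		MatchExpressions: []v1.LabelSelectorRequirement{
			{Key: "tier", Operator: v1.LabelSelectorOpIn, Values: []string{"frontend", "edge"}},
		},
	}
}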
func schema_pkg_apis_meta_v1_LabelSelectorRequirement(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"key": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "key",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "key is the label key that the selector applies to.",
Type: []string{"string"},
Format: "",
},
},
"operator": {
SchemaProps: spec.SchemaProps{
Description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.",
Type: []string{"string"},
Format: "",
},
},
"values": {
SchemaProps: spec.SchemaProps{
Description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
Required: []string{"key", "operator"},
},
},
}
}
func schema_pkg_apis_meta_v1_List(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "List holds a list of objects, which may not be known by the server.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "List of objects",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "k8s.io/apimachinery/pkg/runtime.RawExtension"},
}
}
func schema_pkg_apis_meta_v1_ListMeta(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"selfLink": {
SchemaProps: spec.SchemaProps{
Description: "selfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.",
Type: []string{"string"},
Format: "",
},
},
"resourceVersion": {
SchemaProps: spec.SchemaProps{
Description: "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency",
Type: []string{"string"},
Format: "",
},
},
"continue": {
SchemaProps: spec.SchemaProps{
Description: "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.",
Type: []string{"string"},
Format: "",
},
},
"remainingItemCount": {
SchemaProps: spec.SchemaProps{
Description: "remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.",
Type: []string{"integer"},
Format: "int64",
},
},
},
},
},
}
}
func schema_pkg_apis_meta_v1_ListOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ListOptions is the query options to a standard REST list call.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"labelSelector": {
SchemaProps: spec.SchemaProps{
Description: "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
Type: []string{"string"},
Format: "",
},
},
"fieldSelector": {
SchemaProps: spec.SchemaProps{
Description: "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
Type: []string{"string"},
Format: "",
},
},
"watch": {
SchemaProps: spec.SchemaProps{
Description: "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
Type: []string{"boolean"},
Format: "",
},
},
"allowWatchBookmarks": {
SchemaProps: spec.SchemaProps{
Description: "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.\n\nThis field is beta.",
Type: []string{"boolean"},
Format: "",
},
},
"resourceVersion": {
SchemaProps: spec.SchemaProps{
Description: "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
Type: []string{"string"},
Format: "",
},
},
"timeoutSeconds": {
SchemaProps: spec.SchemaProps{
Description: "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
Type: []string{"integer"},
Format: "int64",
},
},
"limit": {
SchemaProps: spec.SchemaProps{
Description: "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
Type: []string{"integer"},
Format: "int64",
},
},
"continue": {
SchemaProps: spec.SchemaProps{
Description: "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
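// exampleListChunking is an illustrative sketch, not part of the generated
// output, of the limit/continue contract above: the first request sets only
// Limit, and each follow-up request echoes back the opaque continue token
// from the previous response until the server stops returning one.
func exampleListChunking(previousContinue string) v1.ListOptions {
	return v1.ListOptions{
		Limit:    500, // the server may return fewer items, or ignore Limit entirely
		Continue: previousContinue,
	}
}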
func schema_pkg_apis_meta_v1_ManagedFieldsEntry(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"manager": {
SchemaProps: spec.SchemaProps{
Description: "Manager is an identifier of the workflow managing these fields.",
Type: []string{"string"},
Format: "",
},
},
"operation": {
SchemaProps: spec.SchemaProps{
Description: "Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.",
Type: []string{"string"},
Format: "",
},
},
"time": {
SchemaProps: spec.SchemaProps{
Description: "Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"fieldsType": {
SchemaProps: spec.SchemaProps{
Description: "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\"",
Type: []string{"string"},
Format: "",
},
},
"fieldsV1": {
SchemaProps: spec.SchemaProps{
Description: "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.FieldsV1"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.FieldsV1", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
}
}
func schema_pkg_apis_meta_v1_MicroTime(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "MicroTime is version of Time with microsecond level precision.",
Type: v1.MicroTime{}.OpenAPISchemaType(),
Format: v1.MicroTime{}.OpenAPISchemaFormat(),
},
},
}
}
func schema_pkg_apis_meta_v1_ObjectMeta(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names",
Type: []string{"string"},
Format: "",
},
},
"generateName": {
SchemaProps: spec.SchemaProps{
Description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency",
Type: []string{"string"},
Format: "",
},
},
"namespace": {
SchemaProps: spec.SchemaProps{
Description: "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces",
Type: []string{"string"},
Format: "",
},
},
"selfLink": {
SchemaProps: spec.SchemaProps{
Description: "SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.",
Type: []string{"string"},
Format: "",
},
},
"uid": {
SchemaProps: spec.SchemaProps{
Description: "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids",
Type: []string{"string"},
Format: "",
},
},
"resourceVersion": {
SchemaProps: spec.SchemaProps{
Description: "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency",
Type: []string{"string"},
Format: "",
},
},
"generation": {
SchemaProps: spec.SchemaProps{
Description: "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.",
Type: []string{"integer"},
Format: "int64",
},
},
"creationTimestamp": {
SchemaProps: spec.SchemaProps{
Description: "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"deletionTimestamp": {
SchemaProps: spec.SchemaProps{
Description: "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"deletionGracePeriodSeconds": {
SchemaProps: spec.SchemaProps{
Description: "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.",
Type: []string{"integer"},
Format: "int64",
},
},
"labels": {
SchemaProps: spec.SchemaProps{
Description: "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"annotations": {
SchemaProps: spec.SchemaProps{
Description: "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"ownerReferences": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-merge-key": "uid",
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference"),
},
},
},
},
},
"finalizers": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-patch-strategy": "merge",
},
},
SchemaProps: spec.SchemaProps{
Description: "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"clusterName": {
SchemaProps: spec.SchemaProps{
Description: "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.",
Type: []string{"string"},
Format: "",
},
},
"managedFields": {
SchemaProps: spec.SchemaProps{
Description: "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ManagedFieldsEntry"),
},
},
},
},
},
},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.ManagedFieldsEntry", "k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
}
}
func schema_pkg_apis_meta_v1_OwnerReference(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "API version of the referent.",
Type: []string{"string"},
Format: "",
},
},
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names",
Type: []string{"string"},
Format: "",
},
},
"uid": {
SchemaProps: spec.SchemaProps{
Description: "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids",
Type: []string{"string"},
Format: "",
},
},
"controller": {
SchemaProps: spec.SchemaProps{
Description: "If true, this reference points to the managing controller.",
Type: []string{"boolean"},
Format: "",
},
},
"blockOwnerDeletion": {
SchemaProps: spec.SchemaProps{
Description: "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.",
Type: []string{"boolean"},
Format: "",
},
},
},
Required: []string{"apiVersion", "kind", "name", "uid"},
},
},
}
}
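// exampleOwnerReference is an illustrative sketch, not part of the generated
// output: a value conforming to the OwnerReference schema above pointing at a
// managing controller that also blocks foreground deletion of the owner. All
// referent values (name, UID, etc.) are made up.
func exampleOwnerReference() v1.OwnerReference {
	isController := true
	blockDeletion := true
	return v1.OwnerReference{
		APIVersion:         "apps/v1",
		Kind:               "ReplicaSet",
		Name:               "web-5d4f7c6b9d",
		UID:                "d9607e19-f88f-11e6-a518-42010a800195",
		Controller:         &isController,
		BlockOwnerDeletion: &blockDeletion,
	}
}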
func schema_pkg_apis_meta_v1_PartialObjectMetadata(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
func schema_pkg_apis_meta_v1_PartialObjectMetadataList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PartialObjectMetadataList contains a list of objects containing only their metadata",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "items contains each of the included items.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.PartialObjectMetadata"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "k8s.io/apimachinery/pkg/apis/meta/v1.PartialObjectMetadata"},
}
}
func schema_pkg_apis_meta_v1_Patch(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.",
Type: []string{"object"},
},
},
}
}
func schema_pkg_apis_meta_v1_PatchOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PatchOptions may be provided when patching an API object. PatchOptions is meant to be a superset of UpdateOptions.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"dryRun": {
SchemaProps: spec.SchemaProps{
Description: "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"force": {
SchemaProps: spec.SchemaProps{
Description: "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
Type: []string{"boolean"},
Format: "",
},
},
"fieldManager": {
SchemaProps: spec.SchemaProps{
Description: "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
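// Example (editor's sketch, not generated code): the PatchOptions fields
// above map onto a server-side apply call. Assumes a recent client-go
// (kubernetes.Interface and the ctx-taking Patch signature) plus
// k8s.io/apimachinery/pkg/types; the resource name, manager string, and
// manifest are illustrative.
func applyDemo(ctx context.Context, cs kubernetes.Interface) error {
	force := true
	manifest := []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"demo"},"data":{"k":"v"}}`)
	_, err := cs.CoreV1().ConfigMaps("default").Patch(
		ctx, "demo", types.ApplyPatchType, manifest,
		metav1.PatchOptions{
			FieldManager: "demo-controller", // required for apply requests
			Force:        &force,            // re-acquire conflicting fields
		})
	return err
}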
func schema_pkg_apis_meta_v1_Preconditions(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"uid": {
SchemaProps: spec.SchemaProps{
Description: "Specifies the target UID.",
Type: []string{"string"},
Format: "",
},
},
"resourceVersion": {
SchemaProps: spec.SchemaProps{
Description: "Specifies the target ResourceVersion",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
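// Example (editor's sketch): Preconditions usually ride along with a delete
// request. Assumes client-go; the namespace, name, and uid are illustrative.
func deleteIfUIDMatches(ctx context.Context, cs kubernetes.Interface, ns, name, uid string) error {
	return cs.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{
		// NewUIDPreconditions wraps uid in the Preconditions object defined
		// above; the server rejects the delete with a Conflict if the live
		// object's UID differs.
		Preconditions: metav1.NewUIDPreconditions(uid),
	})
}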
func schema_pkg_apis_meta_v1_RootPaths(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "RootPaths lists the paths available at root. For example: \"/healthz\", \"/apis\".",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"paths": {
SchemaProps: spec.SchemaProps{
Description: "paths are the paths available at root.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
Required: []string{"paths"},
},
},
}
}
func schema_pkg_apis_meta_v1_ServerAddressByClientCIDR(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"clientCIDR": {
SchemaProps: spec.SchemaProps{
Description: "The CIDR with which clients can match their IP to figure out the server address that they should use.",
Type: []string{"string"},
Format: "",
},
},
"serverAddress": {
SchemaProps: spec.SchemaProps{
Description: "Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"clientCIDR", "serverAddress"},
},
},
}
}
func schema_pkg_apis_meta_v1_Status(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Status is a return value for calls that don't return other objects.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
Type: []string{"string"},
Format: "",
},
},
"message": {
SchemaProps: spec.SchemaProps{
Description: "A human-readable description of the status of this operation.",
Type: []string{"string"},
Format: "",
},
},
"reason": {
SchemaProps: spec.SchemaProps{
Description: "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.",
Type: []string{"string"},
Format: "",
},
},
"details": {
SchemaProps: spec.SchemaProps{
Description: "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.StatusDetails"),
},
},
"code": {
SchemaProps: spec.SchemaProps{
Description: "Suggested HTTP return code for this status, 0 if not set.",
Type: []string{"integer"},
Format: "int32",
},
},
},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "k8s.io/apimachinery/pkg/apis/meta/v1.StatusDetails"},
}
}
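// Example (editor's sketch): a Status returned with a non-2xx response
// typically surfaces in Go clients as a *StatusError from
// k8s.io/apimachinery/pkg/api/errors (assumed import, aliased apierrors).
func describeFailure(err error) string {
	if statusErr, ok := err.(*apierrors.StatusError); ok {
		st := statusErr.Status() // the metav1.Status described above
		return fmt.Sprintf("%s (reason=%q, code=%d)", st.Message, st.Reason, st.Code)
	}
	return err.Error()
}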
func schema_pkg_apis_meta_v1_StatusCause(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"reason": {
SchemaProps: spec.SchemaProps{
Description: "A machine-readable description of the cause of the error. If this value is empty there is no information available.",
Type: []string{"string"},
Format: "",
},
},
"message": {
SchemaProps: spec.SchemaProps{
Description: "A human-readable description of the cause of the error. This field may be presented as-is to a reader.",
Type: []string{"string"},
Format: "",
},
},
"field": {
SchemaProps: spec.SchemaProps{
Description: "The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_meta_v1_StatusDetails(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).",
Type: []string{"string"},
Format: "",
},
},
"group": {
SchemaProps: spec.SchemaProps{
Description: "The group attribute of the resource associated with the status StatusReason.",
Type: []string{"string"},
Format: "",
},
},
"kind": {
SchemaProps: spec.SchemaProps{
Description: "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"uid": {
SchemaProps: spec.SchemaProps{
Description: "UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids",
Type: []string{"string"},
Format: "",
},
},
"causes": {
SchemaProps: spec.SchemaProps{
Description: "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.StatusCause"),
},
},
},
},
},
"retryAfterSeconds": {
SchemaProps: spec.SchemaProps{
Description: "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.",
Type: []string{"integer"},
Format: "int32",
},
},
},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.StatusCause"},
}
}
func schema_pkg_apis_meta_v1_Table(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Table is a tabular representation of a set of API resources. The server transforms the object into a set of preferred columns for quickly reviewing the objects.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"columnDefinitions": {
SchemaProps: spec.SchemaProps{
Description: "columnDefinitions describes each column in the returned items array. The number of cells per row will always match the number of column definitions.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.TableColumnDefinition"),
},
},
},
},
},
"rows": {
SchemaProps: spec.SchemaProps{
Description: "rows is the list of items in the table.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.TableRow"),
},
},
},
},
},
},
Required: []string{"columnDefinitions", "rows"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "k8s.io/apimachinery/pkg/apis/meta/v1.TableColumnDefinition", "k8s.io/apimachinery/pkg/apis/meta/v1.TableRow"},
}
}
func schema_pkg_apis_meta_v1_TableColumnDefinition(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TableColumnDefinition contains information about a column returned in the Table.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "name is a human readable name for the column.",
Type: []string{"string"},
Format: "",
},
},
"type": {
SchemaProps: spec.SchemaProps{
Description: "type is an OpenAPI type definition for this column, such as number, integer, string, or array. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.",
Type: []string{"string"},
Format: "",
},
},
"format": {
SchemaProps: spec.SchemaProps{
Description: "format is an optional OpenAPI type modifier for this column. A format modifies the type and imposes additional rules, like date or time formatting for a string. The 'name' format is applied to the primary identifier column which has type 'string' to assist in clients identifying column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.",
Type: []string{"string"},
Format: "",
},
},
"description": {
SchemaProps: spec.SchemaProps{
Description: "description is a human readable description of this column.",
Type: []string{"string"},
Format: "",
},
},
"priority": {
SchemaProps: spec.SchemaProps{
Description: "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a higher priority.",
Type: []string{"integer"},
Format: "int32",
},
},
},
Required: []string{"name", "type", "format", "description", "priority"},
},
},
}
}
func schema_pkg_apis_meta_v1_TableOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TableOptions are used when a Table is requested by the caller.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"includeObject": {
SchemaProps: spec.SchemaProps{
Description: "includeObject decides whether to include each object along with its columnar information. Specifying \"None\" will return no object, specifying \"Object\" will return the full object contents, and specifying \"Metadata\" (the default) will return the object's metadata in the PartialObjectMetadata kind in version v1beta1 of the meta.k8s.io API group.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
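// Example (editor's sketch): Table and TableOptions are served through
// content negotiation rather than a dedicated endpoint. A raw net/http
// request opting in to the Table representation might look like this; the
// server URL is hypothetical, and the group/version matches the v1beta1
// note above.
func requestTable(client *http.Client, server string) (*http.Response, error) {
	req, err := http.NewRequest("GET", server+"/api/v1/pods", nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Accept",
		"application/json;as=Table;v=v1beta1;g=meta.k8s.io, application/json")
	return client.Do(req)
}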
func schema_pkg_apis_meta_v1_TableRow(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TableRow is an individual row in a table.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"cells": {
SchemaProps: spec.SchemaProps{
Description: "cells will be as wide as the column definitions array and may contain strings, numbers (float64 or int64), booleans, simple maps, lists, or null. See the type field of the column definition for a more detailed description.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Format: "",
},
},
},
},
},
"conditions": {
SchemaProps: spec.SchemaProps{
Description: "conditions describe additional status of a row that are relevant for a human user. These conditions apply to the row, not to the object, and will be specific to table output. The only defined condition type is 'Completed', for a row that indicates a resource that has run to completion and can be given less visual priority.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.TableRowCondition"),
},
},
},
},
},
"object": {
SchemaProps: spec.SchemaProps{
Description: "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing. The media type of the object will always match the enclosing list - if this as a JSON table, these will be JSON encoded objects.",
Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"),
},
},
},
Required: []string{"cells"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.TableRowCondition", "k8s.io/apimachinery/pkg/runtime.RawExtension"},
}
}
func schema_pkg_apis_meta_v1_TableRowCondition(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TableRowCondition allows a row to be marked with additional information.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"type": {
SchemaProps: spec.SchemaProps{
Description: "Type of row condition. The only defined value is 'Completed' indicating that the object this row represents has reached a completed state and may be given less visual priority than other rows. Clients are not required to honor any conditions but should be consistent where possible about handling the conditions.",
Type: []string{"string"},
Format: "",
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Status of the condition, one of True, False, Unknown.",
Type: []string{"string"},
Format: "",
},
},
"reason": {
SchemaProps: spec.SchemaProps{
Description: "(brief) machine readable reason for the condition's last transition.",
Type: []string{"string"},
Format: "",
},
},
"message": {
SchemaProps: spec.SchemaProps{
Description: "Human readable message indicating details about last transition.",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"type", "status"},
},
},
}
}
func schema_pkg_apis_meta_v1_Time(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.",
Type: v1.Time{}.OpenAPISchemaType(),
Format: v1.Time{}.OpenAPISchemaFormat(),
},
},
}
}
func schema_pkg_apis_meta_v1_Timestamp(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Timestamp is a struct that is equivalent to Time, but intended for protobuf marshalling/unmarshalling. It is generated into a serialization that matches Time. Do not use in Go structs.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"seconds": {
SchemaProps: spec.SchemaProps{
Description: "Represents seconds of UTC time since Unix epoch 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive.",
Type: []string{"integer"},
Format: "int64",
},
},
"nanos": {
SchemaProps: spec.SchemaProps{
Description: "Non-negative fractions of a second at nanosecond resolution. Negative second values with fractions must still have non-negative nanos values that count forward in time. Must be from 0 to 999,999,999 inclusive. This field may be limited in precision depending on context.",
Type: []string{"integer"},
Format: "int32",
},
},
},
Required: []string{"seconds", "nanos"},
},
},
}
}
func schema_pkg_apis_meta_v1_TypeMeta(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_meta_v1_UpdateOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "UpdateOptions may be provided when updating an API object. All fields in UpdateOptions should also be present in PatchOptions.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"dryRun": {
SchemaProps: spec.SchemaProps{
Description: "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
},
},
},
"fieldManager": {
SchemaProps: spec.SchemaProps{
Description: "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}
func schema_pkg_apis_meta_v1_WatchEvent(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Event represents a single event to a watched resource.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"type": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
"object": {
SchemaProps: spec.SchemaProps{
Description: "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context.",
Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"),
},
},
},
Required: []string{"type", "object"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/runtime.RawExtension"},
}
}
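// Editor's note: openapi-gen normally emits an aggregator alongside the
// schema_* functions above, which API servers register wholesale. The sketch
// below reconstructs that conventional entry point with a representative
// subset of entries; it is not copied from the generated source.
func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
	return map[string]common.OpenAPIDefinition{
		"k8s.io/apimachinery/pkg/apis/meta/v1.Status":     schema_pkg_apis_meta_v1_Status(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.TypeMeta":   schema_pkg_apis_meta_v1_TypeMeta(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.WatchEvent": schema_pkg_apis_meta_v1_WatchEvent(ref),
		// ...one entry per schema_* function in this file...
	}
}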
|
container.go
|
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package container contains a Google Container Engine client.
//
// For more information about the API,
// see https://cloud.google.com/container-engine/docs
package container // import "camlistore.org/third_party/google.golang.org/cloud/container"
import (
"errors"
"net/http"
"time"
"camlistore.org/third_party/golang.org/x/net/context"
raw "camlistore.org/third_party/google.golang.org/api/container/v1beta1"
"camlistore.org/third_party/google.golang.org/cloud/internal"
)
type Type string
var (
TypeCreate = Type("createCluster")
TypeDelete = Type("deleteCluster")
)
type Status string
var (
Done = Status("done")
Pending = Status("pending")
Running = Status("running")
Error = Status("error")
Provisioning = Status("provisioning")
Stopping = Status("stopping")
)
// Resource is a Google Container Engine cluster resource.
type Resource struct {
// Name is the name of this cluster. The name must be unique
// within this project and zone, and can be up to 40 characters.
Name string
// Description is the description of the cluster. Optional.
Description string
// Zone is the Google Compute Engine zone in which the cluster resides.
Zone string
// Status is the current status of the cluster. It can be
// Error, Provisioning, Running, or Stopping.
Status Status
// Num is the number of the nodes in this cluster resource.
Num int64
// APIVersion is the version of the Kubernetes master and kubelets running
// in this cluster. Allowed value is 0.4.2, or leave blank to
// pick up the latest stable release.
APIVersion string
// Endpoint is the IP address of this cluster's Kubernetes master.
// The endpoint can be accessed at https://username:password@endpoint/.
// See Username and Password fields for the username and password information.
Endpoint string
// Username is the username to use when accessing the Kubernetes master endpoint.
Username string
// Password is the password to use when accessing the Kubernetes master endpoint.
Password string
// ContainerIPv4CIDR is the IP addresses of the container pods in
// this cluster, in CIDR notation (e.g. 1.2.3.4/29).
ContainerIPv4CIDR string
// ServicesIPv4CIDR is the IP addresses of the Kubernetes services in this
// cluster, in CIDR notation (e.g. 1.2.3.4/29). Service addresses are
// always in the 10.0.0.0/16 range.
ServicesIPv4CIDR string
// MachineType is a Google Compute Engine machine type (e.g. n1-standard-1).
// If none is set, the default type is used when creating a new cluster.
MachineType string
// SourceImage is the fully-specified name of a Google Compute Engine image.
// For example: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/backports-debian-7-wheezy-vYYYYMMDD.
SourceImage string
// Created is the creation time of this cluster.
Created time.Time
}
func resourceFromRaw(c *raw.Cluster) *Resource {
if c == nil {
	return nil
}
r := &Resource{
Name: c.Name,
Description: c.Description,
Zone: c.Zone,
Status: Status(c.Status),
Num: c.NumNodes,
APIVersion: c.ClusterApiVersion,
Endpoint: c.Endpoint,
Username: c.MasterAuth.User,
Password: c.MasterAuth.Password,
ContainerIPv4CIDR: c.ContainerIpv4Cidr,
ServicesIPv4CIDR: c.ServicesIpv4Cidr,
MachineType: c.NodeConfig.MachineType,
SourceImage: c.NodeConfig.SourceImage,
}
r.Created, _ = time.Parse(time.RFC3339, c.CreationTimestamp)
return r
}
func resourcesFromRaw(c []*raw.Cluster) []*Resource {
r := make([]*Resource, len(c))
for i, val := range c {
r[i] = resourceFromRaw(val)
}
return r
}
// Op represents a Google Container Engine API operation.
type Op struct {
// Name is the name of the operation.
Name string
// Zone is the Google Compute Engine zone.
Zone string
// TargetURL is the URL of the cluster resource
// that this operation is associated with.
TargetURL string
// Type is the operation type. It can be either TypeCreate or TypeDelete.
Type Type
// Status is the current status of this operation. It can be either
// Done or Pending.
Status Status
}
func opFromRaw(o *raw.Operation) *Op {
if o == nil {
return nil
}
return &Op{
Name: o.Name,
Zone: o.Zone,
TargetURL: o.Target,
Type: Type(o.OperationType),
Status: Status(o.Status),
}
}
func opsFromRaw(o []*raw.Operation) []*Op {
ops := make([]*Op, len(o))
for i, val := range o {
ops[i] = opFromRaw(val)
}
return ops
}
// Clusters returns a list of cluster resources from the specified zone.
// If no zone is specified, it returns all clusters under the user's project.
func Clusters(ctx context.Context, zone string) ([]*Resource, error) {
s := rawService(ctx)
if zone == "" {
resp, err := s.Projects.Clusters.List(internal.ProjID(ctx)).Do()
if err != nil {
return nil, err
}
return resourcesFromRaw(resp.Clusters), nil
}
resp, err := s.Projects.Zones.Clusters.List(internal.ProjID(ctx), zone).Do()
if err != nil {
return nil, err
}
return resourcesFromRaw(resp.Clusters), nil
}
// Cluster returns metadata about the specified cluster.
func Cluster(ctx context.Context, zone, name string) (*Resource, error) {
s := rawService(ctx)
resp, err := s.Projects.Zones.Clusters.Get(internal.ProjID(ctx), zone, name).Do()
if err != nil {
return nil, err
}
return resourceFromRaw(resp), nil
}
// CreateCluster creates a new cluster with the provided metadata
// in the specified zone.
func CreateCluster(ctx context.Context, zone string, resource *Resource) (*Resource, error) {
panic("not implemented")
}
// DeleteCluster deletes a cluster.
func DeleteCluster(ctx context.Context, zone, name string) error {
s := rawService(ctx)
_, err := s.Projects.Zones.Clusters.Delete(internal.ProjID(ctx), zone, name).Do()
return err
}
// Operations returns a list of operations from the specified zone.
// If no zone is specified, it returns all operations
// running under the user's project.
func Operations(ctx context.Context, zone string) ([]*Op, error) {
s := rawService(ctx)
if zone == "" {
resp, err := s.Projects.Operations.List(internal.ProjID(ctx)).Do()
if err != nil {
return nil, err
}
return opsFromRaw(resp.Operations), nil
}
resp, err := s.Projects.Zones.Operations.List(internal.ProjID(ctx), zone).Do()
if err != nil {
return nil, err
}
return opsFromRaw(resp.Operations), nil
}
// Operation returns an operation.
func Operation(ctx context.Context, zone, name string) (*Op, error) {
s := rawService(ctx)
resp, err := s.Projects.Zones.Operations.Get(internal.ProjID(ctx), zone, name).Do()
if err != nil {
return nil, err
}
if resp.ErrorMessage != "" {
return nil, errors.New(resp.ErrorMessage)
}
return opFromRaw(resp), nil
}
func rawService(ctx context.Context) *raw.Service {
return internal.Service(ctx, "container", func(hc *http.Client) interface{} {
svc, _ := raw.New(hc)
return svc
}).(*raw.Service)
}
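// Example (editor's sketch, not part of the original file): listing the
// clusters in a zone. The caller is assumed to supply a context prepared by
// the cloud package (e.g. cloud.NewContext); the zone literal is
// illustrative, and fmt/log imports are assumed.
func ExampleClusters(ctx context.Context) {
	clusters, err := Clusters(ctx, "us-central1-a")
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range clusters {
		fmt.Printf("%s: %s (%d nodes)\n", c.Name, c.Status, c.Num)
	}
}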
|
config.js
|
/**
* @fileoverview added by tsickle
* @suppress {checkTypes,constantProperty,extraRequire,missingOverride,missingReturn,unusedPrivateMembers,uselessCode} checked by tsc
*/
/**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import { EmptyOutletComponent } from './components/empty_outlet';
import { PRIMARY_OUTLET } from './shared';
/**
* A configuration object that defines a single route.
* A set of routes are collected in a `Routes` array to define a `Router` configuration.
* The router attempts to match segments of a given URL against each route,
* using the configuration options defined in this object.
*
* Supports static, parameterized, redirect, and wildcard routes, as well as
* custom route data and resolve methods.
*
* For detailed usage information, see the [Routing Guide](guide/router).
*
* \@usageNotes
*
* ### Simple Configuration
*
* The following route specifies that when navigating to, for example,
* `/team/11/user/bob`, the router creates the 'Team' component
* with the 'User' child component in it.
*
* ```
* [{
* path: 'team/:id',
* component: Team,
* children: [{
* path: 'user/:name',
* component: User
* }]
* }]
* ```
*
* ### Multiple Outlets
*
* The following route creates sibling components with multiple outlets.
* When navigating to `/team/11(aux:chat/jim)`, the router creates the 'Team' component next to
* the 'Chat' component. The 'Chat' component is placed into the 'aux' outlet.
*
* ```
* [{
* path: 'team/:id',
* component: Team
* }, {
* path: 'chat/:user',
* component: Chat
* outlet: 'aux'
* }]
* ```
*
* ### Wild Cards
*
* The following route uses wild-card notation to specify a component
* that is always instantiated regardless of where you navigate to.
*
* ```
* [{
* path: '**',
* component: WildcardComponent
* }]
* ```
*
* ### Redirects
*
* The following route uses the `redirectTo` property to ignore a segment of
* a given URL when looking for a child path.
*
* When navigating to '/team/11/legacy/user/jim', the router changes the URL segment
* '/team/11/legacy/user/jim' to '/team/11/user/jim', and then instantiates
* the Team component with the User child component in it.
*
* ```
* [{
* path: 'team/:id',
* component: Team,
* children: [{
* path: 'legacy/user/:name',
* redirectTo: 'user/:name'
* }, {
* path: 'user/:name',
* component: User
* }]
* }]
* ```
*
* The redirect path can be relative, as shown in this example, or absolute.
* If we change the `redirectTo` value in the example to the absolute URL segment '/user/:name',
 * the result URL is also absolute, '/user/jim'.
 *
 * ### Empty Path
*
* Empty-path route configurations can be used to instantiate components that do not 'consume'
* any URL segments.
*
* In the following configuration, when navigating to
* `/team/11`, the router instantiates the 'AllUsers' component.
*
* ```
* [{
* path: 'team/:id',
* component: Team,
* children: [{
* path: '',
* component: AllUsers
* }, {
* path: 'user/:name',
* component: User
* }]
* }]
* ```
*
* Empty-path routes can have children. In the following example, when navigating
* to `/team/11/user/jim`, the router instantiates the wrapper component with
* the user component in it.
*
* Note that an empty path route inherits its parent's parameters and data.
*
* ```
* [{
* path: 'team/:id',
* component: Team,
* children: [{
* path: '',
* component: WrapperCmp,
* children: [{
* path: 'user/:name',
* component: User
* }]
* }]
* }]
* ```
*
* ### Matching Strategy
*
* The default path-match strategy is 'prefix', which means that the router
* checks URL elements from the left to see if the URL matches a specified path.
* For example, '/team/11/user' matches 'team/:id'.
*
* ```
* [{
* path: '',
* pathMatch: 'prefix', //default
* redirectTo: 'main'
* }, {
* path: 'main',
* component: Main
* }]
* ```
*
* You can specify the path-match strategy 'full' to make sure that the path
* covers the whole unconsumed URL. It is important to do this when redirecting
* empty-path routes. Otherwise, because an empty path is a prefix of any URL,
* the router would apply the redirect even when navigating to the redirect destination,
* creating an endless loop.
*
 * In the following example, supplying the 'full' `pathMatch` strategy ensures
* that the router applies the redirect if and only if navigating to '/'.
*
* ```
* [{
* path: '',
* pathMatch: 'full',
* redirectTo: 'main'
* }, {
* path: 'main',
* component: Main
* }]
* ```
*
* ### Componentless Routes
*
* You can share parameters between sibling components.
* For example, suppose that two sibling components should go next to each other,
* and both of them require an ID parameter. You can accomplish this using a route
* that does not specify a component at the top level.
*
* In the following example, 'ChildCmp' and 'AuxCmp' are siblings.
 * When navigating to 'parent/10/(a//aux:b)', the router instantiates
* the main child and aux child components next to each other.
* For this to work, the application component must have the primary and aux outlets defined.
*
* ```
* [{
* path: 'parent/:id',
* children: [
* { path: 'a', component: MainChild },
* { path: 'b', component: AuxChild, outlet: 'aux' }
* ]
* }]
* ```
*
* The router merges the parameters, data, and resolve of the componentless
* parent into the parameters, data, and resolve of the children.
*
* This is especially useful when child components are defined
* with an empty path string, as in the following example.
* With this configuration, navigating to '/parent/10' creates
* the main child and aux components.
*
* ```
* [{
* path: 'parent/:id',
* children: [
* { path: '', component: MainChild },
* { path: '', component: AuxChild, outlet: 'aux' }
* ]
* }]
* ```
*
* ### Lazy Loading
*
* Lazy loading speeds up application load time by splitting the application
* into multiple bundles and loading them on demand.
* To use lazy loading, provide the `loadChildren` property instead of the `children` property.
*
* Given the following example route, the router uses the registered
* `NgModuleFactoryLoader` to fetch an NgModule associated with 'team'.
* It then extracts the set of routes defined in that NgModule,
* and transparently adds those routes to the main configuration.
*
* ```
* [{
* path: 'team/:id',
* component: Team,
* loadChildren: 'team'
* }]
* ```
*
* \@publicApi
* @record
*/
export function Route() { }
if (false) {
/**
* The path to match against, a URL string that uses router matching notation.
* Can be a wild card (`**`) that matches any URL (see Usage Notes below).
* Default is "/" (the root path).
* @type {?|undefined}
*/
Route.prototype.path;
/**
* The path-matching strategy, one of 'prefix' or 'full'.
* Default is 'prefix'.
*
* By default, the router checks URL elements from the left to see if the URL
* matches a given path, and stops when there is a match. For example,
* '/team/11/user' matches 'team/:id'.
*
* The path-match strategy 'full' matches against the entire URL.
* It is important to do this when redirecting empty-path routes.
* Otherwise, because an empty path is a prefix of any URL,
* the router would apply the redirect even when navigating
* to the redirect destination, creating an endless loop.
*
* @type {?|undefined}
*/
Route.prototype.pathMatch;
/**
* A URL-matching function to use as a custom strategy for path matching.
* If present, supersedes `path` and `pathMatch`.
* @type {?|undefined}
*/
Route.prototype.matcher;
/**
* The component to instantiate when the path matches.
* Can be empty if child routes specify components.
* @type {?|undefined}
*/
Route.prototype.component;
/**
 * A URL to which to redirect when the path matches.
 * Absolute if the URL begins with a slash (/), otherwise relative to the path URL.
 * @type {?|undefined}
 */
Route.prototype.redirectTo;
/**
* Name of a `RouterOutlet` object where the component can be placed
* when the path matches.
* @type {?|undefined}
*/
Route.prototype.outlet;
/**
* An array of dependency-injection tokens used to look up `CanActivate()`
* handlers, in order to determine if the current user is allowed to
* activate the component. By default, any user can activate.
* @type {?|undefined}
*/
Route.prototype.canActivate;
/**
* An array of DI tokens used to look up `CanActivateChild()` handlers,
* in order to determine if the current user is allowed to activate
* a child of the component. By default, any user can activate a child.
* @type {?|undefined}
*/
Route.prototype.canActivateChild;
/**
* An array of DI tokens used to look up `CanDeactivate()`
* handlers, in order to determine if the current user is allowed to
* deactivate the component. By default, any user can deactivate.
*
* @type {?|undefined}
*/
Route.prototype.canDeactivate;
/**
* An array of DI tokens used to look up `CanLoad()`
* handlers, in order to determine if the current user is allowed to
* load the component. By default, any user can load.
* @type {?|undefined}
*/
Route.prototype.canLoad;
/**
* Additional developer-defined data provided to the component via
* `ActivatedRoute`. By default, no additional data is passed.
* @type {?|undefined}
*/
Route.prototype.data;
/**
* A map of DI tokens used to look up data resolvers. See `Resolve`.
* @type {?|undefined}
*/
Route.prototype.resolve;
/**
* An array of child `Route` objects that specifies a nested route
* configuration.
* @type {?|undefined}
*/
Route.prototype.children;
/**
* A `LoadChildren` object specifying lazy-loaded child routes.
* @type {?|undefined}
*/
Route.prototype.loadChildren;
/**
* Defines when guards and resolvers will be run. One of
* - `paramsOrQueryParamsChange` : Run when query parameters change.
* - `always` : Run on every execution.
* By default, guards and resolvers run only when the matrix
* parameters of the route change.
* @type {?|undefined}
*/
Route.prototype.runGuardsAndResolvers;
/**
* Filled for routes with `loadChildren` once the module has been loaded
* \@internal
* @type {?|undefined}
*/
Route.prototype._loadedConfig;
}
export class LoadedRouterConfig {
/**
* @param {?} routes
* @param {?} module
*/
constructor(routes, module) {
this.routes = routes;
this.module = module;
}
}
if (false) {
/** @type {?} */
LoadedRouterConfig.prototype.routes;
/** @type {?} */
LoadedRouterConfig.prototype.module;
}
/**
* @param {?} config
* @param {?=} parentPath
* @return {?}
*/
export function validateConfig(config, parentPath = '') {
// forEach doesn't iterate undefined values
for (let i = 0; i < config.length; i++) {
/** @type {?} */
const route = config[i];
/** @type {?} */
const fullPath = getFullPath(parentPath, route);
validateNode(route, fullPath);
}
}
/**
* @param {?} route
* @param {?} fullPath
* @return {?}
*/
function validateNode(route, fullPath) {
if (!route) {
throw new Error(`
Invalid configuration of route '${fullPath}': Encountered undefined route.
The reason might be an extra comma.
Example:
const routes: Routes = [
{ path: '', redirectTo: '/dashboard', pathMatch: 'full' },
{ path: 'dashboard', component: DashboardComponent },, << two commas
{ path: 'detail/:id', component: HeroDetailComponent }
];
`);
}
if (Array.isArray(route)) {
throw new Error(`Invalid configuration of route '${fullPath}': Array cannot be specified`);
}
if (!route.component && !route.children && !route.loadChildren &&
(route.outlet && route.outlet !== PRIMARY_OUTLET)) {
throw new Error(`Invalid configuration of route '${fullPath}': a componentless route without children or loadChildren cannot have a named outlet set`);
}
if (route.redirectTo && route.children) {
throw new Error(`Invalid configuration of route '${fullPath}': redirectTo and children cannot be used together`);
}
if (route.redirectTo && route.loadChildren) {
throw new Error(`Invalid configuration of route '${fullPath}': redirectTo and loadChildren cannot be used together`);
}
if (route.children && route.loadChildren) {
throw new Error(`Invalid configuration of route '${fullPath}': children and loadChildren cannot be used together`);
}
if (route.redirectTo && route.component) {
throw new Error(`Invalid configuration of route '${fullPath}': redirectTo and component cannot be used together`);
}
if (route.path && route.matcher) {
throw new Error(`Invalid configuration of route '${fullPath}': path and matcher cannot be used together`);
}
if (route.redirectTo === void 0 && !route.component && !route.children && !route.loadChildren) {
throw new Error(`Invalid configuration of route '${fullPath}'. One of the following must be provided: component, redirectTo, children or loadChildren`);
}
if (route.path === void 0 && route.matcher === void 0) {
throw new Error(`Invalid configuration of route '${fullPath}': routes must have either a path or a matcher specified`);
}
if (typeof route.path === 'string' && route.path.charAt(0) === '/') {
throw new Error(`Invalid configuration of route '${fullPath}': path cannot start with a slash`);
}
if (route.path === '' && route.redirectTo !== void 0 && route.pathMatch === void 0) {
/** @type {?} */
const exp = `The default value of 'pathMatch' is 'prefix', but often the intent is to use 'full'.`;
throw new Error(`Invalid configuration of route '{path: "${fullPath}", redirectTo: "${route.redirectTo}"}': please provide 'pathMatch'. ${exp}`);
}
if (route.pathMatch !== void 0 && route.pathMatch !== 'full' && route.pathMatch !== 'prefix') {
throw new Error(`Invalid configuration of route '${fullPath}': pathMatch can only be set to 'prefix' or 'full'`);
}
if (route.children) {
validateConfig(route.children, fullPath);
}
}
/**
* @param {?} parentPath
* @param {?} currentRoute
* @return {?}
*/
function getFullPath(parentPath, currentRoute) {
if (!currentRoute) {
return parentPath;
}
if (!parentPath && !currentRoute.path) {
return '';
}
else if (parentPath && !currentRoute.path) {
return `${parentPath}/`;
}
else if (!parentPath && currentRoute.path) {
return currentRoute.path;
}
else {
return `${parentPath}/${currentRoute.path}`;
}
}
/**
* Makes a copy of the config and adds any default required properties.
* @param {?} r
* @return {?}
*/
export function standardizeConfig(r) {
/** @type {?} */
const children = r.children && r.children.map(standardizeConfig);
/** @type {?} */
const c = children ? Object.assign({}, r, { children }) : Object.assign({}, r);
if (!c.component && (children || c.loadChildren) && (c.outlet && c.outlet !== PRIMARY_OUTLET)) {
c.component = EmptyOutletComponent;
}
return c;
}
Otherwise, because an empty path is a prefix of any URL,\n * the router would apply the redirect even when navigating to the redirect destination,\n * creating an endless loop.\n *\n * In the following example, supplying the 'full' `patchMatch` strategy ensures\n * that the router applies the redirect if and only if navigating to '/'.\n *\n * ```\n * [{\n *   path: '',\n *   pathMatch: 'full',\n *   redirectTo: 'main'\n * }, {\n *   path: 'main',\n *   component: Main\n * }]\n * ```\n *\n * ### Componentless Routes\n *\n * You can share parameters between sibling components.\n * For example, suppose that two sibling components should go next to each other,\n * and both of them require an ID parameter. You can accomplish this using a route\n * that does not specify a component at the top level.\n *\n * In the following example, 'ChildCmp' and 'AuxCmp' are siblings.\n * When navigating to 'parent/10/(a//aux:b)', the route instantiates\n * the main child and aux child components next to each other.\n * For this to work, the application component must have the primary and aux outlets defined.\n *\n * ```\n * [{\n *    path: 'parent/:id',\n *    children: [\n *      { path: 'a', component: MainChild },\n *      { path: 'b', component: AuxChild, outlet: 'aux' }\n *    ]\n * }]\n * ```\n *\n * The router merges the parameters, data, and resolve of the componentless\n * parent into the parameters, data, and resolve of the children.\n *\n * This is especially useful when child components are defined\n * with an empty path string, as in the following example.\n * With this configuration, navigating to '/parent/10' creates\n * the main child and aux components.\n *\n * ```\n * [{\n *    path: 'parent/:id',\n *    children: [\n *      { path: '', component: MainChild },\n *      { path: '', component: AuxChild, outlet: 'aux' }\n *    ]\n * }]\n * ```\n *\n * ### Lazy Loading\n *\n * Lazy loading speeds up application load time by splitting the application\n * into multiple bundles and loading them on demand.\n * To use lazy loading, provide the `loadChildren` property  instead of the `children` property.\n *\n * Given the following example route, the router uses the registered\n * `NgModuleFactoryLoader` to fetch an NgModule associated with 'team'.\n * It then extracts the set of routes defined in that NgModule,\n * and transparently adds those routes to the main configuration.\n *\n * ```\n * [{\n *   path: 'team/:id',\n *   component: Team,\n *   loadChildren: 'team'\n * }]\n * ```\n *\n * @publicApi\n */\nexport interface Route {\n  /**\n   * The path to match against, a URL string that uses router matching notation.\n   * Can be a wild card (`**`) that matches any URL (see Usage Notes below).\n   * Default is \"/\" (the root path).\n   */\n  path?: string;\n  /**\n   * The path-matching strategy, one of 'prefix' or 'full'.\n   * Default is 'prefix'.\n   *\n   * By default, the router checks URL elements from the left to see if the URL\n   * matches a given  path, and stops when there is a match. 
For example,\n   * '/team/11/user' matches 'team/:id'.\n   *\n   * The path-match strategy 'full' matches against the entire URL.\n   * It is important to do this when redirecting empty-path routes.\n   * Otherwise, because an empty path is a prefix of any URL,\n   * the router would apply the redirect even when navigating\n   * to the redirect destination, creating an endless loop.\n   *\n   */\n  pathMatch?: string;\n  /**\n   * A URL-matching function to use as a custom strategy for path matching.\n   * If present, supersedes `path` and `pathMatch`.\n   */\n  matcher?: UrlMatcher;\n  /**\n   * The component to instantiate when the path matches.\n   * Can be empty if child routes specify components.\n   */\n  component?: Type<any>;\n  /**\n   * A URL to which to redirect when a the path matches.\n   * Absolute if the URL begins with a slash (/), otherwise relative to the path URL.\n   * When not present, router does not redirect.\n   */\n  redirectTo?: string;\n  /**\n   * Name of a `RouterOutlet` object where the component can be placed\n   * when the path matches.\n   */\n  outlet?: string;\n  /**\n   * An array of dependency-injection tokens used to look up `CanActivate()`\n   * handlers, in order to determine if the current user is allowed to\n   * activate the component. By default, any user can activate.\n   */\n  canActivate?: any[];\n  /**\n   * An array of DI tokens used to look up `CanActivateChild()` handlers,\n   * in order to determine if the current user is allowed to activate\n   * a child of the component. By default, any user can activate a child.\n   */\n  canActivateChild?: any[];\n  /**\n   * An array of DI tokens used to look up `CanDeactivate()`\n   * handlers, in order to determine if the current user is allowed to\n   * deactivate the component. By default, any user can deactivate.\n   *\n   */\n  canDeactivate?: any[];\n  /**\n   * An array of DI tokens used to look up `CanLoad()`\n   * handlers, in order to determine if the current user is allowed to\n   * load the component. By default, any user can load.\n   */\n  canLoad?: any[];\n  /**\n   * Additional developer-defined data provided to the component via\n   * `ActivatedRoute`. By default, no additional data is passed.\n   */\n  data?: Data;\n  /**\n   * A map of DI tokens used to look up data resolvers. See `Resolve`.\n   */\n  resolve?: ResolveData;\n  /**\n   * An array of child `Route` objects that specifies a nested route\n   * configuration.\n   */\n  children?: Routes;\n  /**\n   * A `LoadChildren` object specifying lazy-loaded child routes.\n   */\n  loadChildren?: LoadChildren;\n  /**\n   * Defines when guards and resolvers will be run. 
One of\n   * - `paramsOrQueryParamsChange` : Run when query parameters change.\n   * - `always` : Run on every execution.\n   * By default, guards and resolvers run only when the matrix\n   * parameters of the route change.\n   */\n  runGuardsAndResolvers?: RunGuardsAndResolvers;\n  /**\n   * Filled for routes with `loadChildren` once the module has been loaded\n   * @internal\n   */\n  _loadedConfig?: LoadedRouterConfig;\n}\n\nexport class LoadedRouterConfig {\n  constructor(public routes: Route[], public module: NgModuleRef<any>) {}\n}\n\nexport function validateConfig(config: Routes, parentPath: string = ''): void {\n  // forEach doesn't iterate undefined values\n  for (let i = 0; i < config.length; i++) {\n    const route: Route = config[i];\n    const fullPath: string = getFullPath(parentPath, route);\n    validateNode(route, fullPath);\n  }\n}\n\nfunction validateNode(route: Route, fullPath: string): void {\n  if (!route) {\n    throw new Error(`\n      Invalid configuration of route '${fullPath}': Encountered undefined route.\n      The reason might be an extra comma.\n\n      Example:\n      const routes: Routes = [\n        { path: '', redirectTo: '/dashboard', pathMatch: 'full' },\n        { path: 'dashboard',  component: DashboardComponent },, << two commas\n        { path: 'detail/:id', component: HeroDetailComponent }\n      ];\n    `);\n  }\n  if (Array.isArray(route)) {\n    throw new Error(`Invalid configuration of route '${fullPath}': Array cannot be specified`);\n  }\n  if (!route.component && !route.children && !route.loadChildren &&\n      (route.outlet && route.outlet !== PRIMARY_OUTLET)) {\n    throw new Error(\n        `Invalid configuration of route '${fullPath}': a componentless route without children or loadChildren cannot have a named outlet set`);\n  }\n  if (route.redirectTo && route.children) {\n    throw new Error(\n        `Invalid configuration of route '${fullPath}': redirectTo and children cannot be used together`);\n  }\n  if (route.redirectTo && route.loadChildren) {\n    throw new Error(\n        `Invalid configuration of route '${fullPath}': redirectTo and loadChildren cannot be used together`);\n  }\n  if (route.children && route.loadChildren) {\n    throw new Error(\n        `Invalid configuration of route '${fullPath}': children and loadChildren cannot be used together`);\n  }\n  if (route.redirectTo && route.component) {\n    throw new Error(\n        `Invalid configuration of route '${fullPath}': redirectTo and component cannot be used together`);\n  }\n  if (route.path && route.matcher) {\n    throw new Error(\n        `Invalid configuration of route '${fullPath}': path and matcher cannot be used together`);\n  }\n  if (route.redirectTo === void 0 && !route.component && !route.children && !route.loadChildren) {\n    throw new Error(\n        `Invalid configuration of route '${fullPath}'. 
One of the following must be provided: component, redirectTo, children or loadChildren`);\n  }\n  if (route.path === void 0 && route.matcher === void 0) {\n    throw new Error(\n        `Invalid configuration of route '${fullPath}': routes must have either a path or a matcher specified`);\n  }\n  if (typeof route.path === 'string' && route.path.charAt(0) === '/') {\n    throw new Error(`Invalid configuration of route '${fullPath}': path cannot start with a slash`);\n  }\n  if (route.path === '' && route.redirectTo !== void 0 && route.pathMatch === void 0) {\n    const exp =\n        `The default value of 'pathMatch' is 'prefix', but often the intent is to use 'full'.`;\n    throw new Error(\n        `Invalid configuration of route '{path: \"${fullPath}\", redirectTo: \"${route.redirectTo}\"}': please provide 'pathMatch'. ${exp}`);\n  }\n  if (route.pathMatch !== void 0 && route.pathMatch !== 'full' && route.pathMatch !== 'prefix') {\n    throw new Error(\n        `Invalid configuration of route '${fullPath}': pathMatch can only be set to 'prefix' or 'full'`);\n  }\n  if (route.children) {\n    validateConfig(route.children, fullPath);\n  }\n}\n\nfunction getFullPath(parentPath: string, currentRoute: Route): string {\n  if (!currentRoute) {\n    return parentPath;\n  }\n  if (!parentPath && !currentRoute.path) {\n    return '';\n  } else if (parentPath && !currentRoute.path) {\n    return `${parentPath}/`;\n  } else if (!parentPath && currentRoute.path) {\n    return currentRoute.path;\n  } else {\n    return `${parentPath}/${currentRoute.path}`;\n  }\n}\n\n/**\n * Makes a copy of the config and adds any default required properties.\n */\nexport function standardizeConfig(r: Route): Route {\n  const children = r.children && r.children.map(standardizeConfig);\n  const c = children ? {...r, children} : {...r};\n  if (!c.component && (children || c.loadChildren) && (c.outlet && c.outlet !== PRIMARY_OUTLET)) {\n    c.component = EmptyOutletComponent;\n  }\n  return c;\n}\n"]}
|
* When not present, router does not redirect.
* @type {?|undefined}
*/
Route.prototype.redirectTo;
|
admin_only.rs
|
use crate::{
db::DbConn,
helpers::TemplateContextUser,
models::{GithubUserRecord, User},
};
use actix_session::Session;
use serde::Serialize;
use super::{auth_from_session, AuthFromSessionError};
pub struct
|
{
user: (User, GithubUserRecord),
permissions: Vec<String>,
}
#[derive(Debug, Serialize)]
pub struct AdminOnlyContext {
user: TemplateContextUser,
}
impl AdminOnly {
pub fn from_session(
conn: &DbConn,
session: &Session,
) -> Result<AdminOnly, AuthFromSessionError> {
match auth_from_session(conn, session) {
Ok(Some((user, permissions))) => {
if !permissions.contains(&"admin".to_owned()) {
return Err(AuthFromSessionError::RoleNotMatched(
"admin".to_owned(),
permissions,
));
}
let github_user = GithubUserRecord::find_by_user_id(conn, user.id)?;
match github_user {
Some(github_user) => Ok(AdminOnly {
user: (user, github_user),
permissions,
}),
None => Err(AuthFromSessionError::GithubUserRecordNotFound(user.id)),
}
}
Ok(None) => Err(AuthFromSessionError::NoUser),
Err(e) => Err(e),
}
}
pub fn to_context(&self) -> AdminOnlyContext {
AdminOnlyContext {
user: TemplateContextUser {
id: self.user.0.id,
login: self.user.1.login.clone(),
permissions: self.permissions.clone(),
},
}
}
pub fn get_id(&self) -> i32 {
self.user.0.id
}
}
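// Usage sketch (hypothetical handler code, not part of this module): gate an
// admin-only page on this guard and feed the resulting context to a template.
//
//     let admin = AdminOnly::from_session(&conn, &session)?;
//     let ctx = admin.to_context(); // -> AdminOnlyContext { user: ... }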
|
AdminOnly
|
T509_Fib.py
|
class Solution:
def fib(self, n: int) -> int:
memo = {}
def inner(n: int) -> int:
|
return inner(n)
s = Solution()
print(s.fib(4))
# Runtime: 28 ms, beating 90.06% of Python3 submissions
# Memory usage: 14.8 MB, beating 82.45% of Python3 submissions
|
if n == 0: return 0
if n == 1: return 1
if n in memo:
return memo[n]
else:
val = inner(n - 1) + inner(n - 2)
memo[n] = val
return val
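# Trace (illustrative) for the fib(4) call above:
#   inner(2) = inner(1) + inner(0) = 1 + 0 = 1   -> memo[2] = 1
#   inner(3) = inner(2) + inner(1) = 1 + 1 = 2   -> memo[3] = 2
#   inner(4) = inner(3) + inner(2) = 2 + 1 = 3, so fib(4) prints 3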
|
cell_test.go
|
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package s2
import (
"math"
"testing"
"unsafe"
)
// maxCellSize is the upper bound on the number of bytes we want the Cell object to ever be.
const maxCellSize = 48
func TestCellObjectSize(t *testing.T) {
if sz := unsafe.Sizeof(Cell{}); sz > maxCellSize {
t.Errorf("Cell struct too big: %d bytes > %d bytes", sz, maxCellSize)
}
}
func TestCellFaces(t *testing.T) {
edgeCounts := make(map[Point]int)
vertexCounts := make(map[Point]int)
for face := 0; face < 6; face++ {
id := CellIDFromFace(face)
cell := CellFromCellID(id)
if cell.id != id {
t.Errorf("cell.id != id; %v != %v", cell.id, id)
}
if cell.face != int8(face) {
t.Errorf("cell.face != face: %v != %v", cell.face, face)
}
if cell.level != 0 {
t.Errorf("cell.level != 0: %v != 0", cell.level)
}
// Top-level faces have alternating orientations to get RHS coordinates.
if cell.orientation != int8(face&swapMask) {
t.Errorf("cell.orientation != orientation: %v != %v", cell.orientation, face&swapMask)
}
if cell.IsLeaf() {
t.Errorf("cell should not be a leaf: IsLeaf = %v", cell.IsLeaf())
}
for k := 0; k < 4; k++ {
edgeCounts[cell.Edge(k)]++
vertexCounts[cell.Vertex(k)]++
if d := cell.Vertex(k).Dot(cell.Edge(k).Vector); !float64Eq(0.0, d) {
t.Errorf("dot product of vertex and edge failed, got %v, want 0", d)
}
if d := cell.Vertex((k + 1) & 3).Dot(cell.Edge(k).Vector); !float64Eq(0.0, d) {
t.Errorf("dot product for edge and next vertex failed, got %v, want 0", d)
}
if d := cell.Vertex(k).Vector.Cross(cell.Vertex((k + 1) & 3).Vector).Normalize().Dot(cell.Edge(k).Vector); !float64Eq(1.0, d) {
t.Errorf("dot product of cross product for vertices failed, got %v, want 1.0", d)
}
}
}
// Check that edges have multiplicity 2 and vertices have multiplicity 3.
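// (On the cube of 6 faces: each of the 12 edges is shared by exactly 2 faces,
// so the 6*4 = 24 counted edges collapse to multiplicity 2; each of the 8
// vertices is shared by 3 faces, so the 24 counted vertices give multiplicity 3.)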
for k, v := range edgeCounts {
if v != 2 {
t.Errorf("edge %v counts wrong, got %d, want 2", k, v)
}
}
for k, v := range vertexCounts {
if v != 3 {
t.Errorf("vertex %v counts wrong, got %d, want 3", k, v)
}
}
}
func
|
(t *testing.T) {
// Test 1. Check the area of a top level cell.
const level1Cell = CellID(0x1000000000000000)
const wantArea = 4 * math.Pi / 6
if area := CellFromCellID(level1Cell).ExactArea(); !float64Eq(area, wantArea) {
t.Fatalf("Area of a top-level cell %v = %f, want %f", level1Cell, area, wantArea)
}
// Test 2. Iterate inwards from this cell, checking at every level that
// the sum of the areas of the children is equal to the area of the parent.
childIndex := 1
for cell := CellID(0x1000000000000000); cell.Level() < 21; cell = cell.Children()[childIndex] {
childrenArea := 0.0
for _, child := range cell.Children() {
childrenArea += CellFromCellID(child).ExactArea()
}
if area := CellFromCellID(cell).ExactArea(); !float64Eq(childrenArea, area) {
t.Fatalf("Areas of children of a level-%d cell %v don't add up to parent's area. "+
"This cell: %e, sum of children: %e",
cell.Level(), cell, area, childrenArea)
}
childIndex = (childIndex + 1) % 4
}
}
|
TestExactArea
|
ser.rs
|
//! Serialize a Rust data structure to Candid binary format
use super::error::{Error, Result};
use super::parser::{typing::TypeEnv, value::IDLValue};
use super::types;
use super::types::{internal::Opcode, Field, Type};
use byteorder::{LittleEndian, WriteBytesExt};
use leb128::write::{signed as sleb128_encode, unsigned as leb128_encode};
use std::collections::HashMap;
use std::io;
use std::vec::Vec;
/// Use this struct to serialize a sequence of heterogeneous Rust values to an IDL binary message.
#[derive(Default)]
pub struct
|
{
type_ser: TypeSerialize,
value_ser: ValueSerializer,
}
impl IDLBuilder {
pub fn new() -> Self {
IDLBuilder {
type_ser: TypeSerialize::new(),
value_ser: ValueSerializer::new(),
}
}
pub fn arg<'a, T: types::CandidType>(&'a mut self, value: &T) -> Result<&'a mut Self> {
self.type_ser.push_type(&T::ty())?;
value.idl_serialize(&mut self.value_ser)?;
Ok(self)
}
pub fn value_arg<'a>(&'a mut self, value: &IDLValue) -> Result<&'a mut Self> {
use super::CandidType;
self.type_ser.push_type(&value.value_ty())?;
value.idl_serialize(&mut self.value_ser)?;
Ok(self)
}
/// Annotate IDLValue with (TypeEnv, Type). Note that the TypeEnv will be added to the serializer state.
/// If the Type can already be resolved by previous TypeEnvs, you don't need to pass TypeEnv again.
pub fn value_arg_with_type<'a>(
&'a mut self,
value: &IDLValue,
env: &TypeEnv,
t: &Type,
) -> Result<&'a mut Self> {
use super::CandidType;
let env = self.type_ser.env.merge(env)?;
let v = value.annotate_type(env, t)?;
self.type_ser.push_type(t)?;
v.idl_serialize(&mut self.value_ser)?;
Ok(self)
}
pub fn serialize<W: io::Write>(&mut self, mut writer: W) -> Result<()> {
writer.write_all(b"DIDL")?;
self.type_ser.serialize()?;
writer.write_all(&self.type_ser.result)?;
writer.write_all(&self.value_ser.value)?;
Ok(())
}
pub fn serialize_to_vec(&mut self) -> Result<Vec<u8>> {
let mut vec = Vec::new();
self.serialize(&mut vec)?;
Ok(vec)
}
}
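// Usage sketch (assuming a value whose type implements `CandidType`, e.g. `u8`):
//
//     let bytes = IDLBuilder::new().arg(&42u8)?.serialize_to_vec()?;
//     assert_eq!(&bytes[..4], b"DIDL"); // magic header written by `serialize`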
/// A structure for serializing Rust values to IDL.
#[derive(Default)]
pub struct ValueSerializer {
value: Vec<u8>,
}
impl ValueSerializer {
/// Creates a new IDL serializer.
#[inline]
pub fn new() -> Self {
ValueSerializer { value: Vec::new() }
}
fn write_leb128(&mut self, value: u64) -> Result<()> {
leb128_encode(&mut self.value, value)?;
Ok(())
}
fn write(&mut self, bytes: &[u8]) -> Result<()> {
use std::io::Write;
self.value.write_all(bytes)?;
Ok(())
}
}
macro_rules! serialize_num {
($name:ident, $ty:ty, $($method:tt)*) => {
paste::item! {
fn [<serialize_ $name>](self, v: $ty) -> Result<()> {
self.value.$($method)*(v)?;
Ok(())
}
}
};
}
impl<'a> types::Serializer for &'a mut ValueSerializer {
type Error = Error;
type Compound = Compound<'a>;
fn serialize_bool(self, v: bool) -> Result<()> {
let v = if v { 1 } else { 0 };
self.write(&[v])?;
Ok(())
}
fn serialize_int(self, v: &crate::Int) -> Result<()> {
v.encode(&mut self.value)
}
fn serialize_nat(self, v: &crate::Nat) -> Result<()> {
v.encode(&mut self.value)
}
serialize_num!(nat8, u8, write_u8);
serialize_num!(nat16, u16, write_u16::<LittleEndian>);
serialize_num!(nat32, u32, write_u32::<LittleEndian>);
serialize_num!(nat64, u64, write_u64::<LittleEndian>);
serialize_num!(int8, i8, write_i8);
serialize_num!(int16, i16, write_i16::<LittleEndian>);
serialize_num!(int32, i32, write_i32::<LittleEndian>);
serialize_num!(int64, i64, write_i64::<LittleEndian>);
serialize_num!(float32, f32, write_f32::<LittleEndian>);
serialize_num!(float64, f64, write_f64::<LittleEndian>);
fn serialize_text(self, v: &str) -> Result<()> {
let mut buf = Vec::from(v.as_bytes());
self.write_leb128(buf.len() as u64)?;
self.value.append(&mut buf);
Ok(())
}
fn serialize_null(self, _v: ()) -> Result<()> {
Ok(())
}
fn serialize_empty(self) -> Result<()> {
Err(Error::msg("cannot encode empty type"))
}
fn serialize_principal(self, blob: &[u8]) -> Result<()> {
self.write(&[1])?;
self.write_leb128(blob.len() as u64)?;
self.write(blob)?;
Ok(())
}
fn serialize_option<T: ?Sized>(self, v: Option<&T>) -> Result<()>
where
T: super::CandidType,
{
match v {
None => {
self.write_leb128(0)?;
Ok(())
}
Some(v) => {
self.write_leb128(1)?;
v.idl_serialize(self)
}
}
}
fn serialize_variant(self, index: u64) -> Result<Self::Compound> {
self.write_leb128(index)?;
Ok(Self::Compound { ser: self })
}
fn serialize_struct(self) -> Result<Self::Compound> {
Ok(Self::Compound { ser: self })
}
fn serialize_vec(self, len: usize) -> Result<Self::Compound> {
self.write_leb128(len as u64)?;
Ok(Self::Compound { ser: self })
}
}
pub struct Compound<'a> {
ser: &'a mut ValueSerializer,
}
impl<'a> types::Compound for Compound<'a> {
type Error = Error;
fn serialize_element<T: ?Sized>(&mut self, value: &T) -> Result<()>
where
T: types::CandidType,
{
value.idl_serialize(&mut *self.ser)?;
Ok(())
}
}
/// A structure for serializing Rust values to IDL types.
#[derive(Default)]
pub struct TypeSerialize {
type_table: Vec<Vec<u8>>,
type_map: HashMap<Type, i32>,
env: TypeEnv,
args: Vec<Type>,
result: Vec<u8>,
}
impl TypeSerialize {
#[inline]
pub fn new() -> Self {
TypeSerialize {
type_table: Vec::new(),
type_map: HashMap::new(),
env: TypeEnv::new(),
args: Vec::new(),
result: Vec::new(),
}
}
#[inline]
fn build_type(&mut self, t: &Type) -> Result<()> {
if self.type_map.contains_key(t) {
return Ok(());
}
let actual_type = if let Type::Var(id) = t {
self.env.rec_find_type(id)?
} else {
t
}
.clone();
if types::internal::is_primitive(&actual_type) {
return Ok(());
}
// This is a hack to remove (some) equivalent mu types
// from the type table.
// Someone should implement Pottier's O(nlogn) algorithm
// http://gallium.inria.fr/~fpottier/publis/gauthier-fpottier-icfp04.pdf
let unrolled = types::internal::unroll(t);
if let Some(idx) = self.type_map.get(&unrolled) {
let idx = *idx;
self.type_map.insert((*t).clone(), idx);
return Ok(());
}
let idx = self.type_table.len();
self.type_map.insert((*t).clone(), idx as i32);
self.type_table.push(Vec::new());
let mut buf = Vec::new();
match actual_type {
Type::Opt(ref ty) => {
self.build_type(ty)?;
sleb128_encode(&mut buf, Opcode::Opt as i64)?;
self.encode(&mut buf, ty)?;
}
Type::Vec(ref ty) => {
self.build_type(ty)?;
sleb128_encode(&mut buf, Opcode::Vec as i64)?;
self.encode(&mut buf, ty)?;
}
Type::Record(fs) => {
for Field { ty, .. } in fs.iter() {
self.build_type(ty)?;
}
sleb128_encode(&mut buf, Opcode::Record as i64)?;
leb128_encode(&mut buf, fs.len() as u64)?;
for Field { hash, ty, .. } in fs.iter() {
leb128_encode(&mut buf, u64::from(*hash))?;
self.encode(&mut buf, ty)?;
}
}
Type::Variant(fs) => {
for Field { ty, .. } in fs.iter() {
self.build_type(ty)?;
}
sleb128_encode(&mut buf, Opcode::Variant as i64)?;
leb128_encode(&mut buf, fs.len() as u64)?;
for Field { hash, ty, .. } in fs.iter() {
leb128_encode(&mut buf, u64::from(*hash))?;
self.encode(&mut buf, ty)?;
}
}
_ => unreachable!(),
};
self.type_table[idx] = buf;
Ok(())
}
fn push_type(&mut self, t: &Type) -> Result<()> {
self.args.push(t.clone());
self.build_type(t)
}
fn encode(&self, buf: &mut Vec<u8>, t: &Type) -> Result<()> {
if let Type::Var(id) = t {
let actual_type = self.env.rec_find_type(id)?;
if types::internal::is_primitive(&actual_type) {
return self.encode(buf, actual_type);
}
}
match t {
Type::Null => sleb128_encode(buf, Opcode::Null as i64),
Type::Bool => sleb128_encode(buf, Opcode::Bool as i64),
Type::Nat => sleb128_encode(buf, Opcode::Nat as i64),
Type::Int => sleb128_encode(buf, Opcode::Int as i64),
Type::Nat8 => sleb128_encode(buf, Opcode::Nat8 as i64),
Type::Nat16 => sleb128_encode(buf, Opcode::Nat16 as i64),
Type::Nat32 => sleb128_encode(buf, Opcode::Nat32 as i64),
Type::Nat64 => sleb128_encode(buf, Opcode::Nat64 as i64),
Type::Int8 => sleb128_encode(buf, Opcode::Int8 as i64),
Type::Int16 => sleb128_encode(buf, Opcode::Int16 as i64),
Type::Int32 => sleb128_encode(buf, Opcode::Int32 as i64),
Type::Int64 => sleb128_encode(buf, Opcode::Int64 as i64),
Type::Float32 => sleb128_encode(buf, Opcode::Float32 as i64),
Type::Float64 => sleb128_encode(buf, Opcode::Float64 as i64),
Type::Text => sleb128_encode(buf, Opcode::Text as i64),
Type::Reserved => sleb128_encode(buf, Opcode::Reserved as i64),
Type::Empty => sleb128_encode(buf, Opcode::Empty as i64),
Type::Principal => sleb128_encode(buf, Opcode::Principal as i64),
Type::Knot(id) => {
let ty = types::internal::find_type(*id)
.ok_or_else(|| Error::msg("knot TypeId not found"))?;
let idx = self
.type_map
.get(&ty)
.ok_or_else(|| Error::msg(format!("knot type {:?} not found", ty)))?;
sleb128_encode(buf, i64::from(*idx))
}
Type::Var(_) => {
let idx = self
.type_map
.get(&t)
.ok_or_else(|| Error::msg(format!("var type {:?} not found", t)))?;
sleb128_encode(buf, i64::from(*idx))
}
_ => {
let idx = self
.type_map
.get(&t)
.ok_or_else(|| Error::msg(format!("type {:?} not found", t)))?;
sleb128_encode(buf, i64::from(*idx))
}
}?;
Ok(())
}
fn serialize(&mut self) -> Result<()> {
leb128_encode(&mut self.result, self.type_table.len() as u64)?;
self.result.append(&mut self.type_table.concat());
leb128_encode(&mut self.result, self.args.len() as u64)?;
let mut ty_encode = Vec::new();
for t in self.args.iter() {
self.encode(&mut ty_encode, t)?;
}
self.result.append(&mut ty_encode);
Ok(())
}
}
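// Recap of the wire format assembled above: `IDLBuilder::serialize` writes the
// "DIDL" magic, then `TypeSerialize::serialize`'s output (LEB128 type-table
// length, the concatenated type-table entries, LEB128 argument count, and one
// SLEB128 opcode or table index per argument), and finally the raw argument
// values accumulated in `ValueSerializer`.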
|
IDLBuilder
|
contribution_summary_report.py
|
# Copyright (c) 2013, [email protected] and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt
def execute(filters=None):
columns, data = ["Item:link/Item:200","Total Qty:Float:100","Qty Percent:Percent:100","Total Sales:Currency:200","Sales Percent:Percent:100","Total Cost:Currency:200","Cost Percent:Percent:100"], []
# Parameterized query to avoid SQL injection via the date filters
result = frappe.db.sql("""select cr.item_code, sum(cr.amount), sum(cr.qty), sum(cr.cost)
from `tabContribution Result` cr join `tabContribution Tool` p on cr.parent=p.name
where p.docstatus=1 and (p.date between %s and %s) group by cr.item_code
""", (filters.get("from_date"), filters.get("to_date")), as_list=1)
total_cost = 0
total_qty = 0
total_sales = 0
for row in result:
total_cost += flt(row[3])
total_qty += flt(row[2])
total_sales += flt(row[1])
|
for row in result:
# row = [item_code, sum(amount), sum(qty), sum(cost)]; order must match the columns above
data.append([row[0], row[2], (flt(row[2]) / total_qty) * 100, row[1], (flt(row[1]) / total_sales) * 100, row[3], (flt(row[3]) / total_cost) * 100])
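# Worked example (illustrative): an item with qty 5 out of a total qty of
# 50 gets a Qty Percent of (5 / 50) * 100 = 10; the sales and cost percent
# columns follow the same pattern (zero totals would need guarding upstream).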
return columns, data
|
|
timer.py
|
"""Data manager for the timers."""
from datetime import datetime, timedelta
from typing import Optional, List
import pymongo
from bson import ObjectId
from models import TimerModel, TimerListResult, OID_KEY
from mongodb.factory.results import WriteOutcome
from extutils.checker import arg_type_ensure
from extutils.locales import UTC
from extutils.dt import is_tz_naive, now_utc_aware, make_tz_aware
from JellyBot.systemconfig import Bot
from ._base import BaseCollection
__all__ = ("TimerManager",)
DB_NAME = "timer"
class
|
(BaseCollection):
database_name = DB_NAME
collection_name = "timer"
model_class = TimerModel
def build_indexes(self):
self.create_index(TimerModel.Keyword.key)
self.create_index(TimerModel.DeletionTime.key, expireAfterSeconds=0)
@arg_type_ensure
def add_new_timer(
self, ch_oid: ObjectId, keyword: str, title: str, target_time: datetime, *,
countup: bool = False, period_sec: int = 0) -> WriteOutcome:
"""`target_time` is recommended to be tz-aware. Tzinfo will be forced to be UTC if tz-naive."""
# Force target time to be tz-aware in UTC
if is_tz_naive(target_time):
target_time = make_tz_aware(target_time, UTC.to_tzinfo())
mdl = TimerModel(
ChannelOid=ch_oid, Keyword=keyword, Title=title, TargetTime=target_time,
Countup=countup, PeriodSeconds=period_sec)
if not countup:
mdl.deletion_time = target_time + timedelta(days=Bot.Timer.AutoDeletionDays)
mdl.deletion_time = make_tz_aware(mdl.deletion_time, target_time.tzinfo)
outcome, _ = self.insert_one_model(mdl)
return outcome
@arg_type_ensure
def del_timer(self, timer_oid: ObjectId) -> bool:
"""
Delete the timer by its OID.
:param timer_oid: OID of the timer to be deleted
:return: whether the timer was successfully deleted
"""
return self.delete_one({OID_KEY: timer_oid}).deleted_count > 0
@arg_type_ensure
def list_all_timer(self, channel_oid: ObjectId) -> TimerListResult:
"""
List all the timers in the channel ``channel_oid``.
All timers in the returned result will be sorted by their target time (ASC).
:param channel_oid: channel of the timers
:return: a `TimerListResult` containing the timers that match the conditions
"""
return TimerListResult(
self.find_cursor_with_count(
{TimerModel.ChannelOid.key: channel_oid},
sort=[(TimerModel.TargetTime.key, pymongo.ASCENDING)]
)
)
@arg_type_ensure
def get_timers(self, channel_oid: ObjectId, keyword: str) -> TimerListResult:
"""
Get the timers in the channel ``channel_oid`` whose keyword is ``keyword``.
``keyword`` needs to be an exact match, **NOT** a partial match.
All timers in the returned result will be sorted by their target time (ASC).
:param channel_oid: channel of the timers
:param keyword: keyword of the timers
:return: a `TimerListResult` containing the timers that match the conditions
"""
return TimerListResult(
self.find_cursor_with_count(
{TimerModel.Keyword.key: keyword, TimerModel.ChannelOid.key: channel_oid},
sort=[(TimerModel.TargetTime.key, pymongo.ASCENDING)]
)
)
@arg_type_ensure
def get_notify(self, channel_oid: ObjectId, within_secs: Optional[int] = None) -> List[TimerModel]:
"""
Get a list of unnotified timers in ``channel_oid`` which will time up within ``within_secs`` seconds.
Returned timers will be sorted by their target time (ASC).
:param channel_oid: channel of the timers
:param within_secs: timers that will time up within this number of seconds will be returned
:return: a list of timers that are not yet notified and will time up within ``within_secs`` seconds
"""
now = now_utc_aware()
filter_ = {
TimerModel.ChannelOid.key: channel_oid,
TimerModel.TargetTime.key: {
"$lt": now + timedelta(seconds=within_secs if within_secs else Bot.Timer.MaxNotifyRangeSeconds),
"$gt": now
},
TimerModel.Notified.key: False
}
ret = list(self.find_cursor_with_count(filter_, sort=[(TimerModel.TargetTime.key, pymongo.ASCENDING)]))
self.update_many_async(filter_, {"$set": {TimerModel.Notified.key: True}})
return ret
@arg_type_ensure
def get_time_up(self, channel_oid: ObjectId) -> List[TimerModel]:
"""
Get a list of unnotified timers which already timed up in ``channel_oid``.
All timers in the returned result will be sorted by their target time (ASC).
:param channel_oid: channel of the timers
:return: a list of timers that are not yet notified and have already timed up
"""
now = now_utc_aware()
filter_ = {
TimerModel.ChannelOid.key: channel_oid,
TimerModel.TargetTime.key: {"$lt": now},
TimerModel.NotifiedExpired.key: False
}
ret = list(self.find_cursor_with_count(filter_, sort=[(TimerModel.TargetTime.key, pymongo.ASCENDING)]))
self.update_many_async(filter_, {"$set": {TimerModel.NotifiedExpired.key: True}})
return ret
@staticmethod
def get_notify_within_secs(message_frequency: float):
"""
Get a time range calculated by ``message_frequency`` which can be used to get the timers for notification.
Calculation formula: **message frequency x 20 + 600**
If the calculated result is greater than ``Bot.Timer.MaxNotifyRangeSeconds``,
then ``Bot.Timer.MaxNotifyRangeSeconds`` will be returned instead.
:param message_frequency: message frequency in seconds per message
:return: time range to be used to get the timers for notification
"""
return min(message_frequency * 20 + 600, Bot.Timer.MaxNotifyRangeSeconds)
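# Worked example (illustrative): at one message per 30 seconds
# (message_frequency = 30), the window is 30 * 20 + 600 = 1200 seconds,
# unless Bot.Timer.MaxNotifyRangeSeconds is smaller, in which case that
# cap is returned instead.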
TimerManager = _TimerManager()
|
_TimerManager
|
auth.rs
|
use chrono::Utc;
use diesel::PgConnection;
use diesel::prelude::*;
use jsonwebtoken::{Header, Validation};
use jsonwebtoken::{DecodingKey, EncodingKey};
use jsonwebtoken::errors::Result;
use jsonwebtoken::TokenData;
use rocket::http::Status;
use rocket::request::{self, FromRequest, Outcome, Request};
use rocket::response::content::Json;
use rocket::response::status;
use crate::constants;
use crate::database::PostgresDbConn;
use crate::models::response::Response;
use crate::models::user::{LoginInfoDTO, User};
use crate::schema::users::dsl::*;
static ONE_WEEK: i64 = 60 * 60 * 24 * 7; // Number of seconds in a week
/// A token that can be passed in the authentication header of an HTTP request
/// to authenticate a user.
#[derive(Debug, Serialize, Deserialize)]
pub struct UserToken {
// issued at (seconds since Unix epoch)
pub iat: i64,
// expiration (seconds since Unix epoch)
pub exp: i64,
// user id
pub id: i32,
// username
pub username: String,
}
#[rocket::async_trait]
impl<'r> FromRequest<'r> for UserToken {
type Error = status::Custom<Json<Response>>;
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
if let Some(authen_header) = request.headers().get_one("Authorization") {
let authen_str = authen_header.to_string();
if authen_str.starts_with("Bearer") {
let token = authen_str[6..authen_str.len()].trim();
if let Ok(token_data) = decode_token(token.to_string()) {
return Outcome::Success(token_data.claims);
}
}
}
Outcome::Failure((
Status::BadRequest,
status::Custom(
Status::Unauthorized,
Json(Response {
message: String::from(constants::MESSAGE_INVALID_TOKEN),
data: serde_json::to_value("").unwrap(),
}),
),
))
}
}
/// Encodes a token for the given login information as a string.
pub fn
|
(login: LoginInfoDTO) -> String {
let now = Utc::now().timestamp(); // seconds since Unix epoch
let payload = UserToken {
iat: now,
exp: now + ONE_WEEK,
id: login.id,
username: login.username,
};
jsonwebtoken::encode(&Header::default(), &payload, &EncodingKey::from_secret(include_bytes!("secret.key"))).unwrap()
}
/// Attempts to decode the given string token into its raw data.
fn decode_token(token: String) -> Result<TokenData<UserToken>> {
jsonwebtoken::decode::<UserToken>(&token, &DecodingKey::from_secret(include_bytes!("secret.key")), &Validation::default())
}
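// Round-trip sketch (values are hypothetical; `LoginInfoDTO` must carry at
// least the `id` and `username` used for the claims):
//
//     let token = generate_token(login); // HS256-signed, expires in ONE_WEEK
//     let claims = decode_token(token).unwrap().claims;
//     assert_eq!(claims.exp - claims.iat, ONE_WEEK);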
|
generate_token
|
app.js
|
function setFileName() {
const text = document.querySelector('#dropzoneText');
const input = document.querySelector('#image');
if (input.files.length) {
text.innerHTML = input.files[0].name;
} else {
text.innerHTML = `<button type="button" id="upload" class="font-medium text-indigo-600 hover:text-indigo-500 focus:outline-none transition duration-150 ease-in-out">Upload an image</button>or drag and drop`;
}
}
function setupDropzone() {
const dropzone = document.querySelector('#dropzone');
const input = document.querySelector('#image');
dropzone.ondragover = dropzone.ondragenter = (e) => {
e.preventDefault();
dropzone.classList.replace('border-gray-300', 'border-blue-300');
};
dropzone.ondragleave = () => {
dropzone.classList.replace('border-blue-300', 'border-gray-300');
};
|
e.preventDefault();
dropzone.classList.replace('border-blue-300', 'border-gray-300');
input.files = e.dataTransfer.files;
setFileName();
};
input.onchange = () => {
setFileName();
};
const button = document.querySelector('#upload');
button.onclick = () => {
const input = document.querySelector('#image');
input.click();
};
}
document.body.onload = () => {
setupDropzone();
};
|
dropzone.ondrop = (e) => {
|
repl.go
|
package repl
import (
"bufio"
"fmt"
"github.com/eyasuyuki/puml/lexer"
"github.com/eyasuyuki/puml/token"
"io"
)
const PROMPT = ">> "
func Start(in io.Reader, out io.Writer)
|
{
scanner := bufio.NewScanner(in)
for {
fmt.Fprint(out, PROMPT)
scanned := scanner.Scan()
if !scanned {
return
}
line := scanner.Text()
l := lexer.New(line)
for tok := l.NextToken(); tok.Type != token.EOF; tok = l.NextToken() {
fmt.Fprintf(out, "%+v\n", tok)
}
}
}
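// Usage sketch (from a hypothetical main package):
//
//	repl.Start(os.Stdin, os.Stdout)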
|
|
macros.py
|
# Copyright 2021 the authors.
# This file is part of Hy, which is free software licensed under the Expat
# license. See the LICENSE.
import sys
import builtins
import importlib
import inspect
import pkgutil
import traceback
from ast import AST
from funcparserlib.parser import NoParseError
from hy._compat import PY3_8
from hy.model_patterns import whole
from hy.models import replace_hy_obj, Expression, Symbol, as_model, is_unpack
from hy.lex import mangle, unmangle
from hy.errors import (HyLanguageError, HyMacroExpansionError, HyTypeError,
HyRequireError)
import hy.compiler
EXTRA_MACROS = ["hy.core.result_macros", "hy.core.macros"]
def macro(name):
"""Decorator to define a macro called `name`.
"""
return lambda fn: install_macro(name, fn, fn)
def pattern_macro(names, pattern, shadow=None):
pattern = whole(pattern)
py_version_required = None
if isinstance(names, tuple):
py_version_required, names = names
def dec(fn):
def wrapper_maker(name):
def wrapper(hy_compiler, *args):
if (shadow and
any(is_unpack("iterable", x) for x in args)):
# Try a shadow function call with this name instead.
return Expression([
Symbol('hy.core.shadow.' + name),
*args]).replace(hy_compiler.this)
expr = hy_compiler.this
root = unmangle(expr[0])
if (py_version_required and
sys.version_info < py_version_required):
raise hy_compiler._syntax_error(expr,
'`{}` requires Python {} or later'.format(
root,
'.'.join(map(str, py_version_required))))
try:
parse_tree = pattern.parse(args)
except NoParseError as e:
raise hy_compiler._syntax_error(
expr[min(e.state.pos + 1, len(expr) - 1)],
"parse error for pattern macro '{}': {}".format(
root, e.msg.replace("<EOF>", "end of form")))
return fn(hy_compiler, expr, root, *parse_tree)
return wrapper
for name in ([names] if isinstance(names, str) else names):
install_macro(name, wrapper_maker(name), fn)
return fn
return dec
def install_macro(name, fn, module_of):
name = mangle(name)
fn = rename_function(fn, name)
(inspect.getmodule(module_of).__dict__
.setdefault('__macros__', {})[name]) = fn
return fn
def _same_modules(source_module, target_module):
"""Compare the filenames associated with the given modules names.
This tries to not actually load the modules.
"""
if not (source_module or target_module):
return False
if target_module == source_module:
return True
def _get_filename(module):
filename = None
try:
if not inspect.ismodule(module):
loader = pkgutil.get_loader(module)
if isinstance(loader, importlib.machinery.SourceFileLoader):
filename = loader.get_filename()
else:
filename = inspect.getfile(module)
except (TypeError, ImportError):
pass
return filename
source_filename = _get_filename(source_module)
target_filename = _get_filename(target_module)
return (source_filename and target_filename and
source_filename == target_filename)
def require(source_module, target_module, assignments, prefix=""):
"""Load macros from one module into the namespace of another.
This function is called from the macro also named `require`.
Parameters
----------
source_module: str or types.ModuleType
The module from which macros are to be imported.
target_module: str, types.ModuleType or None
The module into which the macros will be loaded. If `None`, then
the caller's namespace.
The latter is useful during evaluation of generated AST/bytecode.
assignments: str or list of tuples of strs
The string "ALL" or a list of macro name and alias pairs.
prefix: str, optional ("")
If nonempty, its value is prepended to the name of each imported macro.
This allows one to emulate namespaced macros, like
"mymacromodule.mymacro", which looks like an attribute of a module.
Returns
-------
out: boolean
Whether or not macros were actually transferred.
"""
if target_module is None:
parent_frame = inspect.stack()[1][0]
target_namespace = parent_frame.f_globals
target_module = target_namespace.get('__name__', None)
elif isinstance(target_module, str):
target_module = importlib.import_module(target_module)
target_namespace = target_module.__dict__
elif inspect.ismodule(target_module):
target_namespace = target_module.__dict__
else:
raise HyTypeError('`target_module` is not a recognized type: {}'.format(
type(target_module)))
# Let's do a quick check to make sure the source module isn't actually
# the module being compiled (e.g. when `runpy` executes a module's code
# in `__main__`).
# We use the module's underlying filename for this (when they exist), since
# it's the most "fixed" attribute.
if _same_modules(source_module, target_module):
return False
if not inspect.ismodule(source_module):
try:
if source_module.startswith("."):
source_dirs = source_module.split(".")
target_dirs = (getattr(target_module, "__name__", target_module)
.split("."))
while (len(source_dirs) > 1
and source_dirs[0] == ""
and target_dirs):
source_dirs.pop(0)
target_dirs.pop()
package = ".".join(target_dirs + source_dirs[:-1])
else:
package = None
source_module = importlib.import_module(source_module, package)
except ImportError as e:
raise HyRequireError(e.args[0]).with_traceback(None)
source_macros = source_module.__dict__.setdefault('__macros__', {})
if not source_module.__macros__:
if assignments != "ALL":
for name, alias in assignments:
try:
require(f"{source_module.__name__}.{mangle(name)}",
target_module,
"ALL",
prefix=alias)
except HyRequireError as e:
raise HyRequireError(f"Cannot import name '{name}'"
f" from '{source_module.__name__}'"
f" ({source_module.__file__})")
return True
else:
return False
target_macros = target_namespace.setdefault('__macros__', {})
if prefix:
prefix += "."
if assignments == "ALL":
name_assigns = [(k, k) for k in source_macros.keys()]
else:
name_assigns = assignments
for name, alias in name_assigns:
_name = mangle(name)
alias = mangle('#' + prefix + unmangle(alias)[1:]
if unmangle(alias).startswith('#')
else prefix + alias)
if _name in source_module.__macros__:
target_macros[alias] = source_macros[_name]
else:
raise HyRequireError('Could not require name {} from {}'.format(
_name, source_module))
return True
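# Usage sketch (module and macro names are hypothetical):
#   require("mymacros", None, [("when-let", "when-let")])   # one macro, same name
#   require("mymacros", None, "ALL", prefix="mm")            # everything, namespaced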
def load_macros(module):
"""Load the hy builtin macros into module `module_name`,
removing any prior macros set.
It is an error to call this on any module in `hy.core`.
"""
builtin_macros = EXTRA_MACROS
module.__macros__ = {}
for builtin_mod_name in builtin_macros:
builtin_mod = importlib.import_module(builtin_mod_name)
# This may overwrite macros in the module.
if hasattr(builtin_mod, '__macros__'):
module.__macros__.update(getattr(builtin_mod, '__macros__', {}))
class MacroExceptions():
"""wrap non ``HyLanguageError``'s in ``HyMacroExpansionError`` preserving stack trace
used in lieu of ``@contextmanager`` to ensure stack trace contains only internal hy
modules for consistent filtering.
"""
def __init__(self, module, macro_tree, compiler=None):
self.module = module
self.macro_tree = macro_tree
self.compiler = compiler
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
if exc_type is None:
return True
elif not issubclass(exc_type, HyLanguageError):
if self.compiler:
filename = self.compiler.filename
source = self.compiler.source
else:
filename = None
source = None
exc_msg = ' '.join(traceback.format_exception_only(
sys.exc_info()[0], sys.exc_info()[1]))
msg = "expanding macro {}\n ".format(str(self.macro_tree[0]))
msg += exc_msg
raise HyMacroExpansionError(msg, self.macro_tree, filename, source)
else:
return False
def
|
(tree, module, compiler=None, once=False, result_ok=True):
"""Expand the toplevel macros for the given Hy AST tree.
Load the macros from the given `module`, then expand the (top-level) macros
in `tree` until we no longer can.
`Expression` resulting from macro expansions are assigned the module in
which the macro function is defined (determined using `inspect.getmodule`).
If the resulting `Expression` is itself macro expanded, then the namespace
of the assigned module is checked first for a macro corresponding to the
expression's head/car symbol. If the head/car symbol of such a `Expression`
is not found among the macros of its assigned module's namespace, the
outer-most namespace--e.g. the one given by the `module` parameter--is used
as a fallback.
Parameters
----------
tree: hy.models.Object or list
Hy AST tree.
module: str or types.ModuleType
Module used to determine the local namespace for macros.
compiler: HyASTCompiler, optional
The compiler object passed to expanded macros.
once: boolean, optional
Only expand the first macro in `tree`.
result_ok: boolean, optional
Whether a macro may return a compiler `Result` or AST node as-is;
if false, the original `tree` is returned in that case.
Returns
-------
out: hy.models.Object
Returns a mutated tree with macros expanded.
"""
if not inspect.ismodule(module):
module = importlib.import_module(module)
assert not compiler or compiler.module == module
while isinstance(tree, Expression) and tree:
fn = tree[0]
if fn in ("quote", "quasiquote") or not isinstance(fn, Symbol):
break
fn = mangle(fn)
expr_modules = (([] if not hasattr(tree, 'module') else [tree.module])
+ [module])
expr_modules.append(builtins)
# Choose the first namespace with the macro.
m = next((mod.__macros__[fn]
for mod in expr_modules
if fn in getattr(mod, '__macros__', ())),
None)
if not m:
break
with MacroExceptions(module, tree, compiler):
if compiler:
compiler.this = tree
obj = m(compiler, *tree[1:])
if isinstance(obj, (hy.compiler.Result, AST)):
return obj if result_ok else tree
if isinstance(obj, Expression):
obj.module = inspect.getmodule(m)
tree = replace_hy_obj(obj, tree)
if once:
break
tree = as_model(tree)
return tree
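# Sketch (with a hypothetical `tree` and `mod`): `once=True` performs a single
# outermost expansion step, while the default keeps expanding to a fixpoint:
#   head_only = macroexpand(tree, mod, once=True)
#   fully_expanded = macroexpand(tree, mod)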
def macroexpand_1(tree, module, compiler=None):
"""Expand the toplevel macro from `tree` once, in the context of
`compiler`."""
return macroexpand(tree, module, compiler, once=True)
def rename_function(func, new_name):
"""Creates a copy of a function and [re]sets the name at the code-object
level.
"""
c = func.__code__
new_code = type(c)(*[getattr(c, 'co_{}'.format(a))
if a != 'name' else str(new_name)
for a in code_obj_args])
_fn = type(func)(new_code, func.__globals__, str(new_name),
func.__defaults__, func.__closure__)
_fn.__dict__.update(func.__dict__)
return _fn
code_obj_args = ['argcount', 'posonlyargcount', 'kwonlyargcount', 'nlocals', 'stacksize',
'flags', 'code', 'consts', 'names', 'varnames', 'filename', 'name',
'firstlineno', 'lnotab', 'freevars', 'cellvars']
if not PY3_8:
code_obj_args.remove("posonlyargcount")
|
macroexpand
|
networks.py
|
"""
General networks for pytorch.
Algorithm-specific networks should go else-where.
"""
import torch
from torch import nn as nn
from torch.nn import functional as F
from rlkit.policies.base import Policy
from rlkit.torch import pytorch_util as ptu
from rlkit.torch.core import PyTorchModule
from rlkit.torch.data_management.normalizer import TorchFixedNormalizer
from rlkit.torch.modules import LayerNorm
import math
def identity(x):
return x
class Mlp(PyTorchModule):
def __init__(
self,
hidden_sizes,
output_size,
input_size,
init_w=3e-3,
hidden_activation=F.relu,
output_activation=identity,
hidden_init=ptu.fanin_init,
b_init_value=0.1,
layer_norm=False,
layer_norm_kwargs=None,
):
self.save_init_params(locals())
super().__init__()
if layer_norm_kwargs is None:
layer_norm_kwargs = dict()
self.input_size = input_size
self.output_size = output_size
self.hidden_sizes = hidden_sizes
self.hidden_activation = hidden_activation
self.output_activation = output_activation
self.layer_norm = layer_norm
self.fcs = []
self.layer_norms = []
in_size = input_size
for i, next_size in enumerate(hidden_sizes):
fc = nn.Linear(in_size, next_size)
in_size = next_size
hidden_init(fc.weight)
fc.bias.data.fill_(b_init_value)
self.__setattr__("fc{}".format(i), fc)
self.fcs.append(fc)
if self.layer_norm:
ln = LayerNorm(next_size)
self.__setattr__("layer_norm{}".format(i), ln)
self.layer_norms.append(ln)
self.last_fc = nn.Linear(in_size, output_size)
|
def forward(self, input, return_preactivations=False):
h = input
for i, fc in enumerate(self.fcs):
h = fc(h)
if self.layer_norm and i < len(self.fcs) - 1:
h = self.layer_norms[i](h)
h = self.hidden_activation(h)
preactivation = self.last_fc(h)
output = self.output_activation(preactivation)
if return_preactivations:
return output, preactivation
else:
return output
class FlattenMlp(Mlp):
"""
if there are multiple inputs, concatenate along dim 1
"""
def forward(self, *inputs, **kwargs):
flat_inputs = torch.cat(inputs, dim=1)
return super().forward(flat_inputs, **kwargs)
class MlpPolicy(Mlp, Policy):
"""
A simpler interface for creating policies.
"""
def __init__(
self,
*args,
obs_normalizer: TorchFixedNormalizer = None,
**kwargs
):
self.save_init_params(locals())
super().__init__(*args, **kwargs)
self.obs_normalizer = obs_normalizer
def forward(self, obs, **kwargs):
if self.obs_normalizer:
obs = self.obs_normalizer.normalize(obs)
return super().forward(obs, **kwargs)
def get_action(self, obs_np):
actions = self.get_actions(obs_np[None])
return actions[0, :], {}
def get_actions(self, obs):
return self.eval_np(obs)
class TanhMlpPolicy(MlpPolicy):
"""
A helper class since most policies have a tanh output activation.
"""
def __init__(self, *args, **kwargs):
self.save_init_params(locals())
super().__init__(*args, output_activation=torch.tanh, **kwargs)
class MlpEncoder(FlattenMlp):
'''
encode context via MLP
'''
def reset(self, num_tasks=1):
pass
def forward_seq(self,context):
t,b,_ = context.size()
input = context.view(t*b,-1)
out = self.forward(input)
return out.view(t,b,-1)
class RecurrentEncoder(FlattenMlp):
'''
encode context via recurrent network
'''
def __init__(self,
*args,
**kwargs
):
self.save_init_params(locals())
super().__init__(*args, **kwargs)
self.hidden_dim = self.hidden_sizes[-1]
self.register_buffer('hidden', torch.zeros(1, 1, self.hidden_dim))
# input should be (task, seq, feat) and hidden should be (1, task, feat)
self.lstm = nn.LSTM(self.hidden_dim, self.hidden_dim, num_layers=1, batch_first=True)
def forward(self, in_, return_preactivations=False):
# expects inputs of dimension (task, seq, feat)
task, seq, feat = in_.size()
out = in_.view(task * seq, feat)
# embed with MLP
for i, fc in enumerate(self.fcs):
out = fc(out)
out = self.hidden_activation(out)
out = out.view(task, seq, -1)
out, (hn, cn) = self.lstm(out, (self.hidden, torch.zeros(self.hidden.size()).to(ptu.device)))
self.hidden = hn
# take the last hidden state to predict z
out = out[:, -1, :]
# output layer
preactivation = self.last_fc(out)
output = self.output_activation(preactivation)
if return_preactivations:
return output, preactivation
else:
return output
def reset(self, num_tasks=1):
self.hidden = self.hidden.new_full((1, num_tasks, self.hidden_dim), 0)
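# Shape walkthrough (illustrative numbers): with task=8, seq=100, feat=27 and
# hidden_sizes[-1]=200, the MLP embeds (800, 27) -> (800, 200), the LSTM reads
# (8, 100, 200) with hidden state (1, 8, 200), and the last step's output
# (8, 200) is projected by last_fc to (8, output_size) to predict z.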
class RNN(FlattenMlp):
'''
encode context via recurrent network
'''
def __init__(self,
*args,
**kwargs
):
self.save_init_params(locals())
super().__init__(*args, **kwargs)
self.hidden_dim = self.hidden_sizes[-1]
self.register_buffer('hidden', torch.zeros(1, 1, self.hidden_dim))
# input should be (task, seq, feat) and hidden should be (1, task, feat)
self.lstm = nn.LSTM(self.hidden_dim, self.hidden_dim, num_layers=1, batch_first=True)
def inner_forward(self, in_, return_preactivations=False):
# expects inputs of dimension (task, seq, feat)
task, seq, feat = in_.size()
out = in_.view(task * seq, feat)
# embed with MLP
for i, fc in enumerate(self.fcs):
out = fc(out)
out = self.hidden_activation(out)
out = out.view(task, seq, -1)
out, (hn, cn) = self.lstm(out, (self.hidden, torch.zeros(self.hidden.size()).to(ptu.device)))
self.hidden = hn
# take the last hidden state to predict z
out = out.contiguous()
out = out.view(task * seq, -1)
# output layer
#preactivation = self.last_fc(out)
#output = self.output_activation(preactivation)
if return_preactivations:
return out, out
else:
return out
def forward(self, in_, return_preactivations=False):
# expects inputs of dimension (task, seq, feat)
task, seq, feat = in_.size()
out = in_.view(task * seq, feat)
# embed with MLP
for i, fc in enumerate(self.fcs):
out = fc(out)
out = self.hidden_activation(out)
out = out.view(task, seq, -1)
        out, (hn, cn) = self.lstm(out, (self.hidden, torch.zeros_like(self.hidden)))
self.hidden = hn
# take the last hidden state to predict z
out = out.contiguous()
out = out.view(task * seq, -1)
# output layer
preactivation = self.last_fc(out)
output = self.output_activation(preactivation)
if return_preactivations:
            return output, preactivation
else:
return output
def inner_reset(self, num_tasks=1):
self.hidden = self.hidden.new_full((1, num_tasks, self.hidden_dim), 0)
class SnailEncoder(FlattenMlp):
def __init__(self,
input_length,
*args,
**kwargs
):
self.save_init_params(locals())
super().__init__(*args, **kwargs)
self.hidden_dim = self.hidden_sizes[-1]
self.register_buffer('hidden', torch.zeros(1, 1, self.hidden_dim))
self.input_length = input_length
# input should be (task, seq, feat) and hidden should be (1, task, feat)
#self.lstm = nn.LSTM(self.hidden_dim, self.hidden_dim, num_layers=1, batch_first=True)
layer_count = math.ceil(math.log(input_length)/math.log(2))
self.TC1 = TCBlock(self.hidden_dim,input_length,16)
self.atten1 = AttentionBlock(self.hidden_dim+16*layer_count,32,32)
self.TC2 = TCBlock(self.hidden_dim+16*layer_count+32,input_length,16)
self.atten2 = AttentionBlock(self.hidden_dim+16*layer_count*2+32,32,32)
self.out_layer = nn.Linear(self.hidden_dim+16*layer_count*2+32+32,self.output_size)
self.var_start = int(self.output_size / 2)
def forward(self, in_, return_preactivations=False):
# expects inputs of dimension (task, seq, feat)
task, seq, feat = in_.size()
out = in_.view(task * seq, feat)
# embed with MLP
for i, fc in enumerate(self.fcs):
out = fc(out)
out = self.hidden_activation(out)
out = out.view(task, seq, -1)
out = out.permute(0,2,1)
#print(out.shape)
out = self.TC1(out)
out = self.atten1(out)
out = self.TC2(out)
out = self.atten2(out)
out = out[:, :, -1]
#print('o',out.shape)
# output layer
preactivation = self.out_layer(out)
output = self.output_activation(preactivation)
#temp = F.softplus(output[..., self.var_start:])
#output[..., self.var_start:] = temp
if return_preactivations:
return output, preactivation
else:
return output
def forward_seq(self, in_, return_preactivations=False):
# expects inputs of dimension (task, seq, feat)
task, seq, feat = in_.size()
in_ = in_.contiguous()
out = in_.view(task * seq, feat)
# embed with MLP
for i, fc in enumerate(self.fcs):
out = fc(out)
out = self.hidden_activation(out)
out = out.view(task, seq, -1)
out = out.permute(0,2,1)
#print(out.shape)
out = self.TC1(out)
out = self.atten1(out)
out = self.TC2(out)
out = self.atten2(out)
out = out.permute(0,2,1)
out = out.view(task * seq,-1)
preactivation = self.out_layer(out)
output = self.output_activation(preactivation)
#temp = F.softplus(output[..., self.var_start:])
#output[..., self.var_start:] = temp
#output = output.view(task,seq,-1)
if return_preactivations:
return output, preactivation
else:
return output
def reset(self,num_tasks=1):
return
class MyMlpEncoder(FlattenMlp):
    '''
    encode context via MLP
    '''
    def reset(self, num_tasks=1):
        pass
    def forward_seq(self, context):
        t, b, _ = context.size()
        input = context.view(t * b, -1)
        # dispatch to FlattenMlp.forward; calling self.forward on the
        # flattened context would recurse forever
        out = super().forward(input)
        return out
    def forward(self, context):
        t, b, _ = context.size()
        input = context.view(t * b, -1)
        out = super().forward(input)
        return out
class CausalConv1d(nn.Module):
"""A 1D causal convolution layer.
Input: (B, D_in, T), where B is the minibatch size, D_in is the number of
dimensions per step, and T is the number of steps.
Output: (B, D_out, T), where B is the minibatch size, D_out is the number
of dimensions in the output, and T is the number of steps.
Arguments:
in_channels (int): number of input channels
out_channels (int): number of output channels
"""
def __init__(self, in_channels, out_channels, dilation=1):
super(CausalConv1d, self).__init__()
self.padding = dilation
self.causal_conv = nn.Conv1d(
in_channels,
out_channels,
2,
padding = self.padding,
dilation = dilation
)
def forward(self, minibatch):
return self.causal_conv(minibatch)[:, :, :-self.padding]
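# Shape sketch for CausalConv1d (illustrative, not from the original source):
# with kernel size 2 and dilation d, the layer pads the input by d on both
# sides and trims d steps from the right, so the sequence length is preserved
# and output step t only sees inputs at steps <= t:
#
#     conv = CausalConv1d(in_channels=8, out_channels=16, dilation=4)
#     y = conv(torch.randn(2, 8, 100))      # -> (2, 16, 100)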
class DenseBlock(nn.Module):
"""Two parallel 1D causal convolution layers w/tanh and sigmoid activations
Input: (B, D_in, T), where B is the minibatch size, D_in is the number of
dimensions of the input, and T is the number of steps.
Output: (B, D_in+F, T), where where `B` is the minibatch size, `D_in` is the
number of dimensions of the input, `F` is the number of filters, and `T`
is the length of the input sequence.
Arguments:
in_channels (int): number of input channels
filters (int): number of filters per channel
"""
def __init__(self, in_channels, filters, dilation=1):
super(DenseBlock, self).__init__()
self.causal_conv1 = CausalConv1d(
in_channels,
filters,
dilation=dilation
)
self.causal_conv2 = CausalConv1d(
in_channels,
filters,
dilation=dilation
)
    def forward(self, minibatch):
        # gated activation: tanh and sigmoid gates over two parallel causal convs
        # (torch.tanh/torch.sigmoid replace the deprecated F.tanh/F.sigmoid)
        tanh = torch.tanh(self.causal_conv1(minibatch))
        sig = torch.sigmoid(self.causal_conv2(minibatch))
        out = torch.cat([minibatch, tanh * sig], dim=1)
        return out
class TCBlock(nn.Module):
"""A stack of DenseBlocks which dilates to desired sequence length
The TCBlock adds `ceil(log_2(seq_len))*filters` channels to the output.
Input: (B, D_in, T), where B is the minibatch size, D_in is the number of
dimensions of the input, and T is the number of steps.
Output: (B, D_in+F, T), where where `B` is the minibatch size, `D_in` is the
number of dimensions of the input, `F` is the number of filters, and `T`
is the length of the input sequence.
Arguments:
in_channels (int): channels for the input
seq_len (int): length of the sequence. The number of denseblock layers
is log base 2 of `seq_len`.
filters (int): number of filters per channel
"""
def __init__(self, in_channels, seq_len, filters):
super(TCBlock, self).__init__()
layer_count = math.ceil(math.log(seq_len)/math.log(2))
blocks = []
channel_count = in_channels
for layer in range(layer_count):
block = DenseBlock(channel_count, filters, dilation=2**layer)
blocks.append(block)
channel_count += filters
self.blocks = nn.Sequential(*blocks)
def forward(self, minibatch):
return self.blocks(minibatch)
class AttentionBlock(nn.Module):
"""An attention mechanism similar to Vaswani et al (2017)
The input of the AttentionBlock is `BxDxT` where `B` is the input
minibatch size, `D` is the dimensions of each feature, `T` is the length of
the sequence.
The output of the AttentionBlock is `Bx(D+V)xT` where `V` is the size of the
attention values.
Arguments:
input_dims (int): the number of dimensions (or channels) of each element
in the input sequence
k_size (int): the size of the attention keys
v_size (int): the size of the attention values
"""
def __init__(self, input_dims, k_size, v_size):
super(AttentionBlock, self).__init__()
self.key_layer = nn.Linear(input_dims, k_size)
self.query_layer = nn.Linear(input_dims, k_size)
self.value_layer = nn.Linear(input_dims, v_size)
self.sqrt_k = math.sqrt(k_size)
def forward(self, minibatch):
minibatch = minibatch.permute(0,2,1)
keys = self.key_layer(minibatch)
queries = self.query_layer(minibatch)
values = self.value_layer(minibatch)
logits = torch.bmm(queries, keys.transpose(2,1))
        # causal mask: position t may only attend to positions <= t
        # (bool mask replaces the deprecated byte mask, without going through .data)
        mask = torch.triu(torch.ones(logits.size(1), logits.size(2), dtype=torch.bool, device=logits.device), 1)
        mask = mask.unsqueeze(0).expand_as(logits)
        logits = logits.masked_fill(mask, float('-inf'))
probs = F.softmax(logits / self.sqrt_k, dim=2)
read = torch.bmm(probs, values)
return torch.cat([minibatch, read], dim=2).permute(0,2,1)
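# How the SNAIL pieces compose (hedged sketch; the channel arithmetic follows
# the DenseBlock/TCBlock/AttentionBlock docstrings above, and D=32, T=64,
# F=16, K=V=32 are chosen purely for illustration):
#
#     x = torch.randn(2, 32, 64)            # (B, D, T)
#     tc = TCBlock(32, 64, 16)              # adds ceil(log2(64))*16 = 96 channels
#     att = AttentionBlock(32 + 96, 32, 32) # adds V=32 channels
#     y = att(tc(x))                        # -> (2, 160, 64)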
|
self.last_fc.weight.data.uniform_(-init_w, init_w)
self.last_fc.bias.data.uniform_(-init_w, init_w)
|
main.go
|
//lint:file-ignore U1000 using template
package main
import (
"bufio"
"fmt"
"math"
"math/bits"
"os"
"sort"
"strconv"
"strings"
)
// INF18 is a large number used as an infinity value.
const INF18 = int(1e18)
// INF9 is a large number used as an infinity value.
const INF9 = int(1e9)
func main() {
a, b, c := nextInt3()
if a*a+b*b < c*c {
fmt.Println("Yes")
} else {
|
func debug(args ...interface{}) {
fmt.Fprintln(os.Stderr, args...)
}
// ==================================================
// Input operations
// ==================================================
var stdin = initStdin()
func initStdin() *bufio.Scanner {
bufsize := 1 * 1024 * 1024 // 1 MB
var stdin = bufio.NewScanner(os.Stdin)
stdin.Buffer(make([]byte, bufsize), bufsize)
stdin.Split(bufio.ScanWords)
return stdin
}
func nextString() string {
stdin.Scan()
return stdin.Text()
}
// Slow, so avoid using it whenever possible.
func nextBytes() []byte {
return []byte(nextString())
}
func nextInt() int {
i, _ := strconv.Atoi(nextString())
return i
}
func nextInt2() (int, int) {
return nextInt(), nextInt()
}
func nextInt3() (int, int, int) {
return nextInt(), nextInt(), nextInt()
}
func nextInt4() (int, int, int, int) {
return nextInt(), nextInt(), nextInt(), nextInt()
}
func nextInts(n int) sort.IntSlice {
a := make([]int, n)
for i := 0; i < n; i++ {
a[i] = nextInt()
}
return sort.IntSlice(a)
}
// toi converts a numeric byte to an int.
func toi(b byte) int {
return int(b - '0')
}
func nextLongIntAsArray() []int {
s := nextString()
l := len(s)
arr := make([]int, l)
for i := 0; i < l; i++ {
arr[i] = toi(s[i])
}
return arr
}
func nextFloat() float64 {
f, _ := strconv.ParseFloat(nextString(), 64)
return f
}
// nextFloatAsInt reads a decimal number and returns it multiplied by 10^base as an integer.
func nextFloatAsInt(base int) int {
s := nextString()
index := strings.IndexByte(s, '.')
if index == -1 {
n, _ := strconv.Atoi(s)
return n * pow(10, base)
}
for s[len(s)-1] == '0' {
s = s[:len(s)-1]
}
s1, s2 := s[:index], s[index+1:]
n, _ := strconv.Atoi(s1)
m, _ := strconv.Atoi(s2)
return n*pow(10, base) + m*pow(10, base-len(s2))
}
// ==================================================
// Numeric operations
// ==================================================
// max returns the larger of a and b.
func max(a, b int) int {
if a > b {
return a
}
return b
}
// min returns the smaller of a and b.
func min(a, b int) int {
if a < b {
return a
}
return b
}
// abs returns the absolute value of a.
func abs(a int) int {
if a > 0 {
return a
}
return -a
}
// pow returns a to the power of b.
// Note: it goes through float64, so results beyond 2^53 lose precision.
func pow(a, b int) int {
return int(math.Pow(float64(a), float64(b)))
}
// divceil returns a/b rounded toward positive infinity.
func divceil(a, b int) int {
if a%b == 0 || a/b < 0 {
return a / b
}
return (a + b - 1) / b
}
// divfloor returns a/b rounded toward negative infinity.
func divfloor(a, b int) int {
if a%b == 0 || a/b > 0 {
return a / b
}
if b < 0 {
a, b = -a, -b
}
return (a - b + 1) / b
}
// powmod returns (x^n) mod m.
func powmod(x, n, m int) int {
ans := 1
for n > 0 {
if n%2 == 1 {
ans = (ans * x) % m
}
x = (x * x) % m
n /= 2
}
return ans
}
// binarysearch returns the boundary value for which judge returns true:
// judge(ok) must be true and judge(ng) must be false initially.
func binarysearch(ok, ng int, judge func(int) bool) int {
for abs(ok-ng) > 1 {
mid := (ok + ng) / 2
if judge(mid) {
ok = mid
} else {
ng = mid
}
}
return ok
}
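// Example (illustrative): the smallest x with x*x >= 50, searching down
// from ok=100 toward ng=-1:
//   x := binarysearch(100, -1, func(x int) bool { return x*x >= 50 })  // x == 8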
// ch returns ok if cond is true, otherwise ng.
func ch(cond bool, ok, ng int) int {
if cond {
return ok
}
return ng
}
// mul returns a*b and an overflow flag: 0 when exact, +1 on positive
// overflow, -1 on negative overflow. (The negative-overflow branch must
// only fire when b < 0; the unguarded check misfired for every positive b.)
func mul(a, b int) (int, int) {
	if a < 0 {
		a, b = -a, -b
	}
	if a == 0 || b == 0 {
		return 0, 0
	} else if b > 0 && a > math.MaxInt64/b {
		return 0, +1
	} else if b < 0 && b < math.MinInt64/a {
		return 0, -1
	}
	return a * b, 0
}
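// Examples (illustrative): mul(1<<32, 1<<32) overflows and returns (0, +1),
// while mul(1<<31, 1<<31) is exact and returns (1<<62, 0).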
// ==================================================
// Bit operations
// ==================================================
// nthbit returns the n-th bit of a.
func nthbit(a int, n int) int { return int((a >> uint(n)) & 1) }
// popcount returns the number of set bits in a.
func popcount(a int) int {
return bits.OnesCount(uint(a))
}
func xor(a, b bool) bool { return a != b }
// ==================================================
// String operations
// ==================================================
// toLowerCase returns s converted to all lowercase.
func toLowerCase(s string) string {
return strings.ToLower(s)
}
// toUpperCase returns s converted to all uppercase.
func toUpperCase(s string) string {
return strings.ToUpper(s)
}
// isLower reports whether b is a lowercase letter.
func isLower(b byte) bool {
return 'a' <= b && b <= 'z'
}
// isUpper reports whether b is an uppercase letter.
func isUpper(b byte) bool {
return 'A' <= b && b <= 'Z'
}
// ==================================================
// Arrays
// ==================================================
func reverse(arr *[]interface{}) {
for i, j := 0, len(*arr)-1; i < j; i, j = i+1, j-1 {
(*arr)[i], (*arr)[j] = (*arr)[j], (*arr)[i]
}
}
func reverseInt(arr *[]int) {
for i, j := 0, len(*arr)-1; i < j; i, j = i+1, j-1 {
(*arr)[i], (*arr)[j] = (*arr)[j], (*arr)[i]
}
}
func uniqueInt(arr []int) []int {
hist := map[int]bool{}
j := 0
for i := 0; i < len(arr); i++ {
if hist[arr[i]] {
continue
}
a := arr[i]
arr[j] = a
hist[a] = true
j++
}
return arr[:j]
}
// ==================================================
// Structs
// ==================================================
// Point is a struct representing integer coordinates.
type Point struct {
x int
y int
}
// Pointf is a struct representing floating-point coordinates.
type Pointf struct {
x float64
y float64
}
|
fmt.Println("No")
}
}
|
emailme.js
|
import React, { Component } from 'react';
export default class
|
extends Component {
render() {
return(
<div className="email-container">
<p>Hello there!</p>
<p>First of all, thank you so much for visiting!</p>
        <p>The goal of Fight The Bias is to bring awareness to passive racial bias.
        All information on this website, from any platform, including pictures,
        videos, and articles, is factual and backed by peer-reviewed scientific
        studies.</p>
<p>If you have any information to contribute, please email <span>[email protected]</span> with sources so we can help change the world together.</p>
</div>
);
}
}
|
EmailMe
|
i2c.rs
|
#![no_std]
#![no_main]
use core::panic::PanicInfo;
use embedded_graphics::{
fonts::{Font8x16, Text},
pixelcolor::BinaryColor,
prelude::*,
style::TextStyle,
};
use embedded_hal::blocking::i2c::{Write, WriteRead};
use esp32_hal::{
clock_control::{self, sleep, CPUSource, ClockControl},
delay::Delay,
dport::Split,
dprintln,
i2c::{self, Error, I2C},
prelude::*,
target::{Peripherals, I2C0},
timer::Timer,
};
use mpu6050::Mpu6050;
use ssd1306::{prelude::*, Builder};
use xtensa_lx::mutex::SpinLockMutex;
#[entry]
fn main() -> ! {
let dp = Peripherals::take().unwrap();
let (mut dport, dport_clock_control) = dp.DPORT.split();
// setup clocks & watchdog
let mut clkcntrl = ClockControl::new(
dp.RTCCNTL,
dp.APB_CTRL,
dport_clock_control,
clock_control::XTAL_FREQUENCY_AUTO,
)
.unwrap();
// set desired clock frequencies
clkcntrl
.set_cpu_frequencies(
CPUSource::PLL,
80.MHz(),
CPUSource::PLL,
240.MHz(),
CPUSource::PLL,
80.MHz(),
)
.unwrap();
// disable RTC watchdog
let (clkcntrl_config, mut watchdog) = clkcntrl.freeze().unwrap();
watchdog.disable();
// disable MST watchdogs
let (.., mut watchdog0) = Timer::new(dp.TIMG0, clkcntrl_config);
let (.., mut watchdog1) = Timer::new(dp.TIMG1, clkcntrl_config);
watchdog0.disable();
watchdog1.disable();
let pins = dp.GPIO.split();
let i2c0 = i2c::I2C::new(
dp.I2C0,
i2c::Pins {
sda: pins.gpio4,
scl: pins.gpio15,
},
400_000,
&mut dport,
);
let i2c0 = SpinLockMutex::new(i2c0);
// Display
let mut display = {
let i2c_wrapper = I2CWrapper::new(&i2c0);
let mut display: GraphicsMode<_> = Builder::new().connect_i2c(i2c_wrapper).into();
let mut rst = pins.gpio16.into_push_pull_output();
rst.set_low().unwrap();
sleep(10.ms());
rst.set_high().unwrap();
display.init().unwrap();
display.clear();
display.flush().unwrap();
display
};
// IMU
let mut imu = {
let i2c_wrapper = I2CWrapper::new(&i2c0);
let mut imu = Mpu6050::new(i2c_wrapper);
let mut delay = Delay::new();
imu.init(&mut delay).unwrap();
imu
};
Text::new("Hello world!", Point::new(2, 28))
.into_styled(TextStyle::new(Font8x16, BinaryColor::On))
.draw(&mut display)
.unwrap();
display.flush().unwrap();
sleep(3.s());
loop {
let temp = imu.get_temp().unwrap();
let gyro = imu.get_gyro().unwrap();
let acc = imu.get_acc().unwrap();
dprintln!("temp: {}, gyro: {:?}, acc: {:?}", temp, gyro, acc);
sleep(1.s());
}
}
struct I2CWrapper<'a> {
i2c: &'a SpinLockMutex<I2C<I2C0>>,
}
impl<'a> I2CWrapper<'a> {
fn new(i2c: &'a SpinLockMutex<I2C<I2C0>>) -> Self {
Self { i2c }
}
}
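// Both the SSD1306 and the MPU6050 drivers expect to own an I2C peripheral,
// but only one bus is wired up here, so the trait impls below lock the shared
// SpinLockMutex just long enough for a single bus transaction.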
impl<'a> Write for I2CWrapper<'a> {
type Error = Error;
fn write(&mut self, addr: u8, bytes: &[u8]) -> Result<(), Self::Error> {
self.i2c.lock(|x| x.write(addr, bytes))
}
}
impl<'a> WriteRead for I2CWrapper<'a> {
type Error = Error;
fn write_read(
&mut self,
address: u8,
|
}
}
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
dprintln!("----- PANIC -----");
dprintln!("{:?}", info);
loop {}
}
|
bytes: &[u8],
buffer: &mut [u8],
) -> Result<(), Self::Error> {
self.i2c.lock(|x| x.write_read(address, bytes, buffer))
|
bitcoin_zh_TW.ts
|
<?xml version="1.0" ?><!DOCTYPE TS><TS language="zh_TW" version="2.1">
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About EMC</source>
<translation>關于黑幣</translation>
</message>
<message>
<location line="+39"/>
<source><b>EMC</b> version</source>
<translation><b>黑幣客戶端</b> 版本</translation>
</message>
<message>
<location line="+41"/>
<source>Copyright © 2009-2014 The Bitcoin developers
Copyright © 2012-2014 The NovaCoin developers
Copyright © 2014 The EMC developers</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>
This is experimental software.
Distributed under the MIT software license, see the accompanying file COPYING or <a href="http://www.opensource.org/licenses/mit-license.php">http://www.opensource.org/licenses/mit-license.php</a>.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (<a href="https://www.openssl.org/">https://www.openssl.org/</a>) and cryptographic software written by Eric Young (<a href="mailto:[email protected]">[email protected]</a>) and UPnP software written by Thomas Bernard.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation>地址簿</translation>
</message>
<message>
<location line="+22"/>
<source>Double-click to edit address or label</source>
<translation>按兩下來編輯位址或標記</translation>
</message>
<message>
<location line="+24"/>
<source>Create a new address</source>
<translation>製造新的位址</translation>
</message>
<message>
<location line="+10"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>複製目前選擇的位址到系統剪貼簿</translation>
</message>
<message>
<location line="-7"/>
<source>&New Address</source>
<translation>新建地址(&N)</translation>
</message>
<message>
<location line="-43"/>
<source>These are your EMC addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation>這是您用來接收支付的黑幣地址列表。爲不同的支付方建立不同的地址以便于了解支付來源。</translation>
</message>
<message>
<location line="+53"/>
<source>&Copy Address</source>
<translation>複製地址(&C)</translation>
</message>
<message>
<location line="+7"/>
<source>Show &QR Code</source>
<translation>顯示二維碼(&Q)</translation>
</message>
<message>
<location line="+7"/>
<source>Sign a message to prove you own a EMC address</source>
<translation>對信息進行簽名以證明您對該黑幣地址的所有權</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation>簽署訊息(&M)</translation>
</message>
<message>
<location line="+17"/>
<source>Delete the currently selected address from the list</source>
<translation>把目前選擇的位址從列表中刪掉</translation>
</message>
<message>
<location line="-10"/>
<source>Verify a message to ensure it was signed with a specified EMC address</source>
<translation>驗證信息以保證其經過指定黑幣地址的簽名</translation>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation>驗證訊息(&V)</translation>
</message>
<message>
<location line="+10"/>
<source>&Delete</source>
<translation>刪掉(&D)</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+65"/>
<source>Copy &Label</source>
<translation>複製標記</translation>
</message>
<message>
<location line="+2"/>
<source>&Edit</source>
<translation>編輯</translation>
</message>
<message>
<location line="+250"/>
<source>Export Address Book Data</source>
<translation>導出地址簿數據</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>逗號分隔資料檔(*.csv)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation>導出時發生錯誤</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>無法寫入文件 %1 。</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+145"/>
<source>Label</source>
<translation>標記</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>位址</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(無標記)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation>密碼對話視窗</translation>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>請輸入密碼</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>新密碼</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>重複新密碼</translation>
</message>
<message>
<location line="+33"/>
<source>Serves to disable the trivial sendmoney when OS account compromised. Provides no real security.</source>
<translation>在系統允許的情況下用于防止sendmoney欺詐,並未提供真正的安全防護措施。</translation>
</message>
<message>
<location line="+3"/>
<source>For staking only</source>
<translation>仅用于权益增值</translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+38"/>
<source>Encrypt wallet</source>
<translation>加密錢包</translation>
</message>
<message>
<location line="+7"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>這個動作需要你的錢包密碼來解鎖錢包。</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>解鎖錢包</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>這個動作需要你的錢包密碼來把錢包解密。</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>解密錢包</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>改變密碼</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>請輸入錢包的舊密碼及新密碼。</translation>
</message>
<message>
<location line="+45"/>
<source>Confirm wallet encryption</source>
<translation>確認錢包加密</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR COINS</b>!</source>
<translation>警告:如果您丟失了加密該錢包的密碼,其中所有的黑幣將會丟失!</translation>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>你確定要把錢包加密嗎?</translation>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation>重要: 請改用新產生有加密的錢包檔,來取代舊錢包檔的備份。為了安全性的理由,當你開始使用新的有加密的錢包後,舊錢包檔的備份就不能再使用了。</translation>
</message>
<message>
<location line="+103"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation>警告: 大寫字母鎖定作用中!</translation>
</message>
<message>
<location line="-133"/>
<location line="+60"/>
<source>Wallet encrypted</source>
<translation>錢包已加密</translation>
</message>
<message>
<location line="-140"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>ten or more random characters</b>, or <b>eight or more words</b>.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+82"/>
<source>EMC will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your coins from being stolen by malware infecting your computer.</source>
<translation>黑幣客戶端即將關閉以完成加密過程。請記住,加密錢包並不能完全防止您的電子貨幣被入侵您計算機的木馬軟件盜竊。</translation>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+44"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>錢包加密失敗</translation>
</message>
<message>
<location line="-56"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>因為內部錯誤導致錢包加密失敗。你的錢包還是沒加密。</translation>
</message>
<message>
<location line="+7"/>
<location line="+50"/>
<source>The supplied passphrases do not match.</source>
<translation>提供的密碼不一樣。</translation>
</message>
<message>
<location line="-38"/>
<source>Wallet unlock failed</source>
<translation>錢包解鎖失敗</translation>
</message>
<message>
<location line="+1"/>
<location line="+12"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>輸入要用來解密錢包的密碼不對。</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>錢包解密失敗</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation>錢包密碼改成功了。</translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+297"/>
<source>Sign &message...</source>
<translation>簽署訊息...</translation>
</message>
<message>
<location line="-64"/>
<source>Show general overview of wallet</source>
<translation>顯示錢包一般總覽</translation>
</message>
<message>
<location line="+17"/>
<source>&Transactions</source>
<translation>交易</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>瀏覽交易紀錄</translation>
</message>
<message>
<location line="+5"/>
<source>&Address Book</source>
<translation>地址簿(&A)</translation>
</message>
<message>
<location line="+1"/>
<source>Edit the list of stored addresses and labels</source>
<translation>管理已儲存的地址和標簽</translation>
</message>
<message>
<location line="-18"/>
<source>Show the list of addresses for receiving payments</source>
<translation>顯示用于接收支付的地址列表 </translation>
</message>
<message>
<location line="+34"/>
<source>E&xit</source>
<translation>結束</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>結束應用程式</translation>
</message>
<message>
<location line="+4"/>
<source>Show information about EMC</source>
<translation>關于黑幣 </translation>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation>關於 &Qt</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation>顯示 Qt 相關資訊</translation>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>選項...</translation>
</message>
<message>
<location line="+4"/>
<source>&Encrypt Wallet...</source>
<translation>加密錢包...</translation>
</message>
<message>
<location line="+2"/>
<source>&Backup Wallet...</source>
<translation>備份錢包...</translation>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>改變密碼...</translation>
</message>
<message>
<location line="+9"/>
<source>&Export...</source>
<translation>導出(&E)</translation>
</message>
<message>
<location line="-55"/>
<source>Send coins to a EMC address</source>
<translation>向指定的地址發送黑幣</translation>
</message>
<message>
<location line="+39"/>
<source>Modify configuration options for EMC</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Export the data in the current tab to a file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-13"/>
<source>Encrypt or decrypt wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Backup wallet to another location</source>
<translation>把錢包備份到其它地方</translation>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>改變錢包加密用的密碼</translation>
</message>
<message>
<location line="+10"/>
<source>&Debug window</source>
<translation>除錯視窗</translation>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation>開啓除錯和診斷主控台</translation>
</message>
<message>
<location line="-5"/>
<source>&Verify message...</source>
<translation>驗證訊息...</translation>
</message>
<message>
<location line="-214"/>
<location line="+551"/>
<source>EMC</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-551"/>
<source>Wallet</source>
<translation>錢包</translation>
</message>
<message>
<location line="+193"/>
<source>&About EMC</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation>顯示或隱藏</translation>
</message>
<message>
<location line="+8"/>
<source>Unlock wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>&Lock Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Lock wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+28"/>
<source>&File</source>
<translation>檔案</translation>
</message>
<message>
<location line="+8"/>
<source>&Settings</source>
<translation>設定</translation>
</message>
<message>
<location line="+8"/>
<source>&Help</source>
<translation>說明</translation>
</message>
<message>
<location line="+17"/>
<source>Tabs toolbar</source>
<translation>分頁工具列</translation>
</message>
<message>
<location line="+46"/>
<location line="+9"/>
<source>[testnet]</source>
<translation>[測試網絡]</translation>
</message>
<message>
<location line="+0"/>
<location line="+58"/>
<source>EMC client</source>
<translation>黑幣客戶端</translation>
</message>
<message numerus="yes">
<location line="+70"/>
<source>%n active connection(s) to EMC network</source>
<translation><numerusform>與黑幣網絡建立了 %n 個連接</numerusform></translation>
</message>
<message>
<location line="+488"/>
<source>Staking.<br>Your weight is %1<br>Network weight is %2<br>Expected time to earn reward is %3</source>
<translation>權益增值中 <br>您的權重爲 %1 <br>網絡總權重爲 %2<br>預計將在 %3 之後獲得收益</translation>
</message>
<message>
<location line="+6"/>
<source>Not staking because wallet is locked</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is offline</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is syncing</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Not staking because you don't have mature coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-808"/>
<source>&Dashboard</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>&Receive</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>&Send</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+49"/>
<source>&Unlock Wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+273"/>
<source>Up to date</source>
<translation>最新狀態</translation>
</message>
<message>
<location line="+43"/>
<source>Catching up...</source>
<translation>正在趕進度...</translation>
</message>
<message>
<location line="+113"/>
<source>Confirm transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Sent transaction</source>
<translation>付款交易</translation>
</message>
<message>
<location line="+1"/>
<source>Incoming transaction</source>
<translation>收款交易</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>日期: %1
金額: %2
種類: %3
位址: %4
</translation>
</message>
<message>
<location line="+100"/>
<location line="+15"/>
<source>URI handling</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-15"/>
<location line="+15"/>
<source>URI can not be parsed! This can be caused by an invalid EMC address or malformed URI parameters.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Wallet is <b>not encrypted</b></source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>錢包<b>已加密</b>並且<b>解鎖中</b></translation>
</message>
<message>
<location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>錢包<b>已加密</b>並且<b>上鎖中</b></translation>
</message>
<message>
<location line="+24"/>
<source>Backup Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+91"/>
<source>%n second(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n minute(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="-429"/>
<location line="+433"/>
<source>%n hour(s)</source>
<translation><numerusform>%n 個小時</numerusform></translation>
</message>
<message>
<location line="-456"/>
<source>Processed %1 blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+27"/>
<location line="+433"/>
<source>%n day(s)</source>
<translation><numerusform>%n 天</numerusform></translation>
</message>
<message numerus="yes">
<location line="-429"/>
<location line="+6"/>
<source>%n week(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+0"/>
<source>%1 and %2</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+0"/>
<source>%n year(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+5"/>
<source>%1 behind</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Last received block was generated %1 ago.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Transactions after this will not yet be visible.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Error</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+69"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+324"/>
<source>Not staking</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoin.cpp" line="+104"/>
<source>A fatal error occurred. EMC can no longer continue safely and will quit.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+110"/>
<source>Network Alert</source>
<translation>網路警報</translation>
</message>
</context>
<context>
<name>CoinControlDialog</name>
<message>
<location filename="../forms/coincontroldialog.ui" line="+14"/>
<source>Coin Control</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Quantity:</source>
<translation>數目:</translation>
</message>
<message>
<location line="+32"/>
<source>Bytes:</source>
<translation>位元組數:</translation>
</message>
<message>
<location line="+48"/>
<source>Amount:</source>
<translation>金額:</translation>
</message>
<message>
<location line="+32"/>
<source>Priority:</source>
<translation>優先度:</translation>
</message>
<message>
<location line="+48"/>
<source>Fee:</source>
<translation>手續費:</translation>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation>低輸出:</translation>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="+552"/>
<source>no</source>
<translation>否</translation>
</message>
<message>
<location filename="../forms/coincontroldialog.ui" line="+51"/>
<source>After Fee:</source>
<translation>計費後金額:</translation>
</message>
<message>
<location line="+35"/>
<source>Change:</source>
<translation>找零金額:</translation>
</message>
<message>
<location line="+69"/>
<source>(un)select all</source>
<translation>全選或全不選</translation>
</message>
<message>
<location line="+13"/>
<source>Tree mode</source>
<translation>樹狀模式</translation>
</message>
<message>
<location line="+16"/>
<source>List mode</source>
<translation>列表模式</translation>
</message>
<message>
<location line="+45"/>
<source>Amount</source>
<translation>金額</translation>
</message>
<message>
<location line="+5"/>
<source>Label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Address</source>
<translation>位址</translation>
</message>
<message>
<location line="+5"/>
<source>Date</source>
<translation>日期</translation>
</message>
<message>
<location line="+5"/>
<source>Confirmations</source>
<translation>確認次數</translation>
</message>
<message>
<location line="+3"/>
<source>Confirmed</source>
<translation>已確定</translation>
</message>
<message>
<location line="+5"/>
<source>Priority</source>
<translation>優先度</translation>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="-515"/>
<source>Copy address</source>
<translation>複製位址</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>複製標記</translation>
</message>
<message>
<location line="+1"/>
<location line="+26"/>
<source>Copy amount</source>
<translation>複製金額</translation>
</message>
<message>
<location line="-25"/>
<source>Copy transaction ID</source>
<translation>複製交易識別碼</translation>
</message>
<message>
<location line="+24"/>
<source>Copy quantity</source>
<translation>複製數目</translation>
</message>
<message>
<location line="+2"/>
<source>Copy fee</source>
<translation>複製手續費</translation>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation>複製計費後金額</translation>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation>複製位元組數</translation>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation>複製優先度</translation>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation>複製低輸出</translation>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation>複製找零金額</translation>
</message>
<message>
<location line="+317"/>
<source>highest</source>
<translation>最高</translation>
</message>
<message>
<location line="+1"/>
<source>high</source>
<translation>高</translation>
</message>
<message>
<location line="+1"/>
<source>medium-high</source>
<translation>中高</translation>
</message>
<message>
<location line="+1"/>
<source>medium</source>
<translation>中等</translation>
</message>
<message>
<location line="+4"/>
<source>low-medium</source>
<translation>中低</translation>
</message>
<message>
<location line="+1"/>
<source>low</source>
<translation>低</translation>
</message>
<message>
<location line="+1"/>
<source>lowest</source>
<translation>最低</translation>
</message>
<message>
<location line="+155"/>
<source>DUST</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>yes</source>
<translation>是</translation>
</message>
<message>
<location line="+10"/>
<source>This label turns red, if the transaction size is bigger than 10000 bytes.
This means a fee of at least %1 per kb is required.
Can vary +/- 1 Byte per input.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transactions with higher priority get more likely into a block.
This label turns red, if the priority is smaller than "medium".
This means a fee of at least %1 per kb is required.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if any recipient receives an amount smaller than %1.
This means a fee of at least %2 is required.
Amounts below 0.546 times the minimum relay fee are shown as DUST.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if the change is smaller than %1.
This means a fee of at least %2 is required.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+36"/>
<location line="+66"/>
<source>(no label)</source>
<translation>(無標記)</translation>
</message>
<message>
<location line="-9"/>
<source>change from %1 (%2)</source>
<translation>找零前是 %1 (%2)</translation>
</message>
<message>
<location line="+1"/>
<source>(change)</source>
<translation>(找零)</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>編輯位址</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>標記</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>位址</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+21"/>
<source>New receiving address</source>
<translation>造新的收款位址</translation>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation>造新的付款位址</translation>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation>編輯收款位址</translation>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation>編輯付款位址</translation>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>輸入的位址 %1 在位址簿中已經有了。</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid EMC address.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>沒辦法把錢包解鎖。</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>產生新的密鑰失敗了。</translation>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+426"/>
<location line="+12"/>
<source>EMC-Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>選項</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation>主要</translation>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB. Fee 0.01 recommended.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation>付交易手續費</translation>
</message>
<message>
<location line="+31"/>
<source>Reserved amount does not participate in staking and is therefore spendable at any time.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Reserve</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Automatically start EMC after logging in to the system.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Start EMC on system login</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>&Network</source>
<translation>網路</translation>
</message>
<message>
<location line="+6"/>
<source>Automatically open the EMC client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation>用 &UPnP 設定通訊埠對應</translation>
</message>
<message>
<location line="+7"/>
<source>Connect to the EMC network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation>代理位址:</translation>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation>埠號:</translation>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation>代理伺服器的通訊埠(像是 9050)</translation>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation>SOCKS 版本:</translation>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation>代理伺服器的 SOCKS 協定版本(像是 5)</translation>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation>視窗</translation>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation>視窗縮到最小後只在通知區域顯示圖示。</translation>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation>縮到最小到通知區域而不是工作列</translation>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation>當視窗關閉時,把應用程式縮到最小,而不是結束。當勾選這個選項時,只能夠用選單中的結束來關掉應用程式。</translation>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation>關閉時縮到最小</translation>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation>顯示</translation>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation>使用界面語言:</translation>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting EMC.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation>金額顯示單位:</translation>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation>選擇操作界面和付款時,預設顯示金額的細分單位。</translation>
</message>
<message>
<location line="+9"/>
<source>Whether to show coin control features or not.</source>
<translation>是否要顯示錢幣控制功能。</translation>
</message>
<message>
<location line="+3"/>
<source>Display coin &control features (experts only!)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Whether to select the coin outputs randomly or with minimal coin age.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Minimize weight consumption (experimental)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Use black visual theme (requires restart)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>好</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>取消</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+53"/>
<source>default</source>
<translation>預設值</translation>
</message>
<message>
<location line="+149"/>
<location line="+9"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting EMC.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation>提供的代理伺服器位址無效。</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>表單</translation>
</message>
<message>
<location line="+46"/>
<location line="+247"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the EMC network after a connection is established, but this process has not completed yet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-173"/>
<source>Stake:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>Unconfirmed:</source>
<translation>未確定金額:</translation>
</message>
<message>
<location line="-113"/>
<source>Wallet</source>
<translation>錢包</translation>
</message>
<message>
<location line="+49"/>
<source>Spendable:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Your current spendable balance</source>
<translation>目前可用餘額</translation>
</message>
<message>
<location line="+80"/>
<source>Immature:</source>
<translation>未成熟金額:</translation>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation>還沒成熟的開採金額</translation>
</message>
<message>
<location line="+23"/>
<source>Total:</source>
<translation>總金額:</translation>
</message>
<message>
<location line="+16"/>
<source>Your current total balance</source>
<translation>目前全部餘額</translation>
</message>
<message>
<location line="+50"/>
<source><b>Recent transactions</b></source>
<translation><b>最近交易</b></translation>
</message>
<message>
<location line="-118"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-32"/>
<source>Total of coins that was staked, and do not yet count toward the current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../overviewpage.cpp" line="+116"/>
<location line="+1"/>
<source>out of sync</source>
<translation>還沒同步</translation>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<location filename="../paymentserver.cpp" line="+107"/>
<source>Cannot start transfer: click-to-pay handler</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation>客戶端軟體名稱</translation>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<source>N/A</source>
<translation>未知</translation>
</message>
<message>
<location line="-194"/>
<source>Client version</source>
<translation>客戶端軟體版本</translation>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation>資訊</translation>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation>使用的 OpenSSL 版本</translation>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation>啓動時間</translation>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation>網路</translation>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation>連線數</translation>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation>區塊鏈</translation>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation>目前區塊數</translation>
</message>
<message>
<location line="+197"/>
<source>&Network Traffic</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+52"/>
<source>&Clear</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Totals</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+64"/>
<source>In:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+80"/>
<source>Out:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-383"/>
<source>Last block time</source>
<translation>最近區塊時間</translation>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation>開啓</translation>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Show the EMC-Qt help message to get a list with possible EMC command-line options.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation>主控台</translation>
</message>
<message>
<location line="-237"/>
<source>Build date</source>
<translation>建置日期</translation>
</message>
<message>
<location line="-104"/>
<source>EMC - Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>EMC Core</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+256"/>
<source>Debug log file</source>
<translation>除錯紀錄檔</translation>
</message>
<message>
<location line="+7"/>
<source>Open the EMC debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation>清空主控台</translation>
</message>
<message>
<location filename="../rpcconsole.cpp" line="+325"/>
<source>Welcome to the EMC RPC console.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation>請用上下游標鍵來瀏覽先前指令的紀錄,並用 <b>Ctrl-L</b> 來清理畫面。</translation>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation>請打 <b>help</b> 來看可用指令的簡介。</translation>
</message>
<message>
<location line="+127"/>
<source>%1 B</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1 KB</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1 MB</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1 GB</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>%1 m</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>%1 h</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1 h %2 m</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+181"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>付款</translation>
</message>
<message>
<location line="+76"/>
<source>Coin Control Features</source>
<translation>錢幣控制功能</translation>
</message>
<message>
<location line="+20"/>
<source>Inputs...</source>
<translation>輸入...</translation>
</message>
<message>
<location line="+7"/>
<source>automatically selected</source>
<translation>自動選擇</translation>
</message>
<message>
<location line="+19"/>
<source>Insufficient funds!</source>
<translation>累計金額不足!</translation>
</message>
<message>
<location line="+77"/>
<source>Quantity:</source>
<translation>數目:</translation>
</message>
<message>
<location line="+22"/>
<location line="+35"/>
<source>0</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-19"/>
<source>Bytes:</source>
<translation>位元組數:</translation>
</message>
<message>
<location line="+51"/>
<source>Amount:</source>
<translation>金額:</translation>
</message>
<message>
<location line="+22"/>
<location line="+86"/>
<location line="+86"/>
<location line="+32"/>
<source>0.00 EMC</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-191"/>
<source>Priority:</source>
<translation>優先度:</translation>
</message>
<message>
<location line="+19"/>
<source>medium</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>Fee:</source>
<translation>手續費:</translation>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation>低輸出:</translation>
</message>
<message>
<location line="+19"/>
<source>no</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>After Fee:</source>
<translation>計費後金額:</translation>
</message>
<message>
<location line="+35"/>
<source>Change</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+50"/>
<source>custom change address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+106"/>
<source>Send to multiple recipients at once</source>
<translation>一次付給多個收款人</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation>增加收款人</translation>
</message>
<message>
<location line="+16"/>
<source>Remove all transaction fields</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation>全部清掉</translation>
</message>
<message>
<location line="+24"/>
<source>Balance:</source>
<translation>餘額:</translation>
</message>
<message>
<location line="+16"/>
<source>123.456 EMC</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation>確認付款動作</translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>付款</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-173"/>
<source>Enter a EMC address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Copy quantity</source>
<translation>複製數目</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>複製金額</translation>
</message>
<message>
<location line="+1"/>
<source>Copy fee</source>
<translation>複製手續費</translation>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation>複製計費後金額</translation>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation>複製位元組數</translation>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation>複製優先度</translation>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation>複製低輸出</translation>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation>複製找零金額</translation>
</message>
<message>
<location line="+86"/>
<source><b>%1</b> to %2 (%3)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation>確認付款金額</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>The recipient address is not valid, please recheck.</source>
<translation>收款位址無效,請再檢查看看。</translation>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>付款金額必須大於零。</translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation>金額超過餘額了。</translation>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation>包含 %1 的交易手續費後,總金額超過你的餘額了。</translation>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation>發現有重複的位址。每個付款動作中,只能付給個別的位址一次。</translation>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+247"/>
<source>WARNING: Invalid EMC address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>(no label)</source>
<translation>(無標記)</translation>
</message>
<message>
<location line="+4"/>
<source>WARNING: unknown change address</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>金額:</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>付給:</translation>
</message>
<message>
<location line="+34"/>
<source>The address to send the payment to (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+60"/>
<location filename="../sendcoinsentry.cpp" line="+26"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>請輸入這個位址的標記來把它加進位址簿中</translation>
</message>
<message>
<location line="-78"/>
<source>&Label:</source>
<translation>標記:</translation>
</message>
<message>
<location line="+28"/>
<source>Choose address from address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>貼上剪貼簿裡的位址</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a EMC address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation>簽章 - 簽署或驗證訊息</translation>
</message>
<message>
<location line="+13"/>
<location line="+124"/>
<source>&Sign Message</source>
<translation>簽署訊息</translation>
</message>
<message>
<location line="-118"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation>你可以用自己的位址簽署訊息,來證明你對位址的所有權。但是請小心,不要簽署語意含糊不清的內容,因為釣魚式詐騙可能會用騙你簽署的手法來冒充是你。只有在語句中的細節你都同意時才簽署。</translation>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<location line="+203"/>
<source>Choose an address from the address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-193"/>
<location line="+203"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-193"/>
<source>Paste address from clipboard</source>
<translation>貼上剪貼簿裡的位址</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation>請在這裡輸入你想簽署的訊息</translation>
</message>
<message>
<location line="+24"/>
<source>Copy the current signature to the system clipboard</source>
<translation>複製目前的簽章到系統剪貼簿</translation>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this EMC address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Reset all sign message fields</source>
<translation>重置所有訊息簽署欄位</translation>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation>全部清掉</translation>
</message>
<message>
<location line="-87"/>
<location line="+70"/>
<source>&Verify Message</source>
<translation>驗證訊息</translation>
</message>
<message>
<location line="-64"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation>請在下面輸入簽署的位址,訊息(請確定完整複製了所包含的換行,空格,跳位符號等等),以及簽章,來驗證這個訊息。請小心,除了訊息內容以外,不要對簽章本身過度解讀,以避免被用「中間人攻擊法」詐騙。</translation>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified EMC address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Reset all verify message fields</source>
<translation>重置所有訊息驗證欄位</translation>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a EMC address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation>請按一下「簽署訊息」來產生簽章</translation>
</message>
<message>
<location line="+3"/>
<source>Enter EMC signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation>輸入的位址無效。</translation>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation>請檢查位址是否正確後再試一次。</translation>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation>輸入的位址沒有對應到你的任何密鑰。</translation>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation>錢包解鎖已取消。</translation>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation>沒有對應輸入位址的密鑰。</translation>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation>訊息簽署失敗。</translation>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation>訊息簽署好了。</translation>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation>沒辦法把這個簽章解碼。</translation>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation>請檢查簽章是否正確後再試一次。</translation>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation>這個簽章跟訊息的數位摘要不符。</translation>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation>訊息驗證失敗。</translation>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation>訊息驗證沒錯。</translation>
</message>
</context>
<context>
<name>TrafficGraphWidget</name>
<message>
<location filename="../trafficgraphwidget.cpp" line="+75"/>
<source>KB/s</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+25"/>
<source>Open until %1</source>
<translation>要到 %1 才確定</translation>
</message>
<message>
<location line="+6"/>
<source>conflicted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1/offline</source>
<translation>%1/離線中</translation>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1/未確定</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 次確認</translation>
</message>
<message>
<location line="+17"/>
<source>Status</source>
<translation>狀態</translation>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation><numerusform>,已公告給 %n 個節點</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>日期</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation>來源</translation>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation>生產出來</translation>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation>來源</translation>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation>目的</translation>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation>自己的位址</translation>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation>標記</translation>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation>入帳</translation>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation><numerusform>再等 %n 個區塊生出來後成熟</numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation>不被接受</translation>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation>出帳</translation>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation>交易手續費</translation>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation>淨額</translation>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation>訊息</translation>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation>附註</translation>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation>交易識別碼</translation>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 510 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation>除錯資訊</translation>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation>交易</translation>
</message>
<message>
<location line="+5"/>
<source>Inputs</source>
<translation>輸入</translation>
</message>
<message>
<location line="+21"/>
<source>Amount</source>
<translation>金額</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation>是</translation>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation>否</translation>
</message>
<message>
<location line="-209"/>
<source>, has not been successfully broadcast yet</source>
<translation>,還沒成功公告出去</translation>
</message>
<message numerus="yes">
<location line="-36"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform></translation>
</message>
<message>
<location line="+71"/>
<source>unknown</source>
<translation>未知</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>交易明細</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>這個版面顯示這次交易的詳細說明</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+231"/>
<source>Date</source>
<translation>日期</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>種類</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>位址</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>金額</translation>
</message>
<message>
<location line="+52"/>
<source>Open until %1</source>
<translation>要到 %1 才確定</translation>
</message>
<message>
<location line="+12"/>
<source>Confirmed (%1 confirmations)</source>
<translation>已確定(%1 次確認)</translation>
</message>
<message numerus="yes">
<location line="-15"/>
<source>Open for %n more block(s)</source>
<translation><numerusform>再等 %n 個區塊生出來後才確定</numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Offline</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Confirming (%1 of %2 recommended confirmations)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Conflicted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Immature (%1 confirmations, will be available after %2)</source>
<translation>未成熟(%1 次確認,會在 %2 次確認後可用)</translation>
</message>
<message>
<location line="+3"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>沒有其他節點收到這個區塊,也許它不會被接受!</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>生產出來但是不被接受</translation>
</message>
<message>
<location line="+42"/>
<source>Received with</source>
<translation>收款在</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>收款自</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>付款給</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>付給自己</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>開採所得</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>(不適用)</translation>
</message>
<message>
<location line="+194"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>交易狀態。把游標停在欄位上會顯示確認次數。</translation>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation>收到交易的日期和時間。</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>交易的種類。</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation>交易的目的地位址。</translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>要減掉或加進餘額的金額。</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+54"/>
<location line="+17"/>
<source>All</source>
<translation>全部</translation>
</message>
<message>
<location line="-16"/>
<source>Today</source>
<translation>今天</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>這星期</translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>這個月</translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>上個月</translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>今年</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation>指定範圍...</translation>
</message>
<message>
<location line="+12"/>
<source>Received with</source>
<translation>收款</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>付款</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>給自己</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>開採所得</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>其它</translation>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation>請輸入要搜尋的位址或標記</translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>最小金額</translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>複製位址</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>複製標記</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>複製金額</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation>複製交易識別碼</translation>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>編輯標記</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation>顯示交易明細</translation>
</message>
<message>
<location line="+138"/>
<source>Export Transaction Data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>逗點分隔資料檔(*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>已確定</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>日期</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>種類</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>標記</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>位址</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>金額</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation>識別碼</translation>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation>範圍:</translation>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation>到</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+208"/>
<source>Sending...</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+173"/>
<source>EMC version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Usage:</source>
<translation>用法:</translation>
</message>
<message>
<location line="+1"/>
<source>Send command to -server or emcd</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>List commands</source>
<translation>列出指令</translation>
</message>
<message>
<location line="+1"/>
<source>Get help for a command</source>
<translation>取得指令說明</translation>
</message>
<message>
<location line="-147"/>
<source>Options:</source>
<translation>選項:</translation>
</message>
<message>
<location line="+2"/>
<source>Specify configuration file (default: emc.conf)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Specify pid file (default: emcd.pid)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Specify wallet file (within data directory)</source>
<translation>指定錢包檔(會在資料目錄中)</translation>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation>指定資料目錄</translation>
</message>
<message>
<location line="-25"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=emcrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "EMC Alert" [email protected]
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation>設定資料庫快取大小成多少百萬位元組(MB;預設值: 25)</translation>
</message>
<message>
<location line="+1"/>
<source>Set database disk log size in megabytes (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Listen for connections on <port> (default: 15714 or testnet: 25714)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation>維持連線節點數的上限為 <n> 個(預設值: 125)</translation>
</message>
<message>
<location line="+3"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation>連線到某個節點來取得其它節點的位址,然後斷線</translation>
</message>
<message>
<location line="+1"/>
<source>Specify your own public address</source>
<translation>指定自己的公開位址</translation>
</message>
<message>
<location line="+4"/>
<source>Bind to given address. Use [host]:port notation for IPv6</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Always query for peer addresses via DNS lookup (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation>把異常節點斷線的臨界值(預設值: 100)</translation>
</message>
<message>
<location line="+1"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation>拒絕跟異常節點連線的秒數(預設值: 86400)</translation>
</message>
<message>
<location line="-37"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation>設定在 IPv4 網路上以通訊埠 %u 聽取 RPC 連線時發生錯誤: %s</translation>
</message>
<message>
<location line="+65"/>
<source>Listen for JSON-RPC connections on <port> (default: 15715 or testnet: 25715)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-17"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>接受指令列和 JSON-RPC 指令</translation>
</message>
<message>
<location line="+1"/>
<source>Run in the background as a daemon and accept commands</source>
<translation>用護靈模式在背後執行並接受指令</translation>
</message>
<message>
<location line="+1"/>
<source>Use the test network</source>
<translation>使用測試網路</translation>
</message>
<message>
<location line="-24"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation>是否接受外來連線(預設值: 當沒有 -proxy 或 -connect 時為 1)</translation>
</message>
<message>
<location line="-29"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation>設定在 IPv6 網路上以通訊埠 %u 聽候 RPC 連線失敗,退而改用 IPv4 網路: %s</translation>
</message>
<message>
<location line="+96"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>警告: -paytxfee 設定了很高的金額!這可是你交易付款所要付的手續費。</translation>
</message>
<message>
<location line="-103"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong EMC will not work properly.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+132"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation>警告: 讀取錢包檔 wallet.dat 時發生錯誤!所有的密鑰都正確讀取了,但是交易資料或位址簿資料可能會缺少或不正確。</translation>
</message>
<message>
<location line="-18"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation>警告: 錢包檔 wallet.dat 壞掉,但資料被拯救回來了!原來的 wallet.dat 會改儲存在 %s, 檔名是 wallet.{timestamp}.bak. 如果餘額或交易資料有誤,你應該要用備份資料復原回來。</translation>
</message>
<message>
<location line="-31"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation>嘗試從壞掉的錢包檔 wallet.dat 復原密鑰</translation>
</message>
<message>
<location line="+5"/>
<source>Block creation options:</source>
<translation>區塊製造選項:</translation>
</message>
<message>
<location line="-69"/>
<source>Connect only to the specified node(s)</source>
<translation>只連線到指定節點(可多個)</translation>
</message>
<message>
<location line="+4"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation>找出自己的網際網路位址(預設值: 當有聽候連線且沒有 -externalip 時為 1)</translation>
</message>
<message>
<location line="+101"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation>在任意的通訊埠聽候失敗。如果你希望這樣的話,可以設定 -listen=0.</translation>
</message>
<message>
<location line="-91"/>
<source>Sync checkpoints policy (default: strict)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+89"/>
<source>Invalid -tor address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Invalid amount for -reservebalance=<amount></source>
<translation type="unfinished"/>
</message>
<message>
<location line="-88"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation>每個連線的接收緩衝區大小上限為 <n>*1000 個位元組(預設值: 5000)</translation>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation>每個連線的傳送緩衝區大小上限為 <n>*1000 位元組(預設值: 1000)</translation>
</message>
<message>
<location line="-17"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation>只和 <net> 網路上的節點連線(IPv4, IPv6, 或 Tor)</translation>
</message>
<message>
<location line="+31"/>
<source>Prepend debug output with timestamp</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+41"/>
<source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source>
<translation>SSL 選項: (SSL 設定程序請見 Bitcoin Wiki)</translation>
</message>
<message>
<location line="-81"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+42"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>在終端機顯示追蹤或除錯資訊,而不是寫到檔案 debug.log 中</translation>
</message>
<message>
<location line="+5"/>
<source>Send trace/debug info to debugger</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+30"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation>設定區塊大小下限成多少位元組(預設值: 0)</translation>
</message>
<message>
<location line="-35"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation>客戶端軟體啓動時把 debug.log 檔縮小(預設值: 當沒有 -debug 時為 1)</translation>
</message>
<message>
<location line="-43"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation>指定連線在幾毫秒後逾時(預設值: 5000)</translation>
</message>
<message>
<location line="+116"/>
<source>Unable to sign checkpoint, wrong checkpointkey?
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-86"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation>是否要使用通用即插即用(UPnP)協定,來設定聽候連線的通訊埠的對應(預設值: 0)</translation>
</message>
<message>
<location line="-1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation>是否要使用通用即插即用(UPnP)協定,來設定聽候連線的通訊埠的對應(預設值: 當有聽候連線時為 1)</translation>
</message>
<message>
<location line="-26"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+47"/>
<source>Username for JSON-RPC connections</source>
<translation>JSON-RPC 連線使用者名稱</translation>
</message>
<message>
<location line="+51"/>
<source>Verifying database integrity...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+44"/>
<source>Error: Wallet locked, unable to create transaction!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Error: Transaction creation failed!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>WARNING: syncronized checkpoint violation detected, but skipped!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation>警告: 這個版本已經被淘汰了,必須要升級!</translation>
</message>
<message>
<location line="-54"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation>錢包檔 wallet.dat 壞掉了,拯救失敗</translation>
</message>
<message>
<location line="-56"/>
<source>Password for JSON-RPC connections</source>
<translation>JSON-RPC 連線密碼</translation>
</message>
<message>
<location line="-32"/>
<source>Sync time with other nodes. Disable if time on your system is precise e.g. syncing with NTP (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>When creating transactions, ignore inputs with value less than this (default: 0.01)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Output debugging information (default: 0, supplying <category> is optional)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>If <category> is not supplied, output all debugging information.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source><category> can be:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Enter regression test mode, which uses a special chain in which blocks can be solved instantly. This is intended for regression testing tools and app development.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation>允許指定的來源 IP 位址進行 JSON-RPC 連線</translation>
</message>
<message>
<location line="+1"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation>傳送指令給在 <ip> 的節點(預設值: 127.0.0.1)</translation>
</message>
<message>
<location line="+1"/>
<source>Wait for RPC server to start</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set the number of threads to service RPC calls (default: 4)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>當最新區塊改變時要執行的指令(指令中的 %s 會被取代成區塊雜湊值)</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation>當錢包有交易改變時要執行的指令(指令中的 %s 會被取代成交易識別碼)</translation>
</message>
<message>
<location line="+3"/>
<source>Require a confirmations for change (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Upgrade wallet to latest format</source>
<translation>把錢包檔案升級成最新的格式</translation>
</message>
<message>
<location line="+1"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation>設定密鑰池大小成 <n> (預設值: 100)</translation>
</message>
<message>
<location line="+1"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>重新掃描區塊鏈,來尋找錢包可能漏掉的交易。</translation>
</message>
<message>
<location line="+3"/>
<source>How thorough the block verification is (0-6, default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Imports blocks from external blk000?.dat file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation>在 JSON-RPC 連線使用 OpenSSL (https)</translation>
</message>
<message>
<location line="+1"/>
<source>Server certificate file (default: server.cert)</source>
<translation>伺服器憑證檔(預設值: server.cert)</translation>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation>伺服器私鑰檔(預設值: server.pem)</translation>
</message>
<message>
<location line="+10"/>
<source>Initialization sanity check failed. EMC is shutting down.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+50"/>
<source>Error: Wallet unlocked for staking only, unable to create transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Error: Disk space is low!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>WARNING: Invalid checkpoint found! Displayed transactions may not be correct! You may need to upgrade, or notify developers.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-174"/>
<source>This help message</source>
<translation>這些說明訊息</translation>
</message>
<message>
<location line="+104"/>
<source>Wallet %s resides outside data directory %s.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+37"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation>沒辦法和這台電腦上的 %s 繫結(回傳錯誤 %d, %s)</translation>
</message>
<message>
<location line="-133"/>
<source>Connect through socks proxy</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>允許對 -addnode, -seednode, -connect 的參數使用域名查詢 </translation>
</message>
<message>
<location line="+126"/>
<source>Loading addresses...</source>
<translation>正在載入位址資料...</translation>
</message>
<message>
<location line="-12"/>
<source>Error loading blkindex.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation>載入檔案 wallet.dat 時發生錯誤: 錢包損毀了</translation>
</message>
<message>
<location line="+4"/>
<source>Error loading wallet.dat: Wallet requires newer version of EMC</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Wallet needed to be rewritten: restart EMC to complete</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat</source>
<translation>載入錢包檔 wallet.dat 時發生錯誤</translation>
</message>
<message>
<location line="-16"/>
<source>Invalid -proxy address: '%s'</source>
<translation>無效的 -proxy 位址: '%s'</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation>在 -onlynet 指定了不明的網路別: '%s'</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation>在 -socks 指定了不明的代理協定版本: %i</translation>
</message>
<message>
<location line="+4"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation>沒辦法解析 -bind 位址: '%s'</translation>
</message>
<message>
<location line="+2"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation>沒辦法解析 -externalip 位址: '%s'</translation>
</message>
<message>
<location line="-23"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation>設定 -paytxfee=<金額> 的金額無效: '%s'</translation>
</message>
<message>
<location line="+60"/>
<source>Sending...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Invalid amount</source>
<translation>無效的金額</translation>
</message>
<message>
<location line="+1"/>
<source>Insufficient funds</source>
<translation>累積金額不足</translation>
</message>
<message>
<location line="-40"/>
<source>Loading block index...</source>
<translation>正在載入區塊索引...</translation>
</message>
<message>
<location line="-110"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>增加一個要連線的節點,並試著保持對它的連線暢通</translation>
</message>
<message>
<location line="+125"/>
<source>Unable to bind to %s on this computer. EMC is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-101"/>
<source>Fee per KB to add to transactions you send</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+34"/>
<source>Minimize weight consumption (experimental) (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>How many blocks to check at startup (default: 500, 0 = all)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Keep at most <n> unconnectable blocks in memory (default: %u)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Acceptable ciphers (default: TLSv1.2+HIGH:TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!3DES:@STRENGTH)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: Deprecated argument -debugnet ignored, use -debug=net</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Invalid amount for -mininput=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Cannot obtain a lock on data directory %s. EMC is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error initializing wallet database environment %s!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Loading wallet...</source>
<translation>正在載入錢包資料...</translation>
</message>
<message>
<location line="+8"/>
<source>Cannot downgrade wallet</source>
<translation>沒辦法把錢包格式降級</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot write default address</source>
<translation>沒辦法把預設位址寫進去</translation>
</message>
<message>
<location line="+1"/>
<source>Rescanning...</source>
<translation>正在重新掃描...</translation>
</message>
<message>
<location line="+2"/>
<source>Done loading</source>
<translation>載入完成</translation>
</message>
<message>
<location line="-161"/>
<source>To use the %s option</source>
<translation>為了要使用 %s 選項</translation>
</message>
<message>
<location line="+188"/>
<source>Error</source>
<translation>錯誤</translation>
</message>
<message>
<location line="-18"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation>你必須在以下設定檔中設定 RPC 密碼(rpcpassword=<password>):
%s
如果還沒有這個檔案,請在造出來的時候,設定檔案權限成只有主人才能讀取。</translation>
</message>
</context>
</TS>
solution_test.go
package main
import "testing"
func runSample(t *testing.T, n int, expect bool) {
res := solve(n)
if expect != (len(res) > 0) {
t.Errorf("Sample expect %t, but got %v", expect, res)
} else if expect {
var xor int
for i := 1; i < n; i++ {
xor ^= abs(res[i] - res[i-1])
}
if xor != 0 {
t.Errorf("Sample result %v, not correct, xor got %d", res, xor)
}
}
}
func abs(num int) int {
if num < 0 {
return -num
}
return num
}
func TestSample1(t *testing.T) {
runSample(t, 6, true)
}
func TestSample2(t *testing.T) {
runSample(t, 100, true)
}
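// Note: the tests above call solve(n), which is defined in a separate file
// that is not part of this dump. The construction below is an illustrative
// sketch, not necessarily the original author's solution: it returns a
// permutation of 1..n whose adjacent absolute differences XOR to zero, or
// nil when no such permutation exists.
func solve(n int) []int {
switch {
case n == 2:
return nil // the single difference is always 1, which cannot XOR to zero
case n%2 == 1:
// Odd n (incl. n == 1): the identity permutation yields n-1 unit
// differences, an even count, so they cancel pairwise.
res := make([]int, n)
for i := range res {
res[i] = i + 1
}
return res
case n == 4:
return []int{1, 4, 2, 3} // differences 3, 2, 1 and 3^2^1 == 0
default:
// Even n >= 6: the prefix 2,4,1,3 yields differences 2,3,2 (XOR 3);
// stepping 3 -> 5 adds another 2, bringing the running XOR to 1; the
// remaining unit steps 5,6,...,n contribute n-5 ones, an odd count,
// which cancels that 1.
res := []int{2, 4, 1, 3}
for v := 5; v <= n; v++ {
res = append(res, v)
}
return res
}
}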
baAmChart.component.ts
import { Component, ViewChild, Input, Output, ElementRef, EventEmitter, OnInit, AfterViewInit } from '@angular/core';
import { BaThemePreloader } from '../../../theme/services';
// import 'amcharts3';
// import 'amcharts3/amcharts/plugins/responsive/responsive.js';
// import 'amcharts3/amcharts/serial.js';
// import 'ammap3';
// import 'ammap3/ammap/maps/js/worldLow';
import { BaAmChartThemeService } from './baAmChartTheme.service';
@Component({
selector: 'ba-am-chart',
templateUrl: './baAmChart.html',
styleUrls: ['./baAmChart.scss'],
providers: [BaAmChartThemeService],
})
export class BaAmChart implements OnInit, AfterViewInit {
@Input() baAmChartConfiguration: Object;
@Input() baAmChartClass: string;
@Output() onChartReady = new EventEmitter<any>();
@ViewChild('baAmChart') public _selector: ElementRef;
constructor(private _baAmChartThemeService: BaAmChartThemeService) {
this._loadChartsLib();
}
ngOnInit() {
// AmCharts.themes.blur = this._baAmChartThemeService.getTheme();
}
ngAfterViewInit() {
// const chart = AmCharts.makeChart(this._selector.nativeElement, this.baAmChartConfiguration);
// this.onChartReady.emit(chart);
}
private _loadChartsLib(): void {
// BaThemePreloader.registerLoader(new Promise((resolve, reject) => {
// const amChartsReadyMsg = 'AmCharts ready';
// if (AmCharts.isReady) {
// resolve(amChartsReadyMsg);
// } else {
// AmCharts.ready(function () {
// resolve(amChartsReadyMsg);
// });
// }
// }));
}
}
block_importer.rs
/*
Copyright 2021 Integritee AG and Supercomputing Systems AG
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//! Imports parentchain blocks and executes any indirect calls found in the extrinsics.
use crate::{
beefy_merkle_tree::{merkle_root, Keccak256},
error::{Error, Result},
ImportParentchainBlocks,
};
use ita_stf::ParentchainHeader;
use itc_parentchain_indirect_calls_executor::ExecuteIndirectCalls;
use itc_parentchain_light_client::{
concurrent_access::ValidatorAccess, BlockNumberOps, LightClientState, Validator,
};
use itp_extrinsics_factory::CreateExtrinsics;
use itp_ocall_api::{EnclaveAttestationOCallApi, EnclaveOnChainOCallApi};
use itp_registry_storage::{RegistryStorage, RegistryStorageKeys};
use itp_settings::node::{
ACK_GAME, GAME_REGISTRY_MODULE, PROCESSED_PARENTCHAIN_BLOCK, TEEREX_MODULE,
};
use itp_stf_executor::traits::{StfExecuteShieldFunds, StfExecuteTrustedCall, StfUpdateState};
use itp_stf_state_handler::query_shard_state::QueryShardState;
use itp_types::{OpaqueCall, H256};
use log::*;
use pallet_ajuna_gameregistry::{game::GameEngine, Queue};
use sp_runtime::{
generic::SignedBlock as SignedBlockG,
traits::{Block as ParentchainBlockTrait, NumberFor},
};
use std::{format, marker::PhantomData, sync::Arc, vec::Vec};
/// Parentchain block import implementation.
pub struct ParentchainBlockImporter<
ParentchainBlock,
ValidatorAccessor,
OCallApi,
StfExecutor,
ExtrinsicsFactory,
IndirectCallsExecutor,
StateHandler,
> where
ParentchainBlock: ParentchainBlockTrait<Hash = H256>,
NumberFor<ParentchainBlock>: BlockNumberOps,
ValidatorAccessor: ValidatorAccess<ParentchainBlock>,
OCallApi: EnclaveOnChainOCallApi + EnclaveAttestationOCallApi,
StfExecutor: StfUpdateState + StfExecuteTrustedCall + StfExecuteShieldFunds,
ExtrinsicsFactory: CreateExtrinsics,
IndirectCallsExecutor: ExecuteIndirectCalls,
StateHandler: QueryShardState,
{
validator_accessor: Arc<ValidatorAccessor>,
ocall_api: Arc<OCallApi>,
stf_executor: Arc<StfExecutor>,
extrinsics_factory: Arc<ExtrinsicsFactory>,
indirect_calls_executor: Arc<IndirectCallsExecutor>,
file_state_handler: Arc<StateHandler>,
_phantom: PhantomData<ParentchainBlock>,
}
impl<
ParentchainBlock,
ValidatorAccessor,
OCallApi,
StfExecutor,
ExtrinsicsFactory,
IndirectCallsExecutor,
StateHandler,
>
ParentchainBlockImporter<
ParentchainBlock,
ValidatorAccessor,
OCallApi,
StfExecutor,
ExtrinsicsFactory,
IndirectCallsExecutor,
StateHandler,
> where
ParentchainBlock: ParentchainBlockTrait<Hash = H256, Header = ParentchainHeader>,
NumberFor<ParentchainBlock>: BlockNumberOps,
ValidatorAccessor: ValidatorAccess<ParentchainBlock>,
OCallApi: EnclaveOnChainOCallApi + EnclaveAttestationOCallApi,
StfExecutor: StfUpdateState + StfExecuteTrustedCall + StfExecuteShieldFunds,
ExtrinsicsFactory: CreateExtrinsics,
IndirectCallsExecutor: ExecuteIndirectCalls,
StateHandler: QueryShardState,
{
pub fn new(
validator_accessor: Arc<ValidatorAccessor>,
ocall_api: Arc<OCallApi>,
stf_executor: Arc<StfExecutor>,
extrinsics_factory: Arc<ExtrinsicsFactory>,
indirect_calls_executor: Arc<IndirectCallsExecutor>,
file_state_handler: Arc<StateHandler>,
) -> Self {
ParentchainBlockImporter {
validator_accessor,
ocall_api,
stf_executor,
extrinsics_factory,
indirect_calls_executor,
_phantom: Default::default(),
file_state_handler,
}
}
}
impl<
ParentchainBlock,
ValidatorAccessor,
OCallApi,
StfExecutor,
ExtrinsicsFactory,
IndirectCallsExecutor,
StateHandler,
> ImportParentchainBlocks
for ParentchainBlockImporter<
ParentchainBlock,
ValidatorAccessor,
OCallApi,
StfExecutor,
ExtrinsicsFactory,
IndirectCallsExecutor,
StateHandler,
> where
ParentchainBlock: ParentchainBlockTrait<Hash = H256, Header = ParentchainHeader>,
NumberFor<ParentchainBlock>: BlockNumberOps,
ValidatorAccessor: ValidatorAccess<ParentchainBlock>,
OCallApi: EnclaveOnChainOCallApi + EnclaveAttestationOCallApi,
StfExecutor: StfUpdateState + StfExecuteTrustedCall + StfExecuteShieldFunds,
ExtrinsicsFactory: CreateExtrinsics,
IndirectCallsExecutor: ExecuteIndirectCalls,
StateHandler: QueryShardState,
{
type SignedBlockType = SignedBlockG<ParentchainBlock>;
fn import_parentchain_blocks(
&self,
blocks_to_import: Vec<Self::SignedBlockType>,
) -> Result<()> {
let mut calls = Vec::<OpaqueCall>::new();
debug!("Import blocks to light-client!");
for signed_block in blocks_to_import.into_iter() {
let block = signed_block.block;
let justifications = signed_block.justifications.clone();
// Check if there are any extrinsics in the to-be-imported block that we sent and cached in the light-client before.
// If so, remove them now from the cache.
if let Err(e) = self.validator_accessor.execute_mut_on_validator(|v| {
v.check_xt_inclusion(v.num_relays(), &block)?;
v.submit_simple_header(v.num_relays(), block.header().clone(), justifications)
}) {
error!("[Validator] Header submission failed: {:?}", e);
return Err(e.into())
}
// Perform state updates.
if let Err(e) = self.stf_executor.update_states(block.header()) {
error!("Error performing state updates upon block import");
return Err(e.into())
}
// Execute indirect calls that were found in the extrinsics of the block,
// incl. shielding and unshielding.
match self.indirect_calls_executor.execute_indirect_calls_in_extrinsics(&block) {
Ok((unshielding_call_confirmations, executed_shielding_calls)) => {
// Include all unshielding confirmations that need to be executed on the parentchain.
calls.extend(unshielding_call_confirmations.into_iter());
// Include a processed parentchain block confirmation for each block.
calls.push(create_processed_parentchain_block_call(
block.hash(),
executed_shielding_calls,
));
},
Err(e) => error!("Error executing relevant extrinsics: {:?}", e),
};
// FIXME: Putting these blocks below in a separate function would be a little bit cleaner
let maybe_queue: Option<Queue<H256>> = self
.ocall_api
.get_storage_verified(RegistryStorage::queue_game(), block.header())
.map_err(|e| Error::StorageVerified(format!("{:?}", e)))?
.into_tuple()
.1;
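// The verified storage read above yields a (key, value) tuple; `.into_tuple().1`
// keeps only the optional value, i.e. the queue itself.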
match maybe_queue {
Some(mut queue) => {
if !queue.is_empty() {
//FIXME: hardcoded, because it is currently hardcoded in the GameRegistry pallet.
let game_engine = GameEngine::new(1u8, 1u8);
let mut games = Vec::<H256>::new();
while let Some(game) = queue.dequeue() {
games.push(game)
}
//FIXME: we currently only take the first shard. How do we handle sharding in general?
let shard = self.file_state_handler.list_shards()?[0];
let ack_game_call = OpaqueCall::from_tuple(&(
[GAME_REGISTRY_MODULE, ACK_GAME],
&game_engine,
games,
shard,
));
calls.push(ack_game_call);
}
},
None => {
debug!("No game queued in GameRegistry pallet.");
},
}
info!(
"Successfully imported parentchain block (number: {}, hash: {})",
block.header().number,
block.header().hash()
);
}
// Create extrinsics for all `unshielding` and `block processed` calls we've gathered.
let parentchain_extrinsics = self.extrinsics_factory.create_extrinsics(calls.as_slice())?;
// Sending the extrinsic requires mut access because the validator caches the sent extrinsics internally.
self.validator_accessor.execute_mut_on_validator(|v| {
v.send_extrinsics(self.ocall_api.as_ref(), parentchain_extrinsics)
})?;
Ok(())
}
}
/// Creates a processed_parentchain_block extrinsic for a given parentchain block hash and the merkle root of the executed extrinsics.
///
/// Calculates the merkle root of the extrinsics. In case no extrinsics are supplied, the root will be a hash filled with zeros.
fn create_processed_parentchain_block_call(block_hash: H256, extrinsics: Vec<H256>) -> OpaqueCall {
let root: H256 = merkle_root::<Keccak256, _, _>(extrinsics).into();
OpaqueCall::from_tuple(&([TEEREX_MODULE, PROCESSED_PARENTCHAIN_BLOCK], block_hash, root))
}
#[cfg(test)]
pub mod tests {
use super::*;
use codec::Encode;
#[test]
fn ensure_empty_extrinsic_vec_triggers_zero_filled_merkle_root() {
// given
let block_hash = H256::from([1; 32]);
let extrinsics = Vec::new();
let expected_call =
([TEEREX_MODULE, PROCESSED_PARENTCHAIN_BLOCK], block_hash, H256::default()).encode();
// when
let call = create_processed_parentchain_block_call(block_hash, extrinsics);
// then
assert_eq!(call.0, expected_call);
}
#[test]
fn ensure_non_empty_extrinsic_vec_triggers_non_zero_merkle_root() {
	// given
	let block_hash = H256::from([1; 32]);
	let extrinsics = vec![H256::from([4; 32]), H256::from([9; 32])];
	let zero_root_call =
		([TEEREX_MODULE, PROCESSED_PARENTCHAIN_BLOCK], block_hash, H256::default()).encode();
	// when
	let call = create_processed_parentchain_block_call(block_hash, extrinsics);
	// then
	assert_ne!(call.0, zero_root_call);
}
}
test_ops_binary.py
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import operator
import numpy as np
import pytest
import openvino.runtime.opset8 as ov
from tests.runtime import get_runtime
from tests.test_ngraph.util import run_op_node
@pytest.mark.parametrize(
"ng_api_helper,numpy_function",
[
(ov.add, np.add),
(ov.divide, np.divide),
(ov.multiply, np.multiply),
(ov.subtract, np.subtract),
(ov.minimum, np.minimum),
(ov.maximum, np.maximum),
(ov.mod, np.mod),
(ov.equal, np.equal),
(ov.not_equal, np.not_equal),
(ov.greater, np.greater),
(ov.greater_equal, np.greater_equal),
(ov.less, np.less),
(ov.less_equal, np.less_equal),
],
)
def test_binary_op(ng_api_helper, numpy_function):
runtime = get_runtime()
shape = [2, 2]
parameter_a = ov.parameter(shape, name="A", dtype=np.float32)
parameter_b = ov.parameter(shape, name="B", dtype=np.float32)
model = ng_api_helper(parameter_a, parameter_b)
computation = runtime.computation(model, parameter_a, parameter_b)
value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
value_b = np.array([[5, 6], [7, 8]], dtype=np.float32)
result = computation(value_a, value_b)
expected = numpy_function(value_a, value_b)
assert np.allclose(result, expected)
@pytest.mark.parametrize(
"ng_api_helper,numpy_function",
[
(ov.add, np.add),
(ov.divide, np.divide),
(ov.multiply, np.multiply),
(ov.subtract, np.subtract),
(ov.minimum, np.minimum),
(ov.maximum, np.maximum),
(ov.mod, np.mod),
(ov.equal, np.equal),
(ov.not_equal, np.not_equal),
(ov.greater, np.greater),
(ov.greater_equal, np.greater_equal),
(ov.less, np.less),
(ov.less_equal, np.less_equal),
],
)
def test_binary_op_with_scalar(ng_api_helper, numpy_function):
|
@pytest.mark.parametrize(
"ng_api_helper,numpy_function",
[(ov.logical_and, np.logical_and), (ov.logical_or, np.logical_or), (ov.logical_xor, np.logical_xor)],
)
def test_binary_logical_op(ng_api_helper, numpy_function):
runtime = get_runtime()
shape = [2, 2]
parameter_a = ov.parameter(shape, name="A", dtype=np.bool)
parameter_b = ov.parameter(shape, name="B", dtype=np.bool)
model = ng_api_helper(parameter_a, parameter_b)
computation = runtime.computation(model, parameter_a, parameter_b)
value_a = np.array([[True, False], [False, True]], dtype=np.bool)
value_b = np.array([[False, True], [False, True]], dtype=np.bool)
result = computation(value_a, value_b)
expected = numpy_function(value_a, value_b)
assert np.allclose(result, expected)
@pytest.mark.parametrize(
"ng_api_helper,numpy_function",
[(ov.logical_and, np.logical_and), (ov.logical_or, np.logical_or), (ov.logical_xor, np.logical_xor)],
)
def test_binary_logical_op_with_scalar(ng_api_helper, numpy_function):
runtime = get_runtime()
value_a = np.array([[True, False], [False, True]], dtype=bool)
value_b = np.array([[False, True], [False, True]], dtype=bool)
shape = [2, 2]
parameter_a = ov.parameter(shape, name="A", dtype=bool)
model = ng_api_helper(parameter_a, value_b)
computation = runtime.computation(model, parameter_a)
result = computation(value_a)
expected = numpy_function(value_a, value_b)
assert np.allclose(result, expected)
@pytest.mark.parametrize(
"operator,numpy_function",
[
(operator.add, np.add),
(operator.sub, np.subtract),
(operator.mul, np.multiply),
(operator.truediv, np.divide),
(operator.eq, np.equal),
(operator.ne, np.not_equal),
(operator.gt, np.greater),
(operator.ge, np.greater_equal),
(operator.lt, np.less),
(operator.le, np.less_equal),
],
)
def test_binary_operators(operator, numpy_function):
runtime = get_runtime()
value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
value_b = np.array([[4, 5], [1, 7]], dtype=np.float32)
shape = [2, 2]
parameter_a = ov.parameter(shape, name="A", dtype=np.float32)
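# openvino Node objects overload Python's arithmetic/comparison dunder methods,
# so e.g. operator.add(parameter_a, value_b) builds an Add node from a
# parameter and a constant.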
model = operator(parameter_a, value_b)
computation = runtime.computation(model, parameter_a)
result = computation(value_a)
expected = numpy_function(value_a, value_b)
assert np.allclose(result, expected)
@pytest.mark.parametrize(
"operator,numpy_function",
[
(operator.add, np.add),
(operator.sub, np.subtract),
(operator.mul, np.multiply),
(operator.truediv, np.divide),
(operator.eq, np.equal),
(operator.ne, np.not_equal),
(operator.gt, np.greater),
(operator.ge, np.greater_equal),
(operator.lt, np.less),
(operator.le, np.less_equal),
],
)
def test_binary_operators_with_scalar(operator, numpy_function):
runtime = get_runtime()
value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
value_b = np.array([[5, 6], [7, 8]], dtype=np.float32)
shape = [2, 2]
parameter_a = ov.parameter(shape, name="A", dtype=np.float32)
model = operator(parameter_a, value_b)
computation = runtime.computation(model, parameter_a)
result = computation(value_a)
expected = numpy_function(value_a, value_b)
assert np.allclose(result, expected)
def test_multiply():
A = np.arange(48, dtype=np.int32).reshape((8, 1, 6, 1))
B = np.arange(35, dtype=np.int32).reshape((7, 1, 5))
expected = np.multiply(A, B)
result = run_op_node([A, B], ov.multiply)
assert np.allclose(result, expected)
def test_power_v1():
A = np.arange(48, dtype=np.float32).reshape((8, 1, 6, 1))
B = np.arange(20, dtype=np.float32).reshape((4, 1, 5))
expected = np.power(A, B)
result = run_op_node([A, B], ov.power)
assert np.allclose(result, expected)
02_result_unwrap.rs
fn main() {
    let x = "10".parse::<i32>().unwrap();
    println!("x is: {}", x);
}
executor_test.go
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor_test
import (
"archive/zip"
"context"
"flag"
"fmt"
"math"
"math/rand"
"net"
"os"
"path/filepath"
"reflect"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/golang/protobuf/proto"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/domain/infosync"
"github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/executor"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/parser"
"github.com/pingcap/tidb/parser/auth"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/parser/terror"
"github.com/pingcap/tidb/planner"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/server"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/store/copr"
error2 "github.com/pingcap/tidb/store/driver/error"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/store/mockstore/unistore"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/tablecodec"
testkit2 "github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/admin"
"github.com/pingcap/tidb/util/collate"
"github.com/pingcap/tidb/util/deadlockhistory"
"github.com/pingcap/tidb/util/gcutil"
"github.com/pingcap/tidb/util/israce"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/rowcodec"
"github.com/pingcap/tidb/util/testkit"
"github.com/pingcap/tidb/util/testleak"
"github.com/pingcap/tidb/util/testutil"
"github.com/pingcap/tidb/util/timeutil"
topsqlstate "github.com/pingcap/tidb/util/topsql/state"
"github.com/pingcap/tipb/go-tipb"
"github.com/stretchr/testify/require"
"github.com/tikv/client-go/v2/oracle"
"github.com/tikv/client-go/v2/testutils"
"github.com/tikv/client-go/v2/tikv"
"github.com/tikv/client-go/v2/tikvrpc"
"google.golang.org/grpc"
)
func TestT(t *testing.T) {
CustomVerboseFlag = true
*CustomParallelSuiteFlag = true
TestingT(t)
}
var _ = Suite(&testSuite{&baseTestSuite{}})
var _ = Suite(&testSuiteP1{&baseTestSuite{}})
var _ = Suite(&testSuiteP2{&baseTestSuite{}})
var _ = Suite(&testSuite1{})
var _ = SerialSuites(&testSerialSuite2{})
var _ = SerialSuites(&testSuiteWithCliBaseCharset{})
var _ = Suite(&testSuite2{&baseTestSuite{}})
var _ = Suite(&testSuite3{&baseTestSuite{}})
var _ = Suite(&testSuite4{&baseTestSuite{}})
var _ = Suite(&testSuite5{&baseTestSuite{}})
var _ = Suite(&testSuiteJoin1{&baseTestSuite{}})
var _ = Suite(&testSuiteJoin2{&baseTestSuite{}})
var _ = Suite(&testSuiteJoin3{&baseTestSuite{}})
var _ = SerialSuites(&testSuiteJoinSerial{&baseTestSuite{}})
var _ = Suite(&testSuite6{&baseTestSuite{}})
var _ = Suite(&testSuite7{&baseTestSuite{}})
var _ = Suite(&testSuite8{&baseTestSuite{}})
var _ = Suite(&testUpdateSuite{})
var _ = Suite(&testPointGetSuite{})
var _ = SerialSuites(&testRecoverTable{})
var _ = SerialSuites(&testMemTableReaderSuite{&testClusterTableBase{}})
var _ = SerialSuites(&testFlushSuite{})
var _ = SerialSuites(&testAutoRandomSuite{&baseTestSuite{}})
var _ = SerialSuites(&testClusterTableSuite{})
var _ = SerialSuites(&testPrepareSerialSuite{&baseTestSuite{}})
var _ = SerialSuites(&testSplitTable{&baseTestSuite{}})
var _ = Suite(&testSuiteWithData{baseTestSuite: &baseTestSuite{}})
var _ = SerialSuites(&testSerialSuite1{&baseTestSuite{}})
var _ = SerialSuites(&testSlowQuery{&baseTestSuite{}})
var _ = Suite(&partitionTableSuite{&baseTestSuite{}})
var _ = SerialSuites(&tiflashTestSuite{})
var _ = SerialSuites(&globalIndexSuite{&baseTestSuite{}})
var _ = SerialSuites(&testSerialSuite{&baseTestSuite{}})
var _ = SerialSuites(&testStaleTxnSerialSuite{&baseTestSuite{}})
var _ = Suite(&testStaleTxnSuite{&baseTestSuite{}})
var _ = SerialSuites(&testCoprCache{})
var _ = SerialSuites(&testPrepareSuite{})
var _ = SerialSuites(&testResourceTagSuite{&baseTestSuite{}})
type testSuite struct{ *baseTestSuite }
type testSuiteP1 struct{ *baseTestSuite }
type testSuiteP2 struct{ *baseTestSuite }
type testSplitTable struct{ *baseTestSuite }
type testSuiteWithData struct {
*baseTestSuite
testData testutil.TestData
}
type testSlowQuery struct{ *baseTestSuite }
type partitionTableSuite struct{ *baseTestSuite }
type globalIndexSuite struct{ *baseTestSuite }
type testSerialSuite struct{ *baseTestSuite }
type testStaleTxnSerialSuite struct{ *baseTestSuite }
type testStaleTxnSuite struct{ *baseTestSuite }
type testCoprCache struct {
store kv.Storage
dom *domain.Domain
cls testutils.Cluster
}
type testPrepareSuite struct{ testData testutil.TestData }
type testResourceTagSuite struct{ *baseTestSuite }
type baseTestSuite struct {
cluster testutils.Cluster
store kv.Storage
domain *domain.Domain
*parser.Parser
ctx *mock.Context // nolint:structcheck
}
func newStoreWithBootstrap() (kv.Storage, *domain.Domain, error) {
store, err := mockstore.NewMockStore()
if err != nil {
return nil, nil, errors.Trace(err)
}
session.SetSchemaLease(0)
session.DisableStats4Test()
dom, err := session.BootstrapSession(store)
if err != nil {
return nil, nil, err
}
return store, dom, errors.Trace(err)
}
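// A minimal usage sketch (not an actual test in this suite) showing how
// newStoreWithBootstrap is typically consumed: bootstrap a mock store and
// domain, open a test session, run SQL, then tear everything down. Every
// call below appears elsewhere in this file; only the function name
// exampleBootstrapUsage is hypothetical.
func exampleBootstrapUsage() error {
	store, dom, err := newStoreWithBootstrap()
	if err != nil {
		return err
	}
	defer func() {
		dom.Close()
		_ = store.Close()
	}()
	se, err := session.CreateSession4Test(store)
	if err != nil {
		return err
	}
	defer se.Close()
	// Any SQL can now run against the fully bootstrapped mock store.
	_, err = se.Execute(context.Background(), "select 1")
	return err
}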
var mockTikv = flag.Bool("mockTikv", true, "use mock tikv store in executor test")
func (s *baseTestSuite) SetUpSuite(c *C) {
s.Parser = parser.New()
flag.Lookup("mockTikv")
useMockTikv := *mockTikv
if useMockTikv {
store, err := mockstore.NewMockStore(
mockstore.WithClusterInspector(func(c testutils.Cluster) {
mockstore.BootstrapWithSingleStore(c)
s.cluster = c
}),
)
c.Assert(err, IsNil)
s.store = store
session.SetSchemaLease(0)
session.DisableStats4Test()
}
d, err := session.BootstrapSession(s.store)
c.Assert(err, IsNil)
se, err := session.CreateSession4Test(s.store)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "set @@global.tidb_enable_alter_placement=1")
c.Assert(err, IsNil)
se.Close()
d.SetStatsUpdating(true)
s.domain = d
config.UpdateGlobal(func(conf *config.Config) {
conf.OOMAction = config.OOMActionLog
})
}
func (s *testSuiteWithData) SetUpSuite(c *C) {
s.baseTestSuite.SetUpSuite(c)
var err error
s.testData, err = testutil.LoadTestSuiteData("testdata", "executor_suite")
c.Assert(err, IsNil)
}
func (s *testSuiteWithData) TearDownSuite(c *C) {
s.baseTestSuite.TearDownSuite(c)
c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil)
}
func (s *testPrepareSuite) SetUpSuite(c *C) {
var err error
s.testData, err = testutil.LoadTestSuiteData("testdata", "prepare_suite")
c.Assert(err, IsNil)
}
func (s *testPrepareSuite) TearDownSuite(c *C) {
c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil)
}
func (s *baseTestSuite) TearDownSuite(c *C) {
s.domain.Close()
c.Assert(s.store.Close(), IsNil)
}
func (s *globalIndexSuite) SetUpSuite(c *C) {
s.baseTestSuite.SetUpSuite(c)
config.UpdateGlobal(func(conf *config.Config) {
conf.EnableGlobalIndex = true
})
}
func (s *testSuiteP1) TestPessimisticSelectForUpdate(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id int primary key, a int)")
tk.MustExec("insert into t values(1, 1)")
tk.MustExec("begin PESSIMISTIC")
tk.MustQuery("select a from t where id=1 for update").Check(testkit.Rows("1"))
tk.MustExec("update t set a=a+1 where id=1")
tk.MustExec("commit")
tk.MustQuery("select a from t where id=1").Check(testkit.Rows("2"))
}
func (s *testSuite) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show tables")
for _, tb := range r.Rows() {
tableName := tb[0]
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
func (s *testSuiteP1) TestBind(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists testbind")
tk.MustExec("create table testbind(i int, s varchar(20))")
tk.MustExec("create index index_t on testbind(i,s)")
tk.MustExec("create global binding for select * from testbind using select * from testbind use index for join(index_t)")
c.Assert(len(tk.MustQuery("show global bindings").Rows()), Equals, 1)
tk.MustExec("create session binding for select * from testbind using select * from testbind use index for join(index_t)")
c.Assert(len(tk.MustQuery("show session bindings").Rows()), Equals, 1)
tk.MustExec("drop session binding for select * from testbind")
}
func (s *testSuiteP1) TestChangePumpAndDrainer(c *C) {
tk := testkit.NewTestKit(c, s.store)
// Changing a pump's or drainer's state requires connecting to etcd,
// so it will hit the error "URL scheme must be http, https, unix, or unixs: /tmp/tidb".
err := tk.ExecToErr("change pump to node_state ='paused' for node_id 'pump1'")
c.Assert(err, ErrorMatches, "URL scheme must be http, https, unix, or unixs.*")
err = tk.ExecToErr("change drainer to node_state ='paused' for node_id 'drainer1'")
c.Assert(err, ErrorMatches, "URL scheme must be http, https, unix, or unixs.*")
}
func (s *testSuiteP1) TestLoadStats(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
c.Assert(tk.ExecToErr("load stats"), NotNil)
c.Assert(tk.ExecToErr("load stats ./xxx.json"), NotNil)
}
func (s *testSuiteP1) TestPlanReplayer(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, index idx_a(a))")
tk.MustExec("plan replayer dump explain select * from t where a=10")
}
func (s *testSuiteP1) TestShow(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database test_show;")
tk.MustExec("use test_show")
tk.MustQuery("show engines")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key)")
c.Assert(len(tk.MustQuery("show index in t").Rows()), Equals, 1)
c.Assert(len(tk.MustQuery("show index from t").Rows()), Equals, 1)
tk.MustQuery("show charset").Check(testkit.Rows(
"ascii US ASCII ascii_bin 1",
"binary binary binary 1",
"latin1 Latin1 latin1_bin 1",
"utf8 UTF-8 Unicode utf8_bin 3",
"utf8mb4 UTF-8 Unicode utf8mb4_bin 4",
))
c.Assert(len(tk.MustQuery("show master status").Rows()), Equals, 1)
tk.MustQuery("show create database test_show").Check(testkit.Rows("test_show CREATE DATABASE `test_show` /*!40100 DEFAULT CHARACTER SET utf8mb4 */"))
tk.MustQuery("show privileges").Check(testkit.Rows("Alter Tables To alter the table",
"Alter routine Functions,Procedures To alter or drop stored functions/procedures",
"Create Databases,Tables,Indexes To create new databases and tables",
"Create routine Databases To use CREATE FUNCTION/PROCEDURE",
"Create temporary tables Databases To use CREATE TEMPORARY TABLE",
"Create view Tables To create new views",
"Create user Server Admin To create new users",
"Delete Tables To delete existing rows",
"Drop Databases,Tables To drop databases, tables, and views",
"Event Server Admin To create, alter, drop and execute events",
"Execute Functions,Procedures To execute stored routines",
"File File access on server To read and write files on the server",
"Grant option Databases,Tables,Functions,Procedures To give to other users those privileges you possess",
"Index Tables To create or drop indexes",
"Insert Tables To insert data into tables",
"Lock tables Databases To use LOCK TABLES (together with SELECT privilege)",
"Process Server Admin To view the plain text of currently executing queries",
"Proxy Server Admin To make proxy user possible",
"References Databases,Tables To have references on tables",
"Reload Server Admin To reload or refresh tables, logs and privileges",
"Replication client Server Admin To ask where the slave or master servers are",
"Replication slave Server Admin To read binary log events from the master",
"Select Tables To retrieve rows from table",
"Show databases Server Admin To see all databases with SHOW DATABASES",
"Show view Tables To see views with SHOW CREATE VIEW",
"Shutdown Server Admin To shut down the server",
"Super Server Admin To use KILL thread, SET GLOBAL, CHANGE MASTER, etc.",
"Trigger Tables To use triggers",
"Create tablespace Server Admin To create/alter/drop tablespaces",
"Update Tables To update existing rows",
"Usage Server Admin No privileges - allow connect only",
"BACKUP_ADMIN Server Admin ",
"RESTORE_ADMIN Server Admin ",
"SYSTEM_USER Server Admin ",
"SYSTEM_VARIABLES_ADMIN Server Admin ",
"ROLE_ADMIN Server Admin ",
"CONNECTION_ADMIN Server Admin ",
"PLACEMENT_ADMIN Server Admin ",
"DASHBOARD_CLIENT Server Admin ",
"RESTRICTED_TABLES_ADMIN Server Admin ",
"RESTRICTED_STATUS_ADMIN Server Admin ",
"RESTRICTED_VARIABLES_ADMIN Server Admin ",
"RESTRICTED_USER_ADMIN Server Admin ",
"RESTRICTED_CONNECTION_ADMIN Server Admin ",
"RESTRICTED_REPLICA_WRITER_ADMIN Server Admin ",
))
c.Assert(len(tk.MustQuery("show table status").Rows()), Equals, 1)
}
func (s *testSuite3) TestAdmin(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists admin_test")
tk.MustExec("create table admin_test (c1 int, c2 int, c3 int default 1, index (c1))")
tk.MustExec("insert admin_test (c1) values (1),(2),(NULL)")
ctx := context.Background()
// cancel DDL jobs test
r, err := tk.Exec("admin cancel ddl jobs 1")
c.Assert(err, IsNil, Commentf("err %v", err))
req := r.NewChunk(nil)
err = r.Next(ctx, req)
c.Assert(err, IsNil)
row := req.GetRow(0)
c.Assert(row.Len(), Equals, 2)
c.Assert(row.GetString(0), Equals, "1")
c.Assert(row.GetString(1), Matches, "*DDL Job:1 not found")
// show ddl test;
r, err = tk.Exec("admin show ddl")
c.Assert(err, IsNil)
req = r.NewChunk(nil)
err = r.Next(ctx, req)
c.Assert(err, IsNil)
row = req.GetRow(0)
c.Assert(row.Len(), Equals, 6)
txn, err := s.store.Begin()
c.Assert(err, IsNil)
ddlInfo, err := admin.GetDDLInfo(txn)
c.Assert(err, IsNil)
c.Assert(row.GetInt64(0), Equals, ddlInfo.SchemaVer)
// TODO: Pass this test.
// rowOwnerInfos := strings.Split(row.Data[1].GetString(), ",")
// ownerInfos := strings.Split(ddlInfo.Owner.String(), ",")
// c.Assert(rowOwnerInfos[0], Equals, ownerInfos[0])
serverInfo, err := infosync.GetServerInfoByID(ctx, row.GetString(1))
c.Assert(err, IsNil)
c.Assert(row.GetString(2), Equals, serverInfo.IP+":"+
strconv.FormatUint(uint64(serverInfo.Port), 10))
c.Assert(row.GetString(3), Equals, "")
req = r.NewChunk(nil)
err = r.Next(ctx, req)
c.Assert(err, IsNil)
c.Assert(req.NumRows() == 0, IsTrue)
err = txn.Rollback()
c.Assert(err, IsNil)
// show DDL jobs test
r, err = tk.Exec("admin show ddl jobs")
c.Assert(err, IsNil)
req = r.NewChunk(nil)
err = r.Next(ctx, req)
c.Assert(err, IsNil)
row = req.GetRow(0)
c.Assert(row.Len(), Equals, 11)
txn, err = s.store.Begin()
c.Assert(err, IsNil)
historyJobs, err := admin.GetHistoryDDLJobs(txn, admin.DefNumHistoryJobs)
c.Assert(len(historyJobs), Greater, 1)
c.Assert(len(row.GetString(1)), Greater, 0)
c.Assert(err, IsNil)
c.Assert(row.GetInt64(0), Equals, historyJobs[0].ID)
c.Assert(err, IsNil)
r, err = tk.Exec("admin show ddl jobs 20")
c.Assert(err, IsNil)
req = r.NewChunk(nil)
err = r.Next(ctx, req)
c.Assert(err, IsNil)
row = req.GetRow(0)
c.Assert(row.Len(), Equals, 11)
c.Assert(row.GetInt64(0), Equals, historyJobs[0].ID)
c.Assert(err, IsNil)
// show DDL job queries test
tk.MustExec("use test")
tk.MustExec("drop table if exists admin_test2")
tk.MustExec("create table admin_test2 (c1 int, c2 int, c3 int default 1, index (c1))")
result := tk.MustQuery(`admin show ddl job queries 1, 1, 1`)
result.Check(testkit.Rows())
result = tk.MustQuery(`admin show ddl job queries 1, 2, 3, 4`)
result.Check(testkit.Rows())
historyJobs, err = admin.GetHistoryDDLJobs(txn, admin.DefNumHistoryJobs)
result = tk.MustQuery(fmt.Sprintf("admin show ddl job queries %d", historyJobs[0].ID))
result.Check(testkit.Rows(historyJobs[0].Query))
c.Assert(err, IsNil)
// check table test
tk.MustExec("create table admin_test1 (c1 int, c2 int default 1, index (c1))")
tk.MustExec("insert admin_test1 (c1) values (21),(22)")
r, err = tk.Exec("admin check table admin_test, admin_test1")
c.Assert(err, IsNil)
c.Assert(r, IsNil)
// error table name
err = tk.ExecToErr("admin check table admin_test_error")
c.Assert(err, NotNil)
// different index values
sctx := tk.Se.(sessionctx.Context)
dom := domain.GetDomain(sctx)
is := dom.InfoSchema()
c.Assert(is, NotNil)
tb, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("admin_test"))
c.Assert(err, IsNil)
c.Assert(tb.Indices(), HasLen, 1)
_, err = tb.Indices()[0].Create(mock.NewContext(), txn, types.MakeDatums(int64(10)), kv.IntHandle(1), nil)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
errAdmin := tk.ExecToErr("admin check table admin_test")
c.Assert(errAdmin, NotNil)
if config.CheckTableBeforeDrop {
err = tk.ExecToErr("drop table admin_test")
c.Assert(err.Error(), Equals, errAdmin.Error())
// Drop inconsistency index.
tk.MustExec("alter table admin_test drop index c1")
tk.MustExec("admin check table admin_test")
}
// checksum table test
tk.MustExec("create table checksum_with_index (id int, count int, PRIMARY KEY(id), KEY(count))")
tk.MustExec("create table checksum_without_index (id int, count int, PRIMARY KEY(id))")
r, err = tk.Exec("admin checksum table checksum_with_index, checksum_without_index")
c.Assert(err, IsNil)
res := tk.ResultSetToResult(r, Commentf("admin checksum table"))
// Mocktikv returns 1 for every table/index scan, and we then xor the checksums of a table.
// For "checksum_with_index", we have two checksums, so the result will be 1^1 = 0.
// For "checksum_without_index", we only have one checksum, so the result will be 1.
res.Sort().Check(testkit.Rows("test checksum_with_index 0 2 2", "test checksum_without_index 1 1 1"))
tk.MustExec("drop table if exists t1;")
tk.MustExec("CREATE TABLE t1 (c2 BOOL, PRIMARY KEY (c2));")
tk.MustExec("INSERT INTO t1 SET c2 = '0';")
tk.MustExec("ALTER TABLE t1 ADD COLUMN c3 DATETIME NULL DEFAULT '2668-02-03 17:19:31';")
tk.MustExec("ALTER TABLE t1 ADD INDEX idx2 (c3);")
tk.MustExec("ALTER TABLE t1 ADD COLUMN c4 bit(10) default 127;")
tk.MustExec("ALTER TABLE t1 ADD INDEX idx3 (c4);")
tk.MustExec("admin check table t1;")
// Test admin show ddl jobs table name after the table has been dropped.
tk.MustExec("drop table if exists t1;")
re := tk.MustQuery("admin show ddl jobs 1")
rows := re.Rows()
c.Assert(len(rows), Equals, 1)
c.Assert(rows[0][2], Equals, "t1")
// Test reverse scanning of history DDL jobs when the DDL history job queue spans multiple regions.
txn, err = s.store.Begin()
c.Assert(err, IsNil)
historyJobs, err = admin.GetHistoryDDLJobs(txn, 20)
c.Assert(err, IsNil)
// Split region for history ddl job queues.
m := meta.NewMeta(txn)
startKey := meta.DDLJobHistoryKey(m, 0)
endKey := meta.DDLJobHistoryKey(m, historyJobs[0].ID)
s.cluster.SplitKeys(startKey, endKey, int(historyJobs[0].ID/5))
historyJobs2, err := admin.GetHistoryDDLJobs(txn, 20)
c.Assert(err, IsNil)
c.Assert(historyJobs, DeepEquals, historyJobs2)
}
func (s *testSuiteP2) TestAdminShowDDLJobs(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database if not exists test_admin_show_ddl_jobs")
tk.MustExec("use test_admin_show_ddl_jobs")
tk.MustExec("create table t (a int);")
re := tk.MustQuery("admin show ddl jobs 1")
row := re.Rows()[0]
c.Assert(row[1], Equals, "test_admin_show_ddl_jobs")
jobID, err := strconv.Atoi(row[0].(string))
c.Assert(err, IsNil)
err = kv.RunInNewTxn(context.Background(), s.store, true, func(ctx context.Context, txn kv.Transaction) error {
t := meta.NewMeta(txn)
job, err := t.GetHistoryDDLJob(int64(jobID))
c.Assert(err, IsNil)
c.Assert(job, NotNil)
// Test for compatibility. Old TiDB versions don't have the SchemaName field, and the BinlogInfo may be nil.
// See PR: 11561.
job.BinlogInfo = nil
job.SchemaName = ""
err = t.AddHistoryDDLJob(job, true)
c.Assert(err, IsNil)
return nil
})
c.Assert(err, IsNil)
re = tk.MustQuery("admin show ddl jobs 1")
row = re.Rows()[0]
c.Assert(row[1], Equals, "test_admin_show_ddl_jobs")
re = tk.MustQuery("admin show ddl jobs 1 where job_type='create table'")
row = re.Rows()[0]
c.Assert(row[1], Equals, "test_admin_show_ddl_jobs")
c.Assert(row[9], Equals, "<nil>")
// Test the START_TIME and END_TIME field.
re = tk.MustQuery("admin show ddl jobs where job_type = 'create table' and start_time > str_to_date('20190101','%Y%m%d%H%i%s')")
row = re.Rows()[0]
c.Assert(row[2], Equals, "t")
c.Assert(row[9], Equals, "<nil>")
}
func (s *testSuiteP2) TestAdminShowDDLJobsInfo(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database if not exists test_admin_show_ddl_jobs")
defer tk.MustExec("drop database if exists test_admin_show_ddl_jobs")
tk.MustExec("use test_admin_show_ddl_jobs")
tk.MustExec("drop table if exists t, t1;")
tk.MustExec("create table t (a int);")
tk.MustExec("create table t1 (a int);")
// Test for issue: https://github.com/pingcap/tidb/issues/29915
tk.MustExec("drop placement policy if exists x;")
tk.MustExec("create placement policy x followers=4;")
tk.MustExec("alter table t placement policy x;")
c.Assert(tk.MustQuery("admin show ddl jobs 1").Rows()[0][3], Equals, "alter table placement")
tk.MustExec("rename table t to tt, t1 to tt1")
c.Assert(tk.MustQuery("admin show ddl jobs 1").Rows()[0][3], Equals, "rename tables")
tk.MustExec("create table tt2 (c int) PARTITION BY RANGE (c) " +
"(PARTITION p0 VALUES LESS THAN (6)," +
"PARTITION p1 VALUES LESS THAN (11)," +
"PARTITION p2 VALUES LESS THAN (16)," +
"PARTITION p3 VALUES LESS THAN (21));")
tk.MustExec("alter table tt2 partition p0 " +
"PRIMARY_REGION=\"cn-east-1\" " +
"REGIONS=\"cn-east-1, cn-east-2\" " +
"FOLLOWERS=2 ")
c.Assert(tk.MustQuery("admin show ddl jobs 1").Rows()[0][3], Equals, "alter table partition placement")
tk.MustExec("alter table tt1 cache")
c.Assert(tk.MustQuery("admin show ddl jobs 1").Rows()[0][3], Equals, "alter table cache")
tk.MustExec("alter table tt1 nocache")
c.Assert(tk.MustQuery("admin show ddl jobs 1").Rows()[0][3], Equals, "alter table nocache")
}
func (s *testSuiteP2) TestAdminChecksumOfPartitionedTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("USE test;")
tk.MustExec("DROP TABLE IF EXISTS admin_checksum_partition_test;")
tk.MustExec("CREATE TABLE admin_checksum_partition_test (a INT) PARTITION BY HASH(a) PARTITIONS 4;")
tk.MustExec("INSERT INTO admin_checksum_partition_test VALUES (1), (2);")
r := tk.MustQuery("ADMIN CHECKSUM TABLE admin_checksum_partition_test;")
r.Check(testkit.Rows("test admin_checksum_partition_test 1 5 5"))
}
func (s *baseTestSuite) fillData(tk *testkit.TestKit, table string) {
tk.MustExec("use test")
tk.MustExec(fmt.Sprintf("create table %s(id int not null default 1, name varchar(255), PRIMARY KEY(id));", table))
// insert data
tk.MustExec(fmt.Sprintf("insert INTO %s VALUES (1, \"hello\");", table))
tk.CheckExecResult(1, 0)
tk.MustExec(fmt.Sprintf("insert into %s values (2, \"hello\");", table))
tk.CheckExecResult(1, 0)
}
type testCase struct {
data1 []byte
data2 []byte
expected []string
restData []byte
expectedMsg string
}
func checkCases(tests []testCase, ld *executor.LoadDataInfo,
t *testing.T, tk *testkit2.TestKit, ctx sessionctx.Context, selectSQL, deleteSQL string) {
origin := ld.IgnoreLines
for _, tt := range tests {
ld.IgnoreLines = origin
require.Nil(t, ctx.NewTxn(context.Background()))
ctx.GetSessionVars().StmtCtx.DupKeyAsWarning = true
ctx.GetSessionVars().StmtCtx.BadNullAsWarning = true
ctx.GetSessionVars().StmtCtx.InLoadDataStmt = true
ctx.GetSessionVars().StmtCtx.InDeleteStmt = false
data, reachLimit, err1 := ld.InsertData(context.Background(), tt.data1, tt.data2)
require.NoError(t, err1)
require.False(t, reachLimit)
err1 = ld.CheckAndInsertOneBatch(context.Background(), ld.GetRows(), ld.GetCurBatchCnt())
require.NoError(t, err1)
ld.SetMaxRowsInBatch(20000)
if tt.restData == nil {
require.Len(t, data, 0, "data1:%v, data2:%v, data:%v", string(tt.data1), string(tt.data2), string(data))
} else {
require.Equal(t, tt.restData, data, "data1:%v, data2:%v, data:%v", string(tt.data1), string(tt.data2), string(data))
}
ld.SetMessage()
require.Equal(t, tt.expectedMsg, tk.Session().LastMessage())
ctx.StmtCommit()
txn, err := ctx.Txn(true)
require.NoError(t, err)
err = txn.Commit(context.Background())
require.NoError(t, err)
r := tk.MustQuery(selectSQL)
r.Check(testutil.RowsWithSep("|", tt.expected...))
tk.MustExec(deleteSQL)
}
}
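// A hypothetical testCase value sketched against the struct above. The data
// layout and message format are assumptions inferred from checkCases
// (tab-separated input columns, "|"-separated expected rows via
// testutil.RowsWithSep), not copied from a real test in this file.
var exampleLoadDataCase = testCase{
	data1:       []byte("1\thello\n"),
	data2:       nil,
	expected:    []string{"1|hello"},
	restData:    nil,
	expectedMsg: "Records: 1  Deleted: 0  Skipped: 0  Warnings: 0",
}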
func (s *testSuiteP1) TestSelectWithoutFrom(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("select 1 + 2*3;")
r.Check(testkit.Rows("7"))
r = tk.MustQuery(`select _utf8"string";`)
r.Check(testkit.Rows("string"))
r = tk.MustQuery("select 1 order by 1;")
r.Check(testkit.Rows("1"))
}
// TestSelectBackslashN Issue 3685.
func (s *testSuiteP1) TestSelectBackslashN(c *C) {
tk := testkit.NewTestKit(c, s.store)
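// In MySQL's grammar an unquoted \N is a literal NULL, while a quoted "\N"
// is just the string "N"; both readings are exercised below.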
sql := `select \N;`
r := tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err := tk.Exec(sql)
c.Check(err, IsNil)
fields := rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "NULL")
c.Assert(rs.Close(), IsNil)
sql = `select "\N";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("N"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `N`)
c.Assert(rs.Close(), IsNil)
tk.MustExec("use test;")
tk.MustExec("create table test (`\\N` int);")
tk.MustExec("insert into test values (1);")
tk.CheckExecResult(1, 0)
sql = "select * from test;"
r = tk.MustQuery(sql)
r.Check(testkit.Rows("1"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `\N`)
c.Assert(rs.Close(), IsNil)
sql = `select \N from test;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(err, IsNil)
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `NULL`)
c.Assert(rs.Close(), IsNil)
sql = `select (\N) from test;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `NULL`)
c.Assert(rs.Close(), IsNil)
sql = "select `\\N` from test;"
r = tk.MustQuery(sql)
r.Check(testkit.Rows("1"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `\N`)
c.Assert(rs.Close(), IsNil)
sql = "select (`\\N`) from test;"
r = tk.MustQuery(sql)
r.Check(testkit.Rows("1"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `\N`)
c.Assert(rs.Close(), IsNil)
sql = `select '\N' from test;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("N"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `N`)
c.Assert(rs.Close(), IsNil)
sql = `select ('\N') from test;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("N"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `N`)
c.Assert(rs.Close(), IsNil)
}
// TestSelectNull Issue #4053.
func (s *testSuiteP1) TestSelectNull(c *C) {
tk := testkit.NewTestKit(c, s.store)
sql := `select nUll;`
r := tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err := tk.Exec(sql)
c.Check(err, IsNil)
fields := rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `NULL`)
c.Assert(rs.Close(), IsNil)
sql = `select (null);`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `NULL`)
c.Assert(rs.Close(), IsNil)
sql = `select null+NULL;`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("<nil>"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(err, IsNil)
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `null+NULL`)
c.Assert(rs.Close(), IsNil)
}
// TestSelectStringLiteral Issue #3686.
func (s *testSuiteP1) TestSelectStringLiteral(c *C) {
tk := testkit.NewTestKit(c, s.store)
sql := `select 'abc';`
r := tk.MustQuery(sql)
r.Check(testkit.Rows("abc"))
rs, err := tk.Exec(sql)
c.Check(err, IsNil)
fields := rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `abc`)
c.Assert(rs.Close(), IsNil)
sql = `select (('abc'));`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("abc"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `abc`)
c.Assert(rs.Close(), IsNil)
sql = `select 'abc'+'def';`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("0"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, `'abc'+'def'`)
c.Assert(rs.Close(), IsNil)
// Below checks whether leading invalid chars are trimmed.
sql = "select '\n';"
r = tk.MustQuery(sql)
r.Check(testkit.Rows("\n"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "")
c.Assert(rs.Close(), IsNil)
sql = "select '\t col';" // Lowercased letter is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "col")
c.Assert(rs.Close(), IsNil)
sql = "select '\t Col';" // Uppercased letter is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "Col")
c.Assert(rs.Close(), IsNil)
sql = "select '\n\t 中文 col';" // Chinese char is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "中文 col")
c.Assert(rs.Close(), IsNil)
sql = "select ' \r\n .col';" // Punctuation is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, ".col")
c.Assert(rs.Close(), IsNil)
sql = "select ' 😆col';" // Emoji is a valid char.
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "😆col")
c.Assert(rs.Close(), IsNil)
// Below checks whether trailing invalid chars are preserved.
sql = `select 'abc ';`
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "abc ")
c.Assert(rs.Close(), IsNil)
sql = `select ' abc 123 ';`
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "abc 123 ")
c.Assert(rs.Close(), IsNil)
// Issue #4239.
sql = `select 'a' ' ' 'string';`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("a string"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "a")
c.Assert(rs.Close(), IsNil)
sql = `select 'a' " " "string";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("a string"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "a")
c.Assert(rs.Close(), IsNil)
sql = `select 'string' 'string';`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("stringstring"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "string")
c.Assert(rs.Close(), IsNil)
sql = `select "ss" "a";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("ssa"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "ss")
c.Assert(rs.Close(), IsNil)
sql = `select "ss" "a" "b";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("ssab"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "ss")
c.Assert(rs.Close(), IsNil)
sql = `select "ss" "a" ' ' "b";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("ssa b"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "ss")
c.Assert(rs.Close(), IsNil)
sql = `select "ss" "a" ' ' "b" ' ' "d";`
r = tk.MustQuery(sql)
r.Check(testkit.Rows("ssa b d"))
rs, err = tk.Exec(sql)
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.O, Equals, "ss")
c.Assert(rs.Close(), IsNil)
}
func (s *testSuiteP1) TestSelectLimit(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
s.fillData(tk, "select_limit")
tk.MustExec("insert INTO select_limit VALUES (3, \"hello\");")
tk.CheckExecResult(1, 0)
tk.MustExec("insert INTO select_limit VALUES (4, \"hello\");")
tk.CheckExecResult(1, 0)
r := tk.MustQuery("select * from select_limit limit 1;")
r.Check(testkit.Rows("1 hello"))
r = tk.MustQuery("select id from (select * from select_limit limit 1) k where id != 1;")
r.Check(testkit.Rows())
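// 18446744073709551615 is the maximum uint64, so the limits below effectively
// return all remaining rows; one more than that overflows and must error.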
r = tk.MustQuery("select * from select_limit limit 18446744073709551615 offset 0;")
r.Check(testkit.Rows("1 hello", "2 hello", "3 hello", "4 hello"))
r = tk.MustQuery("select * from select_limit limit 18446744073709551615 offset 1;")
r.Check(testkit.Rows("2 hello", "3 hello", "4 hello"))
r = tk.MustQuery("select * from select_limit limit 18446744073709551615 offset 3;")
r.Check(testkit.Rows("4 hello"))
err := tk.ExecToErr("select * from select_limit limit 18446744073709551616 offset 3;")
c.Assert(err, NotNil)
}
func (s *testSuiteP1) TestSelectOrderBy(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
s.fillData(tk, "select_order_test")
// Test star field
r := tk.MustQuery("select * from select_order_test where id = 1 order by id limit 1 offset 0;")
r.Check(testkit.Rows("1 hello"))
r = tk.MustQuery("select id from select_order_test order by id desc limit 1 ")
r.Check(testkit.Rows("2"))
r = tk.MustQuery("select id from select_order_test order by id + 1 desc limit 1 ")
r.Check(testkit.Rows("2"))
// Test limit
r = tk.MustQuery("select * from select_order_test order by name, id limit 1 offset 0;")
r.Check(testkit.Rows("1 hello"))
// Test limit
r = tk.MustQuery("select id as c1, name from select_order_test order by 2, id limit 1 offset 0;")
r.Check(testkit.Rows("1 hello"))
// Test limit overflow
r = tk.MustQuery("select * from select_order_test order by name, id limit 100 offset 0;")
r.Check(testkit.Rows("1 hello", "2 hello"))
// Test offset overflow
r = tk.MustQuery("select * from select_order_test order by name, id limit 1 offset 100;")
r.Check(testkit.Rows())
// Test limit exceeds int range.
r = tk.MustQuery("select id from select_order_test order by name, id limit 18446744073709551615;")
r.Check(testkit.Rows("1", "2"))
// Test multiple field
r = tk.MustQuery("select id, name from select_order_test where id = 1 group by id, name limit 1 offset 0;")
r.Check(testkit.Rows("1 hello"))
// Test limit + order by
for i := 3; i <= 10; i += 1 {
tk.MustExec(fmt.Sprintf("insert INTO select_order_test VALUES (%d, \"zz\");", i))
}
tk.MustExec("insert INTO select_order_test VALUES (10086, \"hi\");")
for i := 11; i <= 20; i += 1 {
tk.MustExec(fmt.Sprintf("insert INTO select_order_test VALUES (%d, \"hh\");", i))
}
for i := 21; i <= 30; i += 1 {
tk.MustExec(fmt.Sprintf("insert INTO select_order_test VALUES (%d, \"zz\");", i))
}
tk.MustExec("insert INTO select_order_test VALUES (1501, \"aa\");")
r = tk.MustQuery("select * from select_order_test order by name, id limit 1 offset 3;")
r.Check(testkit.Rows("11 hh"))
tk.MustExec("drop table select_order_test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c int, d int)")
tk.MustExec("insert t values (1, 1)")
tk.MustExec("insert t values (1, 2)")
tk.MustExec("insert t values (1, 3)")
r = tk.MustQuery("select 1-d as d from t order by d;")
r.Check(testkit.Rows("-2", "-1", "0"))
r = tk.MustQuery("select 1-d as d from t order by d + 1;")
r.Check(testkit.Rows("0", "-1", "-2"))
r = tk.MustQuery("select t.d from t order by d;")
r.Check(testkit.Rows("1", "2", "3"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int, b int, c int)")
tk.MustExec("insert t values (1, 2, 3)")
r = tk.MustQuery("select b from (select a,b from t order by a,c) t")
r.Check(testkit.Rows("2"))
r = tk.MustQuery("select b from (select a,b from t order by a,c limit 1) t")
r.Check(testkit.Rows("2"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, index idx(a))")
tk.MustExec("insert into t values(1, 1), (2, 2)")
tk.MustQuery("select * from t where 1 order by b").Check(testkit.Rows("1 1", "2 2"))
tk.MustQuery("select * from t where a between 1 and 2 order by a desc").Check(testkit.Rows("2 2", "1 1"))
// Test double read with topN pushed down to the first read plan.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key, b int, c int, index idx(b))")
tk.MustExec("insert into t values(1, 3, 1)")
tk.MustExec("insert into t values(2, 2, 2)")
tk.MustExec("insert into t values(3, 1, 3)")
tk.MustQuery("select * from t use index(idx) order by a desc limit 1").Check(testkit.Rows("3 1 3"))
// Test double read which needs to keep order.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, key b (b))")
tk.Se.GetSessionVars().IndexLookupSize = 3
for i := 0; i < 10; i++ {
tk.MustExec(fmt.Sprintf("insert into t values(%d, %d)", i, 10-i))
}
tk.MustQuery("select a from t use index(b) order by b").Check(testkit.Rows("9", "8", "7", "6", "5", "4", "3", "2", "1", "0"))
}
func (s *testSuiteP1) TestOrderBy(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 int, c2 int, c3 varchar(20))")
tk.MustExec("insert into t values (1, 2, 'abc'), (2, 1, 'bcd')")
// Fix issue https://github.com/pingcap/tidb/issues/337
tk.MustQuery("select c1 as a, c1 as b from t order by c1").Check(testkit.Rows("1 1", "2 2"))
tk.MustQuery("select c1 as a, t.c1 as a from t order by a desc").Check(testkit.Rows("2 2", "1 1"))
tk.MustQuery("select c1 as c2 from t order by c2").Check(testkit.Rows("1", "2"))
tk.MustQuery("select sum(c1) from t order by sum(c1)").Check(testkit.Rows("3"))
tk.MustQuery("select c1 as c2 from t order by c2 + 1").Check(testkit.Rows("2", "1"))
// Order by position.
tk.MustQuery("select * from t order by 1").Check(testkit.Rows("1 2 abc", "2 1 bcd"))
tk.MustQuery("select * from t order by 2").Check(testkit.Rows("2 1 bcd", "1 2 abc"))
// Order by binary.
tk.MustQuery("select c1, c3 from t order by binary c1 desc").Check(testkit.Rows("2 bcd", "1 abc"))
tk.MustQuery("select c1, c2 from t order by binary c3").Check(testkit.Rows("1 2", "2 1"))
}
func (s *testSuiteP1) TestSelectErrorRow(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
err := tk.ExecToErr("select row(1, 1) from test")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test group by row(1, 1);")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test order by row(1, 1);")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test having row(1, 1);")
c.Assert(err, NotNil)
err = tk.ExecToErr("select (select 1, 1) from test;")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test group by (select 1, 1);")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test order by (select 1, 1);")
c.Assert(err, NotNil)
err = tk.ExecToErr("select * from test having (select 1, 1);")
c.Assert(err, NotNil)
}
// TestIssue2612 is related with https://github.com/pingcap/tidb/issues/2612
func (s *testSuiteP1) TestIssue2612(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t (
create_at datetime NOT NULL DEFAULT '1000-01-01 00:00:00',
finish_at datetime NOT NULL DEFAULT '1000-01-01 00:00:00');`)
tk.MustExec(`insert into t values ('2016-02-13 15:32:24', '2016-02-11 17:23:22');`)
rs, err := tk.Exec(`select timediff(finish_at, create_at) from t;`)
c.Assert(err, IsNil)
req := rs.NewChunk(nil)
err = rs.Next(context.Background(), req)
c.Assert(err, IsNil)
c.Assert(req.GetRow(0).GetDuration(0, 0).String(), Equals, "-46:09:02")
c.Assert(rs.Close(), IsNil)
}
// TestIssue345 is related with https://github.com/pingcap/tidb/issues/345
func (s *testSuiteP1) TestIssue345(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t1, t2`)
tk.MustExec(`create table t1 (c1 int);`)
tk.MustExec(`create table t2 (c2 int);`)
tk.MustExec(`insert into t1 values (1);`)
tk.MustExec(`insert into t2 values (2);`)
tk.MustExec(`update t1, t2 set t1.c1 = 2, t2.c2 = 1;`)
tk.MustExec(`update t1, t2 set c1 = 2, c2 = 1;`)
tk.MustExec(`update t1 as a, t2 as b set a.c1 = 2, b.c2 = 1;`)
// Check t1 content
r := tk.MustQuery("SELECT * FROM t1;")
r.Check(testkit.Rows("2"))
// Check t2 content
r = tk.MustQuery("SELECT * FROM t2;")
r.Check(testkit.Rows("1"))
tk.MustExec(`update t1 as a, t2 as t1 set a.c1 = 1, t1.c2 = 2;`)
// Check t1 content
r = tk.MustQuery("SELECT * FROM t1;")
r.Check(testkit.Rows("1"))
// Check t2 content
r = tk.MustQuery("SELECT * FROM t2;")
r.Check(testkit.Rows("2"))
_, err := tk.Exec(`update t1 as a, t2 set t1.c1 = 10;`)
c.Assert(err, NotNil)
}
func (s *testSuiteP1) TestIssue5055(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t1, t2`)
tk.MustExec(`create table t1 (a int);`)
tk.MustExec(`create table t2 (a int);`)
tk.MustExec(`insert into t1 values(1);`)
tk.MustExec(`insert into t2 values(1);`)
result := tk.MustQuery("select tbl1.* from (select t1.a, 1 from t1) tbl1 left join t2 tbl2 on tbl1.a = tbl2.a order by tbl1.a desc limit 1;")
result.Check(testkit.Rows("1 1"))
}
func (s *testSuiteWithData) TestSetOperation(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t1, t2, t3`)
tk.MustExec(`create table t1(a int)`)
tk.MustExec(`create table t2 like t1`)
tk.MustExec(`create table t3 like t1`)
tk.MustExec(`insert into t1 values (1),(1),(2),(3),(null)`)
tk.MustExec(`insert into t2 values (1),(2),(null),(null)`)
tk.MustExec(`insert into t3 values (2),(3)`)
var input []string
var output []struct {
SQL string
Plan []string
Res []string
}
s.testData.GetTestCases(c, &input, &output)
for i, tt := range input {
s.testData.OnRecord(func() {
output[i].SQL = tt
output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery("explain " + tt).Rows())
output[i].Res = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Sort().Rows())
})
tk.MustQuery("explain " + tt).Check(testkit.Rows(output[i].Plan...))
tk.MustQuery(tt).Sort().Check(testkit.Rows(output[i].Res...))
}
}
func (s *testSuiteWithData) TestSetOperationOnDiffColType(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t1, t2, t3`)
tk.MustExec(`create table t1(a int, b int)`)
tk.MustExec(`create table t2(a int, b varchar(20))`)
tk.MustExec(`create table t3(a int, b decimal(30,10))`)
tk.MustExec(`insert into t1 values (1,1),(1,1),(2,2),(3,3),(null,null)`)
tk.MustExec(`insert into t2 values (1,'1'),(2,'2'),(null,null),(null,'3')`)
tk.MustExec(`insert into t3 values (2,2.1),(3,3)`)
var input []string
var output []struct {
SQL string
Plan []string
Res []string
}
s.testData.GetTestCases(c, &input, &output)
for i, tt := range input {
s.testData.OnRecord(func() {
output[i].SQL = tt
output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery("explain " + tt).Rows())
output[i].Res = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Sort().Rows())
})
tk.MustQuery("explain " + tt).Check(testkit.Rows(output[i].Plan...))
tk.MustQuery(tt).Sort().Check(testkit.Rows(output[i].Res...))
}
}
// issue-23038: wrong key range of index scan for year column
func (s *testSuiteWithData) TestIndexScanWithYearCol(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t (c1 year(4), c2 int, key(c1));")
tk.MustExec("insert into t values(2001, 1);")
var input []string
var output []struct {
SQL string
Plan []string
Res []string
}
s.testData.GetTestCases(c, &input, &output)
for i, tt := range input {
s.testData.OnRecord(func() {
output[i].SQL = tt
output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery("explain format = 'brief' " + tt).Rows())
output[i].Res = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Sort().Rows())
})
tk.MustQuery("explain format = 'brief' " + tt).Check(testkit.Rows(output[i].Plan...))
tk.MustQuery(tt).Sort().Check(testkit.Rows(output[i].Res...))
}
}
func (s *testSuiteP2) TestUnion(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
testSQL := `drop table if exists union_test; create table union_test(id int);`
tk.MustExec(testSQL)
testSQL = `drop table if exists union_test;`
tk.MustExec(testSQL)
testSQL = `create table union_test(id int);`
tk.MustExec(testSQL)
testSQL = `insert union_test values (1),(2)`
tk.MustExec(testSQL)
testSQL = `select * from (select id from union_test union select id from union_test) t order by id;`
r := tk.MustQuery(testSQL)
r.Check(testkit.Rows("1", "2"))
r = tk.MustQuery("select 1 union all select 1")
r.Check(testkit.Rows("1", "1"))
r = tk.MustQuery("select 1 union all select 1 union select 1")
r.Check(testkit.Rows("1"))
r = tk.MustQuery("select 1 as a union (select 2) order by a limit 1")
r.Check(testkit.Rows("1"))
r = tk.MustQuery("select 1 as a union (select 2) order by a limit 1, 1")
r.Check(testkit.Rows("2"))
r = tk.MustQuery("select id from union_test union all (select 1) order by id desc")
r.Check(testkit.Rows("2", "1", "1"))
r = tk.MustQuery("select id as a from union_test union (select 1) order by a desc")
r.Check(testkit.Rows("2", "1"))
r = tk.MustQuery(`select null as a union (select "abc") order by a`)
r.Check(testkit.Rows("<nil>", "abc"))
r = tk.MustQuery(`select "abc" as a union (select 1) order by a`)
r.Check(testkit.Rows("1", "abc"))
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (c int, d int)")
tk.MustExec("insert t1 values (NULL, 1)")
tk.MustExec("insert t1 values (1, 1)")
tk.MustExec("insert t1 values (1, 2)")
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t2 (c int, d int)")
tk.MustExec("insert t2 values (1, 3)")
tk.MustExec("insert t2 values (1, 1)")
tk.MustExec("drop table if exists t3")
tk.MustExec("create table t3 (c int, d int)")
tk.MustExec("insert t3 values (3, 2)")
tk.MustExec("insert t3 values (4, 3)")
r = tk.MustQuery(`select sum(c1), c2 from (select c c1, d c2 from t1 union all select d c1, c c2 from t2 union all select c c1, d c2 from t3) x group by c2 order by c2`)
r.Check(testkit.Rows("5 1", "4 2", "4 3"))
tk.MustExec("drop table if exists t1, t2, t3")
tk.MustExec("create table t1 (a int primary key)")
tk.MustExec("create table t2 (a int primary key)")
tk.MustExec("create table t3 (a int primary key)")
tk.MustExec("insert t1 values (7), (8)")
tk.MustExec("insert t2 values (1), (9)")
tk.MustExec("insert t3 values (2), (3)")
r = tk.MustQuery("select * from t1 union all select * from t2 union all (select * from t3) order by a limit 2")
r.Check(testkit.Rows("1", "2"))
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1 (a int)")
tk.MustExec("create table t2 (a int)")
tk.MustExec("insert t1 values (2), (1)")
tk.MustExec("insert t2 values (3), (4)")
r = tk.MustQuery("select * from t1 union all (select * from t2) order by a limit 1")
r.Check(testkit.Rows("1"))
r = tk.MustQuery("select (select * from t1 where a != t.a union all (select * from t2 where a != t.a) order by a limit 1) from t1 t")
r.Check(testkit.Rows("1", "2"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (id int unsigned primary key auto_increment, c1 int, c2 int, index c1_c2 (c1, c2))")
tk.MustExec("insert into t (c1, c2) values (1, 1)")
tk.MustExec("insert into t (c1, c2) values (1, 2)")
tk.MustExec("insert into t (c1, c2) values (2, 3)")
r = tk.MustQuery("select * from (select * from t where t.c1 = 1 union select * from t where t.id = 1) s order by s.id")
r.Check(testkit.Rows("1 1 1", "2 1 2"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (f1 DATE)")
tk.MustExec("INSERT INTO t VALUES ('1978-11-26')")
r = tk.MustQuery("SELECT f1+0 FROM t UNION SELECT f1+0 FROM t")
r.Check(testkit.Rows("19781126"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a int, b int)")
tk.MustExec("INSERT INTO t VALUES ('1', '1')")
r = tk.MustQuery("select b from (SELECT * FROM t UNION ALL SELECT a, b FROM t order by a) t")
r.Check(testkit.Rows("1", "1"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a DECIMAL(4,2))")
tk.MustExec("INSERT INTO t VALUE(12.34)")
r = tk.MustQuery("SELECT 1 AS c UNION select a FROM t")
r.Sort().Check(testkit.Rows("1.00", "12.34"))
	// #issue 3771
r = tk.MustQuery("SELECT 'a' UNION SELECT CONCAT('a', -4)")
r.Sort().Check(testkit.Rows("a", "a-4"))
	// Test for a data race when user variables are evaluated by concurrent UNION ALL workers.
tk.MustQuery("SELECT @x:=0 UNION ALL SELECT @x:=0 UNION ALL SELECT @x")
	// Test result field type inference for UNION.
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("CREATE TABLE t1 (a date)")
tk.MustExec("CREATE TABLE t2 (a date)")
tk.MustExec("SELECT a from t1 UNION select a FROM t2")
tk.MustQuery("show create table t1").Check(testkit.Rows("t1 CREATE TABLE `t1` (\n" + " `a` date DEFAULT NULL\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
	// Moved from the session test.
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1 (c double);")
tk.MustExec("create table t2 (c double);")
tk.MustExec("insert into t1 value (73);")
tk.MustExec("insert into t2 value (930);")
	// Setting the flen of an unspecified column to 0 used to cause a bug in union.
	// This test is used to prevent the bug from reappearing.
tk.MustQuery("select c from t1 union (select c from t2) order by c").Check(testkit.Rows("73", "930"))
// issue 5703
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a date)")
tk.MustExec("insert into t value ('2017-01-01'), ('2017-01-02')")
r = tk.MustQuery("(select a from t where a < 0) union (select a from t where a > 0) order by a")
r.Check(testkit.Rows("2017-01-01", "2017-01-02"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t value(0),(0)")
tk.MustQuery("select 1 from (select a from t union all select a from t) tmp").Check(testkit.Rows("1", "1", "1", "1"))
tk.MustQuery("select 10 as a from dual union select a from t order by a desc limit 1 ").Check(testkit.Rows("10"))
tk.MustQuery("select -10 as a from dual union select a from t order by a limit 1 ").Check(testkit.Rows("-10"))
tk.MustQuery("select count(1) from (select a from t union all select a from t) tmp").Check(testkit.Rows("4"))
err := tk.ExecToErr("select 1 from (select a from t limit 1 union all select a from t limit 1) tmp")
c.Assert(err, NotNil)
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrWrongUsage))
err = tk.ExecToErr("select 1 from (select a from t order by a union all select a from t limit 1) tmp")
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrWrongUsage))
_, err = tk.Exec("(select a from t order by a) union all select a from t limit 1 union all select a from t limit 1")
c.Assert(terror.ErrorEqual(err, plannercore.ErrWrongUsage), IsTrue, Commentf("err %v", err))
_, err = tk.Exec("(select a from t limit 1) union all select a from t limit 1")
c.Assert(err, IsNil)
_, err = tk.Exec("(select a from t order by a) union all select a from t order by a")
c.Assert(err, IsNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t value(1),(2),(3)")
tk.MustQuery("(select a from t order by a limit 2) union all (select a from t order by a desc limit 2) order by a desc limit 1,2").Check(testkit.Rows("2", "2"))
tk.MustQuery("select a from t union all select a from t order by a desc limit 5").Check(testkit.Rows("3", "3", "2", "2", "1"))
tk.MustQuery("(select a from t order by a desc limit 2) union all select a from t group by a order by a").Check(testkit.Rows("1", "2", "2", "3", "3"))
tk.MustQuery("(select a from t order by a desc limit 2) union all select 33 as a order by a desc limit 2").Check(testkit.Rows("33", "3"))
tk.MustQuery("select 1 union select 1 union all select 1").Check(testkit.Rows("1", "1"))
tk.MustQuery("select 1 union all select 1 union select 1").Check(testkit.Rows("1"))
tk.MustExec("drop table if exists t1, t2")
tk.MustExec(`create table t1(a bigint, b bigint);`)
tk.MustExec(`create table t2(a bigint, b bigint);`)
tk.MustExec(`insert into t1 values(1, 1);`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t1 select * from t1;`)
tk.MustExec(`insert into t2 values(1, 1);`)
tk.MustExec(`set @@tidb_init_chunk_size=2;`)
tk.MustExec(`set @@sql_mode="";`)
tk.MustQuery(`select count(*) from (select t1.a, t1.b from t1 left join t2 on t1.a=t2.a union all select t1.a, t1.a from t1 left join t2 on t1.a=t2.a) tmp;`).Check(testkit.Rows("128"))
tk.MustQuery(`select tmp.a, count(*) from (select t1.a, t1.b from t1 left join t2 on t1.a=t2.a union all select t1.a, t1.a from t1 left join t2 on t1.a=t2.a) tmp;`).Check(testkit.Rows("1 128"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int)")
tk.MustExec("insert into t value(1 ,2)")
tk.MustQuery("select a, b from (select a, 0 as d, b from t union all select a, 0 as d, b from t) test;").Check(testkit.Rows("1 2", "1 2"))
// #issue 8141
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1(a int, b int)")
tk.MustExec("insert into t1 value(1,2),(1,1),(2,2),(2,2),(3,2),(3,2)")
tk.MustExec("set @@tidb_init_chunk_size=2;")
tk.MustQuery("select count(*) from (select a as c, a as d from t1 union all select a, b from t1) t;").Check(testkit.Rows("12"))
// #issue 8189 and #issue 8199
tk.MustExec("drop table if exists t1")
tk.MustExec("drop table if exists t2")
tk.MustExec("CREATE TABLE t1 (a int not null, b char (10) not null)")
tk.MustExec("insert into t1 values(1,'a'),(2,'b'),(3,'c'),(3,'c')")
tk.MustExec("CREATE TABLE t2 (a int not null, b char (10) not null)")
tk.MustExec("insert into t2 values(1,'a'),(2,'b'),(3,'c'),(3,'c')")
tk.MustQuery("select a from t1 union select a from t1 order by (select a+1);").Check(testkit.Rows("1", "2", "3"))
// #issue 8201
for i := 0; i < 4; i++ {
tk.MustQuery("SELECT(SELECT 0 AS a FROM dual UNION SELECT 1 AS a FROM dual ORDER BY a ASC LIMIT 1) AS dev").Check(testkit.Rows("0"))
}
// #issue 8231
tk.MustExec("drop table if exists t1")
tk.MustExec("CREATE TABLE t1 (uid int(1))")
tk.MustExec("INSERT INTO t1 SELECT 150")
tk.MustQuery("SELECT 'a' UNION SELECT uid FROM t1 order by 1 desc;").Check(testkit.Rows("a", "150"))
// #issue 8196
tk.MustExec("drop table if exists t1")
tk.MustExec("drop table if exists t2")
tk.MustExec("CREATE TABLE t1 (a int not null, b char (10) not null)")
tk.MustExec("insert into t1 values(1,'a'),(2,'b'),(3,'c'),(3,'c')")
tk.MustExec("CREATE TABLE t2 (a int not null, b char (10) not null)")
tk.MustExec("insert into t2 values(3,'c'),(4,'d'),(5,'f'),(6,'e')")
tk.MustExec("analyze table t1")
tk.MustExec("analyze table t2")
_, err = tk.Exec("(select a,b from t1 limit 2) union all (select a,b from t2 order by a limit 1) order by t1.b")
c.Assert(err.Error(), Equals, "[planner:1250]Table 't1' from one of the SELECTs cannot be used in global ORDER clause")
// #issue 9900
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b decimal(6, 3))")
tk.MustExec("insert into t values(1, 1.000)")
tk.MustQuery("select count(distinct a), sum(distinct a), avg(distinct a) from (select a from t union all select b from t) tmp;").Check(testkit.Rows("1 1.000 1.0000000"))
// #issue 23832
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a bit(20), b float, c double, d int)")
tk.MustExec("insert into t values(10, 10, 10, 10), (1, -1, 2, -2), (2, -2, 1, 1), (2, 1.1, 2.1, 10.1)")
tk.MustQuery("select a from t union select 10 order by a").Check(testkit.Rows("1", "2", "10"))
}
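// TestUnionLimit reads a limited number of rows from a hash-partitioned table.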
func (s *testSuite2) TestUnionLimit(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists union_limit")
tk.MustExec("create table union_limit (id int) partition by hash(id) partitions 30")
for i := 0; i < 60; i++ {
tk.MustExec(fmt.Sprintf("insert into union_limit values (%d)", i))
}
// Cover the code for worker count limit in the union executor.
tk.MustQuery("select * from union_limit limit 10")
}
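// TestNeighbouringProj tests queries whose plans contain adjacent Projection operators.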
func (s *testSuiteP1) TestNeighbouringProj(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1(a int, b int)")
tk.MustExec("create table t2(a int, b int)")
tk.MustExec("insert into t1 value(1, 1), (2, 2)")
tk.MustExec("insert into t2 value(1, 1), (2, 2)")
tk.MustQuery("select sum(c) from (select t1.a as a, t1.a as c, length(t1.b) from t1 union select a, b, b from t2) t;").Check(testkit.Rows("5"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a bigint, b bigint, c bigint);")
tk.MustExec("insert into t values(1, 1, 1), (2, 2, 2), (3, 3, 3);")
rs := tk.MustQuery("select cast(count(a) as signed), a as another, a from t group by a order by cast(count(a) as signed), a limit 10;")
rs.Check(testkit.Rows("1 1 1", "1 2 2", "1 3 3"))
}
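// TestIn tests IN predicates whose string constants are converted to the integer column type.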
func (s *testSuiteP1) TestIn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t (c1 int primary key, c2 int, key c (c2));`)
for i := 0; i <= 200; i++ {
tk.MustExec(fmt.Sprintf("insert t values(%d, %d)", i, i))
}
queryStr := `select c2 from t where c1 in ('7', '10', '112', '111', '98', '106', '100', '9', '18', '17') order by c2`
r := tk.MustQuery(queryStr)
r.Check(testkit.Rows("7", "9", "10", "17", "18", "98", "100", "106", "111", "112"))
queryStr = `select c2 from t where c1 in ('7a')`
tk.MustQuery(queryStr).Check(testkit.Rows("7"))
}
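// TestTablePKisHandleScan tests range scans on a table whose integer primary key is the row handle.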
func (s *testSuiteP1) TestTablePKisHandleScan(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int PRIMARY KEY AUTO_INCREMENT)")
tk.MustExec("insert t values (),()")
tk.MustExec("insert t values (-100),(0)")
tests := []struct {
sql string
result [][]interface{}
}{
{
"select * from t",
testkit.Rows("-100", "1", "2", "3"),
},
{
"select * from t where a = 1",
testkit.Rows("1"),
},
{
"select * from t where a != 1",
testkit.Rows("-100", "2", "3"),
},
{
"select * from t where a >= '1.1'",
testkit.Rows("2", "3"),
},
{
"select * from t where a < '1.1'",
testkit.Rows("-100", "1"),
},
{
"select * from t where a > '-100.1' and a < 2",
testkit.Rows("-100", "1"),
},
{
"select * from t where a is null",
testkit.Rows(),
}, {
"select * from t where a is true",
testkit.Rows("-100", "1", "2", "3"),
}, {
"select * from t where a is false",
testkit.Rows(),
},
{
"select * from t where a in (1, 2)",
testkit.Rows("1", "2"),
},
{
"select * from t where a between 1 and 2",
testkit.Rows("1", "2"),
},
}
for _, tt := range tests {
result := tk.MustQuery(tt.sql)
result.Check(tt.result)
}
}
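// TestIndexScan tests index range scans, including double read, covering indexes and top-N.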
func (s *testSuite8) TestIndexScan(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unique)")
tk.MustExec("insert t values (-1), (2), (3), (5), (6), (7), (8), (9)")
result := tk.MustQuery("select a from t where a < 0 or (a >= 2.1 and a < 5.1) or ( a > 5.9 and a <= 7.9) or a > '8.1'")
result.Check(testkit.Rows("-1", "3", "5", "6", "7", "9"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unique)")
tk.MustExec("insert t values (0)")
result = tk.MustQuery("select NULL from t ")
result.Check(testkit.Rows("<nil>"))
	// Test for double read.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unique, b int)")
tk.MustExec("insert t values (5, 0)")
tk.MustExec("insert t values (4, 0)")
tk.MustExec("insert t values (3, 0)")
tk.MustExec("insert t values (2, 0)")
tk.MustExec("insert t values (1, 0)")
tk.MustExec("insert t values (0, 0)")
result = tk.MustQuery("select * from t order by a limit 3")
result.Check(testkit.Rows("0 0", "1 0", "2 0"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unique, b int)")
tk.MustExec("insert t values (0, 1)")
tk.MustExec("insert t values (1, 2)")
tk.MustExec("insert t values (2, 1)")
tk.MustExec("insert t values (3, 2)")
tk.MustExec("insert t values (4, 1)")
tk.MustExec("insert t values (5, 2)")
result = tk.MustQuery("select * from t where a < 5 and b = 1 limit 2")
result.Check(testkit.Rows("0 1", "2 1"))
tk.MustExec("drop table if exists tab1")
tk.MustExec("CREATE TABLE tab1(pk INTEGER PRIMARY KEY, col0 INTEGER, col1 FLOAT, col3 INTEGER, col4 FLOAT)")
tk.MustExec("CREATE INDEX idx_tab1_0 on tab1 (col0)")
tk.MustExec("CREATE INDEX idx_tab1_1 on tab1 (col1)")
tk.MustExec("CREATE INDEX idx_tab1_3 on tab1 (col3)")
tk.MustExec("CREATE INDEX idx_tab1_4 on tab1 (col4)")
tk.MustExec("INSERT INTO tab1 VALUES(1,37,20.85,30,10.69)")
result = tk.MustQuery("SELECT pk FROM tab1 WHERE ((col3 <= 6 OR col3 < 29 AND (col0 < 41)) OR col3 > 42) AND col1 >= 96.1 AND col3 = 30 AND col3 > 17 AND (col0 BETWEEN 36 AND 42)")
result.Check(testkit.Rows())
tk.MustExec("drop table if exists tab1")
tk.MustExec("CREATE TABLE tab1(pk INTEGER PRIMARY KEY, a INTEGER, b INTEGER)")
tk.MustExec("CREATE INDEX idx_tab1_0 on tab1 (a)")
tk.MustExec("INSERT INTO tab1 VALUES(1,1,1)")
tk.MustExec("INSERT INTO tab1 VALUES(2,2,1)")
tk.MustExec("INSERT INTO tab1 VALUES(3,1,2)")
tk.MustExec("INSERT INTO tab1 VALUES(4,2,2)")
result = tk.MustQuery("SELECT * FROM tab1 WHERE pk <= 3 AND a = 1")
result.Check(testkit.Rows("1 1 1", "3 1 2"))
result = tk.MustQuery("SELECT * FROM tab1 WHERE pk <= 4 AND a = 1 AND b = 2")
result.Check(testkit.Rows("3 1 2"))
tk.MustExec("CREATE INDEX idx_tab1_1 on tab1 (b, a)")
result = tk.MustQuery("SELECT pk FROM tab1 WHERE b > 1")
result.Check(testkit.Rows("3", "4"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a varchar(3), index(a))")
tk.MustExec("insert t values('aaa'), ('aab')")
result = tk.MustQuery("select * from t where a >= 'aaaa' and a < 'aabb'")
result.Check(testkit.Rows("aab"))
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a int primary key, b int, c int, index(c))")
tk.MustExec("insert t values(1, 1, 1), (2, 2, 2), (4, 4, 4), (3, 3, 3), (5, 5, 5)")
	// Test for double read and top-N.
result = tk.MustQuery("select a from t where c >= 2 order by b desc limit 1")
result.Check(testkit.Rows("5"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a varchar(50) primary key, b int, c int, index idx(b))")
tk.MustExec("insert into t values('aa', 1, 1)")
tk.MustQuery("select * from t use index(idx) where a > 'a'").Check(testkit.Rows("aa 1 1"))
	// Fix for issue 9636.
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE `t` (a int, KEY (a))")
result = tk.MustQuery(`SELECT * FROM (SELECT * FROM (SELECT a as d FROM t WHERE a IN ('100')) AS x WHERE x.d < "123" ) tmp_count`)
result.Check(testkit.Rows())
}
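// TestIndexReverseOrder tests scanning an index in descending order.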
func (s *testSuiteP1) TestIndexReverseOrder(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key auto_increment, b int, index idx (b))")
tk.MustExec("insert t (b) values (0), (1), (2), (3), (4), (5), (6), (7), (8), (9)")
result := tk.MustQuery("select b from t order by b desc")
result.Check(testkit.Rows("9", "8", "7", "6", "5", "4", "3", "2", "1", "0"))
result = tk.MustQuery("select b from t where b <3 or (b >=6 and b < 8) order by b desc")
result.Check(testkit.Rows("7", "6", "2", "1", "0"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int, b int, index idx (b, a))")
tk.MustExec("insert t values (0, 2), (1, 2), (2, 2), (0, 1), (1, 1), (2, 1), (0, 0), (1, 0), (2, 0)")
result = tk.MustQuery("select b, a from t order by b, a desc")
result.Check(testkit.Rows("0 2", "0 1", "0 0", "1 2", "1 1", "1 0", "2 2", "2 1", "2 0"))
}
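// TestTableReverseOrder tests scanning a table in descending handle order.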
func (s *testSuiteP1) TestTableReverseOrder(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key auto_increment, b int)")
tk.MustExec("insert t (b) values (1), (2), (3), (4), (5), (6), (7), (8), (9)")
result := tk.MustQuery("select b from t order by a desc")
result.Check(testkit.Rows("9", "8", "7", "6", "5", "4", "3", "2", "1"))
result = tk.MustQuery("select a from t where a <3 or (a >=6 and a < 8) order by a desc")
result.Check(testkit.Rows("7", "6", "2", "1"))
}
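// TestDefaultNull tests DEFAULT values and NULL handling across INSERT, UPDATE and DELETE.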
func (s *testSuiteP1) TestDefaultNull(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key auto_increment, b int default 1, c int)")
tk.MustExec("insert t values ()")
tk.MustQuery("select * from t").Check(testkit.Rows("1 1 <nil>"))
tk.MustExec("update t set b = NULL where a = 1")
tk.MustQuery("select * from t").Check(testkit.Rows("1 <nil> <nil>"))
tk.MustExec("update t set c = 1")
tk.MustQuery("select * from t ").Check(testkit.Rows("1 <nil> 1"))
tk.MustExec("delete from t where a = 1")
tk.MustExec("insert t (a) values (1)")
tk.MustQuery("select * from t").Check(testkit.Rows("1 1 <nil>"))
}
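// TestUnsignedPKColumn tests reads and writes on a table with an unsigned primary key.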
func (s *testSuiteP1) TestUnsignedPKColumn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int unsigned primary key, b int, c int, key idx_ba (b, c, a));")
tk.MustExec("insert t values (1, 1, 1)")
result := tk.MustQuery("select * from t;")
result.Check(testkit.Rows("1 1 1"))
tk.MustExec("update t set c=2 where a=1;")
result = tk.MustQuery("select * from t where b=1;")
result.Check(testkit.Rows("1 1 2"))
}
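// TestJSON tests the JSON column type: literals, json_type, comparisons with
// primitives, the -> and ->> operators, and related DDL restrictions.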
func (s *testSuiteP1) TestJSON(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists test_json")
tk.MustExec("create table test_json (id int, a json)")
tk.MustExec(`insert into test_json (id, a) values (1, '{"a":[1,"2",{"aa":"bb"},4],"b":true}')`)
tk.MustExec(`insert into test_json (id, a) values (2, "null")`)
tk.MustExec(`insert into test_json (id, a) values (3, null)`)
tk.MustExec(`insert into test_json (id, a) values (4, 'true')`)
tk.MustExec(`insert into test_json (id, a) values (5, '3')`)
tk.MustExec(`insert into test_json (id, a) values (5, '4.0')`)
tk.MustExec(`insert into test_json (id, a) values (6, '"string"')`)
result := tk.MustQuery(`select tj.a from test_json tj order by tj.id`)
result.Check(testkit.Rows(`{"a": [1, "2", {"aa": "bb"}, 4], "b": true}`, "null", "<nil>", "true", "3", "4", `"string"`))
	// Check the json_type function.
result = tk.MustQuery(`select json_type(a) from test_json tj order by tj.id`)
result.Check(testkit.Rows("OBJECT", "NULL", "<nil>", "BOOLEAN", "INTEGER", "DOUBLE", "STRING"))
	// Check JSON comparisons with primitives.
result = tk.MustQuery(`select a from test_json tj where a = 3`)
result.Check(testkit.Rows("3"))
result = tk.MustQuery(`select a from test_json tj where a = 4.0`)
result.Check(testkit.Rows("4"))
result = tk.MustQuery(`select a from test_json tj where a = true`)
result.Check(testkit.Rows("true"))
result = tk.MustQuery(`select a from test_json tj where a = "string"`)
result.Check(testkit.Rows(`"string"`))
// Check cast(true/false as JSON).
result = tk.MustQuery(`select cast(true as JSON)`)
result.Check(testkit.Rows(`true`))
result = tk.MustQuery(`select cast(false as JSON)`)
result.Check(testkit.Rows(`false`))
	// Check the two JSON syntactic-sugar operators, -> and ->>.
result = tk.MustQuery(`select a->>'$.a[2].aa' as x, a->'$.b' as y from test_json having x is not null order by id`)
result.Check(testkit.Rows(`bb true`))
result = tk.MustQuery(`select a->'$.a[2].aa' as x, a->>'$.b' as y from test_json having x is not null order by id`)
result.Check(testkit.Rows(`"bb" true`))
	// Check some DDL limits for TEXT/BLOB/JSON columns.
var err error
var terr *terror.Error
_, err = tk.Exec(`create table test_bad_json(a json default '{}')`)
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrBlobCantHaveDefault))
_, err = tk.Exec(`create table test_bad_json(a blob default 'hello')`)
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrBlobCantHaveDefault))
_, err = tk.Exec(`create table test_bad_json(a text default 'world')`)
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrBlobCantHaveDefault))
	// Check that JSON fields cannot be used as a key.
_, err = tk.Exec(`create table test_bad_json(id int, a json, key (a))`)
c.Assert(err, NotNil)
terr = errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrJSONUsedAsKey))
	// Check CAST AS JSON.
result = tk.MustQuery(`select CAST('3' AS JSON), CAST('{}' AS JSON), CAST(null AS JSON)`)
result.Check(testkit.Rows(`3 {} <nil>`))
tk.MustQuery("select a, count(1) from test_json group by a order by a").Check(testkit.Rows(
"<nil> 1",
"null 1",
"3 1",
"4 1",
`"string" 1`,
"{\"a\": [1, \"2\", {\"aa\": \"bb\"}, 4], \"b\": true} 1",
"true 1"))
	// Check casting JSON to decimal.
	// NOTE: the commented-out case below triggers a known bug; uncomment it after the bug is fixed.
// TODO: Fix bug https://github.com/pingcap/tidb/issues/12178
// tk.MustExec("drop table if exists test_json")
// tk.MustExec("create table test_json ( a decimal(60,2) as (JSON_EXTRACT(b,'$.c')), b json );")
// tk.MustExec(`insert into test_json (b) values
// ('{"c": "1267.1"}'),
// ('{"c": "1267.01"}'),
// ('{"c": "1267.1234"}'),
// ('{"c": "1267.3456"}'),
// ('{"c": "1234567890123456789012345678901234567890123456789012345"}'),
// ('{"c": "1234567890123456789012345678901234567890123456789012345.12345"}');`)
//
// tk.MustQuery("select a from test_json;").Check(testkit.Rows("1267.10", "1267.01", "1267.12",
// "1267.35", "1234567890123456789012345678901234567890123456789012345.00",
// "1234567890123456789012345678901234567890123456789012345.12"))
}
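// TestMultiUpdate tests assignment-list semantics. Note the difference shown by
// the expected results below: INSERT ... ON DUPLICATE KEY UPDATE assignments see
// the new values of earlier assignments, while plain UPDATE set lists are
// evaluated against the original row values.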
func (s *testSuiteP1) TestMultiUpdate(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`CREATE TABLE test_mu (a int primary key, b int, c int)`)
tk.MustExec(`INSERT INTO test_mu VALUES (1, 2, 3), (4, 5, 6), (7, 8, 9)`)
	// Test INSERT ... ON DUPLICATE KEY UPDATE set lists.
tk.MustExec(`INSERT INTO test_mu VALUES (1, 2, 3) ON DUPLICATE KEY UPDATE b = 3, c = b`)
result := tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 3 3`, `4 5 6`, `7 8 9`))
tk.MustExec(`INSERT INTO test_mu VALUES (1, 2, 3) ON DUPLICATE KEY UPDATE c = 2, b = c+5`)
result = tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 7 2`, `4 5 6`, `7 8 9`))
	// Test UPDATE ... SET lists.
tk.MustExec(`UPDATE test_mu SET b = 0, c = b WHERE a = 4`)
result = tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 7 2`, `4 0 5`, `7 8 9`))
tk.MustExec(`UPDATE test_mu SET c = 8, b = c WHERE a = 4`)
result = tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 7 2`, `4 5 8`, `7 8 9`))
tk.MustExec(`UPDATE test_mu SET c = b, b = c WHERE a = 7`)
result = tk.MustQuery(`SELECT * FROM test_mu ORDER BY a`)
result.Check(testkit.Rows(`1 7 2`, `4 5 8`, `7 9 8`))
}
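// TestGeneratedColumnWrite tests that generated columns cannot be assigned
// directly, while writes that omit them succeed.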
func (s *testSuiteP1) TestGeneratedColumnWrite(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
_, err := tk.Exec(`CREATE TABLE test_gc_write (a int primary key auto_increment, b int, c int as (a+8) virtual)`)
c.Assert(err.Error(), Equals, ddl.ErrGeneratedColumnRefAutoInc.GenWithStackByArgs("c").Error())
tk.MustExec(`CREATE TABLE test_gc_write (a int primary key auto_increment, b int, c int as (b+8) virtual)`)
tk.MustExec(`CREATE TABLE test_gc_write_1 (a int primary key, b int, c int)`)
tests := []struct {
stmt string
err int
}{
// Can't modify generated column by values.
{`insert into test_gc_write (a, b, c) values (1, 1, 1)`, mysql.ErrBadGeneratedColumn},
{`insert into test_gc_write values (1, 1, 1)`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by select clause.
{`insert into test_gc_write select 1, 1, 1`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by on duplicate clause.
{`insert into test_gc_write (a, b) values (1, 1) on duplicate key update c = 1`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by set.
{`insert into test_gc_write set a = 1, b = 1, c = 1`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by update clause.
{`update test_gc_write set c = 1`, mysql.ErrBadGeneratedColumn},
// Can't modify generated column by multi-table update clause.
{`update test_gc_write, test_gc_write_1 set test_gc_write.c = 1`, mysql.ErrBadGeneratedColumn},
// Can insert without generated columns.
{`insert into test_gc_write (a, b) values (1, 1)`, 0},
{`insert into test_gc_write set a = 2, b = 2`, 0},
{`insert into test_gc_write (b) select c from test_gc_write`, 0},
// Can update without generated columns.
{`update test_gc_write set b = 2 where a = 2`, 0},
{`update test_gc_write t1, test_gc_write_1 t2 set t1.b = 3, t2.b = 4`, 0},
		// But we can't do the following, the same as in MySQL 5.7:
{`insert into test_gc_write values (1, 1)`, mysql.ErrWrongValueCountOnRow},
{`insert into test_gc_write select 1, 1`, mysql.ErrWrongValueCountOnRow},
{`insert into test_gc_write (c) select a, b from test_gc_write`, mysql.ErrWrongValueCountOnRow},
{`insert into test_gc_write (b, c) select a, b from test_gc_write`, mysql.ErrBadGeneratedColumn},
}
for _, tt := range tests {
_, err := tk.Exec(tt.stmt)
if tt.err != 0 {
c.Assert(err, NotNil, Commentf("sql is `%v`", tt.stmt))
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(tt.err), Commentf("sql is %v", tt.stmt))
} else {
c.Assert(err, IsNil)
}
}
}
// TestGeneratedColumnRead tests selecting generated columns from a table.
// They should be calculated from their generation expressions.
func (s *testSuiteP1) TestGeneratedColumnRead(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`CREATE TABLE test_gc_read(a int primary key, b int, c int as (a+b), d int as (a*b) stored, e int as (c*2))`)
result := tk.MustQuery(`SELECT generation_expression FROM information_schema.columns WHERE table_name = 'test_gc_read' AND column_name = 'd'`)
result.Check(testkit.Rows("`a` * `b`"))
	// Insert only columns a and b, leaving the generated columns to be calculated from them.
tk.MustExec(`INSERT INTO test_gc_read (a, b) VALUES (0,null),(1,2),(3,4)`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`))
tk.MustExec(`INSERT INTO test_gc_read SET a = 5, b = 10`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`, `5 10 15 50 30`))
tk.MustExec(`REPLACE INTO test_gc_read (a, b) VALUES (5, 6)`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`, `5 6 11 30 22`))
tk.MustExec(`INSERT INTO test_gc_read (a, b) VALUES (5, 8) ON DUPLICATE KEY UPDATE b = 9`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`, `5 9 14 45 28`))
	// Test selecting only generated columns, without the columns they depend on.
result = tk.MustQuery(`SELECT c, d FROM test_gc_read`)
result.Check(testkit.Rows(`<nil> <nil>`, `3 2`, `7 12`, `14 45`))
	// Test selecting only a virtual generated column that refers to other virtual generated columns.
result = tk.MustQuery(`SELECT e FROM test_gc_read`)
result.Check(testkit.Rows(`<nil>`, `6`, `14`, `28`))
	// Test the evaluation order of the ON DUPLICATE KEY UPDATE list.
tk.MustExec(`INSERT INTO test_gc_read (a, b) VALUES (5, 8) ON DUPLICATE KEY UPDATE a = 6, b = a`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`, `6 6 12 36 24`))
tk.MustExec(`INSERT INTO test_gc_read (a, b) VALUES (6, 8) ON DUPLICATE KEY UPDATE b = 8, a = b`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`, `8 8 16 64 32`))
// Test where-conditions on virtual/stored generated columns.
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE c = 7`)
result.Check(testkit.Rows(`3 4 7 12 14`))
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE d = 64`)
result.Check(testkit.Rows(`8 8 16 64 32`))
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE e = 6`)
result.Check(testkit.Rows(`1 2 3 2 6`))
// Test update where-conditions on virtual/generated columns.
tk.MustExec(`UPDATE test_gc_read SET a = a + 100 WHERE c = 7`)
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE c = 107`)
result.Check(testkit.Rows(`103 4 107 412 214`))
	// The same update, through a table alias.
tk.MustExec(`UPDATE test_gc_read m SET m.a = m.a + 100 WHERE c = 107`)
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE c = 207`)
result.Check(testkit.Rows(`203 4 207 812 414`))
tk.MustExec(`UPDATE test_gc_read SET a = a - 200 WHERE d = 812`)
result = tk.MustQuery(`SELECT * FROM test_gc_read WHERE d = 12`)
result.Check(testkit.Rows(`3 4 7 12 14`))
tk.MustExec(`INSERT INTO test_gc_read set a = 4, b = d + 1`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`, `3 4 7 12 14`,
`4 <nil> <nil> <nil> <nil>`, `8 8 16 64 32`))
tk.MustExec(`DELETE FROM test_gc_read where a = 4`)
// Test on-conditions on virtual/stored generated columns.
tk.MustExec(`CREATE TABLE test_gc_help(a int primary key, b int, c int, d int, e int)`)
tk.MustExec(`INSERT INTO test_gc_help(a, b, c, d, e) SELECT * FROM test_gc_read`)
result = tk.MustQuery(`SELECT t1.* FROM test_gc_read t1 JOIN test_gc_help t2 ON t1.c = t2.c ORDER BY t1.a`)
result.Check(testkit.Rows(`1 2 3 2 6`, `3 4 7 12 14`, `8 8 16 64 32`))
result = tk.MustQuery(`SELECT t1.* FROM test_gc_read t1 JOIN test_gc_help t2 ON t1.d = t2.d ORDER BY t1.a`)
result.Check(testkit.Rows(`1 2 3 2 6`, `3 4 7 12 14`, `8 8 16 64 32`))
result = tk.MustQuery(`SELECT t1.* FROM test_gc_read t1 JOIN test_gc_help t2 ON t1.e = t2.e ORDER BY t1.a`)
result.Check(testkit.Rows(`1 2 3 2 6`, `3 4 7 12 14`, `8 8 16 64 32`))
	// Test generated columns in subqueries.
result = tk.MustQuery(`SELECT * FROM test_gc_read t WHERE t.a not in (SELECT t.a FROM test_gc_read t where t.c > 5)`)
result.Sort().Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 2 3 2 6`))
result = tk.MustQuery(`SELECT * FROM test_gc_read t WHERE t.c in (SELECT t.c FROM test_gc_read t where t.c > 5)`)
result.Sort().Check(testkit.Rows(`3 4 7 12 14`, `8 8 16 64 32`))
result = tk.MustQuery(`SELECT tt.b FROM test_gc_read tt WHERE tt.a = (SELECT max(t.a) FROM test_gc_read t WHERE t.c = tt.c) ORDER BY b`)
result.Check(testkit.Rows(`2`, `4`, `8`))
// Test aggregation on virtual/stored generated columns.
result = tk.MustQuery(`SELECT c, sum(a) aa, max(d) dd, sum(e) ee FROM test_gc_read GROUP BY c ORDER BY aa`)
result.Check(testkit.Rows(`<nil> 0 <nil> <nil>`, `3 1 2 6`, `7 3 12 14`, `16 8 64 32`))
result = tk.MustQuery(`SELECT a, sum(c), sum(d), sum(e) FROM test_gc_read GROUP BY a ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil>`, `1 3 2 6`, `3 7 12 14`, `8 16 64 32`))
// Test multi-update on generated columns.
tk.MustExec(`UPDATE test_gc_read m, test_gc_read n SET m.b = m.b + 10, n.b = n.b + 10`)
result = tk.MustQuery(`SELECT * FROM test_gc_read ORDER BY a`)
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 12 13 12 26`, `3 14 17 42 34`, `8 18 26 144 52`))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t values(8)")
tk.MustExec("update test_gc_read set a = a+1 where a in (select a from t)")
result = tk.MustQuery("select * from test_gc_read order by a")
result.Check(testkit.Rows(`0 <nil> <nil> <nil> <nil>`, `1 12 13 12 26`, `3 14 17 42 34`, `9 18 27 162 54`))
	// Test cases where the generation expression's type differs from the generated column's type.
tk.MustExec(`CREATE TABLE test_gc_read_cast(a VARCHAR(255), b VARCHAR(255), c INT AS (JSON_EXTRACT(a, b)), d INT AS (JSON_EXTRACT(a, b)) STORED)`)
tk.MustExec(`INSERT INTO test_gc_read_cast (a, b) VALUES ('{"a": "3"}', '$.a')`)
result = tk.MustQuery(`SELECT c, d FROM test_gc_read_cast`)
result.Check(testkit.Rows(`3 3`))
tk.MustExec(`CREATE TABLE test_gc_read_cast_1(a VARCHAR(255), b VARCHAR(255), c ENUM("red", "yellow") AS (JSON_UNQUOTE(JSON_EXTRACT(a, b))))`)
tk.MustExec(`INSERT INTO test_gc_read_cast_1 (a, b) VALUES ('{"a": "yellow"}', '$.a')`)
result = tk.MustQuery(`SELECT c FROM test_gc_read_cast_1`)
result.Check(testkit.Rows(`yellow`))
tk.MustExec(`CREATE TABLE test_gc_read_cast_2( a JSON, b JSON AS (a->>'$.a'))`)
tk.MustExec(`INSERT INTO test_gc_read_cast_2(a) VALUES ('{"a": "{ \\\"key\\\": \\\"\\u6d4b\\\" }"}')`)
result = tk.MustQuery(`SELECT b FROM test_gc_read_cast_2`)
result.Check(testkit.Rows(`{"key": "测"}`))
tk.MustExec(`CREATE TABLE test_gc_read_cast_3( a JSON, b JSON AS (a->>'$.a'), c INT AS (b * 3.14) )`)
tk.MustExec(`INSERT INTO test_gc_read_cast_3(a) VALUES ('{"a": "5"}')`)
result = tk.MustQuery(`SELECT c FROM test_gc_read_cast_3`)
result.Check(testkit.Rows(`16`))
_, err := tk.Exec(`INSERT INTO test_gc_read_cast_1 (a, b) VALUES ('{"a": "invalid"}', '$.a')`)
c.Assert(err, NotNil)
	// Test reading generated columns after dropping an irrelevant column.
tk.MustExec(`DROP TABLE IF EXISTS test_gc_read_m`)
tk.MustExec(`CREATE TABLE test_gc_read_m (a int primary key, b int, c int as (a+1), d int as (c*2))`)
tk.MustExec(`INSERT INTO test_gc_read_m(a) values (1), (2)`)
tk.MustExec(`ALTER TABLE test_gc_read_m DROP b`)
result = tk.MustQuery(`SELECT * FROM test_gc_read_m`)
result.Check(testkit.Rows(`1 2 4`, `2 3 6`))
	// Test NOT NULL generated columns.
tk.MustExec(`CREATE TABLE test_gc_read_1(a int primary key, b int, c int as (a+b) not null, d int as (a*b) stored)`)
tk.MustExec(`CREATE TABLE test_gc_read_2(a int primary key, b int, c int as (a+b), d int as (a*b) stored not null)`)
tests := []struct {
stmt string
err int
}{
		// Can't insert these records because the generated columns are NOT NULL.
{`insert into test_gc_read_1(a, b) values (1, null)`, mysql.ErrBadNull},
{`insert into test_gc_read_2(a, b) values (1, null)`, mysql.ErrBadNull},
}
for _, tt := range tests {
_, err := tk.Exec(tt.stmt)
if tt.err != 0 {
c.Assert(err, NotNil)
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(tt.err))
} else {
c.Assert(err, IsNil)
}
}
}
// TestGeneratedColumnPointGet tests generated columns using point get and batch point get.
func (s *testSuiteP1) TestGeneratedColumnPointGet(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists tu")
tk.MustExec("CREATE TABLE tu(a int, b int, c int GENERATED ALWAYS AS (a + b) VIRTUAL, d int as (a * b) stored, " +
"e int GENERATED ALWAYS as (b * 2) VIRTUAL, PRIMARY KEY (a), UNIQUE KEY ukc (c), unique key ukd(d), key ke(e))")
tk.MustExec("insert into tu(a, b) values(1, 2)")
tk.MustExec("insert into tu(a, b) values(5, 6)")
tk.MustQuery("select * from tu for update").Check(testkit.Rows("1 2 3 2 4", "5 6 11 30 12"))
tk.MustQuery("select * from tu where a = 1").Check(testkit.Rows("1 2 3 2 4"))
tk.MustQuery("select * from tu where a in (1, 2)").Check(testkit.Rows("1 2 3 2 4"))
tk.MustQuery("select * from tu where c in (1, 2, 3)").Check(testkit.Rows("1 2 3 2 4"))
tk.MustQuery("select * from tu where c = 3").Check(testkit.Rows("1 2 3 2 4"))
tk.MustQuery("select d, e from tu where c = 3").Check(testkit.Rows("2 4"))
tk.MustQuery("select * from tu where d in (1, 2, 3)").Check(testkit.Rows("1 2 3 2 4"))
tk.MustQuery("select * from tu where d = 2").Check(testkit.Rows("1 2 3 2 4"))
tk.MustQuery("select c, d from tu where d = 2").Check(testkit.Rows("3 2"))
tk.MustQuery("select d, e from tu where e = 4").Check(testkit.Rows("2 4"))
tk.MustQuery("select * from tu where e = 4").Check(testkit.Rows("1 2 3 2 4"))
tk.MustExec("update tu set a = a + 1, b = b + 1 where c = 11")
tk.MustQuery("select * from tu for update").Check(testkit.Rows("1 2 3 2 4", "6 7 13 42 14"))
tk.MustQuery("select * from tu where a = 6").Check(testkit.Rows("6 7 13 42 14"))
tk.MustQuery("select * from tu where c in (5, 6, 13)").Check(testkit.Rows("6 7 13 42 14"))
tk.MustQuery("select b, c, e, d from tu where c = 13").Check(testkit.Rows("7 13 14 42"))
tk.MustQuery("select a, e, d from tu where c in (5, 6, 13)").Check(testkit.Rows("6 14 42"))
tk.MustExec("drop table if exists tu")
}
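// TestToPBExpr tests expressions that are converted to protobuf and pushed down
// to the storage layer.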
func (s *testSuiteP2) TestToPBExpr(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a decimal(10,6), b decimal, index idx_b (b))")
tk.MustExec("set sql_mode = ''")
tk.MustExec("insert t values (1.1, 1.1)")
tk.MustExec("insert t values (2.4, 2.4)")
tk.MustExec("insert t values (3.3, 2.7)")
result := tk.MustQuery("select * from t where a < 2.399999")
result.Check(testkit.Rows("1.100000 1"))
result = tk.MustQuery("select * from t where a > 1.5")
result.Check(testkit.Rows("2.400000 2", "3.300000 3"))
result = tk.MustQuery("select * from t where a <= 1.1")
result.Check(testkit.Rows("1.100000 1"))
result = tk.MustQuery("select * from t where b >= 3")
result.Check(testkit.Rows("3.300000 3"))
result = tk.MustQuery("select * from t where not (b = 1)")
result.Check(testkit.Rows("2.400000 2", "3.300000 3"))
result = tk.MustQuery("select * from t where b&1 = a|1")
result.Check(testkit.Rows("1.100000 1"))
result = tk.MustQuery("select * from t where b != 2 and b <=> 3")
result.Check(testkit.Rows("3.300000 3"))
result = tk.MustQuery("select * from t where b in (3)")
result.Check(testkit.Rows("3.300000 3"))
result = tk.MustQuery("select * from t where b not in (1, 2)")
result.Check(testkit.Rows("3.300000 3"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a varchar(255), b int)")
tk.MustExec("insert t values ('abc123', 1)")
tk.MustExec("insert t values ('ab123', 2)")
result = tk.MustQuery("select * from t where a like 'ab%'")
result.Check(testkit.Rows("abc123 1", "ab123 2"))
result = tk.MustQuery("select * from t where a like 'ab_12'")
result.Check(nil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key)")
tk.MustExec("insert t values (1)")
tk.MustExec("insert t values (2)")
result = tk.MustQuery("select * from t where not (a = 1)")
result.Check(testkit.Rows("2"))
result = tk.MustQuery("select * from t where not(not (a = 1))")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select * from t where not(a != 1 and a != 2)")
result.Check(testkit.Rows("1", "2"))
}
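// TestDatumXAPI tests decimal and time comparisons on both indexed and
// non-indexed columns.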
func (s *testSuiteP2) TestDatumXAPI(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a decimal(10,6), b decimal, index idx_b (b))")
tk.MustExec("set sql_mode = ''")
tk.MustExec("insert t values (1.1, 1.1)")
tk.MustExec("insert t values (2.2, 2.2)")
tk.MustExec("insert t values (3.3, 2.7)")
result := tk.MustQuery("select * from t where a > 1.5")
result.Check(testkit.Rows("2.200000 2", "3.300000 3"))
result = tk.MustQuery("select * from t where b > 1.5")
result.Check(testkit.Rows("2.200000 2", "3.300000 3"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a time(3), b time, index idx_a (a))")
tk.MustExec("insert t values ('11:11:11', '11:11:11')")
tk.MustExec("insert t values ('11:11:12', '11:11:12')")
tk.MustExec("insert t values ('11:11:13', '11:11:13')")
result = tk.MustQuery("select * from t where a > '11:11:11.5'")
result.Check(testkit.Rows("11:11:12.000 11:11:12", "11:11:13.000 11:11:13"))
result = tk.MustQuery("select * from t where b > '11:11:11.5'")
result.Check(testkit.Rows("11:11:12.000 11:11:12", "11:11:13.000 11:11:13"))
}
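// TestSQLMode tests strict and non-strict SQL modes at session and global scope.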
func (s *testSuiteP2) TestSQLMode(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a tinyint not null)")
tk.MustExec("set sql_mode = 'STRICT_TRANS_TABLES'")
_, err := tk.Exec("insert t values ()")
c.Check(err, NotNil)
_, err = tk.Exec("insert t values ('1000')")
c.Check(err, NotNil)
tk.MustExec("create table if not exists tdouble (a double(3,2))")
_, err = tk.Exec("insert tdouble values (10.23)")
c.Check(err, NotNil)
tk.MustExec("set sql_mode = ''")
tk.MustExec("insert t values ()")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1364 Field 'a' doesn't have a default value"))
tk.MustExec("insert t values (null)")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1048 Column 'a' cannot be null"))
tk.MustExec("insert ignore t values (null)")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1048 Column 'a' cannot be null"))
tk.MustExec("insert t select null")
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1048 Column 'a' cannot be null"))
tk.MustExec("insert t values (1000)")
tk.MustQuery("select * from t order by a").Check(testkit.Rows("0", "0", "0", "0", "127"))
tk.MustExec("insert tdouble values (10.23)")
tk.MustQuery("select * from tdouble").Check(testkit.Rows("9.99"))
tk.MustExec("set sql_mode = 'STRICT_TRANS_TABLES'")
tk.MustExec("set @@global.sql_mode = ''")
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
tk2.MustExec("drop table if exists t2")
tk2.MustExec("create table t2 (a varchar(3))")
tk2.MustExec("insert t2 values ('abcd')")
tk2.MustQuery("select * from t2").Check(testkit.Rows("abc"))
	// The first session is still in strict mode.
_, err = tk.Exec("insert t2 values ('abcd')")
c.Check(err, NotNil)
// Restore original global strict mode.
tk.MustExec("set @@global.sql_mode = 'STRICT_TRANS_TABLES'")
}
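// TestTableDual tests queries that read from the dual table or from provably
// empty results.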
func (s *testSuiteP2) TestTableDual(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
result := tk.MustQuery("Select 1")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("Select 1 from dual")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("Select count(*) from dual")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("Select 1 from dual where 1")
result.Check(testkit.Rows("1"))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key)")
tk.MustQuery("select t1.* from t t1, t t2 where t1.a=t2.a and 1=0").Check(testkit.Rows())
}
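// TestTableScan tests scanning information_schema tables.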
func (s *testSuiteP2) TestTableScan(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use information_schema")
result := tk.MustQuery("select * from schemata")
	// There must be at least these schemas: information_schema, mysql, performance_schema and test.
c.Assert(len(result.Rows()), GreaterEqual, 4)
tk.MustExec("use test")
tk.MustExec("create database mytest")
rowStr1 := fmt.Sprintf("%s %s %s %s %v %v %v", "def", "mysql", "utf8mb4", "utf8mb4_bin", nil, nil, nil)
rowStr2 := fmt.Sprintf("%s %s %s %s %v %v %v", "def", "mytest", "utf8mb4", "utf8mb4_bin", nil, nil, nil)
tk.MustExec("use information_schema")
result = tk.MustQuery("select * from schemata where schema_name = 'mysql'")
result.Check(testkit.Rows(rowStr1))
result = tk.MustQuery("select * from schemata where schema_name like 'my%'")
result.Check(testkit.Rows(rowStr1, rowStr2))
result = tk.MustQuery("select 1 from tables limit 1")
result.Check(testkit.Rows("1"))
}
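// TestAdapterStatement tests that a compiled statement keeps its original text.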
func (s *testSuiteP2) TestAdapterStatement(c *C) {
se, err := session.CreateSession4Test(s.store)
c.Check(err, IsNil)
se.GetSessionVars().TxnCtx.InfoSchema = domain.GetDomain(se).InfoSchema()
compiler := &executor.Compiler{Ctx: se}
stmtNode, err := s.ParseOneStmt("select 1", "", "")
c.Check(err, IsNil)
stmt, err := compiler.Compile(context.TODO(), stmtNode)
c.Check(err, IsNil)
c.Check(stmt.OriginText(), Equals, "select 1")
stmtNode, err = s.ParseOneStmt("create table test.t (a int)", "", "")
c.Check(err, IsNil)
stmt, err = compiler.Compile(context.TODO(), stmtNode)
c.Check(err, IsNil)
c.Check(stmt.OriginText(), Equals, "create table test.t (a int)")
}
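// TestIsPointGet checks which statements are recognized as point gets under
// auto-commit. A related assertion could also be made through EXPLAIN output;
// a minimal sketch with testkit (the expected operator name is an assumption
// for illustration, not part of this suite):
//
//	rows := tk.MustQuery("explain select * from help_topic where help_topic_id=1").Rows()
//	c.Assert(fmt.Sprintf("%v", rows[0][0]), Matches, "Point_Get.*")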
func (s *testSuiteP2) TestIsPointGet(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use mysql")
ctx := tk.Se.(sessionctx.Context)
tests := map[string]bool{
"select * from help_topic where name='aaa'": false,
"select 1 from help_topic where name='aaa'": false,
"select * from help_topic where help_topic_id=1": true,
"select * from help_topic where help_category_id=1": false,
}
for sqlStr, result := range tests {
stmtNode, err := s.ParseOneStmt(sqlStr, "", "")
c.Check(err, IsNil)
preprocessorReturn := &plannercore.PreprocessorReturn{}
err = plannercore.Preprocess(ctx, stmtNode, plannercore.WithPreprocessorReturn(preprocessorReturn))
c.Check(err, IsNil)
p, _, err := planner.Optimize(context.TODO(), ctx, stmtNode, preprocessorReturn.InfoSchema)
c.Check(err, IsNil)
ret, err := plannercore.IsPointGetWithPKOrUniqueKeyByAutoCommit(ctx, p)
c.Assert(err, IsNil)
c.Assert(ret, Equals, result)
}
}
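// TestClusteredIndexIsPointGet checks point-get recognition for tables with a
// clustered (multi-column) primary key: the plan is a point get only when the
// whole primary key is specified.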
func (s *testSuiteP2) TestClusteredIndexIsPointGet(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("drop database if exists test_cluster_index_is_point_get;")
tk.MustExec("create database test_cluster_index_is_point_get;")
tk.MustExec("use test_cluster_index_is_point_get;")
tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t (a varchar(255), b int, c char(10), primary key (c, a));")
ctx := tk.Se.(sessionctx.Context)
tests := map[string]bool{
"select 1 from t where a='x'": false,
"select * from t where c='x'": false,
"select * from t where a='x' and c='x'": true,
"select * from t where a='x' and c='x' and b=1": false,
}
for sqlStr, result := range tests {
stmtNode, err := s.ParseOneStmt(sqlStr, "", "")
c.Check(err, IsNil)
preprocessorReturn := &plannercore.PreprocessorReturn{}
err = plannercore.Preprocess(ctx, stmtNode, plannercore.WithPreprocessorReturn(preprocessorReturn))
c.Check(err, IsNil)
p, _, err := planner.Optimize(context.TODO(), ctx, stmtNode, preprocessorReturn.InfoSchema)
c.Check(err, IsNil)
ret, err := plannercore.IsPointGetWithPKOrUniqueKeyByAutoCommit(ctx, p)
c.Assert(err, IsNil)
c.Assert(ret, Equals, result)
}
}
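// TestPointGetRepeatableRead uses failpoints to pause a point get partway
// through execution, then verifies that a concurrent update is not observed
// (repeatable read).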
func (s *testSerialSuite) TestPointGetRepeatableRead(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
tk1.MustExec(`create table point_get (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into point_get values (1, 1, 1)")
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
var (
step1 = "github.com/pingcap/tidb/executor/pointGetRepeatableReadTest-step1"
step2 = "github.com/pingcap/tidb/executor/pointGetRepeatableReadTest-step2"
)
c.Assert(failpoint.Enable(step1, "return"), IsNil)
c.Assert(failpoint.Enable(step2, "pause"), IsNil)
updateWaitCh := make(chan struct{})
go func() {
ctx := context.WithValue(context.Background(), "pointGetRepeatableReadTest", updateWaitCh)
ctx = failpoint.WithHook(ctx, func(ctx context.Context, fpname string) bool {
return fpname == step1 || fpname == step2
})
rs, err := tk1.Se.Execute(ctx, "select c from point_get where b = 1")
c.Assert(err, IsNil)
result := tk1.ResultSetToResultWithCtx(ctx, rs[0], Commentf("execute sql fail"))
result.Check(testkit.Rows("1"))
}()
	<-updateWaitCh // Wait for the point get's first `get` operation.
c.Assert(failpoint.Disable(step1), IsNil)
tk2.MustExec("update point_get set b = 2, c = 2 where a = 1")
c.Assert(failpoint.Disable(step2), IsNil)
}
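// TestBatchPointGetRepeatableRead is the batch point get variant of the test above.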
func (s *testSerialSuite) TestBatchPointGetRepeatableRead(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
tk1.MustExec(`create table batch_point_get (a int, b int, c int, unique key k_b(a, b, c))`)
tk1.MustExec("insert into batch_point_get values (1, 1, 1), (2, 3, 4), (3, 4, 5)")
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
var (
step1 = "github.com/pingcap/tidb/executor/batchPointGetRepeatableReadTest-step1"
step2 = "github.com/pingcap/tidb/executor/batchPointGetRepeatableReadTest-step2"
)
c.Assert(failpoint.Enable(step1, "return"), IsNil)
c.Assert(failpoint.Enable(step2, "pause"), IsNil)
updateWaitCh := make(chan struct{})
go func() {
ctx := context.WithValue(context.Background(), "batchPointGetRepeatableReadTest", updateWaitCh)
ctx = failpoint.WithHook(ctx, func(ctx context.Context, fpname string) bool {
return fpname == step1 || fpname == step2
})
rs, err := tk1.Se.Execute(ctx, "select c from batch_point_get where (a, b, c) in ((1, 1, 1))")
c.Assert(err, IsNil)
result := tk1.ResultSetToResultWithCtx(ctx, rs[0], Commentf("execute sql fail"))
result.Check(testkit.Rows("1"))
}()
	<-updateWaitCh // Wait for the batch point get's first `get` operation.
c.Assert(failpoint.Disable(step1), IsNil)
tk2.MustExec("update batch_point_get set b = 2, c = 2 where a = 1")
c.Assert(failpoint.Disable(step2), IsNil)
}
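// TestSplitRegionTimeout tests SPLIT TABLE and scatter-region timeouts using failpoints.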
func (s *testSerialSuite) TestSplitRegionTimeout(c *C) {
c.Assert(failpoint.Enable("tikvclient/mockSplitRegionTimeout", `return(true)`), IsNil)
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a varchar(100),b int, index idx1(b,a))")
tk.MustExec(`split table t index idx1 by (10000,"abcd"),(10000000);`)
tk.MustExec(`set @@tidb_wait_split_region_timeout=1`)
	// The result "0 0" means 0 regions were split and 0 regions finished scattering before the timeout.
tk.MustQuery(`split table t between (0) and (10000) regions 10`).Check(testkit.Rows("0 0"))
err := failpoint.Disable("tikvclient/mockSplitRegionTimeout")
c.Assert(err, IsNil)
// Test scatter regions timeout.
c.Assert(failpoint.Enable("tikvclient/mockScatterRegionTimeout", `return(true)`), IsNil)
tk.MustQuery(`split table t between (0) and (10000) regions 10`).Check(testkit.Rows("10 1"))
err = failpoint.Disable("tikvclient/mockScatterRegionTimeout")
c.Assert(err, IsNil)
// Test pre-split with timeout.
tk.MustExec("drop table if exists t")
tk.MustExec("set @@global.tidb_scatter_region=1;")
c.Assert(failpoint.Enable("tikvclient/mockScatterRegionTimeout", `return(true)`), IsNil)
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
start := time.Now()
tk.MustExec("create table t (a int, b int) partition by hash(a) partitions 5;")
c.Assert(time.Since(start).Seconds(), Less, 10.0)
err = failpoint.Disable("tikvclient/mockScatterRegionTimeout")
c.Assert(err, IsNil)
}
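// TestRow tests row (tuple) constructors in comparisons and IN subqueries,
// including their three-valued NULL semantics.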
func (s *testSuiteP2) TestRow(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c int, d int)")
tk.MustExec("insert t values (1, 1)")
tk.MustExec("insert t values (1, 3)")
tk.MustExec("insert t values (2, 1)")
tk.MustExec("insert t values (2, 3)")
result := tk.MustQuery("select * from t where (c, d) < (2,2)")
result.Check(testkit.Rows("1 1", "1 3", "2 1"))
result = tk.MustQuery("select * from t where (1,2,3) > (3,2,1)")
result.Check(testkit.Rows())
result = tk.MustQuery("select * from t where row(1,2,3) > (3,2,1)")
result.Check(testkit.Rows())
result = tk.MustQuery("select * from t where (c, d) = (select * from t where (c,d) = (1,1))")
result.Check(testkit.Rows("1 1"))
result = tk.MustQuery("select * from t where (c, d) = (select * from t k where (t.c,t.d) = (c,d))")
result.Check(testkit.Rows("1 1", "1 3", "2 1", "2 3"))
result = tk.MustQuery("select (1, 2, 3) < (2, 3, 4)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select (2, 3, 4) <= (2, 3, 3)")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select (2, 3, 4) <= (2, 3, 4)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select (2, 3, 4) <= (2, 1, 4)")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select (2, 3, 4) >= (2, 3, 4)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select (2, 3, 4) = (2, 3, 4)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select (2, 3, 4) != (2, 3, 4)")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select row(1, 1) in (row(1, 1))")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select row(1, 0) in (row(1, 1))")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select row(1, 1) in (select 1, 1)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select row(1, 1) > row(1, 0)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select row(1, 1) > (select 1, 0)")
result.Check(testkit.Rows("1"))
result = tk.MustQuery("select 1 > (select 1)")
result.Check(testkit.Rows("0"))
result = tk.MustQuery("select (select 1)")
result.Check(testkit.Rows("1"))
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (a int, b int)")
tk.MustExec("insert t1 values (1,2),(1,null)")
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t2 (c int, d int)")
tk.MustExec("insert t2 values (0,0)")
tk.MustQuery("select * from t2 where (1,2) in (select * from t1)").Check(testkit.Rows("0 0"))
tk.MustQuery("select * from t2 where (1,2) not in (select * from t1)").Check(testkit.Rows())
tk.MustQuery("select * from t2 where (1,1) not in (select * from t1)").Check(testkit.Rows())
tk.MustQuery("select * from t2 where (1,null) in (select * from t1)").Check(testkit.Rows())
tk.MustQuery("select * from t2 where (null,null) in (select * from t1)").Check(testkit.Rows())
tk.MustExec("delete from t1 where a=1 and b=2")
tk.MustQuery("select (1,1) in (select * from t2) from t1").Check(testkit.Rows("0"))
tk.MustQuery("select (1,1) not in (select * from t2) from t1").Check(testkit.Rows("1"))
tk.MustQuery("select (1,1) in (select 1,1 from t2) from t1").Check(testkit.Rows("1"))
tk.MustQuery("select (1,1) not in (select 1,1 from t2) from t1").Check(testkit.Rows("0"))
// MySQL 5.7 returns 1 for these 2 queries, which is wrong.
tk.MustQuery("select (1,null) not in (select 1,1 from t2) from t1").Check(testkit.Rows("<nil>"))
tk.MustQuery("select (t1.a,null) not in (select 1,1 from t2) from t1").Check(testkit.Rows("<nil>"))
tk.MustQuery("select (1,null) in (select * from t1)").Check(testkit.Rows("<nil>"))
tk.MustQuery("select (1,null) not in (select * from t1)").Check(testkit.Rows("<nil>"))
}
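// TestColumnName checks the column name, alias, table and database metadata
// returned in result set fields.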
func (s *testSuiteP2) TestColumnName(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c int, d int)")
	// Disable ONLY_FULL_GROUP_BY.
tk.MustExec("set sql_mode='STRICT_TRANS_TABLES'")
rs, err := tk.Exec("select 1 + c, count(*) from t")
c.Check(err, IsNil)
fields := rs.Fields()
c.Check(len(fields), Equals, 2)
c.Check(fields[0].Column.Name.L, Equals, "1 + c")
c.Check(fields[0].ColumnAsName.L, Equals, "1 + c")
c.Check(fields[1].Column.Name.L, Equals, "count(*)")
c.Check(fields[1].ColumnAsName.L, Equals, "count(*)")
c.Assert(rs.Close(), IsNil)
rs, err = tk.Exec("select (c) > all (select c from t) from t")
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 1)
c.Check(fields[0].Column.Name.L, Equals, "(c) > all (select c from t)")
c.Check(fields[0].ColumnAsName.L, Equals, "(c) > all (select c from t)")
c.Assert(rs.Close(), IsNil)
tk.MustExec("begin")
tk.MustExec("insert t values(1,1)")
rs, err = tk.Exec("select c d, d c from t")
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(len(fields), Equals, 2)
c.Check(fields[0].Column.Name.L, Equals, "c")
c.Check(fields[0].ColumnAsName.L, Equals, "d")
c.Check(fields[1].Column.Name.L, Equals, "d")
c.Check(fields[1].ColumnAsName.L, Equals, "c")
c.Assert(rs.Close(), IsNil)
// Test case for querying a column of a table.
// In this case, all attributes have values.
rs, err = tk.Exec("select c as a from t as t2")
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(fields[0].Column.Name.L, Equals, "c")
c.Check(fields[0].ColumnAsName.L, Equals, "a")
c.Check(fields[0].Table.Name.L, Equals, "t")
c.Check(fields[0].TableAsName.L, Equals, "t2")
c.Check(fields[0].DBName.L, Equals, "test")
c.Assert(rs.Close(), IsNil)
// Test case for querying an expression that uses only constant inputs.
// In this case, the table, org_table and database attributes will all be empty.
rs, err = tk.Exec("select hour(1) as a from t as t2")
c.Check(err, IsNil)
fields = rs.Fields()
c.Check(fields[0].Column.Name.L, Equals, "a")
c.Check(fields[0].ColumnAsName.L, Equals, "a")
c.Check(fields[0].Table.Name.L, Equals, "")
c.Check(fields[0].TableAsName.L, Equals, "")
c.Check(fields[0].DBName.L, Equals, "")
c.Assert(rs.Close(), IsNil)
// Test case for querying a column wrapped in parentheses and unary plus.
// In this case, the column name should be its original name.
rs, err = tk.Exec("select (c), (+c), +(c), +(+(c)), ++c from t")
c.Check(err, IsNil)
fields = rs.Fields()
for i := 0; i < 5; i++ {
c.Check(fields[i].Column.Name.L, Equals, "c")
c.Check(fields[i].ColumnAsName.L, Equals, "c")
}
c.Assert(rs.Close(), IsNil)
// Test issue https://github.com/pingcap/tidb/issues/9639.
// Both window function and expression appear in final result field.
tk.MustExec("set @@tidb_enable_window_function = 1")
rs, err = tk.Exec("select 1+1, row_number() over() num from t")
c.Check(err, IsNil)
fields = rs.Fields()
c.Assert(fields[0].Column.Name.L, Equals, "1+1")
c.Assert(fields[0].ColumnAsName.L, Equals, "1+1")
c.Assert(fields[1].Column.Name.L, Equals, "num")
c.Assert(fields[1].ColumnAsName.L, Equals, "num")
tk.MustExec("set @@tidb_enable_window_function = 0")
c.Assert(rs.Close(), IsNil)
rs, err = tk.Exec("select if(1,c,c) from t;")
c.Check(err, IsNil)
fields = rs.Fields()
c.Assert(fields[0].Column.Name.L, Equals, "if(1,c,c)")
// This is a compatibility issue; the alias should be empty instead.
c.Assert(fields[0].ColumnAsName.L, Equals, "if(1,c,c)")
c.Assert(rs.Close(), IsNil)
}
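// TestSelectVar checks reading and assigning user variables within a single
// select, plus acceptance of the SQL_BIG_RESULT, SQL_SMALL_RESULT, and
// SQL_BUFFER_RESULT hints.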
func (s *testSuiteP2) TestSelectVar(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (d int)")
tk.MustExec("insert into t values(1), (2), (1)")
// This behavior is different from MySQL.
result := tk.MustQuery("select @a, @a := d+1 from t")
result.Check(testkit.Rows("<nil> 2", "2 3", "3 2"))
// Test for PR #10658.
tk.MustExec("select SQL_BIG_RESULT d from t group by d")
tk.MustExec("select SQL_SMALL_RESULT d from t group by d")
tk.MustExec("select SQL_BUFFER_RESULT d from t group by d")
}
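// TestHistoryRead exercises historical reads through the tidb_snapshot
// session variable: validation against the GC safe point, rejection of future
// times, read-only enforcement under a snapshot, and reads as of a timestamp
// taken before a schema change.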
func (s *testSuiteP2) TestHistoryRead(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists history_read")
tk.MustExec("create table history_read (a int)")
tk.MustExec("insert history_read values (1)")
// For mocktikv, the safe point is not initialized, so we insert it manually for the snapshot to use.
safePointName := "tikv_gc_safe_point"
safePointValue := "20060102-15:04:05 -0700"
safePointComment := "All versions after safe point can be accessed. (DO NOT EDIT)"
updateSafePoint := fmt.Sprintf(`INSERT INTO mysql.tidb VALUES ('%[1]s', '%[2]s', '%[3]s')
ON DUPLICATE KEY
UPDATE variable_value = '%[2]s', comment = '%[3]s'`, safePointName, safePointValue, safePointComment)
tk.MustExec(updateSafePoint)
// Setting the snapshot to a time before the safe point will fail.
_, err := tk.Exec("set @@tidb_snapshot = '2006-01-01 15:04:05.999999'")
c.Assert(terror.ErrorEqual(err, variable.ErrSnapshotTooOld), IsTrue, Commentf("err %v", err))
// SnapshotTS is not updated if the check failed.
c.Assert(tk.Se.GetSessionVars().SnapshotTS, Equals, uint64(0))
// Setting snapshot to a time in the future will fail. (One day before the 2038 problem)
_, err = tk.Exec("set @@tidb_snapshot = '2038-01-18 03:14:07'")
c.Assert(err, ErrorMatches, "cannot set read timestamp to a future time")
// SnapshotTS is not updated if the check failed.
c.Assert(tk.Se.GetSessionVars().SnapshotTS, Equals, uint64(0))
curVer1, _ := s.store.CurrentVersion(kv.GlobalTxnScope)
time.Sleep(time.Millisecond)
snapshotTime := time.Now()
time.Sleep(time.Millisecond)
curVer2, _ := s.store.CurrentVersion(kv.GlobalTxnScope)
tk.MustExec("insert history_read values (2)")
tk.MustQuery("select * from history_read").Check(testkit.Rows("1", "2"))
tk.MustExec("set @@tidb_snapshot = '" + snapshotTime.Format("2006-01-02 15:04:05.999999") + "'")
ctx := tk.Se.(sessionctx.Context)
snapshotTS := ctx.GetSessionVars().SnapshotTS
c.Assert(snapshotTS, Greater, curVer1.Ver)
c.Assert(snapshotTS, Less, curVer2.Ver)
tk.MustQuery("select * from history_read").Check(testkit.Rows("1"))
_, err = tk.Exec("insert history_read values (2)")
c.Assert(err, NotNil)
_, err = tk.Exec("update history_read set a = 3 where a = 1")
c.Assert(err, NotNil)
_, err = tk.Exec("delete from history_read where a = 1")
c.Assert(err, NotNil)
tk.MustExec("set @@tidb_snapshot = ''")
tk.MustQuery("select * from history_read").Check(testkit.Rows("1", "2"))
tk.MustExec("insert history_read values (3)")
tk.MustExec("update history_read set a = 4 where a = 3")
tk.MustExec("delete from history_read where a = 1")
time.Sleep(time.Millisecond)
snapshotTime = time.Now()
time.Sleep(time.Millisecond)
tk.MustExec("alter table history_read add column b int")
tk.MustExec("insert history_read values (8, 8), (9, 9)")
tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2 <nil>", "4 <nil>", "8 8", "9 9"))
tk.MustExec("set @@tidb_snapshot = '" + snapshotTime.Format("2006-01-02 15:04:05.999999") + "'")
tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2", "4"))
tsoStr := strconv.FormatUint(oracle.GoTimeToTS(snapshotTime), 10)
tk.MustExec("set @@tidb_snapshot = '" + tsoStr + "'")
tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2", "4"))
tk.MustExec("set @@tidb_snapshot = ''")
tk.MustQuery("select * from history_read order by a").Check(testkit.Rows("2 <nil>", "4 <nil>", "8 8", "9 9"))
}
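// TestLowResolutionTSORead verifies that with tidb_low_resolution_tso enabled,
// reads use a cached timestamp and writes are rejected.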
func (s *testSuite2) TestLowResolutionTSORead(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("set @@autocommit=1")
tk.MustExec("use test")
tk.MustExec("drop table if exists low_resolution_tso")
tk.MustExec("create table low_resolution_tso(a int)")
tk.MustExec("insert low_resolution_tso values (1)")
// enable low resolution tso
c.Assert(tk.Se.GetSessionVars().LowResolutionTSO, IsFalse)
_, err := tk.Exec("set @@tidb_low_resolution_tso = 'on'")
c.Assert(err, IsNil)
c.Assert(tk.Se.GetSessionVars().LowResolutionTSO, IsTrue)
time.Sleep(3 * time.Second)
tk.MustQuery("select * from low_resolution_tso").Check(testkit.Rows("1"))
_, err = tk.Exec("update low_resolution_tso set a = 2")
c.Assert(err, NotNil)
tk.MustExec("set @@tidb_low_resolution_tso = 'off'")
tk.MustExec("update low_resolution_tso set a = 2")
tk.MustQuery("select * from low_resolution_tso").Check(testkit.Rows("2"))
}
func (s *testSuite2) TestStaleReadFutureTime(c *C) {
tk := testkit.NewTestKit(c, s.store)
// Setting tx_read_ts to a time in the future will fail. (One day before the 2038 problem)
_, err := tk.Exec("set @@tx_read_ts = '2038-01-18 03:14:07'")
c.Assert(err, ErrorMatches, "cannot set read timestamp to a future time")
// TxnReadTS is not updated if the check failed.
c.Assert(tk.Se.GetSessionVars().TxnReadTS.PeakTxnReadTS(), Equals, uint64(0))
}
func (s *testSuite) TestScanControlSelection(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key, b int, c int, index idx_b(b))")
tk.MustExec("insert into t values (1, 1, 1), (2, 1, 1), (3, 1, 2), (4, 2, 3)")
tk.MustQuery("select (select count(1) k from t s where s.b = t1.c) from t t1").Sort().Check(testkit.Rows("0", "1", "3", "3"))
}
func (s *testSuite) TestSimpleDAG(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key, b int, c int)")
tk.MustExec("insert into t values (1, 1, 1), (2, 1, 1), (3, 1, 2), (4, 2, 3)")
tk.MustQuery("select a from t").Check(testkit.Rows("1", "2", "3", "4"))
tk.MustQuery("select * from t where a = 4").Check(testkit.Rows("4 2 3"))
tk.MustQuery("select a from t limit 1").Check(testkit.Rows("1"))
tk.MustQuery("select a from t order by a desc").Check(testkit.Rows("4", "3", "2", "1"))
tk.MustQuery("select a from t order by a desc limit 1").Check(testkit.Rows("4"))
tk.MustQuery("select a from t order by b desc limit 1").Check(testkit.Rows("4"))
tk.MustQuery("select a from t where a < 3").Check(testkit.Rows("1", "2"))
tk.MustQuery("select a from t where b > 1").Check(testkit.Rows("4"))
tk.MustQuery("select a from t where b > 1 and a < 3").Check(testkit.Rows())
tk.MustQuery("select count(*) from t where b > 1 and a < 3").Check(testkit.Rows("0"))
tk.MustQuery("select count(*) from t").Check(testkit.Rows("4"))
tk.MustQuery("select count(*), c from t group by c order by c").Check(testkit.Rows("2 1", "1 2", "1 3"))
tk.MustQuery("select sum(c) as s from t group by b order by s").Check(testkit.Rows("3", "4"))
tk.MustQuery("select avg(a) as s from t group by b order by s").Check(testkit.Rows("2.0000", "4.0000"))
tk.MustQuery("select sum(distinct c) from t group by b").Check(testkit.Rows("3", "3"))
tk.MustExec("create index i on t(c,b)")
tk.MustQuery("select a from t where c = 1").Check(testkit.Rows("1", "2"))
tk.MustQuery("select a from t where c = 1 and a < 2").Check(testkit.Rows("1"))
tk.MustQuery("select a from t where c = 1 order by a limit 1").Check(testkit.Rows("1"))
tk.MustQuery("select count(*) from t where c = 1 ").Check(testkit.Rows("2"))
tk.MustExec("create index i1 on t(b)")
tk.MustQuery("select c from t where b = 2").Check(testkit.Rows("3"))
tk.MustQuery("select * from t where b = 2").Check(testkit.Rows("4 2 3"))
tk.MustQuery("select count(*) from t where b = 1").Check(testkit.Rows("3"))
tk.MustQuery("select * from t where b = 1 and a > 1 limit 1").Check(testkit.Rows("2 1 1"))
// Test time push down.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (id int, c1 datetime);")
tk.MustExec("insert into t values (1, '2015-06-07 12:12:12')")
tk.MustQuery("select id from t where c1 = '2015-06-07 12:12:12'").Check(testkit.Rows("1"))
// Test issue 17816
tk.MustExec("drop table if exists t0")
tk.MustExec("CREATE TABLE t0(c0 INT)")
tk.MustExec("INSERT INTO t0 VALUES (100000)")
tk.MustQuery("SELECT * FROM t0 WHERE NOT SPACE(t0.c0)").Check(testkit.Rows("100000"))
}
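// TestTimestampTimeZone verifies that stored timestamps are rendered according
// to the time_zone session variable across table reader, index reader, and
// index lookup plans.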
func (s *testSuite) TestTimestampTimeZone(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (ts timestamp)")
tk.MustExec("set time_zone = '+00:00'")
tk.MustExec("insert into t values ('2017-04-27 22:40:42')")
// The timestamp yields a different value when the time_zone session variable changes.
tests := []struct {
timezone string
expect string
}{
{"+10:00", "2017-04-28 08:40:42"},
{"-6:00", "2017-04-27 16:40:42"},
}
for _, tt := range tests {
tk.MustExec(fmt.Sprintf("set time_zone = '%s'", tt.timezone))
tk.MustQuery("select * from t").Check(testkit.Rows(tt.expect))
}
// For issue https://github.com/pingcap/tidb/issues/3467
tk.MustExec("drop table if exists t1")
tk.MustExec(`CREATE TABLE t1 (
id bigint(20) NOT NULL AUTO_INCREMENT,
uid int(11) DEFAULT NULL,
datetime timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
ip varchar(128) DEFAULT NULL,
PRIMARY KEY (id),
KEY i_datetime (datetime),
KEY i_userid (uid)
);`)
tk.MustExec(`INSERT INTO t1 VALUES (123381351,1734,"2014-03-31 08:57:10","127.0.0.1");`)
r := tk.MustQuery("select datetime from t1;") // Cover TableReaderExec
r.Check(testkit.Rows("2014-03-31 08:57:10"))
r = tk.MustQuery("select datetime from t1 where datetime='2014-03-31 08:57:10';")
r.Check(testkit.Rows("2014-03-31 08:57:10")) // Cover IndexReaderExec
r = tk.MustQuery("select * from t1 where datetime='2014-03-31 08:57:10';")
r.Check(testkit.Rows("123381351 1734 2014-03-31 08:57:10 127.0.0.1")) // Cover IndexLookupExec
// For issue https://github.com/pingcap/tidb/issues/3485
tk.MustExec("set time_zone = 'Asia/Shanghai'")
tk.MustExec("drop table if exists t1")
tk.MustExec(`CREATE TABLE t1 (
id bigint(20) NOT NULL AUTO_INCREMENT,
datetime timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (id)
);`)
tk.MustExec(`INSERT INTO t1 VALUES (123381351,"2014-03-31 08:57:10");`)
r = tk.MustQuery(`select * from t1 where datetime="2014-03-31 08:57:10";`)
r.Check(testkit.Rows("123381351 2014-03-31 08:57:10"))
tk.MustExec(`alter table t1 add key i_datetime (datetime);`)
r = tk.MustQuery(`select * from t1 where datetime="2014-03-31 08:57:10";`)
r.Check(testkit.Rows("123381351 2014-03-31 08:57:10"))
r = tk.MustQuery(`select * from t1;`)
r.Check(testkit.Rows("123381351 2014-03-31 08:57:10"))
r = tk.MustQuery("select datetime from t1 where datetime='2014-03-31 08:57:10';")
r.Check(testkit.Rows("2014-03-31 08:57:10"))
}
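// TestTimestampDefaultValueTimeZone verifies that a timestamp column's default
// value is stored in UTC and displayed in the current session time zone,
// including the zero default and a default added via ALTER TABLE.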
func (s *testSuite) TestTimestampDefaultValueTimeZone(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("set time_zone = '+08:00'")
tk.MustExec(`create table t (a int, b timestamp default "2019-01-17 14:46:14")`)
tk.MustExec("insert into t set a=1")
r := tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '2019-01-17 14:46:14'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
tk.MustExec("set time_zone = '+00:00'")
tk.MustExec("insert into t set a=2")
r = tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '2019-01-17 06:46:14'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
r = tk.MustQuery(`select a,b from t order by a`)
r.Check(testkit.Rows("1 2019-01-17 06:46:14", "2 2019-01-17 06:46:14"))
// Test the case where the column's version is greater than ColumnInfoVersion1.
sctx := tk.Se.(sessionctx.Context)
is := domain.GetDomain(sctx).InfoSchema()
c.Assert(is, NotNil)
tb, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
tb.Cols()[1].Version = model.ColumnInfoVersion1 + 1
tk.MustExec("insert into t set a=3")
r = tk.MustQuery(`select a,b from t order by a`)
r.Check(testkit.Rows("1 2019-01-17 06:46:14", "2 2019-01-17 06:46:14", "3 2019-01-17 06:46:14"))
tk.MustExec("delete from t where a=3")
// Change time zone back.
tk.MustExec("set time_zone = '+08:00'")
r = tk.MustQuery(`select a,b from t order by a`)
r.Check(testkit.Rows("1 2019-01-17 14:46:14", "2 2019-01-17 14:46:14"))
tk.MustExec("set time_zone = '-08:00'")
r = tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '2019-01-16 22:46:14'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
// Test the zero default value under multiple time zones.
defer tk.MustExec(fmt.Sprintf("set @@sql_mode='%s'", tk.MustQuery("select @@sql_mode").Rows()[0][0]))
tk.MustExec("set @@sql_mode='STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION';")
tk.MustExec("drop table if exists t")
tk.MustExec("set time_zone = '+08:00'")
tk.MustExec(`create table t (a int, b timestamp default "0000-00-00 00")`)
tk.MustExec("insert into t set a=1")
r = tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '0000-00-00 00:00:00'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
tk.MustExec("set time_zone = '+00:00'")
tk.MustExec("insert into t set a=2")
r = tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '0000-00-00 00:00:00'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
tk.MustExec("set time_zone = '-08:00'")
tk.MustExec("insert into t set a=3")
r = tk.MustQuery(`show create table t`)
r.Check(testkit.Rows("t CREATE TABLE `t` (\n" + " `a` int(11) DEFAULT NULL,\n" + " `b` timestamp DEFAULT '0000-00-00 00:00:00'\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
r = tk.MustQuery(`select a,b from t order by a`)
r.Check(testkit.Rows("1 0000-00-00 00:00:00", "2 0000-00-00 00:00:00", "3 0000-00-00 00:00:00"))
// Test adding a timestamp column with default current_timestamp.
tk.MustExec(`drop table if exists t`)
tk.MustExec(`set time_zone = 'Asia/Shanghai'`)
tk.MustExec(`create table t (a int)`)
tk.MustExec(`insert into t set a=1`)
tk.MustExec(`alter table t add column b timestamp not null default current_timestamp;`)
timeIn8 := tk.MustQuery("select b from t").Rows()[0][0]
tk.MustExec(`set time_zone = '+00:00'`)
timeIn0 := tk.MustQuery("select b from t").Rows()[0][0]
c.Assert(timeIn8 != timeIn0, IsTrue, Commentf("%v == %v", timeIn8, timeIn0))
datumTimeIn8, err := expression.GetTimeValue(tk.Se, timeIn8, mysql.TypeTimestamp, 0)
c.Assert(err, IsNil)
tIn8To0 := datumTimeIn8.GetMysqlTime()
timeZoneIn8, err := time.LoadLocation("Asia/Shanghai")
c.Assert(err, IsNil)
err = tIn8To0.ConvertTimeZone(timeZoneIn8, time.UTC)
c.Assert(err, IsNil)
c.Assert(timeIn0 == tIn8To0.String(), IsTrue, Commentf("%v != %v", timeIn0, tIn8To0.String()))
// Test adding an index.
tk.MustExec(`alter table t add index(b);`)
tk.MustExec("admin check table t")
tk.MustExec(`set time_zone = '+05:00'`)
tk.MustExec("admin check table t")
}
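// TestTiDBCurrentTS checks that @@tidb_current_ts reports the start TS of the
// active transaction and is 0 outside a transaction.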
func (s *testSuite) TestTiDBCurrentTS(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustQuery("select @@tidb_current_ts").Check(testkit.Rows("0"))
tk.MustExec("begin")
rows := tk.MustQuery("select @@tidb_current_ts").Rows()
tsStr := rows[0][0].(string)
txn, err := tk.Se.Txn(true)
c.Assert(err, IsNil)
c.Assert(tsStr, Equals, fmt.Sprintf("%d", txn.StartTS()))
tk.MustExec("begin")
rows = tk.MustQuery("select @@tidb_current_ts").Rows()
newTsStr := rows[0][0].(string)
txn, err = tk.Se.Txn(true)
c.Assert(err, IsNil)
c.Assert(newTsStr, Equals, fmt.Sprintf("%d", txn.StartTS()))
c.Assert(newTsStr, Not(Equals), tsStr)
tk.MustExec("commit")
tk.MustQuery("select @@tidb_current_ts").Check(testkit.Rows("0"))
_, err = tk.Exec("set @@tidb_current_ts = '1'")
c.Assert(terror.ErrorEqual(err, variable.ErrIncorrectScope), IsTrue, Commentf("err %v", err))
}
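// TestTiDBLastTxnInfo checks the start_ts, commit_ts, and error fields exposed
// through @@tidb_last_txn_info after commits, rollbacks, and a failed
// optimistic commit.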
func (s *testSuite) TestTiDBLastTxnInfo(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key)")
tk.MustQuery("select @@tidb_last_txn_info").Check(testkit.Rows(""))
tk.MustExec("insert into t values (1)")
rows1 := tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.start_ts'), json_extract(@@tidb_last_txn_info, '$.commit_ts')").Rows()
c.Assert(rows1[0][0].(string), Greater, "0")
c.Assert(rows1[0][0].(string), Less, rows1[0][1].(string))
tk.MustExec("begin")
tk.MustQuery("select a from t where a = 1").Check(testkit.Rows("1"))
rows2 := tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.start_ts'), json_extract(@@tidb_last_txn_info, '$.commit_ts'), @@tidb_current_ts").Rows()
tk.MustExec("commit")
rows3 := tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.start_ts'), json_extract(@@tidb_last_txn_info, '$.commit_ts')").Rows()
c.Assert(rows2[0][0], Equals, rows1[0][0])
c.Assert(rows2[0][1], Equals, rows1[0][1])
c.Assert(rows3[0][0], Equals, rows1[0][0])
c.Assert(rows3[0][1], Equals, rows1[0][1])
c.Assert(rows2[0][1], Less, rows2[0][2])
tk.MustExec("begin")
tk.MustExec("update t set a = a + 1 where a = 1")
rows4 := tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.start_ts'), json_extract(@@tidb_last_txn_info, '$.commit_ts'), @@tidb_current_ts").Rows()
tk.MustExec("commit")
rows5 := tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.start_ts'), json_extract(@@tidb_last_txn_info, '$.commit_ts')").Rows()
c.Assert(rows4[0][0], Equals, rows1[0][0])
c.Assert(rows4[0][1], Equals, rows1[0][1])
c.Assert(rows4[0][2], Equals, rows5[0][0])
c.Assert(rows4[0][1], Less, rows4[0][2])
c.Assert(rows4[0][2], Less, rows5[0][1])
tk.MustExec("begin")
tk.MustExec("update t set a = a + 1 where a = 2")
tk.MustExec("rollback")
rows6 := tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.start_ts'), json_extract(@@tidb_last_txn_info, '$.commit_ts')").Rows()
c.Assert(rows6[0][0], Equals, rows5[0][0])
c.Assert(rows6[0][1], Equals, rows5[0][1])
tk.MustExec("begin optimistic")
tk.MustExec("insert into t values (2)")
_, err := tk.Exec("commit")
c.Assert(err, NotNil)
rows7 := tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.start_ts'), json_extract(@@tidb_last_txn_info, '$.commit_ts'), json_extract(@@tidb_last_txn_info, '$.error')").Rows()
c.Assert(rows7[0][0], Greater, rows5[0][0])
c.Assert(rows7[0][1], Equals, "0")
c.Assert(strings.Contains(err.Error(), rows7[0][1].(string)), IsTrue)
_, err = tk.Exec("set @@tidb_last_txn_info = '{}'")
c.Assert(terror.ErrorEqual(err, variable.ErrIncorrectScope), IsTrue, Commentf("err %v", err))
}
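// TestTiDBLastTxnInfoCommitMode toggles async commit and 1PC, and uses a
// failpoint to force fallbacks, checking the commit mode and fallback flags
// reported in @@tidb_last_txn_info.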
func (s *testSerialSuite) TestTiDBLastTxnInfoCommitMode(c *C) {
defer config.RestoreFunc()()
config.UpdateGlobal(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.SafeWindow = time.Second
})
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key, v int)")
tk.MustExec("insert into t values (1, 1)")
tk.MustExec("set @@tidb_enable_async_commit = 1")
tk.MustExec("set @@tidb_enable_1pc = 0")
tk.MustExec("update t set v = v + 1 where a = 1")
rows := tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.txn_commit_mode'), json_extract(@@tidb_last_txn_info, '$.async_commit_fallback'), json_extract(@@tidb_last_txn_info, '$.one_pc_fallback')").Rows()
c.Log(rows)
c.Assert(rows[0][0], Equals, `"async_commit"`)
c.Assert(rows[0][1], Equals, "false")
c.Assert(rows[0][2], Equals, "false")
tk.MustExec("set @@tidb_enable_async_commit = 0")
tk.MustExec("set @@tidb_enable_1pc = 1")
tk.MustExec("update t set v = v + 1 where a = 1")
rows = tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.txn_commit_mode'), json_extract(@@tidb_last_txn_info, '$.async_commit_fallback'), json_extract(@@tidb_last_txn_info, '$.one_pc_fallback')").Rows()
c.Assert(rows[0][0], Equals, `"1pc"`)
c.Assert(rows[0][1], Equals, "false")
c.Assert(rows[0][2], Equals, "false")
tk.MustExec("set @@tidb_enable_async_commit = 0")
tk.MustExec("set @@tidb_enable_1pc = 0")
tk.MustExec("update t set v = v + 1 where a = 1")
rows = tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.txn_commit_mode'), json_extract(@@tidb_last_txn_info, '$.async_commit_fallback'), json_extract(@@tidb_last_txn_info, '$.one_pc_fallback')").Rows()
c.Assert(rows[0][0], Equals, `"2pc"`)
c.Assert(rows[0][1], Equals, "false")
c.Assert(rows[0][2], Equals, "false")
c.Assert(failpoint.Enable("tikvclient/invalidMaxCommitTS", "return"), IsNil)
defer func() {
c.Assert(failpoint.Disable("tikvclient/invalidMaxCommitTS"), IsNil)
}()
tk.MustExec("set @@tidb_enable_async_commit = 1")
tk.MustExec("set @@tidb_enable_1pc = 0")
tk.MustExec("update t set v = v + 1 where a = 1")
rows = tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.txn_commit_mode'), json_extract(@@tidb_last_txn_info, '$.async_commit_fallback'), json_extract(@@tidb_last_txn_info, '$.one_pc_fallback')").Rows()
c.Log(rows)
c.Assert(rows[0][0], Equals, `"2pc"`)
c.Assert(rows[0][1], Equals, "true")
c.Assert(rows[0][2], Equals, "false")
tk.MustExec("set @@tidb_enable_async_commit = 0")
tk.MustExec("set @@tidb_enable_1pc = 1")
tk.MustExec("update t set v = v + 1 where a = 1")
rows = tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.txn_commit_mode'), json_extract(@@tidb_last_txn_info, '$.async_commit_fallback'), json_extract(@@tidb_last_txn_info, '$.one_pc_fallback')").Rows()
c.Log(rows)
c.Assert(rows[0][0], Equals, `"2pc"`)
c.Assert(rows[0][1], Equals, "false")
c.Assert(rows[0][2], Equals, "true")
tk.MustExec("set @@tidb_enable_async_commit = 1")
tk.MustExec("set @@tidb_enable_1pc = 1")
tk.MustExec("update t set v = v + 1 where a = 1")
rows = tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.txn_commit_mode'), json_extract(@@tidb_last_txn_info, '$.async_commit_fallback'), json_extract(@@tidb_last_txn_info, '$.one_pc_fallback')").Rows()
c.Log(rows)
c.Assert(rows[0][0], Equals, `"2pc"`)
c.Assert(rows[0][1], Equals, "true")
c.Assert(rows[0][2], Equals, "true")
}
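// TestTiDBLastQueryInfo checks the start_ts and for_update_ts reported in
// @@tidb_last_query_info; they diverge once a pessimistic statement has to
// fetch a newer for-update timestamp after a conflict.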
func (s *testSuite) TestTiDBLastQueryInfo(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key, v int)")
tk.MustQuery("select json_extract(@@tidb_last_query_info, '$.start_ts'), json_extract(@@tidb_last_query_info, '$.start_ts')").Check(testkit.Rows("0 0"))
toUint64 := func(str interface{}) uint64 {
res, err := strconv.ParseUint(str.(string), 10, 64)
c.Assert(err, IsNil)
return res
}
tk.MustExec("select * from t")
rows := tk.MustQuery("select json_extract(@@tidb_last_query_info, '$.start_ts'), json_extract(@@tidb_last_query_info, '$.for_update_ts')").Rows()
c.Assert(toUint64(rows[0][0]), Greater, uint64(0))
c.Assert(rows[0][0], Equals, rows[0][1])
tk.MustExec("insert into t values (1, 10)")
rows = tk.MustQuery("select json_extract(@@tidb_last_query_info, '$.start_ts'), json_extract(@@tidb_last_query_info, '$.for_update_ts')").Rows()
c.Assert(toUint64(rows[0][0]), Greater, uint64(0))
c.Assert(rows[0][0], Equals, rows[0][1])
// tidb_last_txn_info is still valid after checking query info.
rows = tk.MustQuery("select json_extract(@@tidb_last_txn_info, '$.start_ts'), json_extract(@@tidb_last_txn_info, '$.commit_ts')").Rows()
c.Assert(toUint64(rows[0][0]), Greater, uint64(0))
c.Assert(rows[0][0].(string), Less, rows[0][1].(string))
tk.MustExec("begin pessimistic")
tk.MustExec("select * from t")
rows = tk.MustQuery("select json_extract(@@tidb_last_query_info, '$.start_ts'), json_extract(@@tidb_last_query_info, '$.for_update_ts')").Rows()
c.Assert(toUint64(rows[0][0]), Greater, uint64(0))
c.Assert(rows[0][0], Equals, rows[0][1])
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
tk2.MustExec("update t set v = 11 where a = 1")
tk.MustExec("select * from t")
rows = tk.MustQuery("select json_extract(@@tidb_last_query_info, '$.start_ts'), json_extract(@@tidb_last_query_info, '$.for_update_ts')").Rows()
c.Assert(toUint64(rows[0][0]), Greater, uint64(0))
c.Assert(rows[0][0], Equals, rows[0][1])
tk.MustExec("update t set v = 12 where a = 1")
rows = tk.MustQuery("select json_extract(@@tidb_last_query_info, '$.start_ts'), json_extract(@@tidb_last_query_info, '$.for_update_ts')").Rows()
c.Assert(toUint64(rows[0][0]), Greater, uint64(0))
c.Assert(toUint64(rows[0][0]), Less, toUint64(rows[0][1]))
tk.MustExec("commit")
tk.MustExec("set transaction isolation level read committed")
tk.MustExec("begin pessimistic")
tk.MustExec("select * from t")
rows = tk.MustQuery("select json_extract(@@tidb_last_query_info, '$.start_ts'), json_extract(@@tidb_last_query_info, '$.for_update_ts')").Rows()
c.Assert(toUint64(rows[0][0]), Greater, uint64(0))
c.Assert(toUint64(rows[0][0]), Less, toUint64(rows[0][1]))
tk.MustExec("rollback")
}
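// TestSelectForUpdate covers write-conflict detection between sessions when
// rows are locked with "select ... for update" in optimistic transactions.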
func (s *testSuite) TestSelectForUpdate(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
tk.MustExec("drop table if exists t, t1")
txn, err := tk.Se.Txn(true)
c.Assert(kv.ErrInvalidTxn.Equal(err), IsTrue)
c.Assert(txn.Valid(), IsFalse)
tk.MustExec("create table t (c1 int, c2 int, c3 int)")
tk.MustExec("insert t values (11, 2, 3)")
tk.MustExec("insert t values (12, 2, 3)")
tk.MustExec("insert t values (13, 2, 3)")
tk.MustExec("create table t1 (c1 int)")
tk.MustExec("insert t1 values (11)")
// conflict
tk1.MustExec("begin")
tk1.MustQuery("select * from t where c1=11 for update")
tk2.MustExec("begin")
tk2.MustExec("update t set c2=211 where c1=11")
tk2.MustExec("commit")
_, err = tk1.Exec("commit")
c.Assert(err, NotNil)
// no conflict for subquery.
tk1.MustExec("begin")
tk1.MustQuery("select * from t where exists(select null from t1 where t1.c1=t.c1) for update")
tk2.MustExec("begin")
tk2.MustExec("update t set c2=211 where c1=12")
tk2.MustExec("commit")
tk1.MustExec("commit")
// no conflict
tk1.MustExec("begin")
tk1.MustQuery("select * from t where c1=11 for update")
tk2.MustExec("begin")
tk2.MustExec("update t set c2=22 where c1=12")
tk2.MustExec("commit")
tk1.MustExec("commit")
// no conflict, autocommit
tk1.MustExec("set @@autocommit=1;")
tk1.MustQuery("select * from t where c1=11 for update")
tk2.MustExec("begin")
tk2.MustExec("update t set c2=211 where c1=11")
tk2.MustExec("commit")
tk1.MustExec("commit")
// conflict
tk1.MustExec("begin")
tk1.MustQuery("select * from (select * from t for update) t join t1 for update")
tk2.MustExec("begin")
tk2.MustExec("update t1 set c1 = 13")
tk2.MustExec("commit")
_, err = tk1.Exec("commit")
c.Assert(err, NotNil)
}
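// TestSelectForUpdateOf checks that "for update of t" locks only the rows of
// t, leaving other tables in the join unlocked.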
func (s *testSuite) TestSelectForUpdateOf(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
tk.MustExec("drop table if exists t, t1")
tk.MustExec("create table t (i int)")
tk.MustExec("create table t1 (i int)")
tk.MustExec("insert t values (1)")
tk.MustExec("insert t1 values (1)")
tk.MustExec("begin pessimistic")
tk.MustQuery("select * from t, t1 where t.i = t1.i for update of t").Check(testkit.Rows("1 1"))
tk1.MustExec("begin pessimistic")
// t1 is not locked ("for update of t" locked only t), so this succeeds
tk1.MustQuery("select * from t1 for update").Check(testkit.Rows("1"))
// t is locked by tk, so "for update nowait" fails
err := tk1.ExecToErr("select * from t for update nowait")
c.Assert(terror.ErrorEqual(err, error2.ErrLockAcquireFailAndNoWaitSet), IsTrue, Commentf("error: ", err))
// tk rolls back, so tk1 can acquire the lock
tk.MustExec("rollback")
tk1.MustQuery("select * from t for update nowait").Check(testkit.Rows("1"))
tk1.MustExec("rollback")
}
func (s *testSuite) TestEmptyEnum(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (e enum('Y', 'N'))")
tk.MustExec("set sql_mode='STRICT_TRANS_TABLES'")
_, err := tk.Exec("insert into t values (0)")
c.Assert(terror.ErrorEqual(err, types.ErrTruncated), IsTrue, Commentf("err %v", err))
_, err = tk.Exec("insert into t values ('abc')")
c.Assert(terror.ErrorEqual(err, types.ErrTruncated), IsTrue, Commentf("err %v", err))
tk.MustExec("set sql_mode=''")
tk.MustExec("insert into t values (0)")
tk.MustQuery("select * from t").Check(testkit.Rows(""))
tk.MustExec("insert into t values ('abc')")
tk.MustQuery("select * from t").Check(testkit.Rows("", ""))
tk.MustExec("insert into t values (null)")
tk.MustQuery("select * from t").Check(testkit.Rows("", "", "<nil>"))
// Test https://github.com/pingcap/tidb/issues/29525.
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t (id int auto_increment primary key, c1 enum('a', '', 'c'));")
tk.MustExec("insert into t(c1) values (0);")
tk.MustQuery("select id, c1+0, c1 from t;").Check(testkit.Rows("1 0 "))
tk.MustExec("alter table t change c1 c1 enum('a', '') not null;")
tk.MustQuery("select id, c1+0, c1 from t;").Check(testkit.Rows("1 0 "))
tk.MustExec("insert into t(c1) values (0);")
tk.MustQuery("select id, c1+0, c1 from t;").Check(testkit.Rows("1 0 ", "2 0 "))
}
// TestIssue4024 tests https://github.com/pingcap/tidb/issues/4024.
func (s *testSuite) TestIssue4024(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database test2")
tk.MustExec("use test2")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t values(1)")
tk.MustExec("use test")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t values(1)")
tk.MustExec("update t, test2.t set test2.t.a=2")
tk.MustQuery("select * from t").Check(testkit.Rows("1"))
tk.MustQuery("select * from test2.t").Check(testkit.Rows("2"))
tk.MustExec("update test.t, test2.t set test.t.a=3")
tk.MustQuery("select * from t").Check(testkit.Rows("3"))
tk.MustQuery("select * from test2.t").Check(testkit.Rows("2"))
}
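// Flags controlling which request attribute checkRequestClient validates in
// SendRequest: none, the sync-log flag, or the DDL add-index priority.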
const (
checkRequestOff = iota
checkRequestSyncLog
checkDDLAddIndexPriority
)
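// checkRequestClient wraps a tikv.Client so tests can verify attributes of
// outgoing requests (priority, sync log) according to the active check flag.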
type checkRequestClient struct {
tikv.Client
priority kvrpcpb.CommandPri
lowPriorityCnt uint32
mu struct {
sync.RWMutex
checkFlags uint32
syncLog bool
}
}
func (c *checkRequestClient) setCheckPriority(priority kvrpcpb.CommandPri) {
atomic.StoreInt32((*int32)(&c.priority), int32(priority))
}
func (c *checkRequestClient) getCheckPriority() kvrpcpb.CommandPri {
return (kvrpcpb.CommandPri)(atomic.LoadInt32((*int32)(&c.priority)))
}
func (c *checkRequestClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
resp, err := c.Client.SendRequest(ctx, addr, req, timeout)
c.mu.RLock()
checkFlags := c.mu.checkFlags
c.mu.RUnlock()
if checkFlags == checkRequestSyncLog {
switch req.Type {
case tikvrpc.CmdPrewrite, tikvrpc.CmdCommit:
c.mu.RLock()
syncLog := c.mu.syncLog
c.mu.RUnlock()
if syncLog != req.SyncLog {
return nil, errors.New("fail to set sync log")
}
}
} else if checkFlags == checkDDLAddIndexPriority {
if req.Type == tikvrpc.CmdScan {
if c.getCheckPriority() != req.Priority {
return nil, errors.New("fail to set priority")
}
} else if req.Type == tikvrpc.CmdPrewrite {
if c.getCheckPriority() == kvrpcpb.CommandPri_Low {
atomic.AddUint32(&c.lowPriorityCnt, 1)
}
}
}
return resp, err
}
type testSuiteWithCliBaseCharset struct {
testSuiteWithCliBase
}
func (s *testSuiteWithCliBaseCharset) SetUpSuite(c *C) {
collate.SetCharsetFeatEnabledForTest(true)
s.testSuiteWithCliBase.SetUpSuite(c)
}
func (s *testSuiteWithCliBaseCharset) TearDownSuite(c *C) {
s.testSuiteWithCliBase.TearDownSuite(c)
collate.SetCharsetFeatEnabledForTest(false)
}
type testSuiteWithCliBase struct {
store kv.Storage
dom *domain.Domain
cli *checkRequestClient
}
type testSuite1 struct {
testSuiteWithCliBase
}
type testSerialSuite2 struct {
testSuiteWithCliBase
}
func (s *testSuiteWithCliBase) SetUpSuite(c *C) {
cli := &checkRequestClient{}
hijackClient := func(c tikv.Client) tikv.Client {
cli.Client = c
return cli
}
s.cli = cli
var err error
s.store, err = mockstore.NewMockStore(
mockstore.WithClientHijacker(hijackClient),
)
c.Assert(err, IsNil)
session.SetStatsLease(0)
s.dom, err = session.BootstrapSession(s.store)
c.Assert(err, IsNil)
s.dom.SetStatsUpdating(true)
}
func (s *testSuiteWithCliBase) TearDownSuite(c *C) {
s.dom.Close()
s.store.Close()
}
func (s *testSuiteWithCliBase) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show tables")
for _, tb := range r.Rows() {
tableName := tb[0]
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
func (s *testSuite2) TestAddIndexPriority(c *C) {
cli := &checkRequestClient{}
hijackClient := func(c tikv.Client) tikv.Client {
cli.Client = c
return cli
}
store, err := mockstore.NewMockStore(
mockstore.WithClientHijacker(hijackClient),
)
c.Assert(err, IsNil)
dom, err := session.BootstrapSession(store)
c.Assert(err, IsNil)
defer func() {
dom.Close()
err = store.Close()
c.Assert(err, IsNil)
}()
tk := testkit.NewTestKit(c, store)
tk.MustExec("use test")
tk.MustExec("create table t1 (id int, v int)")
// Insert some data to make sure the planner builds an IndexLookup for t1.
for i := 0; i < 10; i++ {
tk.MustExec(fmt.Sprintf("insert into t1 values (%d, %d)", i, i))
}
cli.mu.Lock()
cli.mu.checkFlags = checkDDLAddIndexPriority
cli.mu.Unlock()
cli.setCheckPriority(kvrpcpb.CommandPri_Low)
tk.MustExec("alter table t1 add index t1_index (id);")
c.Assert(atomic.LoadUint32(&cli.lowPriorityCnt) > 0, IsTrue)
cli.mu.Lock()
cli.mu.checkFlags = checkRequestOff
cli.mu.Unlock()
tk.MustExec("alter table t1 drop index t1_index;")
tk.MustExec("SET SESSION tidb_ddl_reorg_priority = 'PRIORITY_NORMAL'")
cli.mu.Lock()
cli.mu.checkFlags = checkDDLAddIndexPriority
cli.mu.Unlock()
cli.setCheckPriority(kvrpcpb.CommandPri_Normal)
tk.MustExec("alter table t1 add index t1_index (id);")
cli.mu.Lock()
cli.mu.checkFlags = checkRequestOff
cli.mu.Unlock()
tk.MustExec("alter table t1 drop index t1_index;")
tk.MustExec("SET SESSION tidb_ddl_reorg_priority = 'PRIORITY_HIGH'")
cli.mu.Lock()
cli.mu.checkFlags = checkDDLAddIndexPriority
cli.mu.Unlock()
cli.setCheckPriority(kvrpcpb.CommandPri_High)
tk.MustExec("alter table t1 add index t1_index (id);")
cli.mu.Lock()
cli.mu.checkFlags = checkRequestOff
cli.mu.Unlock()
}
func (s *testSuite1) TestAlterTableComment(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t_1")
tk.MustExec("create table t_1 (c1 int, c2 int, c3 int default 1, index (c1)) comment = 'test table';")
tk.MustExec("alter table `t_1` comment 'this is table comment';")
result := tk.MustQuery("select table_comment from information_schema.tables where table_name = 't_1';")
result.Check(testkit.Rows("this is table comment"))
tk.MustExec("alter table `t_1` comment 'table t comment';")
result = tk.MustQuery("select table_comment from information_schema.tables where table_name = 't_1';")
result.Check(testkit.Rows("table t comment"))
}
func (s *testSuite) TestTimezonePushDown(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (ts timestamp)")
defer tk.MustExec("drop table t")
tk.MustExec(`insert into t values ("2018-09-13 10:02:06")`)
systemTZ := timeutil.SystemLocation()
c.Assert(systemTZ.String(), Not(Equals), "System")
c.Assert(systemTZ.String(), Not(Equals), "Local")
ctx := context.Background()
count := 0
ctx1 := context.WithValue(ctx, "CheckSelectRequestHook", func(req *kv.Request) {
count += 1
dagReq := new(tipb.DAGRequest)
err := proto.Unmarshal(req.Data, dagReq)
c.Assert(err, IsNil)
c.Assert(dagReq.GetTimeZoneName(), Equals, systemTZ.String())
})
_, err := tk.Se.Execute(ctx1, `select * from t where ts = "2018-09-13 10:02:06"`)
c.Assert(err, IsNil)
tk.MustExec(`set time_zone="System"`)
_, err = tk.Se.Execute(ctx1, `select * from t where ts = "2018-09-13 10:02:06"`)
c.Assert(err, IsNil)
c.Assert(count, Equals, 2) // Make sure the hook function is called.
}
func (s *testSuite) TestNotFillCacheFlag(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (id int primary key)")
defer tk.MustExec("drop table t")
tk.MustExec("insert into t values (1)")
tests := []struct {
sql string
expect bool
}{
{"select SQL_NO_CACHE * from t", true},
{"select SQL_CACHE * from t", false},
{"select * from t", false},
}
count := 0
ctx := context.Background()
for _, test := range tests {
ctx1 := context.WithValue(ctx, "CheckSelectRequestHook", func(req *kv.Request) {
count++
if req.NotFillCache != test.expect {
c.Errorf("sql=%s, expect=%v, get=%v", test.sql, test.expect, req.NotFillCache)
}
})
rs, err := tk.Se.Execute(ctx1, test.sql)
c.Assert(err, IsNil)
tk.ResultSetToResult(rs[0], Commentf("sql: %v", test.sql))
}
c.Assert(count, Equals, len(tests)) // Make sure the hook function is called.
}
func (s *testSuite1) TestSyncLog(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
cli := s.cli
cli.mu.Lock()
cli.mu.checkFlags = checkRequestSyncLog
cli.mu.syncLog = true
cli.mu.Unlock()
tk.MustExec("create table t (id int primary key)")
cli.mu.Lock()
cli.mu.syncLog = false
cli.mu.Unlock()
tk.MustExec("insert into t values (1)")
cli.mu.Lock()
cli.mu.checkFlags = checkRequestOff
cli.mu.Unlock()
}
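// TestHandleTransfer checks that row handles are transferred correctly through
// table scans, index reads, and double reads over uncommitted changes.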
func (s *testSuite) TestHandleTransfer(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t(a int, index idx(a))")
tk.MustExec("insert into t values(1), (2), (4)")
tk.MustExec("begin")
tk.MustExec("update t set a = 3 where a = 4")
// Test a table-scan read whose result needs the handle.
tk.MustQuery("select * from t ignore index(idx)").Check(testkit.Rows("1", "2", "3"))
tk.MustExec("insert into t values(4)")
// Test a single (index-only) read whose result needs the handle.
tk.MustQuery("select * from t use index(idx)").Check(testkit.Rows("1", "2", "3", "4"))
tk.MustQuery("select * from t use index(idx) order by a desc").Check(testkit.Rows("4", "3", "2", "1"))
tk.MustExec("update t set a = 5 where a = 3")
tk.MustQuery("select * from t use index(idx)").Check(testkit.Rows("1", "2", "4", "5"))
tk.MustExec("commit")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, index idx(a))")
tk.MustExec("insert into t values(3, 3), (1, 1), (2, 2)")
// Second, test double read.
tk.MustQuery("select * from t use index(idx) order by a").Check(testkit.Rows("1 1", "2 2", "3 3"))
}
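// TestBit checks length and range validation for BIT columns of various
// widths, with both numeric and string literals.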
func (s *testSuite) TestBit(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(2))")
tk.MustExec("insert into t values (0), (1), (2), (3)")
_, err := tk.Exec("insert into t values (4)")
c.Assert(err, NotNil)
_, err = tk.Exec("insert into t values ('a')")
c.Assert(err, NotNil)
r, err := tk.Exec("select * from t where c1 = 2")
c.Assert(err, IsNil)
req := r.NewChunk(nil)
err = r.Next(context.Background(), req)
c.Assert(err, IsNil)
c.Assert(types.BinaryLiteral(req.GetRow(0).GetBytes(0)), DeepEquals, types.NewBinaryLiteralFromUint(2, -1))
r.Close()
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(31))")
tk.MustExec("insert into t values (0x7fffffff)")
_, err = tk.Exec("insert into t values (0x80000000)")
c.Assert(err, NotNil)
_, err = tk.Exec("insert into t values (0xffffffff)")
c.Assert(err, NotNil)
tk.MustExec("insert into t values ('123')")
tk.MustExec("insert into t values ('1234')")
_, err = tk.Exec("insert into t values ('12345)")
c.Assert(err, NotNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(62))")
tk.MustExec("insert into t values ('12345678')")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(61))")
_, err = tk.Exec("insert into t values ('12345678')")
c.Assert(err, NotNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(32))")
tk.MustExec("insert into t values (0x7fffffff)")
tk.MustExec("insert into t values (0xffffffff)")
_, err = tk.Exec("insert into t values (0x1ffffffff)")
c.Assert(err, NotNil)
tk.MustExec("insert into t values ('1234')")
_, err = tk.Exec("insert into t values ('12345')")
c.Assert(err, NotNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(64))")
tk.MustExec("insert into t values (0xffffffffffffffff)")
tk.MustExec("insert into t values ('12345678')")
_, err = tk.Exec("insert into t values ('123456789')")
c.Assert(err, NotNil)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c1 bit(64))")
tk.MustExec("insert into t values (0xffffffffffffffff)")
tk.MustExec("insert into t values ('12345678')")
tk.MustQuery("select * from t where c1").Check(testkit.Rows("\xff\xff\xff\xff\xff\xff\xff\xff", "12345678"))
}
func (s *testSuite) TestEnum(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c enum('a', 'b', 'c'))")
tk.MustExec("insert into t values ('a'), (2), ('c')")
tk.MustQuery("select * from t where c = 'a'").Check(testkit.Rows("a"))
tk.MustQuery("select c + 1 from t where c = 2").Check(testkit.Rows("3"))
tk.MustExec("delete from t")
tk.MustExec("insert into t values ()")
tk.MustExec("insert into t values (null), ('1')")
tk.MustQuery("select c + 1 from t where c = 1").Check(testkit.Rows("2"))
tk.MustExec("delete from t")
tk.MustExec("insert into t values(1), (2), (3)")
tk.MustQuery("select * from t where c").Check(testkit.Rows("a", "b", "c"))
}
func (s *testSuite) TestSet(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (c set('a', 'b', 'c'))")
tk.MustExec("insert into t values ('a'), (2), ('c'), ('a,b'), ('b,a')")
tk.MustQuery("select * from t where c = 'a'").Check(testkit.Rows("a"))
tk.MustQuery("select * from t where c = 'a,b'").Check(testkit.Rows("a,b", "a,b"))
tk.MustQuery("select c + 1 from t where c = 2").Check(testkit.Rows("3"))
tk.MustExec("delete from t")
tk.MustExec("insert into t values ()")
tk.MustExec("insert into t values (null), ('1')")
tk.MustQuery("select c + 1 from t where c = 1").Check(testkit.Rows("2"))
tk.MustExec("delete from t")
tk.MustExec("insert into t values(3)")
tk.MustQuery("select * from t where c").Check(testkit.Rows("a,b"))
}
func (s *testSuite) TestSubqueryInValues(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (id int, name varchar(20))")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (gid int)")
tk.MustExec("insert into t1 (gid) value (1)")
tk.MustExec("insert into t (id, name) value ((select gid from t1) ,'asd')")
tk.MustQuery("select * from t").Check(testkit.Rows("1 asd"))
}
func (s *testSuite) TestEnhancedRangeAccess(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key, b int)")
tk.MustExec("insert into t values(1, 2), (2, 1)")
tk.MustQuery("select * from t where (a = 1 and b = 2) or (a = 2 and b = 1)").Check(testkit.Rows("1 2", "2 1"))
tk.MustQuery("select * from t where (a = 1 and b = 1) or (a = 2 and b = 2)").Check(nil)
}
// TestMaxInt64Handle tests issue #4810.
func (s *testSuite) TestMaxInt64Handle(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id bigint, PRIMARY KEY (id))")
tk.MustExec("insert into t values(9223372036854775807)")
tk.MustExec("select * from t where id = 9223372036854775807")
tk.MustQuery("select * from t where id = 9223372036854775807;").Check(testkit.Rows("9223372036854775807"))
tk.MustQuery("select * from t").Check(testkit.Rows("9223372036854775807"))
_, err := tk.Exec("insert into t values(9223372036854775807)")
c.Assert(err, NotNil)
tk.MustExec("delete from t where id = 9223372036854775807")
tk.MustQuery("select * from t").Check(nil)
}
func (s *testSuite) TestTableScanWithPointRanges(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id int, PRIMARY KEY (id))")
tk.MustExec("insert into t values(1), (5), (10)")
tk.MustQuery("select * from t where id in(1, 2, 10)").Check(testkit.Rows("1", "10"))
}
func (s *testSuite) TestUnsignedPk(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id bigint unsigned primary key)")
var num1, num2 uint64 = math.MaxInt64 + 1, math.MaxInt64 + 2
tk.MustExec(fmt.Sprintf("insert into t values(%v), (%v), (1), (2)", num1, num2))
num1Str := strconv.FormatUint(num1, 10)
num2Str := strconv.FormatUint(num2, 10)
tk.MustQuery("select * from t order by id").Check(testkit.Rows("1", "2", num1Str, num2Str))
tk.MustQuery("select * from t where id not in (2)").Check(testkit.Rows(num1Str, num2Str, "1"))
tk.MustExec("drop table t")
tk.MustExec("create table t(a bigint unsigned primary key, b int, index idx(b))")
tk.MustExec("insert into t values(9223372036854775808, 1), (1, 1)")
tk.MustQuery("select * from t use index(idx) where b = 1 and a < 2").Check(testkit.Rows("1 1"))
tk.MustQuery("select * from t use index(idx) where b = 1 order by b, a").Check(testkit.Rows("1 1", "9223372036854775808 1"))
}
func (s *testSuite) TestSignedCommonHandle(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(k1 int, k2 int, primary key(k1, k2))")
tk.MustExec("insert into t(k1, k2) value(-100, 1), (-50, 1), (0, 0), (1, 1), (3, 3)")
tk.MustQuery("select k1 from t order by k1").Check(testkit.Rows("-100", "-50", "0", "1", "3"))
tk.MustQuery("select k1 from t order by k1 desc").Check(testkit.Rows("3", "1", "0", "-50", "-100"))
tk.MustQuery("select k1 from t where k1 < -51").Check(testkit.Rows("-100"))
tk.MustQuery("select k1 from t where k1 < -1").Check(testkit.Rows("-100", "-50"))
tk.MustQuery("select k1 from t where k1 <= 0").Check(testkit.Rows("-100", "-50", "0"))
tk.MustQuery("select k1 from t where k1 < 2").Check(testkit.Rows("-100", "-50", "0", "1"))
tk.MustQuery("select k1 from t where k1 < -1 and k1 > -90").Check(testkit.Rows("-50"))
}
func (s *testSuite) TestIssue5666(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("set @@profiling=1")
tk.MustQuery("SELECT QUERY_ID, SUM(DURATION) AS SUM_DURATION FROM INFORMATION_SCHEMA.PROFILING GROUP BY QUERY_ID;").Check(testkit.Rows("0 0"))
}
func (s *testSuite) TestIssue5341(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("drop table if exists test.t")
tk.MustExec("create table test.t(a char)")
tk.MustExec("insert into test.t value('a')")
tk.MustQuery("select * from test.t where a < 1 order by a limit 0;").Check(testkit.Rows())
}
func (s *testSuite) TestContainDotColumn(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists test.t1")
tk.MustExec("create table test.t1(t1.a char)")
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t2(a char, t2.b int)")
tk.MustExec("drop table if exists t3")
_, err := tk.Exec("create table t3(s.a char);")
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.ErrWrongTableName))
}
func (s *testSuite) TestCheckIndex(c *C) {
s.ctx = mock.NewContext()
s.ctx.Store = s.store
se, err := session.CreateSession4Test(s.store)
c.Assert(err, IsNil)
defer se.Close()
_, err = se.Execute(context.Background(), "create database test_admin")
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "use test_admin")
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "create table t (pk int primary key, c int default 1, c1 int default 1, unique key c(c))")
c.Assert(err, IsNil)
is := s.domain.InfoSchema()
db := model.NewCIStr("test_admin")
dbInfo, ok := is.SchemaByName(db)
c.Assert(ok, IsTrue)
tblName := model.NewCIStr("t")
tbl, err := is.TableByName(db, tblName)
c.Assert(err, IsNil)
tbInfo := tbl.Meta()
alloc := autoid.NewAllocator(s.store, dbInfo.ID, tbInfo.ID, false, autoid.RowIDAllocType)
tb, err := tables.TableFromMeta(autoid.NewAllocators(alloc), tbInfo)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t c")
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t C")
c.Assert(err, IsNil)
// set data to:
// index data (handle, data): (1, 10), (2, 20)
// table data (handle, data): (1, 10), (2, 20)
recordVal1 := types.MakeDatums(int64(1), int64(10), int64(11))
recordVal2 := types.MakeDatums(int64(2), int64(20), int64(21))
c.Assert(s.ctx.NewTxn(context.Background()), IsNil)
_, err = tb.AddRecord(s.ctx, recordVal1)
c.Assert(err, IsNil)
_, err = tb.AddRecord(s.ctx, recordVal2)
c.Assert(err, IsNil)
txn, err := s.ctx.Txn(true)
c.Assert(err, IsNil)
c.Assert(txn.Commit(context.Background()), IsNil)
mockCtx := mock.NewContext()
idx := tb.Indices()[0]
sc := &stmtctx.StatementContext{TimeZone: time.Local}
_, err = se.Execute(context.Background(), "admin check index t idx_inexistent")
c.Assert(strings.Contains(err.Error(), "not exist"), IsTrue)
// set data to:
// index data (handle, data): (1, 10), (2, 20), (3, 30)
// table data (handle, data): (1, 10), (2, 20), (4, 40)
txn, err = s.store.Begin()
c.Assert(err, IsNil)
_, err = idx.Create(mockCtx, txn, types.MakeDatums(int64(30)), kv.IntHandle(3), nil)
c.Assert(err, IsNil)
key := tablecodec.EncodeRowKey(tb.Meta().ID, kv.IntHandle(4).Encoded())
setColValue(c, txn, key, types.NewDatum(int64(40)))
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t c")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[executor:8133]handle 3, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:30, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:<nil>")
// set data to:
// index data (handle, data): (1, 10), (2, 20), (3, 30), (4, 40)
// table data (handle, data): (1, 10), (2, 20), (4, 40)
txn, err = s.store.Begin()
c.Assert(err, IsNil)
_, err = idx.Create(mockCtx, txn, types.MakeDatums(int64(40)), kv.IntHandle(4), nil)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t c")
c.Assert(strings.Contains(err.Error(), "table count 3 != index(c) count 4"), IsTrue)
// set data to:
// index data (handle, data): (1, 10), (4, 40)
// table data (handle, data): (1, 10), (2, 20), (4, 40)
txn, err = s.store.Begin()
c.Assert(err, IsNil)
err = idx.Delete(sc, txn, types.MakeDatums(int64(30)), kv.IntHandle(3))
c.Assert(err, IsNil)
err = idx.Delete(sc, txn, types.MakeDatums(int64(20)), kv.IntHandle(2))
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "admin check index t c")
c.Assert(strings.Contains(err.Error(), "table count 3 != index(c) count 2"), IsTrue)
// TODO: pass the case below:
// set data to:
// index data (handle, data): (1, 10), (4, 40), (2, 30)
// table data (handle, data): (1, 10), (2, 20), (4, 40)
}
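// setColValue encodes a row value and writes it directly into the store
// through the given transaction, bypassing the executor, so the test above
// can craft table data that is inconsistent with its index.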
func setColValue(c *C, txn kv.Transaction, key kv.Key, v types.Datum) {
row := []types.Datum{v, {}}
colIDs := []int64{2, 3}
sc := &stmtctx.StatementContext{TimeZone: time.Local}
rd := rowcodec.Encoder{Enable: true}
value, err := tablecodec.EncodeRow(sc, row, colIDs, nil, nil, &rd)
c.Assert(err, IsNil)
err = txn.Set(key, value)
c.Assert(err, IsNil)
}
func (s *testSuite) TestCheckTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
// Test 'admin check table' when the table has a unique index with null values.
tk.MustExec("use test")
tk.MustExec("drop table if exists admin_test;")
tk.MustExec("create table admin_test (c1 int, c2 int, c3 int default 1, index (c1), unique key(c2));")
tk.MustExec("insert admin_test (c1, c2) values (1, 1), (2, 2), (NULL, NULL);")
tk.MustExec("admin check table admin_test;")
}
func (s *testSuite) TestCheckTableClusterIndex(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn
tk.MustExec("drop table if exists admin_test;")
tk.MustExec("create table admin_test (c1 int, c2 int, c3 int default 1, primary key (c1, c2), index (c1), unique key(c2));")
tk.MustExec("insert admin_test (c1, c2) values (1, 1), (2, 2), (3, 3);")
tk.MustExec("admin check table admin_test;")
}
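// TestCoprocessorStreamingFlag checks which plans set the Streaming flag on
// coprocessor requests: plain scans, index lookups, and filters do, while
// limit, aggregation, and topN do not.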
func (s *testSuite) TestCoprocessorStreamingFlag(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (id int, value int, index idx(id))")
// Add some data to make statistics work.
for i := 0; i < 100; i++ {
tk.MustExec(fmt.Sprintf("insert into t values (%d, %d)", i, i))
}
tests := []struct {
sql string
expect bool
}{
{"select * from t", true}, // TableReader
{"select * from t where id = 5", true}, // IndexLookup
{"select * from t where id > 5", true}, // Filter
{"select * from t limit 3", false}, // Limit
{"select avg(id) from t", false}, // Aggregate
{"select * from t order by value limit 3", false}, // TopN
}
ctx := context.Background()
for _, test := range tests {
ctx1 := context.WithValue(ctx, "CheckSelectRequestHook", func(req *kv.Request) {
if req.Streaming != test.expect {
c.Errorf("sql=%s, expect=%v, get=%v", test.sql, test.expect, req.Streaming)
}
})
rs, err := tk.Se.Execute(ctx1, test.sql)
c.Assert(err, IsNil)
tk.ResultSetToResult(rs[0], Commentf("sql: %v", test.sql))
}
}
func (s *testSuite) TestIncorrectLimitArg(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test;`)
tk.MustExec(`drop table if exists t;`)
tk.MustExec(`create table t(a bigint);`)
tk.MustExec(`prepare stmt1 from 'select * from t limit ?';`)
tk.MustExec(`prepare stmt2 from 'select * from t limit ?, ?';`)
tk.MustExec(`set @a = -1;`)
tk.MustExec(`set @b = 1;`)
var err error
_, err = tk.Se.Execute(context.TODO(), `execute stmt1 using @a;`)
c.Assert(err.Error(), Equals, `[planner:1210]Incorrect arguments to LIMIT`)
_, err = tk.Se.Execute(context.TODO(), `execute stmt2 using @b, @a;`)
c.Assert(err.Error(), Equals, `[planner:1210]Incorrect arguments to LIMIT`)
}
func (s *testSuite) TestLimit(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test;`)
tk.MustExec(`drop table if exists t;`)
tk.MustExec(`create table t(a bigint, b bigint);`)
tk.MustExec(`insert into t values(1, 1), (2, 2), (3, 30), (4, 40), (5, 5), (6, 6);`)
tk.MustQuery(`select * from t order by a limit 1, 1;`).Check(testkit.Rows(
"2 2",
))
tk.MustQuery(`select * from t order by a limit 1, 2;`).Check(testkit.Rows(
"2 2",
"3 30",
))
tk.MustQuery(`select * from t order by a limit 1, 3;`).Check(testkit.Rows(
"2 2",
"3 30",
"4 40",
))
tk.MustQuery(`select * from t order by a limit 1, 4;`).Check(testkit.Rows(
"2 2",
"3 30",
"4 40",
"5 5",
))
// test inline projection
tk.MustQuery(`select a from t where a > 0 limit 1, 1;`).Check(testkit.Rows(
"2",
))
tk.MustQuery(`select a from t where a > 0 limit 1, 2;`).Check(testkit.Rows(
"2",
"3",
))
tk.MustQuery(`select b from t where a > 0 limit 1, 3;`).Check(testkit.Rows(
"2",
"30",
"40",
))
tk.MustQuery(`select b from t where a > 0 limit 1, 4;`).Check(testkit.Rows(
"2",
"30",
"40",
"5",
))
// test @@tidb_init_chunk_size=2
tk.MustExec(`set @@tidb_init_chunk_size=2;`)
tk.MustQuery(`select * from t where a > 0 limit 2, 1;`).Check(testkit.Rows(
"3 30",
))
tk.MustQuery(`select * from t where a > 0 limit 2, 2;`).Check(testkit.Rows(
"3 30",
"4 40",
))
tk.MustQuery(`select * from t where a > 0 limit 2, 3;`).Check(testkit.Rows(
"3 30",
"4 40",
"5 5",
))
tk.MustQuery(`select * from t where a > 0 limit 2, 4;`).Check(testkit.Rows(
"3 30",
"4 40",
"5 5",
"6 6",
))
// test inline projection
tk.MustQuery(`select a from t order by a limit 2, 1;`).Check(testkit.Rows(
"3",
))
tk.MustQuery(`select b from t order by a limit 2, 2;`).Check(testkit.Rows(
"30",
"40",
))
tk.MustQuery(`select a from t order by a limit 2, 3;`).Check(testkit.Rows(
"3",
"4",
"5",
))
tk.MustQuery(`select b from t order by a limit 2, 4;`).Check(testkit.Rows(
"30",
"40",
"5",
"6",
))
}
func (s *testSuite) TestCoprocessorStreamingWarning(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a double)")
tk.MustExec("insert into t value(1.2)")
tk.MustExec("set @@session.tidb_enable_streaming = 1")
result := tk.MustQuery("select * from t where a/0 > 1")
result.Check(testkit.Rows())
tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning|1365|Division by 0"))
}
func (s *testSuite3) TestYearTypeDeleteIndex(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a YEAR, PRIMARY KEY(a));")
tk.MustExec("insert into t set a = '2151';")
tk.MustExec("delete from t;")
tk.MustExec("admin check table t")
}
func (s *testSuite3) TestForSelectScopeInUnion(c *C) {
// A union B for update: the "for update" option belongs to the union
// statement, so it should work on both A and B.
tk1 := testkit.NewTestKit(c, s.store)
tk2 := testkit.NewTestKit(c, s.store)
tk1.MustExec("use test")
tk1.MustExec("drop table if exists t")
tk1.MustExec("create table t(a int)")
tk1.MustExec("insert into t values (1)")
tk1.MustExec("begin")
// 'For update' would act on the second select.
tk1.MustQuery("select 1 as a union select a from t for update")
tk2.MustExec("use test")
tk2.MustExec("update t set a = a + 1")
// As tk1 uses select 'for update', it should detect the conflict and fail.
_, err := tk1.Exec("commit")
c.Assert(err, NotNil)
tk1.MustExec("begin")
tk1.MustQuery("select 1 as a union select a from t limit 5 for update")
tk1.MustQuery("select 1 as a union select a from t order by a for update")
tk2.MustExec("update t set a = a + 1")
_, err = tk1.Exec("commit")
c.Assert(err, NotNil)
}
func (s *testSuite3) TestUnsignedDecimalOverflow(c *C) {
tests := []struct {
input interface{}
hasErr bool
err string
}{{
-1,
true,
"Out of range value for column",
}, {
"-1.1e-1",
true,
"Out of range value for column",
}, {
-1.1,
true,
"Out of range value for column",
}, {
-0,
false,
"",
},
}
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a decimal(10,2) unsigned)")
for _, t := range tests {
res, err := tk.Exec("insert into t values (?)", t.input)
if res != nil {
defer res.Close()
}
if t.hasErr {
c.Assert(err, NotNil)
c.Assert(strings.Contains(err.Error(), t.err), IsTrue)
} else {
c.Assert(err, IsNil)
}
if res != nil {
c.Assert(res.Close(), IsNil)
}
}
tk.MustExec("set sql_mode=''")
tk.MustExec("delete from t")
tk.MustExec("insert into t values (?)", -1)
r := tk.MustQuery("select a from t limit 1")
r.Check(testkit.Rows("0.00"))
}
func (s *testSuite3) TestIndexJoinTableDualPanic(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists a")
tk.MustExec("create table a (f1 int, f2 varchar(32), primary key (f1))")
tk.MustExec("insert into a (f1,f2) values (1,'a'), (2,'b'), (3,'c')")
// TODO here: index join causes a data race on txn.
tk.MustQuery("select /*+ inl_merge_join(a) */ a.* from a inner join (select 1 as k1,'k2-1' as k2) as k on a.f1=k.k1;").
Check(testkit.Rows("1 a"))
}
func (s *testSuite3) TestSortLeftJoinWithNullColumnInRightChildPanic(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1(a int)")
tk.MustExec("create table t2(a int)")
tk.MustExec("insert into t1(a) select 1;")
tk.MustQuery("select b.n from t1 left join (select a as a, null as n from t2) b on b.a = t1.a order by t1.a").
Check(testkit.Rows("<nil>"))
}
func (s *testSuiteP1) TestUnionAutoSignedCast(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1,t2")
tk.MustExec("create table t1 (id int, i int, b bigint, d double, dd decimal)")
tk.MustExec("create table t2 (id int, i int unsigned, b bigint unsigned, d double unsigned, dd decimal unsigned)")
tk.MustExec("insert into t1 values(1, -1, -1, -1.1, -1)")
tk.MustExec("insert into t2 values(2, 1, 1, 1.1, 1)")
tk.MustQuery("select * from t1 union select * from t2 order by id").
Check(testkit.Rows("1 -1 -1 -1.1 -1", "2 1 1 1.1 1"))
tk.MustQuery("select id, i, b, d, dd from t2 union select id, i, b, d, dd from t1 order by id").
Check(testkit.Rows("1 -1 -1 -1.1 -1", "2 1 1 1.1 1"))
tk.MustQuery("select id, i from t2 union select id, cast(i as unsigned int) from t1 order by id").
Check(testkit.Rows("1 18446744073709551615", "2 1"))
tk.MustQuery("select dd from t2 union all select dd from t2").
Check(testkit.Rows("1", "1"))
tk.MustExec("drop table if exists t3,t4")
tk.MustExec("create table t3 (id int, v int)")
tk.MustExec("create table t4 (id int, v double unsigned)")
tk.MustExec("insert into t3 values (1, -1)")
tk.MustExec("insert into t4 values (2, 1)")
tk.MustQuery("select id, v from t3 union select id, v from t4 order by id").
Check(testkit.Rows("1 -1", "2 1"))
tk.MustQuery("select id, v from t4 union select id, v from t3 order by id").
Check(testkit.Rows("1 -1", "2 1"))
tk.MustExec("drop table if exists t5,t6,t7")
tk.MustExec("create table t5 (id int, v bigint unsigned)")
tk.MustExec("create table t6 (id int, v decimal)")
tk.MustExec("create table t7 (id int, v bigint)")
tk.MustExec("insert into t5 values (1, 1)")
tk.MustExec("insert into t6 values (2, -1)")
tk.MustExec("insert into t7 values (3, -1)")
tk.MustQuery("select id, v from t5 union select id, v from t6 order by id").
Check(testkit.Rows("1 1", "2 -1"))
tk.MustQuery("select id, v from t5 union select id, v from t7 union select id, v from t6 order by id").
Check(testkit.Rows("1 1", "2 -1", "3 -1"))
}
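// TestUpdateClustered runs the same table-driven multi-table UPDATE cases
// twice, once with a nonclustered and once with a clustered primary key on
// table b, and finishes each case with `admin check table` to make sure the
// updated rows and their index entries stay consistent.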
func (s *testSuiteP1) TestUpdateClustered(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
type resultChecker struct {
check string
assert []string
}
for _, clustered := range []string{"", "clustered"} {
tests := []struct {
initSchema []string
initData []string
dml string
resultCheck []resultChecker
}{
{ // left join + update both + match & unmatched + pk
[]string{
"drop table if exists a, b",
"create table a (k1 int, k2 int, v int)",
fmt.Sprintf("create table b (a int not null, k1 int, k2 int, v int, primary key(k1, k2) %s)", clustered),
},
[]string{
"insert into a values (1, 1, 1), (2, 2, 2)", // unmatched + matched
"insert into b values (2, 2, 2, 2)",
},
"update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.v = 20, b.v = 100, a.k1 = a.k1 + 1, b.k1 = b.k1 + 1, a.k2 = a.k2 + 2, b.k2 = b.k2 + 2",
[]resultChecker{
{
"select * from b",
[]string{"2 3 4 100"},
},
{
"select * from a",
[]string{"2 3 20", "3 4 20"},
},
},
},
{ // left join + update both + match & unmatched + pk
[]string{
"drop table if exists a, b",
"create table a (k1 int, k2 int, v int)",
fmt.Sprintf("create table b (a int not null, k1 int, k2 int, v int, primary key(k1, k2) %s)", clustered),
},
[]string{
"insert into a values (1, 1, 1), (2, 2, 2)", // unmatched + matched
"insert into b values (2, 2, 2, 2)",
},
"update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100",
[]resultChecker{
{
"select * from b",
[]string{"2 3 4 100"},
},
{
"select * from a",
[]string{"2 3 20", "3 4 20"},
},
},
},
{ // left join + update both + match & unmatched + prefix pk
[]string{
"drop table if exists a, b",
"create table a (k1 varchar(100), k2 varchar(100), v varchar(100))",
fmt.Sprintf("create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) %s, key kk1(k1(1), v(1)))", clustered),
},
[]string{
"insert into a values ('11', '11', '11'), ('22', '22', '22')", // unmatched + matched
"insert into b values ('22', '22', '22', '22')",
},
"update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100",
[]resultChecker{
{
"select * from b",
[]string{"22 23 24 100"},
},
{
"select * from a",
[]string{"12 13 20", "23 24 20"},
},
},
},
{ // right join + update both + match & unmatched + prefix pk
[]string{
"drop table if exists a, b",
"create table a (k1 varchar(100), k2 varchar(100), v varchar(100))",
fmt.Sprintf("create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) %s, key kk1(k1(1), v(1)))", clustered),
},
[]string{
"insert into a values ('11', '11', '11'), ('22', '22', '22')", // unmatched + matched
"insert into b values ('22', '22', '22', '22')",
},
"update b right join a on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100",
[]resultChecker{
{
"select * from b",
[]string{"22 23 24 100"},
},
{
"select * from a",
[]string{"12 13 20", "23 24 20"},
},
},
},
{ // inner join + update both + match & unmatched + prefix pk
[]string{
"drop table if exists a, b",
"create table a (k1 varchar(100), k2 varchar(100), v varchar(100))",
fmt.Sprintf("create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) %s, key kk1(k1(1), v(1)))", clustered),
},
[]string{
"insert into a values ('11', '11', '11'), ('22', '22', '22')", // unmatched + matched
"insert into b values ('22', '22', '22', '22')",
},
"update b join a on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100",
[]resultChecker{
{
"select * from b",
[]string{"22 23 24 100"},
},
{
"select * from a",
[]string{"11 11 11", "23 24 20"},
},
},
},
{
[]string{
"drop table if exists a, b",
"create table a (k1 varchar(100), k2 varchar(100), v varchar(100))",
fmt.Sprintf("create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) %s, key kk1(k1(1), v(1)))", clustered),
},
[]string{
"insert into a values ('11', '11', '11'), ('22', '22', '22')", // unmatched + matched
"insert into b values ('22', '22', '22', '22')",
},
"update a set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, a.v = 20 where exists (select 1 from b where a.k1 = b.k1 and a.k2 = b.k2)",
[]resultChecker{
{
"select * from b",
[]string{"22 22 22 22"},
},
{
"select * from a",
[]string{"11 11 11", "23 24 20"},
},
},
},
}
for _, test := range tests {
for _, s := range test.initSchema {
tk.MustExec(s)
}
for _, s := range test.initData {
tk.MustExec(s)
}
tk.MustExec(test.dml)
for _, checker := range test.resultCheck {
tk.MustQuery(checker.check).Check(testkit.Rows(checker.assert...))
}
tk.MustExec("admin check table a")
tk.MustExec("admin check table b")
}
}
}
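// TestUpdateJoin covers multi-table UPDATE through left/right joins:
// updating one side, both sides, unmatched rows, rows that are all NULL,
// and targets with auto_increment, primary-key, and generated columns.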
func (s *testSuite6) TestUpdateJoin(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2, t3, t4, t5, t6, t7")
tk.MustExec("create table t1(k int, v int)")
tk.MustExec("create table t2(k int, v int)")
tk.MustExec("create table t3(id int auto_increment, k int, v int, primary key(id))")
tk.MustExec("create table t4(k int, v int)")
tk.MustExec("create table t5(v int, k int, primary key(k))")
tk.MustExec("insert into t1 values (1, 1)")
tk.MustExec("insert into t4 values (3, 3)")
tk.MustExec("create table t6 (id int, v longtext)")
tk.MustExec("create table t7 (x int, id int, v longtext, primary key(id))")
// test the normal case of updating one row in a single table.
tk.MustExec("update t1 set v = 0 where k = 1")
tk.MustQuery("select k, v from t1 where k = 1").Check(testkit.Rows("1 0"))
// test the case where a table with auto_increment or non-null columns is the right table of a left join.
tk.MustExec("update t1 left join t3 on t1.k = t3.k set t1.v = 1")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 1"))
tk.MustQuery("select id, k, v from t3").Check(testkit.Rows())
// test left join where the right table has no matching record but its columns are assigned in the SET clause.
tk.MustExec("update t1 left join t2 on t1.k = t2.k set t1.v = t2.v, t2.v = 3")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 <nil>"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows())
// test the case where the left table's update references right-table data while the right-table columns are themselves modified.
tk.MustExec("update t1 left join t2 on t1.k = t2.k set t2.v = 3, t1.v = t2.v")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 <nil>"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows())
// test right join where the left table has no matching record but its columns are assigned in the SET clause.
tk.MustExec("update t2 right join t1 on t2.k = t1.k set t2.v = 4, t1.v = 0")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 0"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows())
// test the case of right join and left join at the same time.
tk.MustExec("update t1 left join t2 on t1.k = t2.k right join t4 on t4.k = t2.k set t1.v = 4, t2.v = 4, t4.v = 4")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 0"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows())
tk.MustQuery("select k, v from t4").Check(testkit.Rows("3 4"))
// test normal left join and the case that the right table has matching rows.
tk.MustExec("insert t2 values (1, 10)")
tk.MustExec("update t1 left join t2 on t1.k = t2.k set t2.v = 11")
tk.MustQuery("select k, v from t2").Check(testkit.Rows("1 11"))
// test continuously joining the same table and updating the unmatched records.
tk.MustExec("update t1 t11 left join t2 on t11.k = t2.k left join t1 t12 on t2.v = t12.k set t12.v = 233, t11.v = 111")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("1 111"))
tk.MustQuery("select k, v from t2").Check(testkit.Rows("1 11"))
// test the left join case where the left table has records but they are all null.
tk.MustExec("delete from t1")
tk.MustExec("delete from t2")
tk.MustExec("insert into t1 values (null, null)")
tk.MustExec("update t1 left join t2 on t1.k = t2.k set t1.v = 1")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("<nil> 1"))
// test the case where the right table of a left join has a primary key.
tk.MustExec("insert t5 values(0, 0)")
tk.MustExec("update t1 left join t5 on t1.k = t5.k set t1.v = 2")
tk.MustQuery("select k, v from t1").Check(testkit.Rows("<nil> 2"))
tk.MustQuery("select k, v from t5").Check(testkit.Rows("0 0"))
tk.MustExec("insert into t6 values (1, NULL)")
tk.MustExec("insert into t7 values (5, 1, 'a')")
tk.MustExec("update t6, t7 set t6.v = t7.v where t6.id = t7.id and t7.x = 5")
tk.MustQuery("select v from t6").Check(testkit.Rows("a"))
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1(id int primary key, v int, gv int GENERATED ALWAYS AS (v * 2) STORED)")
tk.MustExec("create table t2(id int, v int)")
tk.MustExec("update t1 tt1 inner join (select count(t1.id) a, t1.id from t1 left join t2 on t1.id = t2.id group by t1.id) x on tt1.id = x.id set tt1.v = tt1.v + x.a")
}
func (s *testSuite3) TestMaxOneRow(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t1`)
tk.MustExec(`drop table if exists t2`)
tk.MustExec(`create table t1(a double, b double);`)
tk.MustExec(`create table t2(a double, b double);`)
tk.MustExec(`insert into t1 values(1, 1), (2, 2), (3, 3);`)
tk.MustExec(`insert into t2 values(0, 0);`)
tk.MustExec(`set @@tidb_init_chunk_size=1;`)
rs, err := tk.Exec(`select (select t1.a from t1 where t1.a > t2.a) as a from t2;`)
c.Assert(err, IsNil)
err = rs.Next(context.TODO(), rs.NewChunk(nil))
c.Assert(err.Error(), Equals, "[executor:1242]Subquery returns more than 1 row")
c.Assert(rs.Close(), IsNil)
}
func (s *testSuiteP2) TestCurrentTimestampValueSelection(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t,t1")
tk.MustExec("create table t (id int, t0 timestamp null default current_timestamp, t1 timestamp(1) null default current_timestamp(1), t2 timestamp(2) null default current_timestamp(2) on update current_timestamp(2))")
tk.MustExec("insert into t (id) values (1)")
rs := tk.MustQuery("select t0, t1, t2 from t where id = 1")
t0 := rs.Rows()[0][0].(string)
t1 := rs.Rows()[0][1].(string)
t2 := rs.Rows()[0][2].(string)
c.Assert(len(strings.Split(t0, ".")), Equals, 1)
c.Assert(len(strings.Split(t1, ".")[1]), Equals, 1)
c.Assert(len(strings.Split(t2, ".")[1]), Equals, 2)
tk.MustQuery("select id from t where t0 = ?", t0).Check(testkit.Rows("1"))
tk.MustQuery("select id from t where t1 = ?", t1).Check(testkit.Rows("1"))
tk.MustQuery("select id from t where t2 = ?", t2).Check(testkit.Rows("1"))
time.Sleep(time.Second)
tk.MustExec("update t set t0 = now() where id = 1")
rs = tk.MustQuery("select t2 from t where id = 1")
newT2 := rs.Rows()[0][0].(string)
c.Assert(newT2 != t2, IsTrue)
tk.MustExec("create table t1 (id int, a timestamp, b timestamp(2), c timestamp(3))")
tk.MustExec("insert into t1 (id, a, b, c) values (1, current_timestamp(2), current_timestamp, current_timestamp(3))")
rs = tk.MustQuery("select a, b, c from t1 where id = 1")
a := rs.Rows()[0][0].(string)
b := rs.Rows()[0][1].(string)
d := rs.Rows()[0][2].(string)
c.Assert(len(strings.Split(a, ".")), Equals, 1)
c.Assert(strings.Split(b, ".")[1], Equals, "00")
c.Assert(len(strings.Split(d, ".")[1]), Equals, 3)
}
func (s *testSuite3) TestRowID(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t`)
tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly
tk.MustExec(`create table t(a varchar(10), b varchar(10), c varchar(1), index idx(a, b, c));`)
tk.MustExec(`insert into t values('a', 'b', 'c');`)
tk.MustExec(`insert into t values('a', 'b', 'c');`)
tk.MustQuery(`select b, _tidb_rowid from t use index(idx) where a = 'a';`).Check(testkit.Rows(
`b 1`,
`b 2`,
))
tk.MustExec(`begin;`)
tk.MustExec(`select * from t for update`)
tk.MustQuery(`select distinct b from t use index(idx) where a = 'a';`).Check(testkit.Rows(`b`))
tk.MustExec(`commit;`)
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t(a varchar(5) primary key)`)
tk.MustExec(`insert into t values('a')`)
tk.MustQuery("select *, _tidb_rowid from t use index(`primary`) where _tidb_rowid=1").Check(testkit.Rows("a 1"))
}
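// TestDoSubquery checks that a DO statement evaluates its subquery for side
// effects only: it must succeed whether or not the subquery returns rows,
// and it must not produce a result set.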
func (s *testSuite3) TestDoSubquery(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t(a int)`)
_, err := tk.Exec(`do 1 in (select * from t)`)
c.Assert(err, IsNil, Commentf("err %v", err))
tk.MustExec(`insert into t values(1)`)
r, err := tk.Exec(`do 1 in (select * from t)`)
c.Assert(err, IsNil, Commentf("err %v", err))
c.Assert(r, IsNil, Commentf("result of DO statement should be empty"))
}
func (s *testSuite3) TestSubqueryTableAlias(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t`)
tk.MustExec("set sql_mode = ''")
tk.MustGetErrCode("select a, b from (select 1 a) ``, (select 2 b) ``;", mysql.ErrDerivedMustHaveAlias)
tk.MustGetErrCode("select a, b from (select 1 a) `x`, (select 2 b) `x`;", mysql.ErrNonuniqTable)
tk.MustGetErrCode("select a, b from (select 1 a), (select 2 b);", mysql.ErrDerivedMustHaveAlias)
// ambiguous column name
tk.MustGetErrCode("select a from (select 1 a) ``, (select 2 a) ``;", mysql.ErrDerivedMustHaveAlias)
tk.MustGetErrCode("select a from (select 1 a) `x`, (select 2 a) `x`;", mysql.ErrNonuniqTable)
tk.MustGetErrCode("select x.a from (select 1 a) `x`, (select 2 a) `x`;", mysql.ErrNonuniqTable)
tk.MustGetErrCode("select a from (select 1 a), (select 2 a);", mysql.ErrDerivedMustHaveAlias)
tk.MustExec("set sql_mode = 'oracle';")
tk.MustQuery("select a, b from (select 1 a) ``, (select 2 b) ``;").Check(testkit.Rows("1 2"))
tk.MustQuery("select a, b from (select 1 a) `x`, (select 2 b) `x`;").Check(testkit.Rows("1 2"))
tk.MustQuery("select a, b from (select 1 a), (select 2 b);").Check(testkit.Rows("1 2"))
// ambiguous column name
tk.MustGetErrCode("select a from (select 1 a) ``, (select 2 a) ``;", mysql.ErrNonUniq)
tk.MustGetErrCode("select a from (select 1 a) `x`, (select 2 a) `x`;", mysql.ErrNonUniq)
tk.MustGetErrCode("select x.a from (select 1 a) `x`, (select 2 a) `x`;", mysql.ErrNonUniq)
tk.MustGetErrCode("select a from (select 1 a), (select 2 a);", mysql.ErrNonUniq)
}
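// TestTSOFail enables the mockGetTSFail failpoint so that fetching a start
// timestamp (TSO) fails, then checks that executing a query surfaces the
// error rather than succeeding.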
func (s *testSerialSuite) TestTSOFail(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t(a int)`)
c.Assert(failpoint.Enable("github.com/pingcap/tidb/session/mockGetTSFail", "return"), IsNil)
ctx := failpoint.WithHook(context.Background(), func(ctx context.Context, fpname string) bool {
return fpname == "github.com/pingcap/tidb/session/mockGetTSFail"
})
_, err := tk.Se.Execute(ctx, `select * from t`)
c.Assert(err, NotNil)
c.Assert(failpoint.Disable("github.com/pingcap/tidb/session/mockGetTSFail"), IsNil)
}
func (s *testSuite3) TestSelectHashPartitionTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists th`)
tk.MustExec("set @@session.tidb_enable_table_partition = '1';")
tk.MustExec(`create table th (a int, b int) partition by hash(a) partitions 3;`)
defer tk.MustExec(`drop table if exists th`)
tk.MustExec(`insert into th values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8);`)
tk.MustExec("insert into th values (-1,-1),(-2,-2),(-3,-3),(-4,-4),(-5,-5),(-6,-6),(-7,-7),(-8,-8);")
tk.MustQuery("select b from th order by a").Check(testkit.Rows("-8", "-7", "-6", "-5", "-4", "-3", "-2", "-1", "0", "1", "2", "3", "4", "5", "6", "7", "8"))
tk.MustQuery(" select * from th where a=-2;").Check(testkit.Rows("-2 -2"))
tk.MustQuery(" select * from th where a=5;").Check(testkit.Rows("5 5"))
}
func (s *testSuiteP1) TestSelectPartition(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists th, tr, tl`)
tk.MustExec("set @@session.tidb_enable_list_partition = ON;")
tk.MustExec(`create table th (a int, b int) partition by hash(a) partitions 3;`)
tk.MustExec(`create table tr (a int, b int)
partition by range (a) (
partition r0 values less than (4),
partition r1 values less than (7),
partition r3 values less than maxvalue)`)
tk.MustExec(`create table tl (a int, b int, unique index idx(a)) partition by list (a) (
partition p0 values in (3,5,6,9,17),
partition p1 values in (1,2,10,11,19,20),
partition p2 values in (4,12,13,14,18),
partition p3 values in (7,8,15,16,null));`)
defer tk.MustExec(`drop table if exists th, tr, tl`)
tk.MustExec(`insert into th values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8);`)
tk.MustExec("insert into th values (-1,-1),(-2,-2),(-3,-3),(-4,-4),(-5,-5),(-6,-6),(-7,-7),(-8,-8);")
tk.MustExec(`insert into tr values (-3,-3),(3,3),(4,4),(7,7),(8,8);`)
tk.MustExec(`insert into tl values (3,3),(1,1),(4,4),(7,7),(8,8),(null,null);`)
// select 1 partition.
tk.MustQuery("select b from th partition (p0) order by a").Check(testkit.Rows("-6", "-3", "0", "3", "6"))
tk.MustQuery("select b from tr partition (r0) order by a").Check(testkit.Rows("-3", "3"))
tk.MustQuery("select b from tl partition (p0) order by a").Check(testkit.Rows("3"))
tk.MustQuery("select b from th partition (p0,P0) order by a").Check(testkit.Rows("-6", "-3", "0", "3", "6"))
tk.MustQuery("select b from tr partition (r0,R0,r0) order by a").Check(testkit.Rows("-3", "3"))
tk.MustQuery("select b from tl partition (p0,P0,p0) order by a").Check(testkit.Rows("3"))
// select multiple partitions.
tk.MustQuery("select b from th partition (P2,p0) order by a").Check(testkit.Rows("-8", "-6", "-5", "-3", "-2", "0", "2", "3", "5", "6", "8"))
tk.MustQuery("select b from tr partition (r1,R3) order by a").Check(testkit.Rows("4", "7", "8"))
tk.MustQuery("select b from tl partition (p0,P3) order by a").Check(testkit.Rows("<nil>", "3", "7", "8"))
// test select unknown partition error
err := tk.ExecToErr("select b from th partition (p0,p4)")
c.Assert(err.Error(), Equals, "[table:1735]Unknown partition 'p4' in table 'th'")
err = tk.ExecToErr("select b from tr partition (r1,r4)")
c.Assert(err.Error(), Equals, "[table:1735]Unknown partition 'r4' in table 'tr'")
err = tk.ExecToErr("select b from tl partition (p0,p4)")
c.Assert(err.Error(), Equals, "[table:1735]Unknown partition 'p4' in table 'tl'")
// test select partition table in transaction.
tk.MustExec("begin")
tk.MustExec("insert into th values (10,10),(11,11)")
tk.MustQuery("select a, b from th where b>10").Check(testkit.Rows("11 11"))
tk.MustExec("commit")
tk.MustQuery("select a, b from th where b>10").Check(testkit.Rows("11 11"))
// test a partition function that is a scalar expression (c1 % 30)
tk.MustExec("drop table if exists tscalar")
tk.MustExec(`create table tscalar (c1 int) partition by range (c1 % 30) (
partition p0 values less than (0),
partition p1 values less than (10),
partition p2 values less than (20),
partition pm values less than (maxvalue));`)
tk.MustExec("insert into tscalar values(0), (10), (40), (50), (55)")
// test IN expression
tk.MustExec("insert into tscalar values(-0), (-10), (-40), (-50), (-55)")
tk.MustQuery("select * from tscalar where c1 in (55, 55)").Check(testkit.Rows("55"))
tk.MustQuery("select * from tscalar where c1 in (40, 40)").Check(testkit.Rows("40"))
tk.MustQuery("select * from tscalar where c1 in (40)").Check(testkit.Rows("40"))
tk.MustQuery("select * from tscalar where c1 in (-40)").Check(testkit.Rows("-40"))
tk.MustQuery("select * from tscalar where c1 in (-40, -40)").Check(testkit.Rows("-40"))
tk.MustQuery("select * from tscalar where c1 in (-1)").Check(testkit.Rows())
}
func (s *testSuiteP1) TestDeletePartition(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t1`)
tk.MustExec(`create table t1 (a int) partition by range (a) (
partition p0 values less than (10),
partition p1 values less than (20),
partition p2 values less than (30),
partition p3 values less than (40),
partition p4 values less than MAXVALUE
)`)
tk.MustExec("insert into t1 values (1),(11),(21),(31)")
tk.MustExec("delete from t1 partition (p4)")
tk.MustQuery("select * from t1 order by a").Check(testkit.Rows("1", "11", "21", "31"))
tk.MustExec("delete from t1 partition (p0) where a > 10")
tk.MustQuery("select * from t1 order by a").Check(testkit.Rows("1", "11", "21", "31"))
tk.MustExec("delete from t1 partition (p0,p1,p2)")
tk.MustQuery("select * from t1").Check(testkit.Rows("31"))
}
func (s *testSuite) TestSelectView(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table view_t (a int,b int)")
tk.MustExec("insert into view_t values(1,2)")
tk.MustExec("create definer='root'@'localhost' view view1 as select * from view_t")
tk.MustExec("create definer='root'@'localhost' view view2(c,d) as select * from view_t")
tk.MustExec("create definer='root'@'localhost' view view3(c,d) as select a,b from view_t")
tk.MustExec("create definer='root'@'localhost' view view4 as select * from (select * from (select * from view_t) tb1) tb;")
tk.MustQuery("select * from view1;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view2;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view3;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view4;").Check(testkit.Rows("1 2"))
tk.MustExec("drop table view_t;")
tk.MustExec("create table view_t(c int,d int)")
err := tk.ExecToErr("select * from view1")
c.Assert(err.Error(), Equals, "[planner:1356]View 'test.view1' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them")
err = tk.ExecToErr("select * from view2")
c.Assert(err.Error(), Equals, "[planner:1356]View 'test.view2' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them")
err = tk.ExecToErr("select * from view3")
c.Assert(err.Error(), Equals, plannercore.ErrViewInvalid.GenWithStackByArgs("test", "view3").Error())
tk.MustExec("drop table view_t;")
tk.MustExec("create table view_t(a int,b int,c int)")
tk.MustExec("insert into view_t values(1,2,3)")
tk.MustQuery("select * from view1;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view2;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view3;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view4;").Check(testkit.Rows("1 2"))
tk.MustExec("alter table view_t drop column a")
tk.MustExec("alter table view_t add column a int after b")
tk.MustExec("update view_t set a=1;")
tk.MustQuery("select * from view1;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view2;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view3;").Check(testkit.Rows("1 2"))
tk.MustQuery("select * from view4;").Check(testkit.Rows("1 2"))
tk.MustExec("drop table view_t;")
tk.MustExec("drop view view1,view2,view3,view4;")
tk.MustExec("set @@tidb_enable_window_function = 1")
defer func() {
tk.MustExec("set @@tidb_enable_window_function = 0")
}()
tk.MustExec("create table t(a int, b int)")
tk.MustExec("insert into t values (1,1),(1,2),(2,1),(2,2)")
tk.MustExec("create definer='root'@'localhost' view v as select a, first_value(a) over(rows between 1 preceding and 1 following), last_value(a) over(rows between 1 preceding and 1 following) from t")
result := tk.MustQuery("select * from v")
result.Check(testkit.Rows("1 1 1", "1 1 2", "2 1 2", "2 2 2"))
tk.MustExec("drop view v;")
}
// tearDownTestTables drops every table, view, and sequence left in the
// `test` database. The per-suite TearDownTest hooks below all share it so
// that one suite's leftovers cannot leak into another.
func (s *baseTestSuite) tearDownTestTables(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show full tables")
for _, tb := range r.Rows() {
tableName := tb[0]
if tb[1] == "VIEW" {
tk.MustExec(fmt.Sprintf("drop view %v", tableName))
} else if tb[1] == "SEQUENCE" {
tk.MustExec(fmt.Sprintf("drop sequence %v", tableName))
} else {
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
}
type testSuite2 struct {
*baseTestSuite
}
func (s *testSuite2) TearDownTest(c *C) {
s.tearDownTestTables(c)
}
type testSuite3 struct {
*baseTestSuite
}
func (s *testSuite3) TearDownTest(c *C) {
s.tearDownTestTables(c)
}
type testSuite4 struct {
*baseTestSuite
}
func (s *testSuite4) TearDownTest(c *C) {
s.tearDownTestTables(c)
}
type testSuite5 struct {
*baseTestSuite
}
func (s *testSuite5) TearDownTest(c *C) {
s.tearDownTestTables(c)
}
type testSuite6 struct {
*baseTestSuite
}
func (s *testSuite6) TearDownTest(c *C) {
s.tearDownTestTables(c)
}
type testSuite7 struct {
*baseTestSuite
}
func (s *testSuite7) TearDownTest(c *C) {
s.tearDownTestTables(c)
}
type testSuite8 struct {
*baseTestSuite
}
func (s *testSuite8) TearDownTest(c *C) {
s.tearDownTestTables(c)
}
type testSerialSuite1 struct {
*baseTestSuite
}
func (s *testSerialSuite1) TearDownTest(c *C) {
s.tearDownTestTables(c)
}
func (s *testSuiteP2) TestStrToDateBuiltin(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustQuery(`select str_to_date('20190101','%Y%m%d%!') from dual`).Check(testkit.Rows("2019-01-01"))
tk.MustQuery(`select str_to_date('20190101','%Y%m%d%f') from dual`).Check(testkit.Rows("2019-01-01 00:00:00.000000"))
tk.MustQuery(`select str_to_date('20190101','%Y%m%d%H%i%s') from dual`).Check(testkit.Rows("2019-01-01 00:00:00"))
tk.MustQuery(`select str_to_date('18/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('a18/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('69/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("2069-10-22"))
tk.MustQuery(`select str_to_date('70/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("1970-10-22"))
tk.MustQuery(`select str_to_date('8/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("2008-10-22"))
tk.MustQuery(`select str_to_date('8/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("2008-10-22"))
tk.MustQuery(`select str_to_date('18/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('a18/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('69/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("2069-10-22"))
tk.MustQuery(`select str_to_date('70/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("1970-10-22"))
tk.MustQuery(`select str_to_date('018/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("0018-10-22"))
tk.MustQuery(`select str_to_date('2018/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('018/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18/10/22','%y0/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18/10/22','%Y0/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18a/10/22','%y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18a/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('20188/10/22','%Y/%m/%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('2018510522','%Y5%m5%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018^10^22','%Y^%m^%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018@10@22','%Y@%m@%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018%10%22','%Y%%m%%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('2018(10(22','%Y(%m(%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018\10\22','%Y\%m\%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('2018=10=22','%Y=%m=%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018+10+22','%Y+%m+%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('2018_10_22','%Y_%m_%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('69510522','%y5%m5%d') from dual`).Check(testkit.Rows("2069-10-22"))
tk.MustQuery(`select str_to_date('69^10^22','%y^%m^%d') from dual`).Check(testkit.Rows("2069-10-22"))
tk.MustQuery(`select str_to_date('18@10@22','%y@%m@%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('18%10%22','%y%%m%%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18(10(22','%y(%m(%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('18\10\22','%y\%m\%d') from dual`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`select str_to_date('18+10+22','%y+%m+%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('18=10=22','%y=%m=%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`select str_to_date('18_10_22','%y_%m_%d') from dual`).Check(testkit.Rows("2018-10-22"))
tk.MustQuery(`SELECT STR_TO_DATE('2020-07-04 11:22:33 PM', '%Y-%m-%d %r')`).Check(testkit.Rows("2020-07-04 23:22:33"))
tk.MustQuery(`SELECT STR_TO_DATE('2020-07-04 12:22:33 AM', '%Y-%m-%d %r')`).Check(testkit.Rows("2020-07-04 00:22:33"))
tk.MustQuery(`SELECT STR_TO_DATE('2020-07-04 12:22:33', '%Y-%m-%d %T')`).Check(testkit.Rows("2020-07-04 12:22:33"))
tk.MustQuery(`SELECT STR_TO_DATE('2020-07-04 00:22:33', '%Y-%m-%d %T')`).Check(testkit.Rows("2020-07-04 00:22:33"))
}
func (s *testSuiteP2) TestAddDateBuiltinWithWarnings(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("set @@sql_mode='NO_ZERO_DATE'")
result := tk.MustQuery(`select date_add('2001-01-00', interval -2 hour);`)
result.Check(testkit.Rows("<nil>"))
tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning|1292|Incorrect datetime value: '2001-01-00'"))
}
func (s *testSuiteP2) TestIssue27232(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a timestamp)")
tk.MustExec("insert into t values (\"1970-07-23 10:04:59\"), (\"2038-01-19 03:14:07\")")
tk.MustQuery("select * from t where date_sub(a, interval 10 month) = date_sub(\"1970-07-23 10:04:59\", interval 10 month)").Check(testkit.Rows("1970-07-23 10:04:59"))
tk.MustQuery("select * from t where timestampadd(hour, 1, a ) = timestampadd(hour, 1, \"2038-01-19 03:14:07\")").Check(testkit.Rows("2038-01-19 03:14:07"))
}
func (s *testSuiteP2) TestStrToDateBuiltinWithWarnings(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("set @@sql_mode='NO_ZERO_DATE'")
tk.MustExec("use test")
tk.MustQuery(`SELECT STR_TO_DATE('0000-1-01', '%Y-%m-%d');`).Check(testkit.Rows("<nil>"))
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1411 Incorrect datetime value: '0000-1-01' for function str_to_date"))
}
func (s *testSuiteP2) TestReadPartitionedTable(c *C) {
// Test the three readers (table reader, index reader, index lookup) on a partitioned table.
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists pt")
tk.MustExec("create table pt (a int, b int, index i_b(b)) partition by range (a) (partition p1 values less than (2), partition p2 values less than (4), partition p3 values less than (6))")
for i := 0; i < 6; i++ {
tk.MustExec(fmt.Sprintf("insert into pt values(%d, %d)", i, i))
}
// Table reader
tk.MustQuery("select * from pt order by a").Check(testkit.Rows("0 0", "1 1", "2 2", "3 3", "4 4", "5 5"))
// Index reader
tk.MustQuery("select b from pt where b = 3").Check(testkit.Rows("3"))
// Index lookup
tk.MustQuery("select a from pt where b = 3").Check(testkit.Rows("3"))
}
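// TestSplitRegion covers the SPLIT TABLE syntax variants: splitting a table
// or an index either BY explicit split points or BETWEEN ... AND ... REGIONS n,
// plus the validation errors for bad bounds, bad value counts, and region
// counts outside (0, 1000].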
func (s *testSplitTable) TestSplitRegion(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t, t1")
tk.MustExec("create table t(a varchar(100),b int, index idx1(b,a))")
tk.MustExec(`split table t index idx1 by (10000,"abcd"),(10000000);`)
_, err := tk.Exec(`split table t index idx1 by ("abcd");`)
c.Assert(err, NotNil)
terr := errors.Cause(err).(*terror.Error)
c.Assert(terr.Code(), Equals, errors.ErrCode(mysql.WarnDataTruncated))
// Test for split index region.
// Check min value is more than max value.
tk.MustExec(`split table t index idx1 between (0) and (1000000000) regions 10`)
tk.MustGetErrCode(`split table t index idx1 between (2,'a') and (1,'c') regions 10`, errno.ErrInvalidSplitRegionRanges)
// Check min value is invalid.
_, err = tk.Exec(`split table t index idx1 between () and (1) regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split index `idx1` region lower value count should more than 0")
// Check max value is invalid.
_, err = tk.Exec(`split table t index idx1 between (1) and () regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split index `idx1` region upper value count should more than 0")
// Check pre-split region num is too large.
_, err = tk.Exec(`split table t index idx1 between (0) and (1000000000) regions 10000`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split index region num exceeded the limit 1000")
// Check pre-split region num 0 is invalid.
_, err = tk.Exec(`split table t index idx1 between (0) and (1000000000) regions 0`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split index region num should more than 0")
// Test truncate error msg.
_, err = tk.Exec(`split table t index idx1 between ("aa") and (1000000000) regions 0`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[types:1265]Incorrect value: 'aa' for column 'b'")
// Test for split table region.
tk.MustExec(`split table t between (0) and (1000000000) regions 10`)
// Check the lower value is more than the upper value.
tk.MustGetErrCode(`split table t between (2) and (1) regions 10`, errno.ErrInvalidSplitRegionRanges)
// Check the lower value is invalid.
_, err = tk.Exec(`split table t between () and (1) regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split table region lower value count should be 1")
// Check upper value is invalid.
_, err = tk.Exec(`split table t between (1) and () regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split table region upper value count should be 1")
// Check pre-split region num is too large.
_, err = tk.Exec(`split table t between (0) and (1000000000) regions 10000`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split table region num exceeded the limit 1000")
// Check pre-split region num 0 is invalid.
_, err = tk.Exec(`split table t between (0) and (1000000000) regions 0`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Split table region num should more than 0")
// Test truncate error msg.
_, err = tk.Exec(`split table t between ("aa") and (1000000000) regions 10`)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[types:1265]Incorrect value: 'aa' for column '_tidb_rowid'")
// Test split table region step is too small.
tk.MustGetErrCode(`split table t between (0) and (100) regions 10`, errno.ErrInvalidSplitRegionRanges)
// Test split region by syntax.
tk.MustExec(`split table t by (0),(1000),(1000000)`)
// Test split region twice to test for multiple batch split region requests.
tk.MustExec("create table t1(a int, b int)")
tk.MustQuery("split table t1 between(0) and (10000) regions 10;").Check(testkit.Rows("9 1"))
tk.MustQuery("split table t1 between(10) and (10010) regions 5;").Check(testkit.Rows("4 1"))
// Test split region for partition table.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int,b int) partition by hash(a) partitions 5;")
tk.MustQuery("split table t between (0) and (1000000) regions 5;").Check(testkit.Rows("20 1"))
// Test for `split for region` syntax.
tk.MustQuery("split region for partition table t between (1000000) and (100000000) regions 10;").Check(testkit.Rows("45 1"))
// Test split region for partition table with specified partition.
tk.MustQuery("split table t partition (p1,p2) between (100000000) and (1000000000) regions 5;").Check(testkit.Rows("8 1"))
// Test for `split for region` syntax.
tk.MustQuery("split region for partition table t partition (p3,p4) between (100000000) and (1000000000) regions 5;").Check(testkit.Rows("8 1"))
}
func (s *testSplitTable) TestSplitRegionEdgeCase(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t(a bigint(20) auto_increment primary key);")
tk.MustExec("split table t between (-9223372036854775808) and (9223372036854775807) regions 16;")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t(a int(20) auto_increment primary key);")
tk.MustGetErrCode("split table t between (-9223372036854775808) and (9223372036854775807) regions 16;", errno.ErrDataOutOfRange)
}
func (s *testSplitTable) TestClusterIndexSplitTableIntegration(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("drop database if exists test_cluster_index_index_split_table_integration;")
tk.MustExec("create database test_cluster_index_index_split_table_integration;")
tk.MustExec("use test_cluster_index_index_split_table_integration;")
tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn
tk.MustExec("create table t (a varchar(255), b double, c int, primary key (a, b));")
// Value list length does not match.
lowerMsg := "Split table region lower value count should be 2"
upperMsg := "Split table region upper value count should be 2"
tk.MustGetErrMsg("split table t between ('aaa') and ('aaa', 100.0) regions 10;", lowerMsg)
tk.MustGetErrMsg("split table t between ('aaa', 1.0) and ('aaa', 100.0, 11) regions 10;", upperMsg)
// Value type does not match.
errMsg := "[types:1265]Incorrect value: 'aaa' for column 'b'"
tk.MustGetErrMsg("split table t between ('aaa', 0.0) and (100.0, 'aaa') regions 10;", errMsg)
// lower bound >= upper bound.
errMsg = "[executor:8212]Failed to split region ranges: Split table `t` region lower value (aaa,0) should less than the upper value (aaa,0)"
tk.MustGetErrMsg("split table t between ('aaa', 0.0) and ('aaa', 0.0) regions 10;", errMsg)
errMsg = "[executor:8212]Failed to split region ranges: Split table `t` region lower value (bbb,0) should less than the upper value (aaa,0)"
tk.MustGetErrMsg("split table t between ('bbb', 0.0) and ('aaa', 0.0) regions 10;", errMsg)
// Exceed limit 1000.
errMsg = "Split table region num exceeded the limit 1000"
tk.MustGetErrMsg("split table t between ('aaa', 0.0) and ('aaa', 0.1) regions 100000;", errMsg)
// Split on null values.
errMsg = "[planner:1048]Column 'a' cannot be null"
tk.MustGetErrMsg("split table t between (null, null) and (null, null) regions 1000;", errMsg)
tk.MustGetErrMsg("split table t by (null, null);", errMsg)
// Success.
tk.MustExec("split table t between ('aaa', 0.0) and ('aaa', 100.0) regions 10;")
tk.MustExec("split table t by ('aaa', 0.0), ('aaa', 20.0), ('aaa', 100.0);")
tk.MustExec("split table t by ('aaa', 100.0), ('qqq', 20.0), ('zzz', 100.0), ('zzz', 1000.0);")
tk.MustExec("drop table t;")
tk.MustExec("create table t (a int, b int, c int, d int, primary key(a, c, d));")
tk.MustQuery("split table t between (0, 0, 0) and (0, 0, 1) regions 1000;").Check(testkit.Rows("999 1"))
tk.MustExec("drop table t;")
tk.MustExec("create table t (a int, b int, c int, d int, primary key(d, a, c));")
tk.MustQuery("split table t by (0, 0, 0), (1, 2, 3), (65535, 65535, 65535);").Check(testkit.Rows("3 1"))
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t (a varchar(255), b decimal, c int, primary key (a, b));")
errMsg = "[types:1265]Incorrect value: '' for column 'b'"
tk.MustGetErrMsg("split table t by ('aaa', '')", errMsg)
}
func (s *testSplitTable) TestClusterIndexShowTableRegion(c *C) {
tk := testkit.NewTestKit(c, s.store)
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
tk.MustExec("set global tidb_scatter_region = 1")
tk.MustExec("drop database if exists cluster_index_regions;")
tk.MustExec("create database cluster_index_regions;")
tk.MustExec("use cluster_index_regions;")
tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn
tk.MustExec("create table t (a int, b int, c int, primary key(a, b));")
tk.MustExec("insert t values (1, 1, 1), (2, 2, 2);")
tk.MustQuery("split table t between (1, 0) and (2, 3) regions 2;").Check(testkit.Rows("1 1"))
rows := tk.MustQuery("show table t regions").Rows()
tbl := testGetTableByName(c, tk.Se, "cluster_index_regions", "t")
// Check the region start key.
c.Assert(rows[0][1], Matches, fmt.Sprintf("t_%d_", tbl.Meta().ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_r_03800000000000000183800000000000", tbl.Meta().ID))
tk.MustExec("drop table t;")
tk.MustExec("create table t (a int, b int);")
tk.MustQuery("split table t between (0) and (100000) regions 2;").Check(testkit.Rows("1 1"))
rows = tk.MustQuery("show table t regions").Rows()
tbl = testGetTableByName(c, tk.Se, "cluster_index_regions", "t")
// Check the region start key is int64.
c.Assert(rows[0][1], Matches, fmt.Sprintf("t_%d_", tbl.Meta().ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_r_50000", tbl.Meta().ID))
}
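// TestClusterIndexOuterJoinElimination checks that a self left join on the
// full clustered primary key (a, b) is eliminated by the planner: no Join
// operator should appear in the explain output.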
func (s *testSuiteWithData) TestClusterIndexOuterJoinElimination(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn
tk.MustExec("create table t (a int, b int, c int, primary key(a,b))")
rows := tk.MustQuery(`explain format = 'brief' select t1.a from t t1 left join t t2 on t1.a = t2.a and t1.b = t2.b`).Rows()
rowStrs := s.testData.ConvertRowsToStrings(rows)
for _, row := range rowStrs {
// outer join has been eliminated.
c.Assert(strings.Index(row, "Join"), Equals, -1)
}
}
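// TestShowTableRegion checks the region start keys reported by SHOW TABLE
// ... REGIONS. Record regions are keyed as t_{tableID}_r_{rowID} and index
// regions as t_{tableID}_i_{indexID}_..., which is what the assertions
// below match against.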
func (s *testSplitTable) TestShowTableRegion(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t_regions")
tk.MustExec("set global tidb_scatter_region = 1")
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
tk.MustExec("create table t_regions (a int key, b int, c int, index idx(b), index idx2(c))")
_, err := tk.Exec("split partition table t_regions partition (p1,p2) index idx between (0) and (20000) regions 2;")
c.Assert(err.Error(), Equals, plannercore.ErrPartitionClauseOnNonpartitioned.Error())
// Test show table regions.
tk.MustQuery(`split table t_regions between (-10000) and (10000) regions 4;`).Check(testkit.Rows("4 1"))
re := tk.MustQuery("show table t_regions regions")
// Test show table regions and split table on global temporary table.
tk.MustExec("drop table if exists t_regions_temporary_table")
tk.MustExec("create global temporary table t_regions_temporary_table (a int key, b int, c int, index idx(b), index idx2(c)) ON COMMIT DELETE ROWS;")
// Test show table regions.
_, err = tk.Exec("show table t_regions_temporary_table regions")
c.Assert(err.Error(), Equals, plannercore.ErrOptOnTemporaryTable.GenWithStackByArgs("show table regions").Error())
// Test split table.
_, err = tk.Exec("split table t_regions_temporary_table between (-10000) and (10000) regions 4;")
c.Assert(err.Error(), Equals, plannercore.ErrOptOnTemporaryTable.GenWithStackByArgs("split table").Error())
_, err = tk.Exec("split partition table t_regions_temporary_table partition (p1,p2) index idx between (0) and (20000) regions 2;")
c.Assert(err.Error(), Equals, plannercore.ErrOptOnTemporaryTable.GenWithStackByArgs("split table").Error())
tk.MustExec("drop table if exists t_regions_temporary_table")
// Test pre split regions
_, err = tk.Exec("create global temporary table temporary_table_pre_split(id int ) pre_split_regions=2 ON COMMIT DELETE ROWS;")
c.Assert(err.Error(), Equals, ddl.ErrOptOnTemporaryTable.GenWithStackByArgs("pre split regions").Error())
// Test show table regions and split table on local temporary table
tk.MustExec("drop table if exists t_regions_local_temporary_table")
tk.MustExec("create temporary table t_regions_local_temporary_table (a int key, b int, c int, index idx(b), index idx2(c));")
// Test show table regions.
_, err = tk.Exec("show table t_regions_local_temporary_table regions")
c.Assert(err.Error(), Equals, plannercore.ErrOptOnTemporaryTable.GenWithStackByArgs("show table regions").Error())
// Test split table.
_, err = tk.Exec("split table t_regions_local_temporary_table between (-10000) and (10000) regions 4;")
c.Assert(err.Error(), Equals, plannercore.ErrOptOnTemporaryTable.GenWithStackByArgs("split table").Error())
_, err = tk.Exec("split partition table t_regions_local_temporary_table partition (p1,p2) index idx between (0) and (20000) regions 2;")
c.Assert(err.Error(), Equals, plannercore.ErrOptOnTemporaryTable.GenWithStackByArgs("split table").Error())
tk.MustExec("drop table if exists t_regions_local_temporary_table")
// Test pre split regions
_, err = tk.Exec("create temporary table local_temporary_table_pre_split(id int ) pre_split_regions=2;")
c.Assert(err.Error(), Equals, ddl.ErrOptOnTemporaryTable.GenWithStackByArgs("pre split regions").Error())
rows := re.Rows()
// Table t_regions should have 5 regions now.
// 4 regions to store record data.
// 1 region to store index data.
c.Assert(len(rows), Equals, 5)
c.Assert(len(rows[0]), Equals, 11)
tbl := testGetTableByName(c, tk.Se, "test", "t_regions")
// Check the region start key.
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_r", tbl.Meta().ID))
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_-5000", tbl.Meta().ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_0", tbl.Meta().ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_5000", tbl.Meta().ID))
c.Assert(rows[4][2], Equals, fmt.Sprintf("t_%d_r", tbl.Meta().ID))
// Test show table index regions.
tk.MustQuery(`split table t_regions index idx between (-1000) and (1000) regions 4;`).Check(testkit.Rows("4 1"))
re = tk.MustQuery("show table t_regions index idx regions")
rows = re.Rows()
// The index `idx` of table t_regions should have 4 regions now.
c.Assert(len(rows), Equals, 4)
// Check the region start key.
c.Assert(rows[0][1], Matches, fmt.Sprintf("t_%d.*", tbl.Meta().ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
re = tk.MustQuery("show table t_regions regions")
rows = re.Rows()
// Table t_regions should have 9 regions now.
// 4 regions to store record data.
// 4 regions to store index idx data.
// 1 region to store index idx2 data.
c.Assert(len(rows), Equals, 9)
// Check the region start key.
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_r", tbl.Meta().ID))
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_-5000", tbl.Meta().ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_0", tbl.Meta().ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_5000", tbl.Meta().ID))
c.Assert(rows[4][1], Matches, fmt.Sprintf("t_%d_", tbl.Meta().ID))
c.Assert(rows[5][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[6][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[7][2], Equals, fmt.Sprintf("t_%d_i_2_", tbl.Meta().ID))
c.Assert(rows[8][2], Equals, fmt.Sprintf("t_%d_r", tbl.Meta().ID))
// Test an unsigned primary key and wait for region scattering to finish.
tk.MustExec("drop table if exists t_regions")
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
tk.MustExec("create table t_regions (a int unsigned key, b int, index idx(b))")
// Test show table regions.
tk.MustExec(`set @@session.tidb_wait_split_region_finish=1;`)
tk.MustQuery(`split table t_regions by (2500),(5000),(7500);`).Check(testkit.Rows("3 1"))
re = tk.MustQuery("show table t_regions regions")
rows = re.Rows()
// Table t_regions should have 4 regions now.
c.Assert(len(rows), Equals, 4)
tbl = testGetTableByName(c, tk.Se, "test", "t_regions")
// Check the region start key.
c.Assert(rows[0][1], Matches, "t_.*")
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_2500", tbl.Meta().ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_5000", tbl.Meta().ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_7500", tbl.Meta().ID))
// Test show table index regions.
tk.MustQuery(`split table t_regions index idx by (250),(500),(750);`).Check(testkit.Rows("4 1"))
re = tk.MustQuery("show table t_regions index idx regions")
rows = re.Rows()
// The index `idx` of table t_regions should have 4 regions now.
c.Assert(len(rows), Equals, 4)
// Check the region start key.
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_", tbl.Meta().ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_i_1_.*", tbl.Meta().ID))
// Test show table regions for a partition table when splitting regions on table creation is disabled.
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0)
tk.MustExec("drop table if exists partition_t;")
tk.MustExec("set @@session.tidb_enable_table_partition = '1';")
tk.MustExec("create table partition_t (a int, b int,index(a)) partition by hash (a) partitions 3")
re = tk.MustQuery("show table partition_t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 1)
c.Assert(rows[0][1], Matches, "t_.*")
// Test show table regions for a partition table when splitting regions on table creation is enabled.
atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
tk.MustExec("set @@global.tidb_scatter_region=1;")
tk.MustExec("drop table if exists partition_t;")
tk.MustExec("create table partition_t (a int, b int,index(a)) partition by hash (a) partitions 3")
re = tk.MustQuery("show table partition_t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 3)
tbl = testGetTableByName(c, tk.Se, "test", "partition_t")
partitionDef := tbl.Meta().GetPartitionInfo().Definitions
c.Assert(rows[0][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[0].ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[1].ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[2].ID))
// Test that partition regions are split when new partitions are added.
tk.MustExec("drop table if exists partition_t;")
tk.MustExec(`create table partition_t (a int, b int,index(a)) PARTITION BY RANGE (a) (
PARTITION p0 VALUES LESS THAN (10),
PARTITION p1 VALUES LESS THAN (20),
PARTITION p2 VALUES LESS THAN (30));`)
tk.MustExec(`alter table partition_t add partition ( partition p3 values less than (40), partition p4 values less than (50) );`)
re = tk.MustQuery("show table partition_t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 5)
tbl = testGetTableByName(c, tk.Se, "test", "partition_t")
partitionDef = tbl.Meta().GetPartitionInfo().Definitions
c.Assert(rows[0][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[0].ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[1].ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[2].ID))
c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[3].ID))
c.Assert(rows[4][1], Matches, fmt.Sprintf("t_%d_.*", partitionDef[4].ID))
// Test pre-splitting table regions when creating a table.
tk.MustExec("drop table if exists t_pre")
tk.MustExec("create table t_pre (a int, b int) shard_row_id_bits = 2 pre_split_regions=2;")
re = tk.MustQuery("show table t_pre regions")
rows = re.Rows()
// Table t_pre should have 4 regions now.
c.Assert(len(rows), Equals, 4)
tbl = testGetTableByName(c, tk.Se, "test", "t_pre")
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_2305843009213693952", tbl.Meta().ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_4611686018427387904", tbl.Meta().ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_6917529027641081856", tbl.Meta().ID))
// Test pre-splitting table regions when creating a partitioned table.
tk.MustExec("drop table if exists pt_pre")
tk.MustExec("create table pt_pre (a int, b int) shard_row_id_bits = 2 pre_split_regions=2 partition by hash(a) partitions 3;")
re = tk.MustQuery("show table pt_pre regions")
rows = re.Rows()
// Table pt_pre should have 12 regions now: 4 per partition across 3 partitions.
c.Assert(len(rows), Equals, 12)
tbl = testGetTableByName(c, tk.Se, "test", "pt_pre")
pi := tbl.Meta().GetPartitionInfo().Definitions
c.Assert(len(pi), Equals, 3)
for i, p := range pi {
c.Assert(rows[1+4*i][1], Equals, fmt.Sprintf("t_%d_r_2305843009213693952", p.ID))
c.Assert(rows[2+4*i][1], Equals, fmt.Sprintf("t_%d_r_4611686018427387904", p.ID))
c.Assert(rows[3+4*i][1], Equals, fmt.Sprintf("t_%d_r_6917529027641081856", p.ID))
}
defer atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0)
// Test split partition table.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int,b int) partition by hash(a) partitions 5;")
tk.MustQuery("split table t between (0) and (4000000) regions 4;").Check(testkit.Rows("15 1"))
re = tk.MustQuery("show table t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 20)
tbl = testGetTableByName(c, tk.Se, "test", "t")
c.Assert(len(tbl.Meta().GetPartitionInfo().Definitions), Equals, 5)
for i, p := range tbl.Meta().GetPartitionInfo().Definitions {
c.Assert(rows[i*4+0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[i*4+1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[i*4+2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[i*4+3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
}
// Test split region for partition table with specified partition.
tk.MustQuery("split table t partition (p4) between (1000000) and (2000000) regions 5;").Check(testkit.Rows("4 1"))
re = tk.MustQuery("show table t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 24)
tbl = testGetTableByName(c, tk.Se, "test", "t")
c.Assert(len(tbl.Meta().GetPartitionInfo().Definitions), Equals, 5)
for i := 0; i < 4; i++ {
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[i*4+0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[i*4+1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[i*4+2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[i*4+3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
}
for i := 4; i < 5; i++ {
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[i*4+0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[i*4+1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[i*4+2][1], Equals, fmt.Sprintf("t_%d_r_1200000", p.ID))
c.Assert(rows[i*4+3][1], Equals, fmt.Sprintf("t_%d_r_1400000", p.ID))
c.Assert(rows[i*4+4][1], Equals, fmt.Sprintf("t_%d_r_1600000", p.ID))
c.Assert(rows[i*4+5][1], Equals, fmt.Sprintf("t_%d_r_1800000", p.ID))
c.Assert(rows[i*4+6][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[i*4+7][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
}
// Test for show table partition regions.
for i := 0; i < 4; i++ {
re = tk.MustQuery(fmt.Sprintf("show table t partition (p%v) regions", i))
rows = re.Rows()
c.Assert(len(rows), Equals, 4)
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
}
re = tk.MustQuery("show table t partition (p0, p4) regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 12)
p := tbl.Meta().GetPartitionInfo().Definitions[0]
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
p = tbl.Meta().GetPartitionInfo().Definitions[4]
c.Assert(rows[4][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[5][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[6][1], Equals, fmt.Sprintf("t_%d_r_1200000", p.ID))
c.Assert(rows[7][1], Equals, fmt.Sprintf("t_%d_r_1400000", p.ID))
c.Assert(rows[8][1], Equals, fmt.Sprintf("t_%d_r_1600000", p.ID))
c.Assert(rows[9][1], Equals, fmt.Sprintf("t_%d_r_1800000", p.ID))
c.Assert(rows[10][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[11][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
// Test for duplicate partition names.
re = tk.MustQuery("show table t partition (p0, p0, p0) regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 4)
p = tbl.Meta().GetPartitionInfo().Definitions[0]
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
// Test split partition table index.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int,b int,index idx(a)) partition by hash(a) partitions 5;")
tk.MustQuery("split table t between (0) and (4000000) regions 4;").Check(testkit.Rows("20 1"))
tk.MustQuery("split table t index idx between (0) and (4000000) regions 4;").Check(testkit.Rows("20 1"))
re = tk.MustQuery("show table t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 40)
tbl = testGetTableByName(c, tk.Se, "test", "t")
c.Assert(len(tbl.Meta().GetPartitionInfo().Definitions), Equals, 5)
for i := 0; i < 5; i++ {
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[i*8+0][1], Equals, fmt.Sprintf("t_%d_r", p.ID))
c.Assert(rows[i*8+1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[i*8+2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[i*8+3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
c.Assert(rows[i*8+4][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[i*8+5][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+6][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+7][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
}
// Test split index region for partition table with specified partition.
tk.MustQuery("split table t partition (p4) index idx between (0) and (1000000) regions 5;").Check(testkit.Rows("4 1"))
re = tk.MustQuery("show table t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 44)
tbl = testGetTableByName(c, tk.Se, "test", "t")
c.Assert(len(tbl.Meta().GetPartitionInfo().Definitions), Equals, 5)
for i := 0; i < 4; i++ {
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[i*8+0][1], Equals, fmt.Sprintf("t_%d_r", p.ID))
c.Assert(rows[i*8+1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[i*8+2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[i*8+3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
c.Assert(rows[i*8+4][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[i*8+5][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+6][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+7][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
}
for i := 4; i < 5; i++ {
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[i*8+0][1], Equals, fmt.Sprintf("t_%d_r", p.ID))
c.Assert(rows[i*8+1][1], Equals, fmt.Sprintf("t_%d_r_1000000", p.ID))
c.Assert(rows[i*8+2][1], Equals, fmt.Sprintf("t_%d_r_2000000", p.ID))
c.Assert(rows[i*8+3][1], Equals, fmt.Sprintf("t_%d_r_3000000", p.ID))
c.Assert(rows[i*8+4][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[i*8+5][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+6][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+7][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+8][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+9][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+10][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[i*8+11][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
}
// Test show table partition regions on an unknown partition.
err = tk.QueryToErr("show table t partition (p_unknown) index idx regions")
c.Assert(terror.ErrorEqual(err, table.ErrUnknownPartition), IsTrue)
// Test show table partition index regions.
for i := 0; i < 4; i++ {
re = tk.MustQuery(fmt.Sprintf("show table t partition (p%v) index idx regions", i))
rows = re.Rows()
c.Assert(len(rows), Equals, 4)
p := tbl.Meta().GetPartitionInfo().Definitions[i]
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
}
re = tk.MustQuery("show table t partition (p3,p4) index idx regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 12)
p = tbl.Meta().GetPartitionInfo().Definitions[3]
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[1][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
p = tbl.Meta().GetPartitionInfo().Definitions[4]
c.Assert(rows[4][1], Equals, fmt.Sprintf("t_%d_", p.ID))
c.Assert(rows[5][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[6][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[7][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[8][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[9][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[10][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
c.Assert(rows[11][1], Matches, fmt.Sprintf("t_%d_i_1_.*", p.ID))
// Test split for the second index.
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int,b int,index idx(a), index idx2(b))")
tk.MustQuery("split table t index idx2 between (0) and (4000000) regions 2;").Check(testkit.Rows("3 1"))
re = tk.MustQuery("show table t regions")
rows = re.Rows()
c.Assert(len(rows), Equals, 4)
tbl = testGetTableByName(c, tk.Se, "test", "t")
c.Assert(rows[0][1], Equals, fmt.Sprintf("t_%d_i_3_", tbl.Meta().ID))
c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_", tbl.Meta().ID))
c.Assert(rows[2][1], Matches, fmt.Sprintf("t_%d_i_2_.*", tbl.Meta().ID))
c.Assert(rows[3][1], Matches, fmt.Sprintf("t_%d_i_2_.*", tbl.Meta().ID))
// Test show table partition regions on a non-partitioned table.
err = tk.QueryToErr("show table t partition (p3,p4) index idx regions")
c.Assert(terror.ErrorEqual(err, plannercore.ErrPartitionClauseOnNonpartitioned), IsTrue)
}
func testGetTableByName(c *C, ctx sessionctx.Context, db, table string) table.Table {
dom := domain.GetDomain(ctx)
// Make sure the table schema is the new schema.
err := dom.Reload()
c.Assert(err, IsNil)
tbl, err := dom.InfoSchema().TableByName(model.NewCIStr(db), model.NewCIStr(table))
c.Assert(err, IsNil)
return tbl
}
func (s *testSuiteP2) TestIssue10435(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1(i int, j int, k int)")
tk.MustExec("insert into t1 VALUES (1,1,1),(2,2,2),(3,3,3),(4,4,4)")
tk.MustExec("INSERT INTO t1 SELECT 10*i,j,5*j FROM t1 UNION SELECT 20*i,j,5*j FROM t1 UNION SELECT 30*i,j,5*j FROM t1")
tk.MustExec("set @@session.tidb_enable_window_function=1")
tk.MustQuery("SELECT SUM(i) OVER W FROM t1 WINDOW w AS (PARTITION BY j ORDER BY i) ORDER BY 1+SUM(i) OVER w").Check(
testkit.Rows("1", "2", "3", "4", "11", "22", "31", "33", "44", "61", "62", "93", "122", "124", "183", "244"),
)
}
func (s *testSerialSuite2) TestUnsignedFeedback(c *C) {
tk := testkit.NewTestKit(c, s.store)
oriProbability := statistics.FeedbackProbability.Load()
statistics.FeedbackProbability.Store(1.0)
defer func() { statistics.FeedbackProbability.Store(oriProbability) }()
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a bigint unsigned, b int, primary key(a))")
tk.MustExec("insert into t values (1,1),(2,2)")
tk.MustExec("analyze table t")
tk.MustQuery("select count(distinct b) from t").Check(testkit.Rows("2"))
result := tk.MustQuery("explain analyze select count(distinct b) from t")
c.Assert(result.Rows()[2][4], Equals, "table:t")
c.Assert(result.Rows()[2][6], Equals, "keep order:false")
}
func (s *testSuiteWithCliBaseCharset) TestCharsetFeature(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustQuery("show charset").Check(testkit.Rows(
"ascii US ASCII ascii_bin 1",
"binary binary binary 1",
"gbk Chinese Internal Code Specification gbk_chinese_ci 2",
"latin1 Latin1 latin1_bin 1",
"utf8 UTF-8 Unicode utf8_bin 3",
"utf8mb4 UTF-8 Unicode utf8mb4_bin 4",
))
tk.MustQuery("show collation").Check(testkit.Rows(
"ascii_bin ascii 65 Yes Yes 1",
"binary binary 63 Yes Yes 1",
"gbk_bin gbk 87 Yes 1",
"gbk_chinese_ci gbk 28 Yes Yes 1",
"latin1_bin latin1 47 Yes Yes 1",
"utf8_bin utf8 83 Yes Yes 1",
"utf8_general_ci utf8 33 Yes 1",
"utf8_unicode_ci utf8 192 Yes 1",
"utf8mb4_bin utf8mb4 46 Yes Yes 1",
"utf8mb4_general_ci utf8mb4 45 Yes 1",
"utf8mb4_unicode_ci utf8mb4 224 Yes 1",
))
tk.MustExec("set names gbk;")
tk.MustQuery("select @@character_set_connection;").Check(testkit.Rows("gbk"))
tk.MustQuery("select @@collation_connection;").Check(testkit.Rows("gbk_chinese_ci"))
tk.MustExec("set @@character_set_client=gbk;")
tk.MustQuery("select @@character_set_client;").Check(testkit.Rows("gbk"))
tk.MustExec("set names utf8mb4;")
tk.MustExec("set @@character_set_connection=gbk;")
tk.MustQuery("select @@character_set_connection;").Check(testkit.Rows("gbk"))
tk.MustQuery("select @@collation_connection;").Check(testkit.Rows("gbk_chinese_ci"))
tk.MustGetErrCode("select _gbk 'a';", errno.ErrUnknownCharacterSet)
tk.MustExec("use test")
tk.MustExec("create table t1(a char(10) charset gbk);")
tk.MustExec("create table t2(a char(10) charset gbk collate gbk_bin);")
tk.MustExec("create table t3(a char(10)) charset gbk;")
tk.MustExec("alter table t3 add column b char(10) charset gbk;")
tk.MustQuery("show create table t3").Check(testkit.Rows("t3 CREATE TABLE `t3` (\n" +
" `a` char(10) DEFAULT NULL,\n" +
" `b` char(10) DEFAULT NULL\n" +
") ENGINE=InnoDB DEFAULT CHARSET=gbk COLLATE=gbk_chinese_ci",
))
tk.MustExec("create table t4(a char(10));")
tk.MustExec("alter table t4 add column b char(10) charset gbk;")
tk.MustQuery("show create table t4").Check(testkit.Rows("t4 CREATE TABLE `t4` (\n" +
" `a` char(10) DEFAULT NULL,\n" +
" `b` char(10) CHARACTER SET gbk COLLATE gbk_chinese_ci DEFAULT NULL\n" +
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin",
))
tk.MustExec("create database test_gbk charset gbk;")
tk.MustExec("use test_gbk")
tk.MustExec("create table t1(a char(10));")
tk.MustQuery("show create table t1").Check(testkit.Rows("t1 CREATE TABLE `t1` (\n" +
" `a` char(10) DEFAULT NULL\n" +
") ENGINE=InnoDB DEFAULT CHARSET=gbk COLLATE=gbk_chinese_ci",
))
collate.SetNewCollationEnabledForTest(false)
tk.MustQuery("show charset").Check(testkit.Rows(
"ascii US ASCII ascii_bin 1",
"binary binary binary 1",
"gbk Chinese Internal Code Specification gbk_chinese_ci 2",
"latin1 Latin1 latin1_bin 1",
"utf8 UTF-8 Unicode utf8_bin 3",
"utf8mb4 UTF-8 Unicode utf8mb4_bin 4",
))
tk.MustQuery("show collation").Check(testkit.Rows(
"utf8mb4_bin utf8mb4 46 Yes Yes 1",
"latin1_bin latin1 47 Yes Yes 1",
"binary binary 63 Yes Yes 1",
"ascii_bin ascii 65 Yes Yes 1",
"utf8_bin utf8 83 Yes Yes 1",
))
}
func (s *testSuiteWithCliBaseCharset) TestCharsetFeatureCollation(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t" +
"(ascii_char char(10) character set ascii," +
"gbk_char char(10) character set gbk collate gbk_bin," +
"latin_char char(10) character set latin1," +
"utf8mb4_char char(10) character set utf8mb4)",
)
tk.MustExec("insert into t values ('a', 'a', 'a', 'a'), ('a', '啊', '€', 'ㅂ');")
tk.MustQuery("select collation(concat(ascii_char, gbk_char)) from t;").Check(testkit.Rows("gbk_bin", "gbk_bin"))
tk.MustQuery("select collation(concat(gbk_char, ascii_char)) from t;").Check(testkit.Rows("gbk_bin", "gbk_bin"))
tk.MustQuery("select collation(concat(utf8mb4_char, gbk_char)) from t;").Check(testkit.Rows("utf8mb4_bin", "utf8mb4_bin"))
tk.MustQuery("select collation(concat(gbk_char, utf8mb4_char)) from t;").Check(testkit.Rows("utf8mb4_bin", "utf8mb4_bin"))
tk.MustQuery("select collation(concat('啊', convert('啊' using gbk) collate gbk_bin));").Check(testkit.Rows("gbk_bin"))
tk.MustQuery("select collation(concat(_latin1 'a', convert('啊' using gbk) collate gbk_bin));").Check(testkit.Rows("gbk_bin"))
tk.MustGetErrCode("select collation(concat(latin_char, gbk_char)) from t;", mysql.ErrCantAggregate2collations)
tk.MustGetErrCode("select collation(concat(convert('€' using latin1), convert('啊' using gbk) collate gbk_bin));", mysql.ErrCantAggregate2collations)
tk.MustGetErrCode("select collation(concat(utf8mb4_char, gbk_char collate gbk_bin)) from t;", mysql.ErrCantAggregate2collations)
tk.MustGetErrCode("select collation(concat('ㅂ', convert('啊' using gbk) collate gbk_bin));", mysql.ErrCantAggregate2collations)
tk.MustGetErrCode("select collation(concat(ascii_char collate ascii_bin, gbk_char)) from t;", mysql.ErrCantAggregate2collations)
}
func (s *testSuiteWithCliBaseCharset) TestCharsetWithPrefixIndex(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a char(20) charset gbk, b char(20) charset gbk, primary key (a(2)));")
tk.MustExec("insert into t values ('a', '中文'), ('中文', '中文'), ('一二三', '一二三'), ('b', '一二三');")
tk.MustQuery("select * from t").Check(testkit.Rows("a 中文", "中文 中文", "一二三 一二三", "b 一二三"))
tk.MustExec("drop table t")
tk.MustExec("create table t(a char(20) charset gbk, b char(20) charset gbk, unique index idx_a(a(2)));")
tk.MustExec("insert into t values ('a', '中文'), ('中文', '中文'), ('一二三', '一二三'), ('b', '一二三');")
tk.MustQuery("select * from t").Check(testkit.Rows("a 中文", "中文 中文", "一二三 一二三", "b 一二三"))
}
func (s *testSerialSuite2) TestIssue23567(c *C) {
tk := testkit.NewTestKit(c, s.store)
oriProbability := statistics.FeedbackProbability.Load()
statistics.FeedbackProbability.Store(1.0)
defer func() { statistics.FeedbackProbability.Store(oriProbability) }()
failpoint.Enable("github.com/pingcap/tidb/statistics/feedbackNoNDVCollect", `return("")`)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a bigint unsigned, b int, primary key(a))")
tk.MustExec("insert into t values (1, 1), (2, 2)")
tk.MustExec("analyze table t")
// The SQL should not panic.
tk.MustQuery("select count(distinct b) from t")
failpoint.Disable("github.com/pingcap/tidb/statistics/feedbackNoNDVCollect")
}
func (s *testSuite) TestSummaryFailedUpdate(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int as(-a))")
tk.MustExec("insert into t(a) values(1), (3), (7)")
sm := &mockSessionManager1{
PS: make([]*util.ProcessInfo, 0),
}
tk.Se.SetSessionManager(sm)
s.domain.ExpensiveQueryHandle().SetSessionManager(sm)
defer config.RestoreFunc()()
config.UpdateGlobal(func(conf *config.Config) {
conf.OOMAction = config.OOMActionCancel
})
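// With OOMAction set to OOMActionCancel, a statement whose memory usage
// exceeds tidb_mem_quota_query is cancelled and surfaces the
// "Out Of Memory Quota!" error asserted below.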
c.Assert(tk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue)
tk.MustExec("set @@tidb_mem_quota_query=1")
err := tk.ExecToErr("update t set t.a = t.a - 1 where t.a in (select a from t where a < 4)")
c.Assert(err, NotNil)
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
tk.MustExec("set @@tidb_mem_quota_query=1000000000")
tk.MustQuery("select stmt_type from information_schema.statements_summary where digest_text = 'update `t` set `t` . `a` = `t` . `a` - ? where `t` . `a` in ( select `a` from `t` where `a` < ? )'").Check(testkit.Rows("Update"))
}
func (s *testSuite) TestOOMPanicAction(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key, b double);")
tk.MustExec("insert into t values (1,1)")
sm := &mockSessionManager1{
PS: make([]*util.ProcessInfo, 0),
}
tk.Se.SetSessionManager(sm)
s.domain.ExpensiveQueryHandle().SetSessionManager(sm)
defer config.RestoreFunc()()
config.UpdateGlobal(func(conf *config.Config) {
conf.OOMAction = config.OOMActionCancel
})
tk.MustExec("set @@tidb_mem_quota_query=1;")
err := tk.QueryToErr("select sum(b) from t group by a;")
c.Assert(err, NotNil)
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
// Test OOM panic during insert-from-select.
tk.MustExec("drop table if exists t,t1")
tk.MustExec("create table t (a bigint);")
tk.MustExec("create table t1 (a bigint);")
tk.MustExec("set @@tidb_mem_quota_query=200;")
_, err = tk.Exec("insert into t1 values (1),(2),(3),(4),(5);")
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
_, err = tk.Exec("replace into t1 values (1),(2),(3),(4),(5);")
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
tk.MustExec("set @@tidb_mem_quota_query=10000")
tk.MustExec("insert into t1 values (1),(2),(3),(4),(5);")
tk.MustExec("set @@tidb_mem_quota_query=10;")
_, err = tk.Exec("insert into t select a from t1 order by a desc;")
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
_, err = tk.Exec("replace into t select a from t1 order by a desc;")
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
tk.MustExec("set @@tidb_mem_quota_query=10000")
tk.MustExec("insert into t values (1),(2),(3),(4),(5);")
// Set the memory quota to 244 to make this SQL panic during the DeleteExec
// instead of the TableReaderExec.
tk.MustExec("set @@tidb_mem_quota_query=244;")
_, err = tk.Exec("delete from t")
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
tk.MustExec("set @@tidb_mem_quota_query=10000;")
tk.MustExec("delete from t1")
tk.MustExec("insert into t1 values(1)")
tk.MustExec("insert into t values (1),(2),(3),(4),(5);")
tk.MustExec("set @@tidb_mem_quota_query=244;")
_, err = tk.Exec("delete t, t1 from t join t1 on t.a = t1.a")
c.Assert(err, NotNil)
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
tk.MustExec("set @@tidb_mem_quota_query=100000;")
tk.MustExec("truncate table t")
tk.MustExec("insert into t values(1),(2),(3)")
// Set the memory quota to make the SQL panic during UpdateExec instead
// of TableReader.
tk.MustExec("set @@tidb_mem_quota_query=244;")
_, err = tk.Exec("update t set a = 4")
c.Assert(err.Error(), Matches, "Out Of Memory Quota!.*")
}
type testRecoverTable struct {
store kv.Storage
dom *domain.Domain
cluster testutils.Cluster
cli *regionProperityClient
}
func (s *testRecoverTable) SetUpSuite(c *C) {
cli := &regionProperityClient{}
hijackClient := func(c tikv.Client) tikv.Client {
cli.Client = c
return cli
}
s.cli = cli
var err error
s.store, err = mockstore.NewMockStore(
mockstore.WithClientHijacker(hijackClient),
mockstore.WithClusterInspector(func(c testutils.Cluster) {
mockstore.BootstrapWithSingleStore(c)
s.cluster = c
}),
)
c.Assert(err, IsNil)
s.dom, err = session.BootstrapSession(s.store)
c.Assert(err, IsNil)
}
func (s *testRecoverTable) TearDownSuite(c *C) {
s.dom.Close()
s.store.Close()
}
func (s *testRecoverTable) TestRecoverTable(c *C) {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange", `return(true)`), IsNil)
defer func() {
err := failpoint.Disable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange")
c.Assert(err, IsNil)
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database if not exists test_recover")
tk.MustExec("use test_recover")
tk.MustExec("drop table if exists t_recover")
tk.MustExec("create table t_recover (a int);")
timeBeforeDrop, timeAfterDrop, safePointSQL, resetGC := testkit.MockGC(tk)
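// testkit.MockGC is assumed to return a timestamp from before the drop, one
// from after it, a format string for writing tikv_gc_safe_point into
// mysql.tidb, and a closure that restores the original GC state; the calls
// below rely on exactly that contract.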
defer resetGC()
tk.MustExec("insert into t_recover values (1),(2),(3)")
tk.MustExec("drop table t_recover")
// If the GC safe point does not exist in mysql.tidb, recover should fail.
_, err := tk.Exec("recover table t_recover")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "can not get 'tikv_gc_safe_point'")
// set GC safe point
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
// Should recover, and we can drop it straight away.
tk.MustExec("recover table t_recover")
tk.MustExec("drop table t_recover")
err = gcutil.EnableGC(tk.Se)
c.Assert(err, IsNil)
// The recover job is before the GC safe point.
tk.MustExec(fmt.Sprintf(safePointSQL, timeAfterDrop))
_, err = tk.Exec("recover table t_recover")
c.Assert(err, NotNil)
c.Assert(strings.Contains(err.Error(), "Can't find dropped/truncated table 't_recover' in GC safe point"), Equals, true)
// set GC safe point
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
// If there is a new table with the same name, recover should fail.
tk.MustExec("create table t_recover (a int);")
_, err = tk.Exec("recover table t_recover")
c.Assert(err.Error(), Equals, infoschema.ErrTableExists.GenWithStackByArgs("t_recover").Error())
// Rename the conflicting new table away, then recover the original table.
tk.MustExec("rename table t_recover to t_recover2")
// Do the recover.
tk.MustExec("recover table t_recover")
// check recover table meta and data record.
tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1", "2", "3"))
// check recover table autoID.
tk.MustExec("insert into t_recover values (4),(5),(6)")
tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1", "2", "3", "4", "5", "6"))
// check rebase auto id.
tk.MustQuery("select a,_tidb_rowid from t_recover;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 5003"))
// Recover table by a non-existent job ID.
_, err = tk.Exec(fmt.Sprintf("recover table by job %d", 10000000))
c.Assert(err, NotNil)
// Manually disable GC first; after the table is recovered, GC should remain disabled.
err = gcutil.DisableGC(tk.Se)
c.Assert(err, IsNil)
tk.MustExec("delete from t_recover where a > 1")
tk.MustExec("drop table t_recover")
tk.MustExec("recover table t_recover")
// check recover table meta and data record.
tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1"))
// check recover table autoID.
tk.MustExec("insert into t_recover values (7),(8),(9)")
tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1", "7", "8", "9"))
// Recover a truncated table.
tk.MustExec("truncate table t_recover")
tk.MustExec("rename table t_recover to t_recover_new")
tk.MustExec("recover table t_recover")
tk.MustExec("insert into t_recover values (10)")
tk.MustQuery("select * from t_recover;").Check(testkit.Rows("1", "7", "8", "9", "10"))
// Test recovering one table multiple times.
tk.MustExec("drop table t_recover")
tk.MustExec("flashback table t_recover to t_recover_tmp")
_, err = tk.Exec("recover table t_recover")
c.Assert(infoschema.ErrTableExists.Equal(err), IsTrue)
gcEnable, err := gcutil.CheckGCEnable(tk.Se)
c.Assert(err, IsNil)
c.Assert(gcEnable, Equals, false)
}
func (s *testRecoverTable) TestFlashbackTable(c *C) {
c.Assert(failpoint.Enable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange", `return(true)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange"), IsNil)
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database if not exists test_flashback")
tk.MustExec("use test_flashback")
tk.MustExec("drop table if exists t_flashback")
tk.MustExec("create table t_flashback (a int);")
timeBeforeDrop, _, safePointSQL, resetGC := testkit.MockGC(tk)
defer resetGC()
// Set GC safe point
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
// Enable GC.
err := gcutil.EnableGC(tk.Se)
c.Assert(err, IsNil)
tk.MustExec("insert into t_flashback values (1),(2),(3)")
tk.MustExec("drop table t_flashback")
// Test flashback with a non-existent table name.
_, err = tk.Exec("flashback table t_not_exists")
c.Assert(err.Error(), Equals, "Can't find localTemporary/dropped/truncated table: t_not_exists in DDL history jobs")
// Flashback should fail when a new table with the same name already exists.
tk.MustExec("create table t_flashback (a int);")
_, err = tk.Exec("flashback table t_flashback")
c.Assert(err.Error(), Equals, infoschema.ErrTableExists.GenWithStackByArgs("t_flashback").Error())
// Rename the conflicting new table away, then flash the table back.
tk.MustExec("rename table t_flashback to t_flashback_tmp")
// Test for flashback table.
tk.MustExec("flashback table t_flashback")
// Check flashback table meta and data record.
tk.MustQuery("select * from t_flashback;").Check(testkit.Rows("1", "2", "3"))
// Check flashback table autoID.
tk.MustExec("insert into t_flashback values (4),(5),(6)")
tk.MustQuery("select * from t_flashback;").Check(testkit.Rows("1", "2", "3", "4", "5", "6"))
// Check rebase auto id.
tk.MustQuery("select a,_tidb_rowid from t_flashback;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 5003"))
// Test flashback to a new table.
tk.MustExec("drop table t_flashback")
tk.MustExec("create table t_flashback (a int);")
tk.MustExec("flashback table t_flashback to t_flashback2")
// Check flashback table meta and data record.
tk.MustQuery("select * from t_flashback2;").Check(testkit.Rows("1", "2", "3", "4", "5", "6"))
// Check flashback table autoID.
tk.MustExec("insert into t_flashback2 values (7),(8),(9)")
tk.MustQuery("select * from t_flashback2;").Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9"))
// Check rebase auto id.
tk.MustQuery("select a,_tidb_rowid from t_flashback2;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 5003", "7 10001", "8 10002", "9 10003"))
// Test flashing one table back multiple times.
_, err = tk.Exec("flashback table t_flashback to t_flashback4")
c.Assert(infoschema.ErrTableExists.Equal(err), IsTrue)
// Test flashback of a truncated table to a new table.
tk.MustExec("truncate table t_flashback2")
tk.MustExec("flashback table t_flashback2 to t_flashback3")
// Check flashback table meta and data record.
tk.MustQuery("select * from t_flashback3;").Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9"))
// Check flashback table autoID.
tk.MustExec("insert into t_flashback3 values (10),(11)")
tk.MustQuery("select * from t_flashback3;").Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"))
// Check rebase auto id.
tk.MustQuery("select a,_tidb_rowid from t_flashback3;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 5003", "7 10001", "8 10002", "9 10003", "10 15001", "11 15002"))
// Test flashback of a dropped partitioned table.
tk.MustExec("drop table if exists t_p_flashback")
tk.MustExec("create table t_p_flashback (a int) partition by hash(a) partitions 4;")
tk.MustExec("insert into t_p_flashback values (1),(2),(3)")
tk.MustExec("drop table t_p_flashback")
tk.MustExec("flashback table t_p_flashback")
// Check flashback table meta and data record.
tk.MustQuery("select * from t_p_flashback order by a;").Check(testkit.Rows("1", "2", "3"))
// Check flashback table autoID.
tk.MustExec("insert into t_p_flashback values (4),(5)")
tk.MustQuery("select a,_tidb_rowid from t_p_flashback order by a;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002"))
// Test flashback of a truncated partitioned table.
tk.MustExec("truncate table t_p_flashback")
tk.MustExec("flashback table t_p_flashback to t_p_flashback1")
// Check flashback table meta and data record.
tk.MustQuery("select * from t_p_flashback1 order by a;").Check(testkit.Rows("1", "2", "3", "4", "5"))
// Check flashback table autoID.
tk.MustExec("insert into t_p_flashback1 values (6)")
tk.MustQuery("select a,_tidb_rowid from t_p_flashback1 order by a;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 10001"))
tk.MustExec("drop database if exists Test2")
tk.MustExec("create database Test2")
tk.MustExec("use Test2")
tk.MustExec("create table t (a int);")
tk.MustExec("insert into t values (1),(2)")
tk.MustExec("drop table t")
tk.MustExec("flashback table t")
tk.MustQuery("select a from t order by a").Check(testkit.Rows("1", "2"))
tk.MustExec("drop table t")
tk.MustExec("drop database if exists Test3")
tk.MustExec("create database Test3")
tk.MustExec("use Test3")
tk.MustExec("create table t (a int);")
tk.MustExec("drop table t")
tk.MustExec("drop database Test3")
tk.MustExec("use Test2")
tk.MustExec("flashback table t")
tk.MustExec("insert into t values (3)")
tk.MustQuery("select a from t order by a").Check(testkit.Rows("1", "2", "3"))
}
func (s *testRecoverTable) TestRecoverTempTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("create database if not exists test_recover")
tk.MustExec("use test_recover")
tk.MustExec("drop table if exists t_recover")
tk.MustExec("create global temporary table t_recover (a int) on commit delete rows;")
tk.MustExec("use test_recover")
tk.MustExec("drop table if exists tmp2_recover")
tk.MustExec("create temporary table tmp2_recover (a int);")
timeBeforeDrop, _, safePointSQL, resetGC := testkit.MockGC(tk)
defer resetGC()
// Set GC safe point
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
tk.MustExec("drop table t_recover")
tk.MustGetErrCode("recover table t_recover;", errno.ErrUnsupportedDDLOperation)
tk.MustGetErrCode("flashback table t_recover;", errno.ErrUnsupportedDDLOperation)
tk.MustExec("drop table tmp2_recover")
tk.MustGetErrMsg("recover table tmp2_recover;", "Can't find localTemporary/dropped/truncated table: tmp2_recover in DDL history jobs")
tk.MustGetErrMsg("flashback table tmp2_recover;", "Can't find localTemporary/dropped/truncated table: tmp2_recover in DDL history jobs")
}
func (s *testSuiteP2) TestPointGetPreparedPlan(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("drop database if exists ps_text")
defer tk1.MustExec("drop database if exists ps_text")
tk1.MustExec("create database ps_text")
tk1.MustExec("use ps_text")
tk1.MustExec(`create table t (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into t values (1, 1, 1)")
tk1.MustExec("insert into t values (2, 2, 2)")
tk1.MustExec("insert into t values (3, 3, 3)")
pspk1Id, _, _, err := tk1.Se.PrepareStmt("select * from t where a = ?")
c.Assert(err, IsNil)
tk1.Se.GetSessionVars().PreparedStmts[pspk1Id].(*plannercore.CachedPrepareStmt).PreparedAst.UseCache = false
pspk2Id, _, _, err := tk1.Se.PrepareStmt("select * from t where ? = a ")
c.Assert(err, IsNil)
tk1.Se.GetSessionVars().PreparedStmts[pspk2Id].(*plannercore.CachedPrepareStmt).PreparedAst.UseCache = false
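// Setting UseCache = false is assumed to force every ExecutePreparedStmt
// call below to rebuild its point-get plan rather than serving it from the
// prepared-plan cache, so the test exercises plan generation on each run.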
ctx := context.Background()
// The first execution generates the plan.
rs, err := tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
// using the generated plan but with different params
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3"))
// unique index
psuk1Id, _, _, err := tk1.Se.PrepareStmt("select * from t where b = ? ")
c.Assert(err, IsNil)
tk1.Se.GetSessionVars().PreparedStmts[psuk1Id].(*plannercore.CachedPrepareStmt).PreparedAst.UseCache = false
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
// Test schema change: the cached plan should be invalidated.
tk1.MustExec("alter table t add column col4 int default 10 after c")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10"))
tk1.MustExec("alter table t drop index k_b")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
tk1.MustExec(`insert into t values(4, 3, 3, 11)`)
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10", "4 3 3 11"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
tk1.MustExec("delete from t where a = 4")
tk1.MustExec("alter table t add index k_b(b)")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, psuk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
// Use the primary-key plans again.
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk2Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(3)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("3 3 3 10"))
}
func (s *testSuiteP2) TestPointGetPreparedPlanWithCommitMode(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("drop database if exists ps_text")
defer tk1.MustExec("drop database if exists ps_text")
tk1.MustExec("create database ps_text")
tk1.MustExec("use ps_text")
tk1.MustExec(`create table t (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into t values (1, 1, 1)")
tk1.MustExec("insert into t values (2, 2, 2)")
tk1.MustExec("insert into t values (3, 3, 3)")
pspk1Id, _, _, err := tk1.Se.PrepareStmt("select * from t where a = ?")
c.Assert(err, IsNil)
tk1.Se.GetSessionVars().PreparedStmts[pspk1Id].(*plannercore.CachedPrepareStmt).PreparedAst.UseCache = false
ctx := context.Background()
// The first execution generates the plan.
rs, err := tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(0)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(nil)
// using the generated plan but with different params
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
// Next, start a non-autocommit txn.
tk1.MustExec("set autocommit = 0")
tk1.MustExec("begin")
// Try to execute using the point-get plan (this plan should not take the short path).
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
// update rows
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use ps_text")
tk2.MustExec("update t set c = c + 10 where c = 1")
// try to point get again
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 1"))
// try to update in session 1
tk1.MustExec("update t set c = c + 10 where c = 1")
_, err = tk1.Exec("commit")
c.Assert(kv.ErrWriteConflict.Equal(err), IsTrue, Commentf("error: %s", err))
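// The conflict is expected: tk1's optimistic transaction buffered an update
// to the same row that tk2 already committed, so tk1's commit must fail
// with kv.ErrWriteConflict.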
// verify
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(1)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("1 1 11"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, pspk1Id, []types.Datum{types.NewDatum(2)})
c.Assert(err, IsNil)
tk1.ResultSetToResult(rs, Commentf("%v", rs)).Check(testkit.Rows("2 2 2"))
tk2.MustQuery("select * from t where a = 1").Check(testkit.Rows("1 1 11"))
}
func (s *testSuiteP2) TestPointUpdatePreparedPlan(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("drop database if exists pu_test")
defer tk1.MustExec("drop database if exists pu_test")
tk1.MustExec("create database pu_test")
tk1.MustExec("use pu_test")
tk1.MustExec(`create table t (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into t values (1, 1, 1)")
tk1.MustExec("insert into t values (2, 2, 2)")
tk1.MustExec("insert into t values (3, 3, 3)")
updateID1, pc, _, err := tk1.Se.PrepareStmt(`update t set c = c + 1 where a = ?`)
c.Assert(err, IsNil)
tk1.Se.GetSessionVars().PreparedStmts[updateID1].(*plannercore.CachedPrepareStmt).PreparedAst.UseCache = false
c.Assert(pc, Equals, 1)
updateID2, pc, _, err := tk1.Se.PrepareStmt(`update t set c = c + 2 where ? = a`)
c.Assert(err, IsNil)
tk1.Se.GetSessionVars().PreparedStmts[updateID2].(*plannercore.CachedPrepareStmt).PreparedAst.UseCache = false
c.Assert(pc, Equals, 1)
ctx := context.Background()
// The first execution generates the plan.
rs, err := tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 4"))
// using the generated plan but with different params
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 5"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 6"))
// updateID2
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID2, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 8"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID2, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 10"))
// unique index
updUkID1, _, _, err := tk1.Se.PrepareStmt(`update t set c = c + 10 where b = ?`)
c.Assert(err, IsNil)
tk1.Se.GetSessionVars().PreparedStmts[updUkID1].(*plannercore.CachedPrepareStmt).PreparedAst.UseCache = false
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 20"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 30"))
// Test schema change: the cached plan should be invalidated.
tk1.MustExec("alter table t add column col4 int default 10 after c")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 31 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 32 10"))
tk1.MustExec("alter table t drop index k_b")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 42 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 52 10"))
tk1.MustExec("alter table t add unique index k_b(b)")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 62 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updUkID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 72 10"))
tk1.MustQuery("select * from t where a = 1").Check(testkit.Rows("1 1 1 10"))
tk1.MustQuery("select * from t where a = 2").Check(testkit.Rows("2 2 2 10"))
}
func (s *testSuiteP2) TestPointUpdatePreparedPlanWithCommitMode(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk1.MustExec("drop database if exists pu_test2")
defer tk1.MustExec("drop database if exists pu_test2")
tk1.MustExec("create database pu_test2")
tk1.MustExec("use pu_test2")
tk1.MustExec(`create table t (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into t values (1, 1, 1)")
tk1.MustExec("insert into t values (2, 2, 2)")
tk1.MustExec("insert into t values (3, 3, 3)")
ctx := context.Background()
updateID1, _, _, err := tk1.Se.PrepareStmt(`update t set c = c + 1 where a = ?`)
tk1.Se.GetSessionVars().PreparedStmts[updateID1].(*plannercore.CachedPrepareStmt).PreparedAst.UseCache = false
c.Assert(err, IsNil)
// The first execution generates the plan.
rs, err := tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 4"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 5"))
// Next, start a non-autocommit txn.
tk1.MustExec("set autocommit = 0")
tk1.MustExec("begin")
// Try to execute using the point-get plan (this plan should not take the short path).
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 6"))
// update rows
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use pu_test2")
tk2.MustExec(`prepare pu2 from "update t set c = c + 2 where ? = a "`)
tk2.MustExec("set @p3 = 3")
tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 5"))
tk2.MustExec("execute pu2 using @p3")
tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 7"))
tk2.MustExec("execute pu2 using @p3")
tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 9"))
// Session 1's earlier update now conflicts with session 2's committed writes, so commit should fail.
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 6"))
_, err = tk1.Exec("commit")
c.Assert(kv.ErrWriteConflict.Equal(err), IsTrue, Commentf("error: %s", err))
// verify
tk2.MustQuery("select * from t where a = 1").Check(testkit.Rows("1 1 1"))
tk1.MustQuery("select * from t where a = 2").Check(testkit.Rows("2 2 2"))
tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 9"))
tk1.MustQuery("select * from t where a = 2").Check(testkit.Rows("2 2 2"))
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 9"))
// Start another non-autocommit txn.
tk1.MustExec("set autocommit = 0")
tk1.MustExec("begin")
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 10"))
rs, err = tk1.Se.ExecutePreparedStmt(ctx, updateID1, []types.Datum{types.NewDatum(3)})
c.Assert(rs, IsNil)
c.Assert(err, IsNil)
tk1.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 11"))
tk1.MustExec("commit")
tk2.MustQuery("select * from t where a = 3").Check(testkit.Rows("3 3 11"))
}
func (s *testSuite1) TestPartitionHashCode(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec(`create table t(c1 bigint, c2 bigint, c3 bigint, primary key(c1))
partition by hash (c1) partitions 4;`)
wg := sync.WaitGroup{}
for i := 0; i < 5; i++ {
wg.Add(1)
go func() {
defer wg.Done()
tk1 := testkit.NewTestKitWithInit(c, s.store)
for i := 0; i < 5; i++ {
tk1.MustExec("select * from t")
}
}()
}
wg.Wait()
}
func (s *testSuite1) TestAlterDefaultValue(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t(a int, primary key(a))")
tk.MustExec("insert into t(a) values(1)")
tk.MustExec("alter table t add column b int default 1")
tk.MustExec("alter table t alter b set default 2")
tk.MustQuery("select b from t where a = 1").Check(testkit.Rows("1"))
}
type testClusterTableSuite struct {
testSuiteWithCliBase
rpcserver *grpc.Server
listenAddr string
}
func (s *testClusterTableSuite) SetUpSuite(c *C) {
s.testSuiteWithCliBase.SetUpSuite(c)
s.rpcserver, s.listenAddr = s.setUpRPCService(c, "127.0.0.1:0")
}
func (s *testClusterTableSuite) setUpRPCService(c *C, addr string) (*grpc.Server, string) {
sm := &mockSessionManager1{}
sm.PS = append(sm.PS, &util.ProcessInfo{
ID: 1,
User: "root",
Host: "127.0.0.1",
Command: mysql.ComQuery,
})
lis, err := net.Listen("tcp", addr)
c.Assert(err, IsNil)
srv := server.NewRPCServer(config.GetGlobalConfig(), s.dom, sm)
port := lis.Addr().(*net.TCPAddr).Port
addr = fmt.Sprintf("127.0.0.1:%d", port)
go func() {
err = srv.Serve(lis)
c.Assert(err, IsNil)
}()
config.UpdateGlobal(func(conf *config.Config) {
conf.Status.StatusPort = uint(port)
})
return srv, addr
}
func (s *testClusterTableSuite) TearDownSuite(c *C) {
if s.rpcserver != nil {
s.rpcserver.Stop()
s.rpcserver = nil
}
s.testSuiteWithCliBase.TearDownSuite(c)
}
func (s *testSuiteP1) TestPrepareLoadData(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustGetErrCode(`prepare stmt from "load data local infile '/tmp/load_data_test.csv' into table test";`, mysql.ErrUnsupportedPs)
}
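// TestSlowQuery writes a set of rotated slow-log files spanning several days, then
// verifies that time-range predicates over slow_query and cluster_slow_query pick the
// right files and rows, including when the session time zone changes.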
func (s *testClusterTableSuite) TestSlowQuery(c *C) {
logData0 := ""
logData1 := `
# Time: 2020-02-15T18:00:01.000000+08:00
select 1;
# Time: 2020-02-15T19:00:05.000000+08:00
select 2;`
logData2 := `
# Time: 2020-02-16T18:00:01.000000+08:00
select 3;
# Time: 2020-02-16T18:00:05.000000+08:00
select 4;`
logData3 := `
# Time: 2020-02-16T19:00:00.000000+08:00
select 5;
# Time: 2020-02-17T18:00:05.000000+08:00
select 6;`
logData4 := `
# Time: 2020-05-14T19:03:54.314615176+08:00
select 7;`
logData := []string{logData0, logData1, logData2, logData3, logData4}
fileName0 := "tidb-slow-2020-02-14T19-04-05.01.log"
fileName1 := "tidb-slow-2020-02-15T19-04-05.01.log"
fileName2 := "tidb-slow-2020-02-16T19-04-05.01.log"
fileName3 := "tidb-slow-2020-02-17T18-00-05.01.log"
fileName4 := "tidb-slow.log"
fileNames := []string{fileName0, fileName1, fileName2, fileName3, fileName4}
prepareLogs(c, logData, fileNames)
defer func() {
removeFiles(fileNames)
}()
tk := testkit.NewTestKitWithInit(c, s.store)
loc, err := time.LoadLocation("Asia/Shanghai")
c.Assert(err, IsNil)
tk.Se.GetSessionVars().TimeZone = loc
tk.MustExec("use information_schema")
cases := []struct {
prepareSQL string
sql string
result []string
}{
{
sql: "select count(*),min(time),max(time) from %s where time > '2019-01-26 21:51:00' and time < now()",
result: []string{"7|2020-02-15 18:00:01.000000|2020-05-14 19:03:54.314615"},
},
{
sql: "select count(*),min(time),max(time) from %s where time > '2020-02-15 19:00:00' and time < '2020-02-16 18:00:02'",
result: []string{"2|2020-02-15 19:00:05.000000|2020-02-16 18:00:01.000000"},
},
{
sql: "select count(*),min(time),max(time) from %s where time > '2020-02-16 18:00:02' and time < '2020-02-17 17:00:00'",
result: []string{"2|2020-02-16 18:00:05.000000|2020-02-16 19:00:00.000000"},
},
{
sql: "select count(*),min(time),max(time) from %s where time > '2020-02-16 18:00:02' and time < '2020-02-17 20:00:00'",
result: []string{"3|2020-02-16 18:00:05.000000|2020-02-17 18:00:05.000000"},
},
{
sql: "select count(*),min(time),max(time) from %s",
result: []string{"1|2020-05-14 19:03:54.314615|2020-05-14 19:03:54.314615"},
},
{
sql: "select count(*),min(time) from %s where time > '2020-02-16 20:00:00'",
result: []string{"1|2020-02-17 18:00:05.000000"},
},
{
sql: "select count(*) from %s where time > '2020-02-17 20:00:00'",
result: []string{"0"},
},
{
sql: "select query from %s where time > '2019-01-26 21:51:00' and time < now()",
result: []string{"select 1;", "select 2;", "select 3;", "select 4;", "select 5;", "select 6;", "select 7;"},
},
// Test different time zones.
{
prepareSQL: "set @@time_zone = '+00:00'",
sql: "select time from %s where time = '2020-02-17 10:00:05.000000'",
result: []string{"2020-02-17 10:00:05.000000"},
},
{
prepareSQL: "set @@time_zone = '+02:00'",
sql: "select time from %s where time = '2020-02-17 12:00:05.000000'",
result: []string{"2020-02-17 12:00:05.000000"},
},
// Test for issue 17224
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select time from %s where time = '2020-05-14 19:03:54.314615'",
result: []string{"2020-05-14 19:03:54.314615"},
},
}
for _, cas := range cases {
if len(cas.prepareSQL) > 0 {
tk.MustExec(cas.prepareSQL)
}
sql := fmt.Sprintf(cas.sql, "slow_query")
tk.MustQuery(sql).Check(testutil.RowsWithSep("|", cas.result...))
sql = fmt.Sprintf(cas.sql, "cluster_slow_query")
tk.MustQuery(sql).Check(testutil.RowsWithSep("|", cas.result...))
}
}
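// TestIssue20236 covers slow-log entries whose "# Time" header and statement text are
// split across rotated file boundaries, checking that cluster_slow_query still counts
// and orders them correctly.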
func (s *testClusterTableSuite) TestIssue20236(c *C) {
logData0 := ""
logData1 := `
# Time: 2020-02-15T18:00:01.000000+08:00
select 1;
# Time: 2020-02-15T19:00:05.000000+08:00
select 2;
# Time: 2020-02-15T20:00:05.000000+08:00`
logData2 := `select 3;
# Time: 2020-02-16T18:00:01.000000+08:00
select 4;
# Time: 2020-02-16T18:00:05.000000+08:00
select 5;`
logData3 := `
# Time: 2020-02-16T19:00:00.000000+08:00
select 6;
# Time: 2020-02-17T18:00:05.000000+08:00
select 7;
# Time: 2020-02-17T19:00:00.000000+08:00`
logData4 := `select 8;
# Time: 2020-02-17T20:00:00.000000+08:00
select 9
# Time: 2020-05-14T19:03:54.314615176+08:00
select 10;`
logData := []string{logData0, logData1, logData2, logData3, logData4}
fileName0 := "tidb-slow-2020-02-14T19-04-05.01.log"
fileName1 := "tidb-slow-2020-02-15T19-04-05.01.log"
fileName2 := "tidb-slow-2020-02-16T19-04-05.01.log"
fileName3 := "tidb-slow-2020-02-17T18-00-05.01.log"
fileName4 := "tidb-slow.log"
fileNames := []string{fileName0, fileName1, fileName2, fileName3, fileName4}
prepareLogs(c, logData, fileNames)
defer func() {
removeFiles(fileNames)
}()
tk := testkit.NewTestKitWithInit(c, s.store)
loc, err := time.LoadLocation("Asia/Shanghai")
c.Assert(err, IsNil)
tk.Se.GetSessionVars().TimeZone = loc
tk.MustExec("use information_schema")
cases := []struct {
prepareSQL string
sql string
result []string
}{
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select time from cluster_slow_query where time > '2020-02-17 12:00:05.000000' and time < '2020-05-14 20:00:00.000000'",
result: []string{"2020-02-17 18:00:05.000000", "2020-02-17 19:00:00.000000", "2020-05-14 19:03:54.314615"},
},
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select time from cluster_slow_query where time > '2020-02-17 12:00:05.000000' and time < '2020-05-14 20:00:00.000000' order by time desc",
result: []string{"2020-05-14 19:03:54.314615", "2020-02-17 19:00:00.000000", "2020-02-17 18:00:05.000000"},
},
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select time from cluster_slow_query where (time > '2020-02-15 18:00:00' and time < '2020-02-15 20:01:00') or (time > '2020-02-17 18:00:00' and time < '2020-05-14 20:00:00') order by time",
result: []string{"2020-02-15 18:00:01.000000", "2020-02-15 19:00:05.000000", "2020-02-17 18:00:05.000000", "2020-02-17 19:00:00.000000", "2020-05-14 19:03:54.314615"},
},
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select time from cluster_slow_query where (time > '2020-02-15 18:00:00' and time < '2020-02-15 20:01:00') or (time > '2020-02-17 18:00:00' and time < '2020-05-14 20:00:00') order by time desc",
result: []string{"2020-05-14 19:03:54.314615", "2020-02-17 19:00:00.000000", "2020-02-17 18:00:05.000000", "2020-02-15 19:00:05.000000", "2020-02-15 18:00:01.000000"},
},
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select count(*) from cluster_slow_query where time > '2020-02-15 18:00:00.000000' and time < '2020-05-14 20:00:00.000000' order by time desc",
result: []string{"9"},
},
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select count(*) from cluster_slow_query where (time > '2020-02-16 18:00:00' and time < '2020-05-14 20:00:00') or (time > '2020-02-17 18:00:00' and time < '2020-05-17 20:00:00')",
result: []string{"6"},
},
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select count(*) from cluster_slow_query where time > '2020-02-16 18:00:00.000000' and time < '2020-02-17 20:00:00.000000' order by time desc",
result: []string{"5"},
},
{
prepareSQL: "set @@time_zone = '+08:00'",
sql: "select time from cluster_slow_query where time > '2020-02-16 18:00:00.000000' and time < '2020-05-14 20:00:00.000000' order by time desc limit 3",
result: []string{"2020-05-14 19:03:54.314615", "2020-02-17 19:00:00.000000", "2020-02-17 18:00:05.000000"},
},
}
for _, cas := range cases {
if len(cas.prepareSQL) > 0 {
tk.MustExec(cas.prepareSQL)
}
tk.MustQuery(cas.sql).Check(testutil.RowsWithSep("|", cas.result...))
}
}
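// TestSQLDigestTextRetriever checks that RetrieveLocal resolves digests recorded in the
// statement summary to their normalized SQL text, while a digest whose statement was
// never executed stays mapped to an empty string.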
func (s *testClusterTableSuite) TestSQLDigestTextRetriever(c *C) {
tkInit := testkit.NewTestKitWithInit(c, s.store)
tkInit.MustExec("set global tidb_enable_stmt_summary = 1")
tkInit.MustQuery("select @@global.tidb_enable_stmt_summary").Check(testkit.Rows("1"))
tkInit.MustExec("drop table if exists test_sql_digest_text_retriever")
tkInit.MustExec("create table test_sql_digest_text_retriever (id int primary key, v int)")
tk := testkit.NewTestKitWithInit(c, s.store)
c.Assert(tk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue)
tk.MustExec("insert into test_sql_digest_text_retriever values (1, 1)")
insertNormalized, insertDigest := parser.NormalizeDigest("insert into test_sql_digest_text_retriever values (1, 1)")
_, updateDigest := parser.NormalizeDigest("update test_sql_digest_text_retriever set v = v + 1 where id = 1")
r := &expression.SQLDigestTextRetriever{
SQLDigestsMap: map[string]string{
insertDigest.String(): "",
updateDigest.String(): "",
},
}
err := r.RetrieveLocal(context.Background(), tk.Se)
c.Assert(err, IsNil)
c.Assert(r.SQLDigestsMap[insertDigest.String()], Equals, insertNormalized)
c.Assert(r.SQLDigestsMap[updateDigest.String()], Equals, "")
}
func (s *testClusterTableSuite) TestFunctionDecodeSQLDigests(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
c.Assert(tk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue)
tk.MustExec("set global tidb_enable_stmt_summary = 1")
tk.MustQuery("select @@global.tidb_enable_stmt_summary").Check(testkit.Rows("1"))
tk.MustExec("drop table if exists test_func_decode_sql_digests")
tk.MustExec("create table test_func_decode_sql_digests(id int primary key, v int)")
q1 := "begin"
norm1, digest1 := parser.NormalizeDigest(q1)
q2 := "select @@tidb_current_ts"
norm2, digest2 := parser.NormalizeDigest(q2)
q3 := "select id, v from test_func_decode_sql_digests where id = 1 for update"
norm3, digest3 := parser.NormalizeDigest(q3)
// The TIDB_DECODE_SQL_DIGESTS function doesn't actually do any "decoding"; instead it
// queries `statements_summary` and its variants for the corresponding statements.
// Execute the statements so that the queries will be saved into statements_summary table.
tk.MustExec(q1)
// Save the ts to query the transaction from tidb_trx.
ts, err := strconv.ParseUint(tk.MustQuery(q2).Rows()[0][0].(string), 10, 64)
c.Assert(err, IsNil)
c.Assert(ts, Greater, uint64(0))
tk.MustExec(q3)
tk.MustExec("rollback")
// Test statement truncation.
decoded := fmt.Sprintf(`["%s","%s","%s"]`, norm1, norm2, norm3)
digests := fmt.Sprintf(`["%s","%s","%s"]`, digest1, digest2, digest3)
tk.MustQuery("select tidb_decode_sql_digests(?, 0)", digests).Check(testkit.Rows(decoded))
// The three queries are shorter than, equal to, and longer than the truncate length, respectively.
tk.MustQuery("select tidb_decode_sql_digests(?, ?)", digests, len(norm2)).Check(testkit.Rows(
"[\"begin\",\"select @@tidb_current_ts\",\"select `id` , `v` from `...\"]"))
// Empty array.
tk.MustQuery("select tidb_decode_sql_digests('[]')").Check(testkit.Rows("[]"))
// NULL
tk.MustQuery("select tidb_decode_sql_digests(null)").Check(testkit.Rows("<nil>"))
// Array containing wrong types and non-existent digests (each maps to null).
tk.MustQuery("select tidb_decode_sql_digests(?)", fmt.Sprintf(`["%s",1,null,"%s",{"a":1},[2],"%s","","abcde"]`, digest1, digest2, digest3)).
Check(testkit.Rows(fmt.Sprintf(`["%s",null,null,"%s",null,null,"%s",null,null]`, norm1, norm2, norm3)))
// Not a JSON array (produces warnings)
tk.MustQuery(`select tidb_decode_sql_digests('{"a":1}')`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`show warnings`).Check(testkit.Rows(`Warning 1210 The argument can't be unmarshalled as JSON array: '{"a":1}'`))
tk.MustQuery(`select tidb_decode_sql_digests('aabbccdd')`).Check(testkit.Rows("<nil>"))
tk.MustQuery(`show warnings`).Check(testkit.Rows(`Warning 1210 The argument can't be unmarshalled as JSON array: 'aabbccdd'`))
// Invalid argument count.
tk.MustGetErrCode("select tidb_decode_sql_digests('a', 1, 2)", 1582)
tk.MustGetErrCode("select tidb_decode_sql_digests()", 1582)
}
func (s *testClusterTableSuite) TestFunctionDecodeSQLDigestsPrivilege(c *C) {
dropUserTk := testkit.NewTestKitWithInit(c, s.store)
c.Assert(dropUserTk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue)
tk := testkit.NewTestKitWithInit(c, s.store)
c.Assert(tk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue)
tk.MustExec("create user 'testuser'@'localhost'")
defer dropUserTk.MustExec("drop user 'testuser'@'localhost'")
c.Assert(tk.Se.Auth(&auth.UserIdentity{
Username: "testuser",
Hostname: "localhost",
}, nil, nil), IsTrue)
err := tk.ExecToErr("select tidb_decode_sql_digests('[\"aa\"]')")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[expression:1227]Access denied; you need (at least one of) the PROCESS privilege(s) for this operation")
tk = testkit.NewTestKitWithInit(c, s.store)
c.Assert(tk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue)
tk.MustExec("create user 'testuser2'@'localhost'")
defer dropUserTk.MustExec("drop user 'testuser2'@'localhost'")
tk.MustExec("grant process on *.* to 'testuser2'@'localhost'")
c.Assert(tk.Se.Auth(&auth.UserIdentity{
Username: "testuser2",
Hostname: "localhost",
}, nil, nil), IsTrue)
_ = tk.MustQuery("select tidb_decode_sql_digests('[\"aa\"]')")
}
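// prepareLogs writes each entry of logData into the corresponding file in fileNames,
// truncating any existing content; callers are expected to clean up with removeFiles.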
func prepareLogs(c *C, logData []string, fileNames []string) {
writeFile := func(file string, data string) {
f, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
c.Assert(err, IsNil)
_, err = f.Write([]byte(data))
c.Assert(f.Close(), IsNil)
c.Assert(err, IsNil)
}
for i, log := range logData {
writeFile(fileNames[i], log)
}
}
func removeFiles(fileNames []string) {
for _, fileName := range fileNames {
os.Remove(fileName)
}
}
func (s *testSuite1) TestIssue15718(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists tt;")
tk.MustExec("create table tt(a decimal(10, 0), b varchar(1), c time);")
tk.MustExec("insert into tt values(0, '2', null), (7, null, '1122'), (NULL, 'w', null), (NULL, '2', '3344'), (NULL, NULL, '0'), (7, 'f', '33');")
tk.MustQuery("select a and b as d, a or c as e from tt;").Check(testkit.Rows("0 <nil>", "<nil> 1", "0 <nil>", "<nil> 1", "<nil> <nil>", "0 1"))
tk.MustExec("drop table if exists tt;")
tk.MustExec("create table tt(a decimal(10, 0), b varchar(1), c time);")
tk.MustExec("insert into tt values(0, '2', '123'), (7, null, '1122'), (null, 'w', null);")
tk.MustQuery("select a and b as d, a, b from tt order by d limit 1;").Check(testkit.Rows("<nil> 7 <nil>"))
tk.MustQuery("select b or c as d, b, c from tt order by d limit 1;").Check(testkit.Rows("<nil> w <nil>"))
tk.MustExec("drop table if exists t0;")
tk.MustExec("CREATE TABLE t0(c0 FLOAT);")
tk.MustExec("INSERT INTO t0(c0) VALUES (NULL);")
tk.MustQuery("SELECT * FROM t0 WHERE NOT(0 OR t0.c0);").Check(testkit.Rows())
}
func (s *testSuite1) TestIssue15767(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists tt;")
tk.MustExec("create table t(a int, b char);")
tk.MustExec("insert into t values (1,'s'),(2,'b'),(1,'c'),(2,'e'),(1,'a');")
tk.MustExec("insert into t select * from t;")
tk.MustExec("insert into t select * from t;")
tk.MustExec("insert into t select * from t;")
tk.MustQuery("select b, count(*) from ( select b from t order by a limit 20 offset 2) as s group by b order by b;").Check(testkit.Rows("a 6", "c 7", "s 7"))
}
func (s *testSuite1) TestIssue16025(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t0;")
tk.MustExec("CREATE TABLE t0(c0 NUMERIC PRIMARY KEY);")
tk.MustExec("INSERT IGNORE INTO t0(c0) VALUES (NULL);")
tk.MustQuery("SELECT * FROM t0 WHERE c0;").Check(testkit.Rows())
}
func (s *testSuite1) TestIssue16854(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t;")
tk.MustExec("CREATE TABLE `t` ( `a` enum('WAITING','PRINTED','STOCKUP','CHECKED','OUTSTOCK','PICKEDUP','WILLBACK','BACKED') DEFAULT NULL)")
tk.MustExec("insert into t values(1),(2),(3),(4),(5),(6),(7);")
for i := 0; i < 7; i++ {
tk.MustExec("insert into t select * from t;")
}
tk.MustExec("set @@tidb_max_chunk_size=100;")
tk.MustQuery("select distinct a from t order by a").Check(testkit.Rows("WAITING", "PRINTED", "STOCKUP", "CHECKED", "OUTSTOCK", "PICKEDUP", "WILLBACK"))
tk.MustExec("drop table t")
tk.MustExec("CREATE TABLE `t` ( `a` set('WAITING','PRINTED','STOCKUP','CHECKED','OUTSTOCK','PICKEDUP','WILLBACK','BACKED') DEFAULT NULL)")
tk.MustExec("insert into t values(1),(2),(3),(4),(5),(6),(7);")
for i := 0; i < 7; i++ {
tk.MustExec("insert into t select * from t;")
}
tk.MustExec("set @@tidb_max_chunk_size=100;")
tk.MustQuery("select distinct a from t order by a").Check(testkit.Rows("WAITING", "PRINTED", "WAITING,PRINTED", "STOCKUP", "WAITING,STOCKUP", "PRINTED,STOCKUP", "WAITING,PRINTED,STOCKUP"))
tk.MustExec("drop table t")
}
func (s *testSuite) TestIssue16921(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t (a float);")
tk.MustExec("create index a on t(a);")
tk.MustExec("insert into t values (1.0), (NULL), (0), (2.0);")
tk.MustQuery("select `a` from `t` use index (a) where !`a`;").Check(testkit.Rows("0"))
tk.MustQuery("select `a` from `t` ignore index (a) where !`a`;").Check(testkit.Rows("0"))
tk.MustQuery("select `a` from `t` use index (a) where `a`;").Check(testkit.Rows("1", "2"))
tk.MustQuery("select `a` from `t` ignore index (a) where `a`;").Check(testkit.Rows("1", "2"))
tk.MustQuery("select a from t use index (a) where not a is true;").Check(testkit.Rows("<nil>", "0"))
tk.MustQuery("select a from t use index (a) where not not a is true;").Check(testkit.Rows("1", "2"))
tk.MustQuery("select a from t use index (a) where not not a;").Check(testkit.Rows("1", "2"))
tk.MustQuery("select a from t use index (a) where not not not a is true;").Check(testkit.Rows("<nil>", "0"))
tk.MustQuery("select a from t use index (a) where not not not a;").Check(testkit.Rows("0"))
}
func (s *testSuite) TestIssue19100(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t1, t2;")
tk.MustExec("create table t1 (c decimal);")
tk.MustExec("create table t2 (c decimal, key(c));")
tk.MustExec("insert into t1 values (null);")
tk.MustExec("insert into t2 values (null);")
tk.MustQuery("select count(*) from t1 where not c;").Check(testkit.Rows("0"))
tk.MustQuery("select count(*) from t2 where not c;").Check(testkit.Rows("0"))
tk.MustQuery("select count(*) from t1 where c;").Check(testkit.Rows("0"))
tk.MustQuery("select count(*) from t2 where c;").Check(testkit.Rows("0"))
}
// this is from jira issue #5856
func (s *testSuite1) TestInsertValuesWithSubQuery(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t2(a int, b int, c int)")
defer tk.MustExec("drop table if exists t2")
// should not reference upper scope
c.Assert(tk.ExecToErr("insert into t2 values (11, 8, (select not b))"), NotNil)
c.Assert(tk.ExecToErr("insert into t2 set a = 11, b = 8, c = (select b))"), NotNil)
// subqueries referencing the target table are allowed
tk.MustExec("insert into t2 values(1, 1, (select b from t2))")
tk.MustQuery("select * from t2").Check(testkit.Rows("1 1 <nil>"))
tk.MustExec("insert into t2 set a = 1, b = 1, c = (select b+1 from t2)")
tk.MustQuery("select * from t2").Check(testkit.Rows("1 1 <nil>", "1 1 2"))
// inserts referencing other columns of the row should work normally
tk.MustExec("delete from t2")
tk.MustExec("insert into t2 values(2, 4, a)")
tk.MustQuery("select * from t2").Check(testkit.Rows("2 4 2"))
tk.MustExec("insert into t2 set a = 3, b = 5, c = b")
tk.MustQuery("select * from t2").Check(testkit.Rows("2 4 2", "3 5 5"))
// issue #30626
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int)")
// TODO: the insert should succeed and leave (81, 1) in the table
err := tk.ExecToErr("insert into t values ( 81, ( select ( SELECT '1' AS `c0` WHERE '1' >= `subq_0`.`c0` ) as `c1` FROM ( SELECT '1' AS `c0` ) AS `subq_0` ) );")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Insert's SET operation or VALUES_LIST doesn't support complex subqueries now")
err = tk.ExecToErr("insert into t set a = 81, b = (select ( SELECT '1' AS `c0` WHERE '1' >= `subq_0`.`c0` ) as `c1` FROM ( SELECT '1' AS `c0` ) AS `subq_0` );")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Insert's SET operation or VALUES_LIST doesn't support complex subqueries now")
}
func (s *testSuite1) TestDIVZeroInPartitionExpr(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1(a int) partition by range (10 div a) (partition p0 values less than (10), partition p1 values less than maxvalue)")
defer tk.MustExec("drop table if exists t1")
tk.MustExec("set @@sql_mode=''")
tk.MustExec("insert into t1 values (NULL), (0), (1)")
tk.MustExec("set @@sql_mode='STRICT_ALL_TABLES,ERROR_FOR_DIVISION_BY_ZERO'")
tk.MustGetErrCode("insert into t1 values (NULL), (0), (1)", mysql.ErrDivisionByZero)
}
func (s *testSuite1) TestInsertIntoGivenPartitionSet(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1")
tk.MustExec(`create table t1(
a int(11) DEFAULT NULL,
b varchar(10) DEFAULT NULL,
UNIQUE KEY idx_a (a)) PARTITION BY RANGE (a)
(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
PARTITION p1 VALUES LESS THAN (20) ENGINE = InnoDB,
PARTITION p2 VALUES LESS THAN (30) ENGINE = InnoDB,
PARTITION p3 VALUES LESS THAN (40) ENGINE = InnoDB,
PARTITION p4 VALUES LESS THAN MAXVALUE ENGINE = InnoDB)`)
defer tk.MustExec("drop table if exists t1")
// insert into
tk.MustExec("insert into t1 partition(p0) values(1, 'a'), (2, 'b')")
tk.MustQuery("select * from t1 partition(p0) order by a").Check(testkit.Rows("1 a", "2 b"))
tk.MustExec("insert into t1 partition(p0, p1) values(3, 'c'), (4, 'd')")
tk.MustQuery("select * from t1 partition(p1)").Check(testkit.Rows())
tk.MustGetErrMsg("insert into t1 values(1, 'a')", "[kv:1062]Duplicate entry '1' for key 'idx_a'")
tk.MustGetErrMsg("insert into t1 partition(p0, p_non_exist) values(1, 'a')", "[table:1735]Unknown partition 'p_non_exist' in table 't1'")
tk.MustGetErrMsg("insert into t1 partition(p0, p1) values(40, 'a')", "[table:1748]Found a row not matching the given partition set")
// replace into
tk.MustExec("replace into t1 partition(p0) values(1, 'replace')")
tk.MustExec("replace into t1 partition(p0, p1) values(3, 'replace'), (4, 'replace')")
tk.MustExec("replace into t1 values(1, 'a')")
tk.MustQuery("select * from t1 partition (p0) order by a").Check(testkit.Rows("1 a", "2 b", "3 replace", "4 replace"))
tk.MustGetErrMsg("replace into t1 partition(p0, p_non_exist) values(1, 'a')", "[table:1735]Unknown partition 'p_non_exist' in table 't1'")
tk.MustGetErrMsg("replace into t1 partition(p0, p1) values(40, 'a')", "[table:1748]Found a row not matching the given partition set")
tk.MustExec("truncate table t1")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b char(10))")
defer tk.MustExec("drop table if exists t")
// insert into a non-partitioned table
tk.MustGetErrMsg("insert into t partition(p0, p1) values(1, 'a')", "[planner:1747]PARTITION () clause on non partitioned table")
// insert into from select
tk.MustExec("insert into t values(1, 'a'), (2, 'b')")
tk.MustExec("insert into t1 partition(p0) select * from t")
tk.MustQuery("select * from t1 partition(p0) order by a").Check(testkit.Rows("1 a", "2 b"))
tk.MustExec("truncate table t")
tk.MustExec("insert into t values(3, 'c'), (4, 'd')")
tk.MustExec("insert into t1 partition(p0, p1) select * from t")
tk.MustQuery("select * from t1 partition(p1) order by a").Check(testkit.Rows())
tk.MustQuery("select * from t1 partition(p0) order by a").Check(testkit.Rows("1 a", "2 b", "3 c", "4 d"))
tk.MustGetErrMsg("insert into t1 select 1, 'a'", "[kv:1062]Duplicate entry '1' for key 'idx_a'")
tk.MustGetErrMsg("insert into t1 partition(p0, p_non_exist) select 1, 'a'", "[table:1735]Unknown partition 'p_non_exist' in table 't1'")
tk.MustGetErrMsg("insert into t1 partition(p0, p1) select 40, 'a'", "[table:1748]Found a row not matching the given partition set")
// replace into from select
tk.MustExec("replace into t1 partition(p0) select 1, 'replace'")
tk.MustExec("truncate table t")
tk.MustExec("insert into t values(3, 'replace'), (4, 'replace')")
tk.MustExec("replace into t1 partition(p0, p1) select * from t")
tk.MustExec("replace into t1 select 1, 'a'")
tk.MustQuery("select * from t1 partition (p0) order by a").Check(testkit.Rows("1 a", "2 b", "3 replace", "4 replace"))
tk.MustGetErrMsg("replace into t1 partition(p0, p_non_exist) select 1, 'a'", "[table:1735]Unknown partition 'p_non_exist' in table 't1'")
tk.MustGetErrMsg("replace into t1 partition(p0, p1) select 40, 'a'", "[table:1748]Found a row not matching the given partition set")
}
func (s *testSuite1) TestUpdateGivenPartitionSet(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1,t2,t3,t4")
tk.MustExec(`create table t1(
a int(11),
b varchar(10) DEFAULT NULL,
primary key idx_a (a)) PARTITION BY RANGE (a)
(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
PARTITION p1 VALUES LESS THAN (20) ENGINE = InnoDB,
PARTITION p2 VALUES LESS THAN (30) ENGINE = InnoDB,
PARTITION p3 VALUES LESS THAN (40) ENGINE = InnoDB,
PARTITION p4 VALUES LESS THAN MAXVALUE ENGINE = InnoDB)`)
tk.MustExec(`create table t2(
a int(11) DEFAULT NULL,
b varchar(10) DEFAULT NULL) PARTITION BY RANGE (a)
(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
PARTITION p1 VALUES LESS THAN (20) ENGINE = InnoDB,
PARTITION p2 VALUES LESS THAN (30) ENGINE = InnoDB,
PARTITION p3 VALUES LESS THAN (40) ENGINE = InnoDB,
PARTITION p4 VALUES LESS THAN MAXVALUE ENGINE = InnoDB)`)
tk.MustExec(`create table t3 (a int(11), b varchar(10) default null)`)
defer tk.MustExec("drop table if exists t1,t2,t3")
tk.MustExec("insert into t3 values(1, 'a'), (2, 'b'), (11, 'c'), (21, 'd')")
err := tk.ExecToErr("update t3 partition(p0) set a = 40 where a = 2")
c.Assert(err.Error(), Equals, "[planner:1747]PARTITION () clause on non partitioned table")
// update with primary key change
tk.MustExec("insert into t1 values(1, 'a'), (2, 'b'), (11, 'c'), (21, 'd')")
err = tk.ExecToErr("update t1 partition(p0, p1) set a = 40")
c.Assert(err.Error(), Equals, "[table:1748]Found a row not matching the given partition set")
err = tk.ExecToErr("update t1 partition(p0) set a = 40 where a = 2")
c.Assert(err.Error(), Equals, "[table:1748]Found a row not matching the given partition set")
// test a non-existent partition.
err = tk.ExecToErr("update t1 partition (p0, p_non_exist) set a = 40")
c.Assert(err.Error(), Equals, "[table:1735]Unknown partition 'p_non_exist' in table 't1'")
// test join.
err = tk.ExecToErr("update t1 partition (p0), t3 set t1.a = 40 where t3.a = 2")
c.Assert(err.Error(), Equals, "[table:1748]Found a row not matching the given partition set")
tk.MustExec("update t1 partition(p0) set a = 3 where a = 2")
tk.MustExec("update t1 partition(p0, p3) set a = 33 where a = 1")
// update without partition change
tk.MustExec("insert into t2 values(1, 'a'), (2, 'b'), (11, 'c'), (21, 'd')")
err = tk.ExecToErr("update t2 partition(p0, p1) set a = 40")
c.Assert(err.Error(), Equals, "[table:1748]Found a row not matching the given partition set")
err = tk.ExecToErr("update t2 partition(p0) set a = 40 where a = 2")
c.Assert(err.Error(), Equals, "[table:1748]Found a row not matching the given partition set")
tk.MustExec("update t2 partition(p0) set a = 3 where a = 2")
tk.MustExec("update t2 partition(p0, p3) set a = 33 where a = 1")
tk.MustExec("create table t4(a int primary key, b int) partition by hash(a) partitions 2")
tk.MustExec("insert into t4(a, b) values(1, 1),(2, 2),(3, 3);")
err = tk.ExecToErr("update t4 partition(p0) set a = 5 where a = 2")
c.Assert(err.Error(), Equals, "[table:1748]Found a row not matching the given partition set")
}
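// TestApplyCache inspects the Apply operator's runtime stats: with all-equal outer rows
// the cache is reported as ON with a high hit ratio, while fully distinct outer rows
// report the cache as OFF.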
func (s *testSuiteP2) TestApplyCache(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t(a int);")
tk.MustExec("insert into t values (1),(1),(1),(1),(1),(1),(1),(1),(1);")
tk.MustExec("analyze table t;")
result := tk.MustQuery("explain analyze SELECT count(1) FROM (SELECT (SELECT min(a) FROM t as t2 WHERE t2.a > t1.a) AS a from t as t1) t;")
c.Assert(result.Rows()[1][0], Equals, "└─Apply_39")
var (
ind int
flag bool
)
value := (result.Rows()[1][5]).(string)
for ind = 0; ind < len(value)-5; ind++ {
if value[ind:ind+5] == "cache" {
flag = true
break
}
}
c.Assert(flag, Equals, true)
c.Assert(value[ind:], Equals, "cache:ON, cacheHitRatio:88.889%")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t(a int);")
tk.MustExec("insert into t values (1),(2),(3),(4),(5),(6),(7),(8),(9);")
tk.MustExec("analyze table t;")
result = tk.MustQuery("explain analyze SELECT count(1) FROM (SELECT (SELECT min(a) FROM t as t2 WHERE t2.a > t1.a) AS a from t as t1) t;")
c.Assert(result.Rows()[1][0], Equals, "└─Apply_39")
flag = false
value = (result.Rows()[1][5]).(string)
for ind = 0; ind < len(value)-5; ind++ {
if value[ind:ind+5] == "cache" {
flag = true
break
}
}
c.Assert(flag, Equals, true)
c.Assert(value[ind:], Equals, "cache:OFF")
}
// For issue 17256
func (s *testSuite) TestGenerateColumnReplace(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (a int, b int as (a + 1) virtual not null, unique index idx(b));")
tk.MustExec("REPLACE INTO `t1` (`a`) VALUES (2);")
tk.MustExec("REPLACE INTO `t1` (`a`) VALUES (2);")
tk.MustQuery("select * from t1").Check(testkit.Rows("2 3"))
tk.MustExec("insert into `t1` (`a`) VALUES (2) on duplicate key update a = 3;")
tk.MustQuery("select * from t1").Check(testkit.Rows("3 4"))
}
func (s *testSlowQuery) TestSlowQueryWithoutSlowLog(c *C) {
tk := testkit.NewTestKit(c, s.store)
originCfg := config.GetGlobalConfig()
newCfg := *originCfg
newCfg.Log.SlowQueryFile = "tidb-slow-not-exist.log"
newCfg.Log.SlowThreshold = math.MaxUint64
config.StoreGlobalConfig(&newCfg)
defer func() {
config.StoreGlobalConfig(originCfg)
}()
tk.MustQuery("select query from information_schema.slow_query").Check(testkit.Rows())
tk.MustQuery("select query from information_schema.slow_query where time > '2020-09-15 12:16:39' and time < now()").Check(testkit.Rows())
}
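// TestSlowQuerySensitiveQuery checks that password-bearing statements (CREATE USER,
// ALTER USER, SET PASSWORD) appear in the slow log with the password replaced by ***.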
func (s *testSlowQuery) TestSlowQuerySensitiveQuery(c *C) {
tk := testkit.NewTestKit(c, s.store)
originCfg := config.GetGlobalConfig()
newCfg := *originCfg
f, err := os.CreateTemp("", "tidb-slow-*.log")
c.Assert(err, IsNil)
f.Close()
newCfg.Log.SlowQueryFile = f.Name()
config.StoreGlobalConfig(&newCfg)
defer func() {
tk.MustExec("set tidb_slow_log_threshold=300;")
config.StoreGlobalConfig(originCfg)
err = os.Remove(newCfg.Log.SlowQueryFile)
c.Assert(err, IsNil)
}()
err = logutil.InitLogger(newCfg.Log.ToLogConfig())
c.Assert(err, IsNil)
tk.MustExec("set tidb_slow_log_threshold=0;")
tk.MustExec("drop user if exists user_sensitive;")
tk.MustExec("create user user_sensitive identified by '123456789';")
tk.MustExec("alter user 'user_sensitive'@'%' identified by 'abcdefg';")
tk.MustExec("set password for 'user_sensitive'@'%' = 'xyzuvw';")
tk.MustQuery("select query from `information_schema`.`slow_query` " +
"where (query like 'set password%' or query like 'create user%' or query like 'alter user%') " +
"and query like '%user_sensitive%' order by query;").
Check(testkit.Rows(
"alter user {user_sensitive@% password = ***};",
"create user {user_sensitive@% password = ***};",
"set password for user user_sensitive@%;",
))
}
func (s *testSlowQuery) TestSlowQueryPrepared(c *C) {
tk := testkit.NewTestKit(c, s.store)
originCfg := config.GetGlobalConfig()
newCfg := *originCfg
f, err := os.CreateTemp("", "tidb-slow-*.log")
c.Assert(err, IsNil)
f.Close()
newCfg.Log.SlowQueryFile = f.Name()
config.StoreGlobalConfig(&newCfg)
defer func() {
tk.MustExec("set tidb_slow_log_threshold=300;")
tk.MustExec("set tidb_redact_log=0;")
config.StoreGlobalConfig(originCfg)
os.Remove(newCfg.Log.SlowQueryFile)
}()
err = logutil.InitLogger(newCfg.Log.ToLogConfig())
c.Assert(err, IsNil)
tk.MustExec("set tidb_slow_log_threshold=0;")
tk.MustExec(`prepare mystmt1 from 'select sleep(?), 1';`)
tk.MustExec("SET @num = 0.01;")
tk.MustExec("execute mystmt1 using @num;")
tk.MustQuery("SELECT Query FROM `information_schema`.`slow_query` " +
"where query like 'select%sleep%' order by time desc limit 1").
Check(testkit.Rows(
"select sleep(?), 1 [arguments: 0.01];",
))
tk.MustExec("set tidb_redact_log=1;")
tk.MustExec(`prepare mystmt2 from 'select sleep(?), 2';`)
tk.MustExec("execute mystmt2 using @num;")
tk.MustQuery("SELECT Query FROM `information_schema`.`slow_query` " +
"where query like 'select%sleep%' order by time desc limit 1").
Check(testkit.Rows(
"select `sleep` ( ? ) , ?;",
))
}
func (s *testSlowQuery) TestLogSlowLogIndex(c *C) {
tk := testkit.NewTestKit(c, s.store)
f, err := os.CreateTemp("", "tidb-slow-*.log")
c.Assert(err, IsNil)
f.Close()
defer config.RestoreFunc()()
config.UpdateGlobal(func(conf *config.Config) {
conf.Log.SlowQueryFile = f.Name()
})
err = logutil.InitLogger(config.GetGlobalConfig().Log.ToLogConfig())
c.Assert(err, IsNil)
tk.MustExec("use test")
tk.MustExec("create table t (a int, b int,index idx(a));")
tk.MustExec("set tidb_slow_log_threshold=0;")
tk.MustQuery("select * from t use index (idx) where a in (1) union select * from t use index (idx) where a in (2,3);")
tk.MustExec("set tidb_slow_log_threshold=300;")
tk.MustQuery("select index_names from `information_schema`.`slow_query` " +
"where query like 'select%union%' limit 1").
Check(testkit.Rows(
"[t:idx]",
))
}
func (s *testSlowQuery) TestSlowQuery(c *C) {
tk := testkit.NewTestKit(c, s.store)
f, err := os.CreateTemp("", "tidb-slow-*.log")
c.Assert(err, IsNil)
_, err = f.WriteString(`
# Time: 2020-10-13T20:08:13.970563+08:00
select * from t;
# Time: 2020-10-16T20:08:13.970563+08:00
select * from t;
`)
c.Assert(err, IsNil)
err = f.Close()
c.Assert(err, IsNil)
executor.ParseSlowLogBatchSize = 1
originCfg := config.GetGlobalConfig()
newCfg := *originCfg
newCfg.Log.SlowQueryFile = f.Name()
config.StoreGlobalConfig(&newCfg)
defer func() {
executor.ParseSlowLogBatchSize = 64
config.StoreGlobalConfig(originCfg)
err = os.Remove(newCfg.Log.SlowQueryFile)
c.Assert(err, IsNil)
}()
err = logutil.InitLogger(newCfg.Log.ToLogConfig())
c.Assert(err, IsNil)
tk.MustQuery("select count(*) from `information_schema`.`slow_query` where time > '2020-10-16 20:08:13' and time < '2020-10-16 21:08:13'").Check(testkit.Rows("1"))
tk.MustQuery("select count(*) from `information_schema`.`slow_query` where time > '2019-10-13 20:08:13' and time < '2020-10-16 21:08:13'").Check(testkit.Rows("2"))
}
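// TestKillTableReader forces distsql retries through a failpoint, marks the session as
// killed while a table scan is in flight, and expects the query to fail with
// ErrQueryInterrupted rather than hang.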
func (s *testSerialSuite) TestKillTableReader(c *C) {
var retry = "github.com/tikv/client-go/v2/locate/mockRetrySendReqToRegion"
defer func() {
c.Assert(failpoint.Disable(retry), IsNil)
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int)")
tk.MustExec("insert into t values (1),(2),(3)")
tk.MustExec("set @@tidb_distsql_scan_concurrency=1")
atomic.StoreUint32(&tk.Se.GetSessionVars().Killed, 0)
c.Assert(failpoint.Enable(retry, `return(true)`), IsNil)
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {
defer wg.Done()
time.Sleep(1 * time.Second)
err := tk.QueryToErr("select * from t")
c.Assert(err, NotNil)
c.Assert(int(terror.ToSQLError(errors.Cause(err).(*terror.Error)).Code), Equals, int(executor.ErrQueryInterrupted.Code()))
}()
atomic.StoreUint32(&tk.Se.GetSessionVars().Killed, 1)
wg.Wait()
}
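// TestPrevStmtDesensitization checks that with redact-log enabled, the recorded
// previous statement and duplicate-key error messages replace literal values with "?"
// placeholders.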
func (s *testSerialSuite) TestPrevStmtDesensitization(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec(fmt.Sprintf("set @@session.%v=1", variable.TiDBRedactLog))
defer tk.MustExec(fmt.Sprintf("set @@session.%v=0", variable.TiDBRedactLog))
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int, unique key (a))")
tk.MustExec("begin")
tk.MustExec("insert into t values (1),(2)")
c.Assert(tk.Se.GetSessionVars().PrevStmt.String(), Equals, "insert into `t` values ( ? ) , ( ? )")
c.Assert(tk.ExecToErr("insert into t values (1)").Error(), Equals, `[kv:1062]Duplicate entry '?' for key 'a'`)
}
func (s *testSuite) TestIssue19372(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1, t2;")
tk.MustExec("create table t1 (c_int int, c_str varchar(40), key(c_str));")
tk.MustExec("create table t2 like t1;")
tk.MustExec("insert into t1 values (1, 'a'), (2, 'b'), (3, 'c');")
tk.MustExec("insert into t2 select * from t1;")
tk.MustQuery("select (select t2.c_str from t2 where t2.c_str <= t1.c_str and t2.c_int in (1, 2) order by t2.c_str limit 1) x from t1 order by c_int;").Check(testkit.Rows("a", "a", "a"))
}
func (s *testSerialSuite1) TestCollectCopRuntimeStats(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (a int, b int)")
tk.MustExec("set tidb_enable_collect_execution_info=1;")
c.Assert(failpoint.Enable("tikvclient/tikvStoreRespResult", `return(true)`), IsNil)
rows := tk.MustQuery("explain analyze select * from t1").Rows()
c.Assert(len(rows), Equals, 2)
explain := fmt.Sprintf("%v", rows[0])
c.Assert(explain, Matches, ".*rpc_num: 2, .*regionMiss:.*")
c.Assert(failpoint.Disable("tikvclient/tikvStoreRespResult"), IsNil)
}
func (s *testSerialSuite1) TestIndexLookupRuntimeStats(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (a int, b int, index(a))")
tk.MustExec("insert into t1 values (1,2),(2,3),(3,4)")
sql := "explain analyze select * from t1 use index(a) where a > 1;"
rows := tk.MustQuery(sql).Rows()
c.Assert(len(rows), Equals, 3)
explain := fmt.Sprintf("%v", rows[0])
c.Assert(explain, Matches, ".*time:.*loops:.*index_task:.*table_task: {total_time.*num.*concurrency.*}.*")
indexExplain := fmt.Sprintf("%v", rows[1])
tableExplain := fmt.Sprintf("%v", rows[2])
c.Assert(indexExplain, Matches, ".*time:.*loops:.*cop_task:.*")
c.Assert(tableExplain, Matches, ".*time:.*loops:.*cop_task:.*")
}
func (s *testSerialSuite1) TestHashAggRuntimeStats(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (a int, b int)")
tk.MustExec("insert into t1 values (1,2),(2,3),(3,4)")
sql := "explain analyze SELECT /*+ HASH_AGG() */ count(*) FROM t1 WHERE a < 10;"
rows := tk.MustQuery(sql).Rows()
c.Assert(len(rows), Equals, 5)
explain := fmt.Sprintf("%v", rows[0])
c.Assert(explain, Matches, ".*time:.*loops:.*partial_worker:{wall_time:.*concurrency:.*task_num:.*tot_wait:.*tot_exec:.*tot_time:.*max:.*p95:.*}.*final_worker:{wall_time:.*concurrency:.*task_num:.*tot_wait:.*tot_exec:.*tot_time:.*max:.*p95:.*}.*")
}
func (s *testSerialSuite1) TestIndexMergeRuntimeStats(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t1")
tk.MustExec("set @@tidb_enable_index_merge = 1")
tk.MustExec("create table t1(id int primary key, a int, b int, c int, d int)")
tk.MustExec("create index t1a on t1(a)")
tk.MustExec("create index t1b on t1(b)")
tk.MustExec("insert into t1 values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5)")
sql := "explain analyze select /*+ use_index_merge(t1, primary, t1a) */ * from t1 where id < 2 or a > 4;"
rows := tk.MustQuery(sql).Rows()
c.Assert(len(rows), Equals, 4)
explain := fmt.Sprintf("%v", rows[0])
c.Assert(explain, Matches, ".*time:.*loops:.*index_task:{fetch_handle:.*, merge:.*}.*table_task:{num.*concurrency.*fetch_row.*wait_time.*}.*")
tableRangeExplain := fmt.Sprintf("%v", rows[1])
indexExplain := fmt.Sprintf("%v", rows[2])
tableExplain := fmt.Sprintf("%v", rows[3])
c.Assert(tableRangeExplain, Matches, ".*time:.*loops:.*cop_task:.*")
c.Assert(indexExplain, Matches, ".*time:.*loops:.*cop_task:.*")
c.Assert(tableExplain, Matches, ".*time:.*loops:.*cop_task:.*")
tk.MustExec("set @@tidb_enable_collect_execution_info=0;")
sql = "select /*+ use_index_merge(t1, primary, t1a) */ * from t1 where id < 2 or a > 4 order by a"
tk.MustQuery(sql).Check(testkit.Rows("1 1 1 1 1", "5 5 5 5 5"))
}
func (s *testSuite) TestCollectDMLRuntimeStats(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (a int, b int, unique index (a))")
testSQLs := []string{
"insert ignore into t1 values (5,5);",
"insert into t1 values (5,5) on duplicate key update a=a+1;",
"replace into t1 values (5,6),(6,7)",
"update t1 set a=a+1 where a=6;",
}
getRootStats := func() string {
info := tk.Se.ShowProcess()
c.Assert(info, NotNil)
p, ok := info.Plan.(plannercore.Plan)
c.Assert(ok, IsTrue)
stats := tk.Se.GetSessionVars().StmtCtx.RuntimeStatsColl.GetRootStats(p.ID())
return stats.String()
}
for _, sql := range testSQLs {
tk.MustExec(sql)
c.Assert(getRootStats(), Matches, "time.*loops.*Get.*num_rpc.*total_time.*")
}
// Test for lock keys stats.
tk.MustExec("begin pessimistic")
tk.MustExec("update t1 set b=b+1")
c.Assert(getRootStats(), Matches, "time.*lock_keys.*time.* region.* keys.* lock_rpc:.* rpc_count.*")
tk.MustExec("rollback")
tk.MustExec("begin pessimistic")
tk.MustQuery("select * from t1 for update").Check(testkit.Rows("5 6", "7 7"))
c.Assert(getRootStats(), Matches, "time.*lock_keys.*time.* region.* keys.* lock_rpc:.* rpc_count.*")
tk.MustExec("rollback")
tk.MustExec("begin pessimistic")
tk.MustExec("insert ignore into t1 values (9,9)")
c.Assert(getRootStats(), Matches, "time:.*, loops:.*, prepare:.*, check_insert: {total_time:.*, mem_insert_time:.*, prefetch:.*, rpc:{BatchGet:{num_rpc:.*, total_time:.*}}}.*")
tk.MustExec("rollback")
tk.MustExec("begin pessimistic")
tk.MustExec("insert into t1 values (10,10) on duplicate key update a=a+1")
c.Assert(getRootStats(), Matches, "time:.*, loops:.*, prepare:.*, check_insert: {total_time:.*, mem_insert_time:.*, prefetch:.*, rpc:{BatchGet:{num_rpc:.*, total_time:.*}.*")
tk.MustExec("rollback")
tk.MustExec("begin pessimistic")
tk.MustExec("insert into t1 values (1,2)")
c.Assert(getRootStats(), Matches, "time:.*, loops:.*, prepare:.*, insert:.*")
tk.MustExec("rollback")
tk.MustExec("begin pessimistic")
tk.MustExec("insert ignore into t1 values(11,11) on duplicate key update `a`=`a`+1")
c.Assert(getRootStats(), Matches, "time:.*, loops:.*, prepare:.*, check_insert: {total_time:.*, mem_insert_time:.*, prefetch:.*, rpc:.*}")
tk.MustExec("rollback")
tk.MustExec("begin pessimistic")
tk.MustExec("replace into t1 values (1,4)")
c.Assert(getRootStats(), Matches, "time:.*, loops:.*, prefetch:.*, rpc:.*")
tk.MustExec("rollback")
}
func (s *testSuite) TestIssue13758(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1 (pk int(11) primary key, a int(11) not null, b int(11), key idx_b(b), key idx_a(a))")
tk.MustExec("insert into `t1` values (1,1,0),(2,7,6),(3,2,null),(4,1,null),(5,4,5)")
tk.MustExec("create table t2 (a int)")
tk.MustExec("insert into t2 values (1),(null)")
tk.MustQuery("select (select a from t1 use index(idx_a) where b >= t2.a order by a limit 1) as field from t2").Check(testkit.Rows(
"4",
"<nil>",
))
}
func (s *testCoprCache) SetUpSuite(c *C) {
originConfig := config.GetGlobalConfig()
config.StoreGlobalConfig(config.NewConfig())
defer config.StoreGlobalConfig(originConfig)
cli := &regionProperityClient{}
hijackClient := func(c tikv.Client) tikv.Client {
cli.Client = c
return cli
}
var err error
s.store, err = mockstore.NewMockStore(
mockstore.WithClusterInspector(func(c testutils.Cluster) {
mockstore.BootstrapWithSingleStore(c)
s.cls = c
}),
mockstore.WithClientHijacker(hijackClient),
)
c.Assert(err, IsNil)
s.dom, err = session.BootstrapSession(s.store)
c.Assert(err, IsNil)
}
func (s *testCoprCache) TearDownSuite(c *C) {
s.dom.Close()
s.store.Close()
}
func (s *testCoprCache) TestIntegrationCopCache(c *C) {
originConfig := config.GetGlobalConfig()
config.StoreGlobalConfig(config.NewConfig())
defer config.StoreGlobalConfig(originConfig)
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int primary key)")
tblInfo, err := s.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
tid := tblInfo.Meta().ID
tk.MustExec(`insert into t values(1),(2),(3),(4),(5),(6),(7),(8),(9),(10),(11),(12)`)
tableStart := tablecodec.GenTableRecordPrefix(tid)
s.cls.SplitKeys(tableStart, tableStart.PrefixNext(), 6)
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/mockstore/unistore/cophandler/mockCopCacheInUnistore", `return(123)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/mockstore/unistore/cophandler/mockCopCacheInUnistore"), IsNil)
}()
rows := tk.MustQuery("explain analyze select * from t where t.a < 10").Rows()
c.Assert(rows[0][2], Equals, "9")
c.Assert(strings.Contains(rows[0][5].(string), "cop_task: {num: 5"), Equals, true)
c.Assert(strings.Contains(rows[0][5].(string), "copr_cache_hit_ratio: 0.00"), Equals, true)
rows = tk.MustQuery("explain analyze select * from t").Rows()
c.Assert(rows[0][2], Equals, "12")
c.Assert(strings.Contains(rows[0][5].(string), "cop_task: {num: 6"), Equals, true)
hitRatioIdx := strings.Index(rows[0][5].(string), "copr_cache_hit_ratio:") + len("copr_cache_hit_ratio: ")
c.Assert(hitRatioIdx >= len("copr_cache_hit_ratio: "), Equals, true)
hitRatio, err := strconv.ParseFloat(rows[0][5].(string)[hitRatioIdx:hitRatioIdx+4], 64)
c.Assert(err, IsNil)
c.Assert(hitRatio > 0, Equals, true)
// Test with the coprocessor cache disabled.
cfg := config.NewConfig()
cfg.TiKVClient.CoprCache.CapacityMB = 0
config.StoreGlobalConfig(cfg)
rows = tk.MustQuery("explain analyze select * from t where t.a < 10").Rows()
c.Assert(rows[0][2], Equals, "9")
c.Assert(strings.Contains(rows[0][5].(string), "copr_cache: disabled"), Equals, true)
}
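// TestCoprocessorOOMTicase sets a memory quota between one and two mock copResponse
// sizes, then uses failpoints to fire the log-only OOM action at different stages of a
// coprocessor read; every query must still return complete, correct results.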
func (s *testSerialSuite) TestCoprocessorOOMTicase(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`set @@tidb_wait_split_region_finish=1`)
// create table for non keep-order case
tk.MustExec("drop table if exists t5")
tk.MustExec("create table t5(id int)")
tk.MustQuery(`split table t5 between (0) and (10000) regions 10`).Check(testkit.Rows("9 1"))
// create table for keep-order case
tk.MustExec("drop table if exists t6")
tk.MustExec("create table t6(id int, index(id))")
tk.MustQuery(`split table t6 between (0) and (10000) regions 10`).Check(testkit.Rows("10 1"))
tk.MustQuery("split table t6 INDEX id between (0) and (10000) regions 10;").Check(testkit.Rows("10 1"))
count := 10
for i := 0; i < count; i++ {
tk.MustExec(fmt.Sprintf("insert into t5 (id) values (%v)", i))
tk.MustExec(fmt.Sprintf("insert into t6 (id) values (%v)", i))
}
defer config.RestoreFunc()()
config.UpdateGlobal(func(conf *config.Config) {
conf.OOMAction = config.OOMActionLog
})
testcases := []struct {
name string
sql string
}{
{
name: "keep Order",
sql: "select id from t6 order by id",
},
{
name: "non keep Order",
sql: "select id from t5",
},
}
f := func() {
for _, testcase := range testcases {
c.Log(testcase.name)
// larger than one copResponse, smaller than two copResponses
quota := 2*copr.MockResponseSizeForTest - 100
se, err := session.CreateSession4Test(s.store)
c.Check(err, IsNil)
tk.Se = se
tk.MustExec("use test")
tk.MustExec(fmt.Sprintf("set @@tidb_mem_quota_query=%v;", quota))
var expect []string
for i := 0; i < count; i++ {
expect = append(expect, fmt.Sprintf("%v", i))
}
tk.MustQuery(testcase.sql).Sort().Check(testkit.Rows(expect...))
// assert the OOM action worked: max consumed exceeds the memory quota
c.Assert(tk.Se.GetSessionVars().StmtCtx.MemTracker.MaxConsumed(), Greater, int64(quota))
se.Close()
}
}
// ticase-4169: trigger the OOM action twice after workers have consumed all the data
err := failpoint.Enable("github.com/pingcap/tidb/store/copr/ticase-4169", `return(true)`)
c.Assert(err, IsNil)
f()
err = failpoint.Disable("github.com/pingcap/tidb/store/copr/ticase-4169")
c.Assert(err, IsNil)
// ticase-4170: trigger the OOM action twice after the iterator has received all the data.
err = failpoint.Enable("github.com/pingcap/tidb/store/copr/ticase-4170", `return(true)`)
c.Assert(err, IsNil)
f()
err = failpoint.Disable("github.com/pingcap/tidb/store/copr/ticase-4170")
c.Assert(err, IsNil)
// ticase-4171: trigger the OOM action before any data is read or consumed
err = failpoint.Enable("github.com/pingcap/tidb/store/copr/ticase-4171", `return(true)`)
c.Assert(err, IsNil)
f()
err = failpoint.Disable("github.com/pingcap/tidb/store/copr/ticase-4171")
c.Assert(err, IsNil)
}
func (s *testSuite) TestIssue20237(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t, s")
tk.MustExec("create table t(a date, b float)")
tk.MustExec("create table s(b float)")
tk.MustExec(`insert into t values(NULL,-37), ("2011-11-04",105), ("2013-03-02",-22), ("2006-07-02",-56), (NULL,124), (NULL,111), ("2018-03-03",-5);`)
tk.MustExec(`insert into s values(-37),(105),(-22),(-56),(124),(105),(111),(-5);`)
tk.MustQuery(`select count(distinct t.a, t.b) from t join s on t.b= s.b;`).Check(testkit.Rows("4"))
}
func (s *testSerialSuite) TestIssue19148(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a decimal(16, 2));")
tk.MustExec("select * from t where a > any_value(a);")
ctx := tk.Se.(sessionctx.Context)
is := domain.GetDomain(ctx).InfoSchema()
tblInfo, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
c.Assert(int(tblInfo.Meta().Columns[0].Flag), Equals, 0)
}
func (s *testSuite) TestIssue19667(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE t (a DATETIME)")
tk.MustExec("INSERT INTO t VALUES('1988-04-17 01:59:59')")
tk.MustQuery(`SELECT DATE_ADD(a, INTERVAL 1 SECOND) FROM t`).Check(testkit.Rows("1988-04-17 02:00:00"))
}
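// issue20975Prepare creates two sessions and a seeded table t1; the TestIssue20975*
// cases below run transactions that hold locks in one session while the other performs
// DDL, and the commits must still succeed.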
func issue20975Prepare(c *C, store kv.Storage) (*testkit.TestKit, *testkit.TestKit) {
tk1 := testkit.NewTestKit(c, store)
tk2 := testkit.NewTestKit(c, store)
tk1.MustExec("use test")
tk1.MustExec("drop table if exists t1, t2")
tk2.MustExec("use test")
tk1.MustExec("create table t1(id int primary key, c int)")
tk1.MustExec("insert into t1 values(1, 10), (2, 20)")
return tk1, tk2
}
func (s *testSuite) TestIssue20975UpdateNoChange(c *C) {
tk1, tk2 := issue20975Prepare(c, s.store)
tk1.MustExec("begin pessimistic")
tk1.MustExec("update t1 set c=c")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
}
func (s *testSuite) TestIssue20975SelectForUpdate(c *C) {
tk1, tk2 := issue20975Prepare(c, s.store)
tk1.MustExec("begin")
tk1.MustExec("select * from t1 for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
}
func (s *testSuite) TestIssue20975SelectForUpdatePointGet(c *C) {
tk1, tk2 := issue20975Prepare(c, s.store)
tk1.MustExec("begin")
tk1.MustExec("select * from t1 where id=1 for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 where id=1 for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
}
func (s *testSuite) TestIssue20975SelectForUpdateBatchPointGet(c *C) {
tk1, tk2 := issue20975Prepare(c, s.store)
tk1.MustExec("begin")
tk1.MustExec("select * from t1 where id in (1, 2) for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 where id in (1, 2) for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
}
func issue20975PreparePartitionTable(c *C, store kv.Storage) (*testkit.TestKit, *testkit.TestKit) {
tk1 := testkit.NewTestKit(c, store)
tk2 := testkit.NewTestKit(c, store)
tk1.MustExec("use test")
tk1.MustExec("drop table if exists t1, t2")
tk2.MustExec("use test")
tk1.MustExec(`create table t1(id int primary key, c int) partition by range (id) (
partition p1 values less than (10),
partition p2 values less than (20)
)`)
tk1.MustExec("insert into t1 values(1, 10), (2, 20), (11, 30), (12, 40)")
return tk1, tk2
}
func (s *testSuite) TestIssue20975UpdateNoChangeWithPartitionTable(c *C) {
tk1, tk2 := issue20975PreparePartitionTable(c, s.store)
// Set projection concurrency to avoid data race here.
// TODO: remove this line after fixing https://github.com/pingcap/tidb/issues/25496
tk1.Se.GetSessionVars().Concurrency.SetProjectionConcurrency(0)
tk1.MustExec("begin pessimistic")
tk1.MustExec("update t1 set c=c")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
}
func (s *testSuite) TestIssue20975SelectForUpdateWithPartitionTable(c *C) {
tk1, tk2 := issue20975PreparePartitionTable(c, s.store)
tk1.MustExec("begin")
tk1.MustExec("select * from t1 for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
}
func (s *testSuite) TestIssue20975SelectForUpdatePointGetWithPartitionTable(c *C) {
tk1, tk2 := issue20975PreparePartitionTable(c, s.store)
tk1.MustExec("begin")
tk1.MustExec("select * from t1 where id=1 for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin")
tk1.MustExec("select * from t1 where id=12 for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 where id=1 for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 where id=12 for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
}
func (s *testSuite) TestIssue20975SelectForUpdateBatchPointGetWithPartitionTable(c *C) {
tk1, tk2 := issue20975PreparePartitionTable(c, s.store)
tk1.MustExec("begin")
tk1.MustExec("select * from t1 where id in (1, 2) for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin")
tk1.MustExec("select * from t1 where id in (11, 12) for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
tk1.MustExec("begin")
tk1.MustExec("select * from t1 where id in (1, 11) for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 where id in (1, 2) for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 where id in (11, 12) for update")
tk2.MustExec("create table t2(a int)")
tk1.MustExec("commit")
tk1.MustExec("begin pessimistic")
tk1.MustExec("select * from t1 where id in (1, 11) for update")
tk2.MustExec("drop table t2")
tk1.MustExec("commit")
}
func (s *testSuite) TestIssue20305(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t2 (a year(4))")
tk.MustExec("insert into t2 values(69)")
tk.MustQuery("select * from t2 where a <= 69").Check(testkit.Rows("2069"))
// the following test is a regression test that matches MySQL's behavior.
tk.MustExec("drop table if exists t3")
tk.MustExec("CREATE TABLE `t3` (`y` year DEFAULT NULL, `a` int DEFAULT NULL)")
tk.MustExec("INSERT INTO `t3` VALUES (2069, 70), (2010, 11), (2155, 2156), (2069, 69)")
tk.MustQuery("SELECT * FROM `t3` where y <= a").Check(testkit.Rows("2155 2156"))
}
func (s *testSuite) TestIssue22817(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t3")
tk.MustExec("create table t3 (a year)")
tk.MustExec("insert into t3 values (1991), (\"1992\"), (\"93\"), (94)")
tk.MustQuery("select * from t3 where a >= NULL").Check(testkit.Rows())
}
func (s *testSuite) TestIssue13953(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("CREATE TABLE `t` (`id` int(11) DEFAULT NULL, `tp_bigint` bigint(20) DEFAULT NULL )")
tk.MustExec("insert into t values(0,1),(1,9215570218099803537)")
tk.MustQuery("select A.tp_bigint,B.id from t A join t B on A.id < B.id * 16 where A.tp_bigint = B.id;").Check(
testkit.Rows("1 1"))
}
func (s *testSuite) TestZeroDateTimeCompatibility(c *C) {
SQLs := []string{
`select YEAR(0000-00-00), YEAR("0000-00-00")`,
`select MONTH(0000-00-00), MONTH("0000-00-00")`,
`select DAYOFWEEK(0000-00-00), DAYOFWEEK("0000-00-00")`,
`select DAYOFMONTH(0000-00-00), DAYOFMONTH("0000-00-00")`,
`select DAYOFYEAR(0000-00-00), DAYOFYEAR("0000-00-00")`,
`select QUARTER(0000-00-00), QUARTER("0000-00-00")`,
`select EXTRACT(DAY FROM 0000-00-00), EXTRACT(DAY FROM "0000-00-00")`,
`select EXTRACT(MONTH FROM 0000-00-00), EXTRACT(MONTH FROM "0000-00-00")`,
`select EXTRACT(YEAR FROM 0000-00-00), EXTRACT(YEAR FROM "0000-00-00")`,
`select EXTRACT(WEEK FROM 0000-00-00), EXTRACT(WEEK FROM "0000-00-00")`,
`select EXTRACT(QUARTER FROM 0000-00-00), EXTRACT(QUARTER FROM "0000-00-00")`,
}
tk := testkit.NewTestKit(c, s.store)
for _, t := range SQLs {
fmt.Println(t)
tk.MustQuery(t).Check(testkit.Rows("0 <nil>"))
c.Assert(tk.Se.GetSessionVars().StmtCtx.WarningCount(), Equals, uint16(1))
}
}
// https://github.com/pingcap/tidb/issues/24165.
func (s *testSuite) TestInvalidDateValueInCreateTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t;")
// Test for sql mode 'NO_ZERO_IN_DATE'.
tk.MustExec("set @@sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE';")
tk.MustGetErrCode("create table t (a datetime default '2999-00-00 00:00:00');", errno.ErrInvalidDefault)
tk.MustExec("create table t (a datetime);")
tk.MustGetErrCode("alter table t modify column a datetime default '2999-00-00 00:00:00';", errno.ErrInvalidDefault)
tk.MustExec("drop table if exists t;")
// Test for sql mode 'NO_ZERO_DATE'.
tk.MustExec("set @@sql_mode='STRICT_TRANS_TABLES,NO_ZERO_DATE';")
tk.MustGetErrCode("create table t (a datetime default '0000-00-00 00:00:00');", errno.ErrInvalidDefault)
tk.MustExec("create table t (a datetime);")
tk.MustGetErrCode("alter table t modify column a datetime default '0000-00-00 00:00:00';", errno.ErrInvalidDefault)
tk.MustExec("drop table if exists t;")
// Remove NO_ZERO_DATE and NO_ZERO_IN_DATE.
tk.MustExec("set @@sql_mode='STRICT_TRANS_TABLES';")
// Test create table with zero datetime as a default value.
tk.MustExec("create table t (a datetime default '2999-00-00 00:00:00');")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t (a datetime default '0000-00-00 00:00:00');")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t (a datetime);")
tk.MustExec("alter table t modify column a datetime default '2999-00-00 00:00:00';")
tk.MustExec("alter table t modify column a datetime default '0000-00-00 00:00:00';")
tk.MustExec("drop table if exists t;")
// Test create table with invalid datetime(02-30) as a default value.
tk.MustExec("set @@sql_mode='STRICT_TRANS_TABLES';")
tk.MustGetErrCode("create table t (a datetime default '2999-02-30 00:00:00');", errno.ErrInvalidDefault)
tk.MustExec("drop table if exists t;")
// NO_ZERO_IN_DATE and NO_ZERO_DATE have nothing to do with invalid datetime(02-30).
tk.MustExec("set @@sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE';")
tk.MustGetErrCode("create table t (a datetime default '2999-02-30 00:00:00');", errno.ErrInvalidDefault)
tk.MustExec("drop table if exists t;")
// ALLOW_INVALID_DATES allows invalid datetime(02-30).
tk.MustExec("set @@sql_mode='STRICT_TRANS_TABLES,ALLOW_INVALID_DATES';")
tk.MustExec("create table t (a datetime default '2999-02-30 00:00:00');")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t (a datetime);")
tk.MustExec("alter table t modify column a datetime default '2999-02-30 00:00:00';")
tk.MustExec("drop table if exists t;")
}
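// TestOOMActionPriority checks the memory tracker's fallback action chain: five rate-limit actions first, then
// spill actions, with the log action last.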
func (s *testSuite) TestOOMActionPriority(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t0")
tk.MustExec("drop table if exists t1")
tk.MustExec("drop table if exists t2")
tk.MustExec("drop table if exists t3")
tk.MustExec("drop table if exists t4")
tk.MustExec("create table t0(a int)")
tk.MustExec("insert into t0 values(1)")
tk.MustExec("create table t1(a int)")
tk.MustExec("insert into t1 values(1)")
tk.MustExec("create table t2(a int)")
tk.MustExec("insert into t2 values(1)")
tk.MustExec("create table t3(a int)")
tk.MustExec("insert into t3 values(1)")
tk.MustExec("create table t4(a int)")
tk.MustExec("insert into t4 values(1)")
tk.MustQuery("select * from t0 join t1 join t2 join t3 join t4 order by t0.a").Check(testkit.Rows("1 1 1 1 1"))
action := tk.Se.GetSessionVars().StmtCtx.MemTracker.GetFallbackForTest()
// Check that the first 5 actions are rate-limit actions.
for i := 0; i < 5; i++ {
c.Assert(action.GetPriority(), Equals, int64(memory.DefRateLimitPriority))
action = action.GetFallback()
}
for action.GetFallback() != nil {
c.Assert(action.GetPriority(), Equals, int64(memory.DefSpillPriority))
action = action.GetFallback()
}
c.Assert(action.GetPriority(), Equals, int64(memory.DefLogPriority))
}
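// TestIssue21441 runs a wide UNION ALL with chunk size 1 (under the issue21441 failpoint) to exercise
// multi-chunk result assembly with LIMIT and with different executor concurrency settings.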
func (s *testSerialSuite) TestIssue21441(c *C) {
failpoint.Enable("github.com/pingcap/tidb/executor/issue21441", `return`)
defer failpoint.Disable("github.com/pingcap/tidb/executor/issue21441")
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int)")
tk.MustExec(`insert into t values(1),(2),(3)`)
tk.Se.GetSessionVars().InitChunkSize = 1
tk.Se.GetSessionVars().MaxChunkSize = 1
sql := `
select a from t union all
select a from t union all
select a from t union all
select a from t union all
select a from t union all
select a from t union all
select a from t union all
select a from t`
tk.MustQuery(sql).Sort().Check(testkit.Rows(
"1", "1", "1", "1", "1", "1", "1", "1",
"2", "2", "2", "2", "2", "2", "2", "2",
"3", "3", "3", "3", "3", "3", "3", "3",
))
tk.MustQuery("select a from (" + sql + ") t order by a limit 4").Check(testkit.Rows("1", "1", "1", "1"))
tk.MustQuery("select a from (" + sql + ") t order by a limit 7, 4").Check(testkit.Rows("1", "2", "2", "2"))
tk.MustExec("set @@tidb_executor_concurrency = 2")
c.Assert(tk.Se.GetSessionVars().UnionConcurrency(), Equals, 2)
tk.MustQuery("select a from (" + sql + ") t order by a limit 4").Check(testkit.Rows("1", "1", "1", "1"))
tk.MustQuery("select a from (" + sql + ") t order by a limit 7, 4").Check(testkit.Rows("1", "2", "2", "2"))
}
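// Test17780 checks that `c0 like 0` does not match a double column holding 1e30, so the UPDATE is a no-op.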
func (s *testSuite) Test17780(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t0")
tk.MustExec("create table t0 (c0 double)")
tk.MustExec("insert into t0 values (1e30)")
tk.MustExec("update t0 set c0=0 where t0.c0 like 0")
// the update should not affect c0
tk.MustQuery("select count(*) from t0 where c0 = 0").Check(testkit.Rows("0"))
}
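// TestIssue9918 checks that a YEAR value of 0 casts to the string "0000".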
func (s *testSuite) TestIssue9918(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a year)")
tk.MustExec("insert into t values(0)")
tk.MustQuery("select cast(a as char) from t").Check(testkit.Rows("0000"))
}
func (s *testSuite) Test13004(c *C) {
tk := testkit.NewTestKit(c, s.store)
// See https://dev.mysql.com/doc/refman/5.6/en/date-and-time-literals.html: the TIMESTAMP literal syntax actually produces a datetime value.
tk.MustQuery("SELECT TIMESTAMP '9999-01-01 00:00:00'").Check(testkit.Rows("9999-01-01 00:00:00"))
}
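// Test12178 checks that a long numeric string extracted via JSON_EXTRACT fits into a wide DECIMAL column
// without loss.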
func (s *testSuite) Test12178(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists ta")
tk.MustExec("create table ta(id decimal(60,2))")
tk.MustExec("insert into ta values (JSON_EXTRACT('{\"c\": \"1234567890123456789012345678901234567890123456789012345\"}', '$.c'))")
tk.MustQuery("select * from ta").Check(testkit.Rows("1234567890123456789012345678901234567890123456789012345.00"))
}
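// Test11883 checks comparisons between JSON values and SQL strings: json_extract of a JSON string compares
// equal to the bare string, while a string literal in an IN list is not implicitly parsed as JSON.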
func (s *testSuite) Test11883(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (f1 json)")
tk.MustExec("insert into t1(f1) values ('\"asd\"'),('\"asdf\"'),('\"asasas\"')")
tk.MustQuery("select f1 from t1 where json_extract(f1,\"$\") in (\"asd\",\"asasas\",\"asdf\")").Check(testkit.Rows("\"asd\"", "\"asdf\"", "\"asasas\""))
tk.MustQuery("select f1 from t1 where json_extract(f1, '$') = 'asd'").Check(testkit.Rows("\"asd\""))
// MySQL returns an empty result for the following SQL; we suspect that is a MySQL bug.
tk.MustQuery("select f1 from t1 where case json_extract(f1,\"$\") when \"asd\" then 1 else 0 end").Check(testkit.Rows("\"asd\""))
tk.MustExec("delete from t1")
tk.MustExec("insert into t1 values ('{\"a\": 1}')")
// The first value in the tuple should be interpreted as a string rather than JSON, so no row will be returned.
tk.MustQuery("select f1 from t1 where f1 in ('{\"a\": 1}', 'asdf', 'asdf')").Check(testkit.Rows())
// and if we explicitly cast it into a JSON value, the check will pass
tk.MustQuery("select f1 from t1 where f1 in (cast('{\"a\": 1}' as JSON), 'asdf', 'asdf')").Check(testkit.Rows("{\"a\": 1}"))
tk.MustQuery("select json_extract('\"asd\"', '$') = 'asd'").Check(testkit.Rows("1"))
tk.MustQuery("select json_extract('\"asd\"', '$') <=> 'asd'").Check(testkit.Rows("1"))
tk.MustQuery("select json_extract('\"asd\"', '$') <> 'asd'").Check(testkit.Rows("0"))
tk.MustQuery("select json_extract('{\"f\": 1.0}', '$.f') = 1.0").Check(testkit.Rows("1"))
tk.MustQuery("select json_extract('{\"f\": 1.0}', '$.f') = '1.0'").Check(testkit.Rows("0"))
tk.MustQuery("select json_extract('{\"n\": 1}', '$') = '{\"n\": 1}'").Check(testkit.Rows("0"))
tk.MustQuery("select json_extract('{\"n\": 1}', '$') <> '{\"n\": 1}'").Check(testkit.Rows("1"))
}
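// Test15492 checks ORDER BY on select-list aliases combined with LIMIT.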
func (s *testSuite) Test15492(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int, b int)")
tk.MustExec("insert into t values (2, 20), (1, 10), (3, 30)")
tk.MustQuery("select a + 1 as field1, a as field2 from t order by field1, field2 limit 2").Check(testkit.Rows("2 1", "3 2"))
}
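// TestTrackAggMemoryUsage checks that explain analyze reports memory usage for hash and stream aggregation
// only when tidb_track_aggregate_memory_usage is on.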
func (s *testSuite) TestTrackAggMemoryUsage(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t(a int)")
tk.MustExec("insert into t values(1)")
tk.MustExec("set tidb_track_aggregate_memory_usage = off;")
rows := tk.MustQuery("explain analyze select /*+ HASH_AGG() */ sum(a) from t").Rows()
c.Assert(rows[0][7], Equals, "N/A")
rows = tk.MustQuery("explain analyze select /*+ STREAM_AGG() */ sum(a) from t").Rows()
c.Assert(rows[0][7], Equals, "N/A")
tk.MustExec("set tidb_track_aggregate_memory_usage = on;")
rows = tk.MustQuery("explain analyze select /*+ HASH_AGG() */ sum(a) from t").Rows()
c.Assert(rows[0][7], Not(Equals), "N/A")
rows = tk.MustQuery("explain analyze select /*+ STREAM_AGG() */ sum(a) from t").Rows()
c.Assert(rows[0][7], Not(Equals), "N/A")
}
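// Test12201 checks CASE expressions over enum columns, which compare by the enum's numeric index.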
func (s *testSuite) Test12201(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists e")
tk.MustExec("create table e (e enum('a', 'b'))")
tk.MustExec("insert into e values ('a'), ('b')")
tk.MustQuery("select * from e where case 1 when 0 then e end").Check(testkit.Rows())
tk.MustQuery("select * from e where case 1 when 1 then e end").Check(testkit.Rows("a", "b"))
tk.MustQuery("select * from e where case e when 1 then e end").Check(testkit.Rows("a"))
tk.MustQuery("select * from e where case 1 when e then e end").Check(testkit.Rows("a"))
}
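// TestIssue21451 checks ordering semantics for enum and set columns: per the expected results, max/min compare
// by string value while ORDER BY follows the definition order.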
func (s *testSuite) TestIssue21451(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (en enum('c', 'b', 'a'));")
tk.MustExec("insert into t values ('a'), ('b'), ('c');")
tk.MustQuery("select max(en) from t;").Check(testkit.Rows("c"))
tk.MustQuery("select min(en) from t;").Check(testkit.Rows("a"))
tk.MustQuery("select * from t order by en;").Check(testkit.Rows("c", "b", "a"))
tk.MustExec("drop table t")
tk.MustExec("create table t(s set('c', 'b', 'a'));")
tk.MustExec("insert into t values ('a'), ('b'), ('c');")
tk.MustQuery("select max(s) from t;").Check(testkit.Rows("c"))
tk.MustQuery("select min(s) from t;").Check(testkit.Rows("a"))
tk.MustExec("drop table t")
tk.MustExec("create table t(id int, en enum('c', 'b', 'a'))")
tk.MustExec("insert into t values (1, 'a'),(2, 'b'), (3, 'c'), (1, 'c');")
tk.MustQuery("select id, max(en) from t where id=1 group by id;").Check(testkit.Rows("1 c"))
tk.MustQuery("select id, min(en) from t where id=1 group by id;").Check(testkit.Rows("1 a"))
tk.MustExec("drop table t")
tk.MustExec("create table t(id int, s set('c', 'b', 'a'));")
tk.MustExec("insert into t values (1, 'a'),(2, 'b'), (3, 'c'), (1, 'c');")
tk.MustQuery("select id, max(s) from t where id=1 group by id;").Check(testkit.Rows("1 c"))
tk.MustQuery("select id, min(s) from t where id=1 group by id;").Check(testkit.Rows("1 a"))
tk.MustExec("drop table t")
tk.MustExec("create table t(e enum('e','d','c','b','a'))")
tk.MustExec("insert into t values ('e'),('d'),('c'),('b'),('a');")
tk.MustQuery("select * from t order by e limit 1;").Check(testkit.Rows("e"))
tk.MustExec("drop table t")
tk.MustExec("create table t(s set('e', 'd', 'c', 'b', 'a'))")
tk.MustExec("insert into t values ('e'),('d'),('c'),('b'),('a');")
tk.MustQuery("select * from t order by s limit 1;").Check(testkit.Rows("e"))
tk.MustExec("drop table t")
}
func (s *testSuite) TestIssue15563(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustQuery("select distinct 0.7544678906163867 / 0.68234634;").Check(testkit.Rows("1.10569639842486251190"))
}
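// TestIssue22231 checks that malformed datetime literals are truncated to valid values with a warning rather
// than being rejected.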
func (s *testSuite) TestIssue22231(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t_issue_22231")
tk.MustExec("create table t_issue_22231(a datetime)")
tk.MustExec("insert into t_issue_22231 values('2020--05-20 01:22:12')")
tk.MustQuery("select * from t_issue_22231 where a >= '2020-05-13 00:00:00 00:00:00' and a <= '2020-05-28 23:59:59 00:00:00'").Check(testkit.Rows("2020-05-20 01:22:12"))
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1292 Truncated incorrect datetime value: '2020-05-13 00:00:00 00:00:00'", "Warning 1292 Truncated incorrect datetime value: '2020-05-28 23:59:59 00:00:00'"))
tk.MustQuery("select cast('2020-10-22 10:31-10:12' as datetime)").Check(testkit.Rows("2020-10-22 10:31:10"))
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1292 Truncated incorrect datetime value: '2020-10-22 10:31-10:12'"))
tk.MustQuery("select cast('2020-05-28 23:59:59 00:00:00' as datetime)").Check(testkit.Rows("2020-05-28 23:59:59"))
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1292 Truncated incorrect datetime value: '2020-05-28 23:59:59 00:00:00'"))
tk.MustExec("drop table if exists t_issue_22231")
}
func (s *testSuite) TestIssue22201(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustQuery("SELECT HEX(WEIGHT_STRING('ab' AS BINARY(1000000000000000000)));").Check(testkit.Rows("<nil>"))
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1301 Result of cast_as_binary() was larger than max_allowed_packet (67108864) - truncated"))
tk.MustQuery("SELECT HEX(WEIGHT_STRING('ab' AS char(1000000000000000000)));").Check(testkit.Rows("<nil>"))
tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1301 Result of weight_string() was larger than max_allowed_packet (67108864) - truncated"))
}
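// TestIssue22941 checks IS NULL / IS NOT NULL on inner-side columns of a LEFT JOIN, with and without a
// correlated scalar subquery in the select list.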
func (s *testSuiteP1) TestIssue22941(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists m, mp")
tk.MustExec(`CREATE TABLE m (
mid varchar(50) NOT NULL,
ParentId varchar(50) DEFAULT NULL,
PRIMARY KEY (mid),
KEY ind_bm_parent (ParentId,mid)
)`)
// mp should have more columns than m
tk.MustExec(`CREATE TABLE mp (
mpid bigint(20) unsigned NOT NULL DEFAULT '0',
mid varchar(50) DEFAULT NULL COMMENT '模块主键',
sid int,
PRIMARY KEY (mpid)
);`)
tk.MustExec(`insert into mp values("1","1","0");`)
tk.MustExec(`insert into m values("0", "0");`)
rs := tk.MustQuery(`SELECT ( SELECT COUNT(1) FROM m WHERE ParentId = c.mid ) expand, bmp.mpid, bmp.mpid IS NULL,bmp.mpid IS NOT NULL, sid FROM m c LEFT JOIN mp bmp ON c.mid = bmp.mid WHERE c.ParentId = '0'`)
rs.Check(testkit.Rows("1 <nil> 1 0 <nil>"))
rs = tk.MustQuery(`SELECT bmp.mpid, bmp.mpid IS NULL,bmp.mpid IS NOT NULL FROM m c LEFT JOIN mp bmp ON c.mid = bmp.mid WHERE c.ParentId = '0'`)
rs.Check(testkit.Rows("<nil> 1 0"))
}
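// TestTxnWriteThroughputSLI checks transaction write-throughput SLI accounting: small vs. large transactions,
// statements that invalidate the SLI (insert/replace ... select), failed commits, and Reset.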
func (s *testSerialSuite) TestTxnWriteThroughputSLI(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int key, b int)")
c.Assert(failpoint.Enable("github.com/pingcap/tidb/util/sli/CheckTxnWriteThroughput", "return(true)"), IsNil)
defer func() {
err := failpoint.Disable("github.com/pingcap/tidb/util/sli/CheckTxnWriteThroughput")
c.Assert(err, IsNil)
}()
mustExec := func(sql string) {
tk.MustExec(sql)
tk.Se.GetTxnWriteThroughputSLI().FinishExecuteStmt(time.Second, tk.Se.AffectedRows(), tk.Se.GetSessionVars().InTxn())
}
errExec := func(sql string) {
_, err := tk.Exec(sql)
c.Assert(err, NotNil)
tk.Se.GetTxnWriteThroughputSLI().FinishExecuteStmt(time.Second, tk.Se.AffectedRows(), tk.Se.GetSessionVars().InTxn())
}
// Test insert in small txn
mustExec("insert into t values (1,3),(2,4)")
writeSLI := tk.Se.GetTxnWriteThroughputSLI()
c.Assert(writeSLI.IsInvalid(), Equals, false)
c.Assert(writeSLI.IsSmallTxn(), Equals, true)
c.Assert(tk.Se.GetTxnWriteThroughputSLI().String(), Equals, "invalid: false, affectRow: 2, writeSize: 58, readKeys: 0, writeKeys: 2, writeTime: 1s")
tk.Se.GetTxnWriteThroughputSLI().Reset()
// Test insert ... select ... from
mustExec("insert into t select b, a from t")
c.Assert(writeSLI.IsInvalid(), Equals, true)
c.Assert(writeSLI.IsSmallTxn(), Equals, true)
c.Assert(tk.Se.GetTxnWriteThroughputSLI().String(), Equals, "invalid: true, affectRow: 2, writeSize: 58, readKeys: 0, writeKeys: 2, writeTime: 1s")
tk.Se.GetTxnWriteThroughputSLI().Reset()
// Test for delete
mustExec("delete from t")
c.Assert(tk.Se.GetTxnWriteThroughputSLI().String(), Equals, "invalid: false, affectRow: 4, writeSize: 76, readKeys: 0, writeKeys: 4, writeTime: 1s")
tk.Se.GetTxnWriteThroughputSLI().Reset()
// Test insert not in small txn
mustExec("begin")
for i := 0; i < 20; i++ {
mustExec(fmt.Sprintf("insert into t values (%v,%v)", i, i))
c.Assert(writeSLI.IsSmallTxn(), Equals, true)
}
// Statements that affect 0 rows shouldn't be recorded in the write time.
mustExec("select count(*) from t")
mustExec("select * from t")
mustExec("insert into t values (20,20)")
c.Assert(writeSLI.IsSmallTxn(), Equals, false)
mustExec("commit")
c.Assert(writeSLI.IsInvalid(), Equals, false)
c.Assert(tk.Se.GetTxnWriteThroughputSLI().String(), Equals, "invalid: false, affectRow: 21, writeSize: 609, readKeys: 0, writeKeys: 21, writeTime: 22s")
tk.Se.GetTxnWriteThroughputSLI().Reset()
// Test that the SLI becomes invalid when the transaction contains a replace ... select ... from ... statement.
mustExec("delete from t")
tk.Se.GetTxnWriteThroughputSLI().Reset()
mustExec("begin")
mustExec("insert into t values (1,3),(2,4)")
mustExec("replace into t select b, a from t")
mustExec("commit")
c.Assert(writeSLI.IsInvalid(), Equals, true)
c.Assert(tk.Se.GetTxnWriteThroughputSLI().String(), Equals, "invalid: true, affectRow: 4, writeSize: 116, readKeys: 0, writeKeys: 4, writeTime: 3s")
tk.Se.GetTxnWriteThroughputSLI().Reset()
// Test that the last failed transaction's information is cleaned up.
err := failpoint.Disable("github.com/pingcap/tidb/util/sli/CheckTxnWriteThroughput")
c.Assert(err, IsNil)
mustExec("begin")
mustExec("insert into t values (1,3),(2,4)")
errExec("commit")
c.Assert(tk.Se.GetTxnWriteThroughputSLI().String(), Equals, "invalid: false, affectRow: 0, writeSize: 0, readKeys: 0, writeKeys: 0, writeTime: 0s")
c.Assert(failpoint.Enable("github.com/pingcap/tidb/util/sli/CheckTxnWriteThroughput", "return(true)"), IsNil)
mustExec("begin")
mustExec("insert into t values (5, 6)")
mustExec("commit")
c.Assert(tk.Se.GetTxnWriteThroughputSLI().String(), Equals, "invalid: false, affectRow: 1, writeSize: 29, readKeys: 0, writeKeys: 1, writeTime: 2s")
// Test for reset
tk.Se.GetTxnWriteThroughputSLI().Reset()
c.Assert(tk.Se.GetTxnWriteThroughputSLI().String(), Equals, "invalid: false, affectRow: 0, writeSize: 0, readKeys: 0, writeKeys: 0, writeTime: 0s")
}
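// TestIssue23993 checks casts to TIME: out-of-range double/int/decimal values yield NULL, while the equivalent
// string is clamped to the TIME range (-838:59:59).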
func (s *testSuite) TestIssue23993(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
// Real cast to time should return NULL
tk.MustExec("drop table if exists t_issue_23993")
tk.MustExec("create table t_issue_23993(a double)")
tk.MustExec("insert into t_issue_23993 values(-790822912)")
tk.MustQuery("select cast(a as time) from t_issue_23993").Check(testkit.Rows("<nil>"))
tk.MustQuery("select a from t_issue_23993 where cast(a as time)").Check(testkit.Rows())
// Int cast to time should return NULL
tk.MustExec("drop table if exists t_issue_23993")
tk.MustExec("create table t_issue_23993(a int)")
tk.MustExec("insert into t_issue_23993 values(-790822912)")
tk.MustQuery("select cast(a as time) from t_issue_23993").Check(testkit.Rows("<nil>"))
tk.MustQuery("select a from t_issue_23993 where cast(a as time)").Check(testkit.Rows())
// Decimal cast to time should return NULL
tk.MustExec("drop table if exists t_issue_23993")
tk.MustExec("create table t_issue_23993(a decimal)")
tk.MustExec("insert into t_issue_23993 values(-790822912)")
tk.MustQuery("select cast(a as time) from t_issue_23993").Check(testkit.Rows("<nil>"))
tk.MustQuery("select a from t_issue_23993 where cast(a as time)").Check(testkit.Rows())
// String cast to time should not return NULL
tk.MustExec("drop table if exists t_issue_23993")
tk.MustExec("create table t_issue_23993(a varchar(255))")
tk.MustExec("insert into t_issue_23993 values('-790822912')")
tk.MustQuery("select cast(a as time) from t_issue_23993").Check(testkit.Rows("-838:59:59"))
tk.MustQuery("select a from t_issue_23993 where cast(a as time)").Check(testkit.Rows("-790822912"))
}
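// TestProjectionBitType checks that bit-column default values project identically for clustered and
// nonclustered primary keys, with vectorized expressions both on and off.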
func (s *testSuiteP2) TestProjectionBitType(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t(k1 int, v bit(34) DEFAULT b'111010101111001001100111101111111', primary key(k1) clustered);")
tk.MustExec("create table t1(k1 int, v bit(34) DEFAULT b'111010101111001001100111101111111', primary key(k1) nonclustered);")
tk.MustExec("insert into t(k1) select 1;")
tk.MustExec("insert into t1(k1) select 1;")
tk.MustExec("set @@tidb_enable_vectorized_expression = 0;")
// The following SQLs should return the same result.
tk.MustQuery("(select * from t where false) union(select * from t for update);").Check(testkit.Rows("1 \x01\xd5\xe4\xcf\u007f"))
tk.MustQuery("(select * from t1 where false) union(select * from t1 for update);").Check(testkit.Rows("1 \x01\xd5\xe4\xcf\u007f"))
tk.MustExec("set @@tidb_enable_vectorized_expression = 1;")
tk.MustQuery("(select * from t where false) union(select * from t for update);").Check(testkit.Rows("1 \x01\xd5\xe4\xcf\u007f"))
tk.MustQuery("(select * from t1 where false) union(select * from t1 for update);").Check(testkit.Rows("1 \x01\xd5\xe4\xcf\u007f"))
}
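// TestIssue23609 checks joins and comparisons between timestamp and year columns; cross-type equality matches
// nothing here and no warnings are produced.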
func (s *testSuite) TestIssue23609(c *C) {
tk := testkit.NewTestKitWithInit(c, s.store)
tk.MustExec("drop table if exists t1")
tk.MustExec("CREATE TABLE `t1` (\n `a` timestamp NULL DEFAULT NULL,\n `b` year(4) DEFAULT NULL,\n KEY `a` (`a`),\n KEY `b` (`b`)\n)")
tk.MustExec("insert into t1 values(\"2002-10-03 04:28:53\",2000), (\"2002-10-03 04:28:53\",2002), (NULL, 2002)")
tk.MustQuery("select /*+ inl_join (x,y) */ * from t1 x cross join t1 y on x.a=y.b").Check(testkit.Rows())
tk.MustQuery("select * from t1 x cross join t1 y on x.a>y.b order by x.a, x.b, y.a, y.b").Check(testkit.Rows("2002-10-03 04:28:53 2000 <nil> 2002", "2002-10-03 04:28:53 2000 2002-10-03 04:28:53 2000", "2002-10-03 04:28:53 2000 2002-10-03 04:28:53 2002", "2002-10-03 04:28:53 2002 <nil> 2002", "2002-10-03 04:28:53 2002 2002-10-03 04:28:53 2000", "2002-10-03 04:28:53 2002 2002-10-03 04:28:53 2002"))
tk.MustQuery("select * from t1 where a = b").Check(testkit.Rows())
tk.MustQuery("select * from t1 where a < b").Check(testkit.Rows())
c.Assert(tk.Se.GetSessionVars().StmtCtx.WarningCount(), Equals, uint16(0))
}
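// TestIssue24091 checks that a NULL row is handled correctly by a hash partition expression containing a
// division by zero.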
func (s *testSuite1) TestIssue24091(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test;")
tk.MustExec("drop table if exists t;")
defer tk.MustExec("drop table if exists t;")
tk.MustExec("create table t(a int) partition by hash (a div 0) partitions 10;")
tk.MustExec("insert into t values (NULL);")
tk.MustQuery("select null div 0;").Check(testkit.Rows("<nil>"))
tk.MustQuery("select * from t;").Check(testkit.Rows("<nil>"))
}
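// TestIssue24210 checks error propagation when baseExecutor.Open fails for the Projection, HashAgg, StreamAgg,
// and Selection executors, using mock failpoints.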
func (s *testSerialSuite) TestIssue24210(c *C) {
tk := testkit.NewTestKit(c, s.store)
// for ProjectionExec
c.Assert(failpoint.Enable("github.com/pingcap/tidb/executor/mockProjectionExecBaseExecutorOpenReturnedError", `return(true)`), IsNil)
_, err := tk.Exec("select a from (select 1 as a, 2 as b) t")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "mock ProjectionExec.baseExecutor.Open returned error")
err = failpoint.Disable("github.com/pingcap/tidb/executor/mockProjectionExecBaseExecutorOpenReturnedError")
c.Assert(err, IsNil)
// for HashAggExec
c.Assert(failpoint.Enable("github.com/pingcap/tidb/executor/mockHashAggExecBaseExecutorOpenReturnedError", `return(true)`), IsNil)
_, err = tk.Exec("select sum(a) from (select 1 as a, 2 as b) t group by b")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "mock HashAggExec.baseExecutor.Open returned error")
err = failpoint.Disable("github.com/pingcap/tidb/executor/mockHashAggExecBaseExecutorOpenReturnedError")
c.Assert(err, IsNil)
// for StreamAggExec
c.Assert(failpoint.Enable("github.com/pingcap/tidb/executor/mockStreamAggExecBaseExecutorOpenReturnedError", `return(true)`), IsNil)
_, err = tk.Exec("select sum(a) from (select 1 as a, 2 as b) t")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "mock StreamAggExec.baseExecutor.Open returned error")
err = failpoint.Disable("github.com/pingcap/tidb/executor/mockStreamAggExecBaseExecutorOpenReturnedError")
c.Assert(err, IsNil)
// for SelectionExec
c.Assert(failpoint.Enable("github.com/pingcap/tidb/executor/mockSelectionExecBaseExecutorOpenReturnedError", `return(true)`), IsNil)
_, err = tk.Exec("select * from (select rand() as a) t where a > 0")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "mock SelectionExec.baseExecutor.Open returned error")
err = failpoint.Disable("github.com/pingcap/tidb/executor/mockSelectionExecBaseExecutorOpenReturnedError")
c.Assert(err, IsNil)
}
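// TestDeadlocksTable pushes two records into the global deadlock history and verifies how they are rendered in
// information_schema.deadlocks.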
func (s *testSerialSuite) TestDeadlocksTable(c *C) {
deadlockhistory.GlobalDeadlockHistory.Clear()
deadlockhistory.GlobalDeadlockHistory.Resize(10)
occurTime := time.Date(2021, 5, 10, 1, 2, 3, 456789000, time.Local)
rec := &deadlockhistory.DeadlockRecord{
OccurTime: occurTime,
IsRetryable: false,
WaitChain: []deadlockhistory.WaitChainItem{
{
TryLockTxn: 101,
SQLDigest: "aabbccdd",
Key: []byte("k1"),
AllSQLDigests: nil,
TxnHoldingLock: 102,
},
{
TryLockTxn: 102,
SQLDigest: "ddccbbaa",
Key: []byte("k2"),
AllSQLDigests: []string{"sql1"},
TxnHoldingLock: 101,
},
},
}
deadlockhistory.GlobalDeadlockHistory.Push(rec)
occurTime2 := time.Date(2022, 6, 11, 2, 3, 4, 987654000, time.Local)
rec2 := &deadlockhistory.DeadlockRecord{
OccurTime: occurTime2,
IsRetryable: true,
WaitChain: []deadlockhistory.WaitChainItem{
{
TryLockTxn: 201,
AllSQLDigests: []string{},
TxnHoldingLock: 202,
},
{
TryLockTxn: 202,
AllSQLDigests: []string{"sql1", "sql2, sql3"},
TxnHoldingLock: 203,
},
{
TryLockTxn: 203,
TxnHoldingLock: 201,
},
},
}
deadlockhistory.GlobalDeadlockHistory.Push(rec2)
// `Push` sets the record's ID, and IDs within a single DeadlockHistory are monotonically increasing, so we
// fetch them here to build the expected rows.
id1 := strconv.FormatUint(rec.ID, 10)
id2 := strconv.FormatUint(rec2.ID, 10)
c.Assert(failpoint.Enable("github.com/pingcap/tidb/expression/sqlDigestRetrieverSkipRetrieveGlobal", "return"), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/expression/sqlDigestRetrieverSkipRetrieveGlobal"), IsNil)
}()
tk := testkit.NewTestKit(c, s.store)
tk.MustQuery("select * from information_schema.deadlocks").Check(
testutil.RowsWithSep("/",
id1+"/2021-05-10 01:02:03.456789/0/101/aabbccdd/<nil>/6B31/<nil>/102",
id1+"/2021-05-10 01:02:03.456789/0/102/ddccbbaa/<nil>/6B32/<nil>/101",
id2+"/2022-06-11 02:03:04.987654/1/201/<nil>/<nil>/<nil>/<nil>/202",
id2+"/2022-06-11 02:03:04.987654/1/202/<nil>/<nil>/<nil>/<nil>/203",
id2+"/2022-06-11 02:03:04.987654/1/203/<nil>/<nil>/<nil>/<nil>/201",
))
}
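// TestExprBlackListForEnum checks that blacklisting the enum type in mysql.expr_pushdown_blacklist blocks
// pushdown of aggregates, expressions, and index range scans over enum columns.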
func (s *testSerialSuite) TestExprBlackListForEnum(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t(a enum('a','b','c'), b enum('a','b','c'), c int, index idx(b,a));")
tk.MustExec("insert into t values(1,1,1),(2,2,2),(3,3,3);")
checkFuncPushDown := func(rows [][]interface{}, keyWord string) bool {
for _, line := range rows {
// Agg/Expr push down
if line[2].(string) == "cop[tikv]" && strings.Contains(line[4].(string), keyWord) {
return true
}
// access index
if line[2].(string) == "cop[tikv]" && strings.Contains(line[3].(string), keyWord) {
return true
}
}
return false
}
// Test agg(enum) push down
tk.MustExec("insert into mysql.expr_pushdown_blacklist(name) values('enum');")
tk.MustExec("admin reload expr_pushdown_blacklist;")
rows := tk.MustQuery("desc format='brief' select /*+ HASH_AGG() */ max(a) from t;").Rows()
c.Assert(checkFuncPushDown(rows, "max"), IsFalse)
rows = tk.MustQuery("desc format='brief' select /*+ STREAM_AGG() */ max(a) from t;").Rows()
c.Assert(checkFuncPushDown(rows, "max"), IsFalse)
tk.MustExec("delete from mysql.expr_pushdown_blacklist;")
tk.MustExec("admin reload expr_pushdown_blacklist;")
rows = tk.MustQuery("desc format='brief' select /*+ HASH_AGG() */ max(a) from t;").Rows()
c.Assert(checkFuncPushDown(rows, "max"), IsTrue)
rows = tk.MustQuery("desc format='brief' select /*+ STREAM_AGG() */ max(a) from t;").Rows()
c.Assert(checkFuncPushDown(rows, "max"), IsTrue)
// Test expr(enum) push down
tk.MustExec("insert into mysql.expr_pushdown_blacklist(name) values('enum');")
tk.MustExec("admin reload expr_pushdown_blacklist;")
rows = tk.MustQuery("desc format='brief' select * from t where a + b;").Rows()
c.Assert(checkFuncPushDown(rows, "plus"), IsFalse)
rows = tk.MustQuery("desc format='brief' select * from t where a + b;").Rows()
c.Assert(checkFuncPushDown(rows, "plus"), IsFalse)
tk.MustExec("delete from mysql.expr_pushdown_blacklist;")
tk.MustExec("admin reload expr_pushdown_blacklist;")
rows = tk.MustQuery("desc format='brief' select * from t where a + b;").Rows()
c.Assert(checkFuncPushDown(rows, "plus"), IsTrue)
rows = tk.MustQuery("desc format='brief' select * from t where a + b;").Rows()
c.Assert(checkFuncPushDown(rows, "plus"), IsTrue)
// Test enum index
tk.MustExec("insert into mysql.expr_pushdown_blacklist(name) values('enum');")
tk.MustExec("admin reload expr_pushdown_blacklist;")
rows = tk.MustQuery("desc format='brief' select * from t where b = 1;").Rows()
c.Assert(checkFuncPushDown(rows, "index:idx(b)"), IsFalse)
rows = tk.MustQuery("desc format='brief' select * from t where b = 'a';").Rows()
c.Assert(checkFuncPushDown(rows, "index:idx(b)"), IsFalse)
rows = tk.MustQuery("desc format='brief' select * from t where b > 1;").Rows()
c.Assert(checkFuncPushDown(rows, "index:idx(b)"), IsFalse)
rows = tk.MustQuery("desc format='brief' select * from t where b > 'a';").Rows()
c.Assert(checkFuncPushDown(rows, "index:idx(b)"), IsFalse)
tk.MustExec("delete from mysql.expr_pushdown_blacklist;")
tk.MustExec("admin reload expr_pushdown_blacklist;")
rows = tk.MustQuery("desc format='brief' select * from t where b = 1 and a = 1;").Rows()
c.Assert(checkFuncPushDown(rows, "index:idx(b, a)"), IsTrue)
rows = tk.MustQuery("desc format='brief' select * from t where b = 'a' and a = 'a';").Rows()
c.Assert(checkFuncPushDown(rows, "index:idx(b, a)"), IsTrue)
rows = tk.MustQuery("desc format='brief' select * from t where b = 1 and a > 1;").Rows()
c.Assert(checkFuncPushDown(rows, "index:idx(b, a)"), IsTrue)
rows = tk.MustQuery("desc format='brief' select * from t where b = 1 and a > 'a'").Rows()
c.Assert(checkFuncPushDown(rows, "index:idx(b, a)"), IsTrue)
}
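// TestResourceGroupTag intercepts unistore RPC requests to verify that each statement's requests carry a
// resource group tag with the expected SQL digest, plan digest, and label.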
func (s *testResourceTagSuite) TestResourceGroupTag(c *C) {
if israce.RaceEnabled {
c.Skip("unstable, skip it and fix it before 20210622")
}
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t(a int, b int, unique index idx(a));")
tbInfo := testGetTableByName(c, tk.Se, "test", "t")
// Enable Top SQL
topsqlstate.GlobalState.Enable.Store(true)
config.UpdateGlobal(func(conf *config.Config) {
conf.TopSQL.ReceiverAddress = "mock-agent"
})
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/mockstore/unistore/unistoreRPCClientSendHook", `return(true)`), IsNil)
defer failpoint.Disable("github.com/pingcap/tidb/store/mockstore/unistore/unistoreRPCClientSendHook")
var sqlDigest, planDigest *parser.Digest
var tagLabel tipb.ResourceGroupTagLabel
checkFn := func() {}
unistore.UnistoreRPCClientSendHook = func(req *tikvrpc.Request) {
var startKey []byte
var ctx *kvrpcpb.Context
switch req.Type {
case tikvrpc.CmdGet:
request := req.Get()
startKey = request.Key
ctx = request.Context
case tikvrpc.CmdBatchGet:
request := req.BatchGet()
startKey = request.Keys[0]
ctx = request.Context
case tikvrpc.CmdPrewrite:
request := req.Prewrite()
startKey = request.Mutations[0].Key
ctx = request.Context
case tikvrpc.CmdCommit:
request := req.Commit()
startKey = request.Keys[0]
ctx = request.Context
case tikvrpc.CmdCop:
request := req.Cop()
startKey = request.Ranges[0].Start
ctx = request.Context
case tikvrpc.CmdPessimisticLock:
request := req.PessimisticLock()
startKey = request.PrimaryLock
ctx = request.Context
}
tid := tablecodec.DecodeTableID(startKey)
if tid != tbInfo.Meta().ID {
return
}
if ctx == nil {
return
}
tag := &tipb.ResourceGroupTag{}
err := tag.Unmarshal(ctx.ResourceGroupTag)
c.Assert(err, IsNil)
sqlDigest = parser.NewDigest(tag.SqlDigest)
planDigest = parser.NewDigest(tag.PlanDigest)
tagLabel = *tag.Label
checkFn()
}
resetVars := func() {
sqlDigest = parser.NewDigest(nil)
planDigest = parser.NewDigest(nil)
}
cases := []struct {
sql string
tagLabels map[tipb.ResourceGroupTagLabel]struct{}
ignore bool
}{
{
sql: "insert into t values(1,1),(2,2),(3,3)",
tagLabels: map[tipb.ResourceGroupTagLabel]struct{}{
tipb.ResourceGroupTagLabel_ResourceGroupTagLabelIndex: {},
},
},
{
sql: "select * from t use index (idx) where a=1",
tagLabels: map[tipb.ResourceGroupTagLabel]struct{}{
tipb.ResourceGroupTagLabel_ResourceGroupTagLabelRow: {},
tipb.ResourceGroupTagLabel_ResourceGroupTagLabelIndex: {},
},
},
{
sql: "select * from t use index (idx) where a in (1,2,3)",
tagLabels: map[tipb.ResourceGroupTagLabel]struct{}{
tipb.ResourceGroupTagLabel_ResourceGroupTagLabelRow: {},
tipb.ResourceGroupTagLabel_ResourceGroupTagLabelIndex: {},
},
},
{
sql: "select * from t use index (idx) where a>1",
tagLabels: map[tipb.ResourceGroupTagLabel]struct{}{
tipb.ResourceGroupTagLabel_ResourceGroupTagLabelRow: {},
tipb.ResourceGroupTagLabel_ResourceGroupTagLabelIndex: {},
},
},
{
sql: "select * from t where b>1",
tagLabels: map[tipb.ResourceGroupTagLabel]struct{}{
tipb.ResourceGroupTagLabel_ResourceGroupTagLabelRow: {},
},
},
{
sql: "select a from t use index (idx) where a>1",
tagLabels: map[tipb.ResourceGroupTagLabel]struct{}{
tipb.ResourceGroupTagLabel_ResourceGroupTagLabelIndex: {},
},
},
{
sql: "begin pessimistic",
ignore: true,
},
{
sql: "insert into t values(4,4)",
tagLabels: map[tipb.ResourceGroupTagLabel]struct{}{
tipb.ResourceGroupTagLabel_ResourceGroupTagLabelRow: {},
tipb.ResourceGroupTagLabel_ResourceGroupTagLabelIndex: {},
},
},
{
sql: "commit",
ignore: true,
},
{
sql: "update t set a=5,b=5 where a=5",
tagLabels: map[tipb.ResourceGroupTagLabel]struct{}{
tipb.ResourceGroupTagLabel_ResourceGroupTagLabelIndex: {},
},
},
{
sql: "replace into t values(6,6)",
tagLabels: map[tipb.ResourceGroupTagLabel]struct{}{
tipb.ResourceGroupTagLabel_ResourceGroupTagLabelIndex: {},
},
},
}
for _, ca := range cases {
resetVars()
commentf := Commentf("%v", ca.sql)
_, expectSQLDigest := parser.NormalizeDigest(ca.sql)
var expectPlanDigest *parser.Digest
checkCnt := 0
checkFn = func() {
if ca.ignore {
return
}
if expectPlanDigest == nil {
info := tk.Se.ShowProcess()
c.Assert(info, NotNil)
p, ok := info.Plan.(plannercore.Plan)
c.Assert(ok, IsTrue)
_, expectPlanDigest = plannercore.NormalizePlan(p)
}
c.Assert(sqlDigest.String(), Equals, expectSQLDigest.String(), commentf)
c.Assert(planDigest.String(), Equals, expectPlanDigest.String())
_, ok := ca.tagLabels[tagLabel]
c.Assert(ok, Equals, true)
checkCnt++
}
if strings.HasPrefix(ca.sql, "select") {
tk.MustQuery(ca.sql)
} else {
tk.MustExec(ca.sql)
}
if ca.ignore {
continue
}
c.Assert(checkCnt > 0, IsTrue, commentf)
}
}
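// TestIssue24933 checks views defined as `select *` over subqueries of aggregate expressions, covering aliases,
// window functions, and simple expressions.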
func (s *testSuite) TestIssue24933(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t;")
tk.MustExec("drop view if exists v;")
tk.MustExec("create table t(a int);")
tk.MustExec("insert into t values(1), (2), (3);")
tk.MustExec("create definer='root'@'localhost' view v as select count(*) as c1 from t;")
rows := tk.MustQuery("select * from v;")
rows.Check(testkit.Rows("3"))
// Test a subquery whose outer field list is a wildcard.
tk.MustExec("drop view v;")
tk.MustExec("create definer='root'@'localhost' view v as select * from (select count(*) from t) s;")
rows = tk.MustQuery("select * from v order by 1;")
rows.Check(testkit.Rows("3"))
tk.MustExec("drop view v;")
tk.MustExec("create definer='root'@'localhost' view v as select * from (select avg(a) from t group by a) s;")
rows = tk.MustQuery("select * from v order by 1;")
rows.Check(testkit.Rows("1.0000", "2.0000", "3.0000"))
tk.MustExec("drop view v;")
tk.MustExec("create definer='root'@'localhost' view v as select * from (select sum(a) from t group by a) s;")
rows = tk.MustQuery("select * from v order by 1;")
rows.Check(testkit.Rows("1", "2", "3"))
tk.MustExec("drop view v;")
tk.MustExec("create definer='root'@'localhost' view v as select * from (select group_concat(a) from t group by a) s;")
rows = tk.MustQuery("select * from v order by 1;")
rows.Check(testkit.Rows("1", "2", "3"))
// Test alias names.
tk.MustExec("drop view v;")
tk.MustExec("create definer='root'@'localhost' view v as select * from (select count(0) as c1 from t) s;")
rows = tk.MustQuery("select * from v order by 1;")
rows.Check(testkit.Rows("3"))
tk.MustExec("drop view v;")
tk.MustExec("create definer='root'@'localhost' view v as select * from (select count(*) as c1 from t) s;")
rows = tk.MustQuery("select * from v order by 1;")
rows.Check(testkit.Rows("3"))
tk.MustExec("drop view v;")
tk.MustExec("create definer='root'@'localhost' view v as select * from (select group_concat(a) as `concat(a)` from t group by a) s;")
rows = tk.MustQuery("select * from v order by 1;")
rows.Check(testkit.Rows("1", "2", "3"))
// Test firstrow.
tk.MustExec("drop view v;")
tk.MustExec("create definer='root'@'localhost' view v as select * from (select a from t group by a) s;")
rows = tk.MustQuery("select * from v order by 1;")
rows.Check(testkit.Rows("1", "2", "3"))
// Test direct select.
err := tk.ExecToErr("SELECT `s`.`count(a)` FROM (SELECT COUNT(`a`) FROM `test`.`t`) AS `s`")
c.Assert(err.Error(), Equals, "[planner:1054]Unknown column 's.count(a)' in 'field list'")
tk.MustExec("drop view v;")
tk.MustExec("create definer='root'@'localhost' view v as select * from (select count(a) from t) s;")
rows = tk.MustQuery("select * from v")
rows.Check(testkit.Rows("3"))
// Test window function.
tk.MustExec("drop table if exists t;")
tk.MustExec("create table t(c1 int);")
tk.MustExec("insert into t values(111), (222), (333);")
tk.MustExec("drop view if exists v;")
tk.MustExec("create definer='root'@'localhost' view v as (select * from (select row_number() over (order by c1) from t) s);")
rows = tk.MustQuery("select * from v;")
rows.Check(testkit.Rows("1", "2", "3"))
tk.MustExec("drop view if exists v;")
tk.MustExec("create definer='root'@'localhost' view v as (select * from (select c1, row_number() over (order by c1) from t) s);")
rows = tk.MustQuery("select * from v;")
rows.Check(testkit.Rows("111 1", "222 2", "333 3"))
// Test simple expr.
tk.MustExec("drop view if exists v;")
tk.MustExec("create definer='root'@'localhost' view v as (select * from (select c1 or 0 from t) s)")
rows = tk.MustQuery("select * from v;")
rows.Check(testkit.Rows("1", "1", "1"))
rows = tk.MustQuery("select `c1 or 0` from v;")
rows.Check(testkit.Rows("1", "1", "1"))
tk.MustExec("drop view v;")
}
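// TestInvalidReadTemporaryTable checks that stale reads of temporary tables are rejected, while tidb_snapshot
// reads of global temporary tables return empty results for tool compatibility.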
func (s *testStaleTxnSuite) TestInvalidReadTemporaryTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
// For mocktikv, the safe point is not initialized, so we insert it manually for snapshot reads to use.
safePointName := "tikv_gc_safe_point"
safePointValue := "20160102-15:04:05 -0700"
safePointComment := "All versions after safe point can be accessed. (DO NOT EDIT)"
updateSafePoint := fmt.Sprintf(`INSERT INTO mysql.tidb VALUES ('%[1]s', '%[2]s', '%[3]s')
ON DUPLICATE KEY
UPDATE variable_value = '%[2]s', comment = '%[3]s'`, safePointName, safePointValue, safePointComment)
tk.MustExec(updateSafePoint)
tk.MustExec("use test")
tk.MustExec("drop table if exists tmp1")
tk.MustExec("create global temporary table tmp1 " +
"(id int not null primary key, code int not null, value int default null, unique key code(code))" +
"on commit delete rows")
tk.MustExec("use test")
tk.MustExec("drop table if exists tmp2")
tk.MustExec("create temporary table tmp2 (id int not null primary key, code int not null, value int default null, unique key code(code));")
tk.MustExec("create table tmp3 (id int not null primary key, code int not null, value int default null, unique key code(code));")
tk.MustExec("create table tmp4 (id int not null primary key, code int not null, value int default null, unique key code(code));")
tk.MustExec("create temporary table tmp5(id int);")
tk.MustExec("create table tmp6 (id int primary key);")
// Sleep 1us so the stale-read timestamp falls after table creation.
time.Sleep(time.Microsecond)
queries := []struct {
sql string
}{
{
sql: "select * from tmp1 where id=1",
},
{
sql: "select * from tmp1 where code=1",
},
{
sql: "select * from tmp1 where id in (1, 2, 3)",
},
{
sql: "select * from tmp1 where code in (1, 2, 3)",
},
{
sql: "select * from tmp1 where id > 1",
},
{
sql: "select /*+use_index(tmp1, code)*/ * from tmp1 where code > 1",
},
{
sql: "select /*+use_index(tmp1, code)*/ code from tmp1 where code > 1",
},
{
sql: "select /*+ use_index_merge(tmp1, primary, code) */ * from tmp1 where id > 1 or code > 2",
},
}
addStaleReadToSQL := func(sql string) string {
idx := strings.Index(sql, " where ")
if idx < 0 {
return ""
}
return sql[0:idx] + " as of timestamp NOW(6)" + sql[idx:]
}
genLocalTemporarySQL := func(sql string) string {
return strings.Replace(sql, "tmp1", "tmp2", -1)
}
for _, query := range queries {
localSQL := genLocalTemporarySQL(query.sql)
queries = append(queries, struct{ sql string }{sql: localSQL})
}
for _, query := range queries {
sql := addStaleReadToSQL(query.sql)
if sql != "" {
tk.MustGetErrMsg(sql, "can not stale read temporary table")
}
}
tk.MustExec("start transaction read only as of timestamp NOW(6)")
for _, query := range queries {
tk.MustGetErrMsg(query.sql, "can not stale read temporary table")
}
tk.MustExec("commit")
for _, query := range queries {
tk.MustExec(query.sql)
}
// Test normal table when a local temporary table exists.
tk.MustExec("insert into tmp6 values(1);")
tk.MustExec("set @a=now(6);")
time.Sleep(time.Microsecond)
tk.MustExec("drop table tmp6")
tk.MustExec("create table tmp6 (id int primary key);")
tk.MustQuery("select * from tmp6 as of timestamp(@a) where id=1;").Check(testkit.Rows("1"))
tk.MustQuery("select * from tmp4 as of timestamp(@a), tmp3 as of timestamp(@a) where tmp3.id=1;")
tk.MustGetErrMsg("select * from tmp4 as of timestamp(@a), tmp2 as of timestamp(@a) where tmp2.id=1;", "can not stale read temporary table")
tk.MustExec("set transaction read only as of timestamp NOW(6)")
tk.MustExec("start transaction")
for _, query := range queries {
tk.MustGetErrMsg(query.sql, "can not stale read temporary table")
}
tk.MustExec("commit")
for _, query := range queries {
tk.MustExec(query.sql)
}
tk.MustExec("set @@tidb_snapshot=NOW(6)")
for _, query := range queries {
// Historical read of local temporary tables is forbidden.
if strings.Contains(query.sql, "tmp2") {
tk.MustGetErrMsg(query.sql, "can not read local temporary table when 'tidb_snapshot' is set")
continue
}
// Will succeed here, for compatibility with tools like dumpling.
tk.MustQuery(query.sql).Check(testkit.Rows())
}
}
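// TestInvalidReadCacheTable checks that stale reads of cache tables are rejected, while historical reads via
// tidb_snapshot are allowed.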
func (s *testStaleTxnSuite) TestInvalidReadCacheTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
// For mocktikv, the safe point is not initialized, so we insert it manually for snapshot reads to use.
safePointName := "tikv_gc_safe_point"
safePointValue := "20160102-15:04:05 -0700"
safePointComment := "All versions after safe point can be accessed. (DO NOT EDIT)"
updateSafePoint := fmt.Sprintf(`INSERT INTO mysql.tidb VALUES ('%[1]s', '%[2]s', '%[3]s')
ON DUPLICATE KEY
UPDATE variable_value = '%[2]s', comment = '%[3]s'`, safePointName, safePointValue, safePointComment)
tk.MustExec(updateSafePoint)
tk.MustExec("use test")
tk.MustExec("drop table if exists cache_tmp1")
tk.MustExec("create table cache_tmp1 " +
"(id int not null primary key, code int not null, value int default null, unique key code(code))")
tk.MustExec("alter table cache_tmp1 cache")
tk.MustExec("drop table if exists cache_tmp2")
tk.MustExec("create table cache_tmp2 (id int not null primary key, code int not null, value int default null, unique key code(code));")
tk.MustExec("alter table cache_tmp2 cache")
tk.MustExec("drop table if exists cache_tmp3 , cache_tmp4, cache_tmp5")
tk.MustExec("create table cache_tmp3 (id int not null primary key, code int not null, value int default null, unique key code(code));")
tk.MustExec("create table cache_tmp4 (id int not null primary key, code int not null, value int default null, unique key code(code));")
tk.MustExec("create table cache_tmp5 (id int primary key);")
// Sleep 1us so the stale-read timestamp falls after table creation.
time.Sleep(time.Microsecond)
queries := []struct {
sql string
}{
{
sql: "select * from cache_tmp1 where id=1",
},
{
sql: "select * from cache_tmp1 where code=1",
},
{
sql: "select * from cache_tmp1 where id in (1, 2, 3)",
},
{
sql: "select * from cache_tmp1 where code in (1, 2, 3)",
},
{
sql: "select * from cache_tmp1 where id > 1",
},
{
sql: "select /*+use_index(cache_tmp1, code)*/ * from cache_tmp1 where code > 1",
},
{
sql: "select /*+use_index(cache_tmp1, code)*/ code from cache_tmp1 where code > 1",
},
}
addStaleReadToSQL := func(sql string) string {
idx := strings.Index(sql, " where ")
if idx < 0 {
return ""
}
return sql[0:idx] + " as of timestamp NOW(6)" + sql[idx:]
}
for _, query := range queries {
sql := addStaleReadToSQL(query.sql)
if sql != "" {
tk.MustGetErrMsg(sql, "can not stale read cache table")
}
}
tk.MustExec("start transaction read only as of timestamp NOW(6)")
for _, query := range queries {
tk.MustGetErrMsg(query.sql, "can not stale read cache table")
}
tk.MustExec("commit")
for _, query := range queries {
tk.MustExec(query.sql)
}
// Test normal table when a cache table exists.
tk.MustExec("insert into cache_tmp5 values(1);")
tk.MustExec("set @a=now(6);")
time.Sleep(time.Microsecond)
tk.MustExec("drop table cache_tmp5")
tk.MustExec("create table cache_tmp5 (id int primary key);")
tk.MustQuery("select * from cache_tmp5 as of timestamp(@a) where id=1;").Check(testkit.Rows("1"))
tk.MustQuery("select * from cache_tmp4 as of timestamp(@a), cache_tmp3 as of timestamp(@a) where cache_tmp3.id=1;")
tk.MustGetErrMsg("select * from cache_tmp4 as of timestamp(@a), cache_tmp2 as of timestamp(@a) where cache_tmp2.id=1;", "can not stale read cache table")
tk.MustExec("set transaction read only as of timestamp NOW(6)")
tk.MustExec("start transaction")
for _, query := range queries {
tk.MustGetErrMsg(query.sql, "can not stale read cache table")
}
tk.MustExec("commit")
for _, query := range queries {
tk.MustExec(query.sql)
}
tk.MustExec("set @@tidb_snapshot=NOW(6)")
for _, query := range queries {
// Historical read is allowed for cache tables.
tk.MustExec(query.sql)
}
}
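// TestTableSampleTemporaryTable checks TABLESAMPLE against temporary tables: empty results for global temporary
// tables and an error for local temporary tables.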
func (s *testSuite) TestTableSampleTemporaryTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
// For mocktikv, the safe point is not initialized, so we insert it manually for snapshot reads to use.
safePointName := "tikv_gc_safe_point"
safePointValue := "20160102-15:04:05 -0700"
safePointComment := "All versions after safe point can be accessed. (DO NOT EDIT)"
updateSafePoint := fmt.Sprintf(`INSERT INTO mysql.tidb VALUES ('%[1]s', '%[2]s', '%[3]s')
ON DUPLICATE KEY
UPDATE variable_value = '%[2]s', comment = '%[3]s'`, safePointName, safePointValue, safePointComment)
tk.MustExec(updateSafePoint)
tk.MustExec("use test")
tk.MustExec("drop table if exists tmp1")
tk.MustExec("create global temporary table tmp1 " +
"(id int not null primary key, code int not null, value int default null, unique key code(code))" +
"on commit delete rows")
tk.MustExec("use test")
tk.MustExec("drop table if exists tmp2")
tk.MustExec("create temporary table tmp2 (id int not null primary key, code int not null, value int default null, unique key code(code));")
// Sleep 1us so the snapshot timestamp falls after table creation.
time.Sleep(time.Microsecond)
// Test that TABLESAMPLE returns an empty result for global temporary tables.
tk.MustQuery("select * from tmp1 tablesample regions()").Check(testkit.Rows())
tk.MustExec("begin")
tk.MustExec("insert into tmp1 values (1, 1, 1)")
tk.MustQuery("select * from tmp1 tablesample regions()").Check(testkit.Rows())
tk.MustExec("commit")
// TABLESAMPLE on a global temporary table should not return an error, for compatibility with tools like dumpling.
tk.MustExec("set @@tidb_snapshot=NOW(6)")
tk.MustQuery("select * from tmp1 tablesample regions()").Check(testkit.Rows())
tk.MustExec("begin")
tk.MustQuery("select * from tmp1 tablesample regions()").Check(testkit.Rows())
tk.MustExec("commit")
tk.MustExec("set @@tidb_snapshot=''")
// Test that TABLESAMPLE returns an error for local temporary tables.
tk.MustGetErrMsg("select * from tmp2 tablesample regions()", "TABLESAMPLE clause can not be applied to local temporary tables")
tk.MustExec("begin")
tk.MustExec("insert into tmp2 values (1, 1, 1)")
tk.MustGetErrMsg("select * from tmp2 tablesample regions()", "TABLESAMPLE clause can not be applied to local temporary tables")
tk.MustExec("commit")
tk.MustGetErrMsg("select * from tmp2 tablesample regions()", "TABLESAMPLE clause can not be applied to local temporary tables")
}
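// TestIssue25506 checks UNION ALL over bit columns of different widths; values are zero-extended to the widest
// type.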
func (s *testSuite) TestIssue25506(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists tbl_3, tbl_23")
tk.MustExec("create table tbl_3 (col_15 bit(20))")
tk.MustExec("insert into tbl_3 values (0xFFFF)")
tk.MustExec("insert into tbl_3 values (0xFF)")
tk.MustExec("create table tbl_23 (col_15 bit(15))")
tk.MustExec("insert into tbl_23 values (0xF)")
tk.MustQuery("(select col_15 from tbl_23) union all (select col_15 from tbl_3 for update) order by col_15").Check(testkit.Rows("\x00\x00\x0F", "\x00\x00\xFF", "\x00\xFF\xFF"))
}
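// TestIssue26348 checks the precision of mixed varchar/decimal arithmetic, which is evaluated via double
// conversion and therefore differs from pure-decimal arithmetic.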
func (s *testSuite) TestIssue26348(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec(`CREATE TABLE t (
a varchar(8) DEFAULT NULL,
b varchar(8) DEFAULT NULL,
c decimal(20,2) DEFAULT NULL,
d decimal(15,8) DEFAULT NULL
);`)
tk.MustExec(`insert into t values(20210606, 20210606, 50000.00, 5.04600000);`)
tk.MustQuery(`select a * c *(d/36000) from t;`).Check(testkit.Rows("141642663.71666598"))
tk.MustQuery(`select cast(a as double) * cast(c as double) *cast(d/36000 as double) from t;`).Check(testkit.Rows("141642663.71666598"))
tk.MustQuery("select 20210606*50000.00*(5.04600000/36000)").Check(testkit.Rows("141642663.71666599297980"))
// Differs from MySQL because of constant folding.
tk.MustQuery("select \"20210606\"*50000.00*(5.04600000/36000)").Check(testkit.Rows("141642663.71666598"))
tk.MustQuery("select cast(\"20210606\" as double)*50000.00*(5.04600000/36000)").Check(testkit.Rows("141642663.71666598"))
}
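// TestIssue26532 checks that greatest/least over datetimes unioned with NULL keeps the datetime string form.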
func (s *testSuite) TestIssue26532(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustQuery("select greatest(cast(\"2020-01-01 01:01:01\" as datetime), cast(\"2019-01-01 01:01:01\" as datetime) )union select null;").Sort().Check(testkit.Rows("2020-01-01 01:01:01", "<nil>"))
tk.MustQuery("select least(cast(\"2020-01-01 01:01:01\" as datetime), cast(\"2019-01-01 01:01:01\" as datetime) )union select null;").Sort().Check(testkit.Rows("2019-01-01 01:01:01", "<nil>"))
tk.MustQuery("select greatest(\"2020-01-01 01:01:01\" ,\"2019-01-01 01:01:01\" )union select null;").Sort().Check(testkit.Rows("2020-01-01 01:01:01", "<nil>"))
tk.MustQuery("select least(\"2020-01-01 01:01:01\" , \"2019-01-01 01:01:01\" )union select null;").Sort().Check(testkit.Rows("2019-01-01 01:01:01", "<nil>"))
}
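// TestIssue25447 checks an index nested-loop join probing a table that has a virtual generated column.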
func (s *testSuite) TestIssue25447(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1(a int, b varchar(8))")
tk.MustExec("insert into t1 values(1,'1')")
tk.MustExec("create table t2(a int , b varchar(8) GENERATED ALWAYS AS (c) VIRTUAL, c varchar(8), PRIMARY KEY (a))")
tk.MustExec("insert into t2(a) values(1)")
tk.MustQuery("select /*+ tidb_inlj(t2) */ t2.b, t1.b from t1 join t2 ON t2.a=t1.a").Check(testkit.Rows("<nil> 1"))
}
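// TestIssue23602 checks range planning over an unsigned bigint clustered primary key with hex-literal bounds.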
func (s *testSuite) TestIssue23602(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("USE test")
tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn
tk.MustExec("CREATE TABLE t (a bigint unsigned PRIMARY KEY)")
defer tk.MustExec("DROP TABLE t")
tk.MustExec("INSERT INTO t VALUES (0),(1),(2),(3),(18446744073709551600),(18446744073709551605),(18446744073709551610),(18446744073709551615)")
tk.MustExec("ANALYZE TABLE t")
tk.MustQuery(`EXPLAIN FORMAT = 'brief' SELECT a FROM t WHERE a >= 0x1 AND a <= 0x2`).Check(testkit.Rows(
"TableReader 2.00 root data:TableRangeScan]\n" +
"[└─TableRangeScan 2.00 cop[tikv] table:t range:[1,2], keep order:false"))
tk.MustQuery(`EXPLAIN FORMAT = 'brief' SELECT a FROM t WHERE a BETWEEN 0x1 AND 0x2`).Check(testkit.Rows(
"TableReader 2.00 root data:TableRangeScan]\n" +
"[└─TableRangeScan 2.00 cop[tikv] table:t range:[1,2], keep order:false"))
tk.MustQuery("SELECT a FROM t WHERE a BETWEEN 0xFFFFFFFFFFFFFFF5 AND X'FFFFFFFFFFFFFFFA'").Check(testkit.Rows("18446744073709551605", "18446744073709551610"))
}
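// TestCTEWithIndexLookupJoinDeadLock repeatedly joins a CTE via IndexLookUpJoin to guard against a deadlock
// regression.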
func (s *testSuite) TestCTEWithIndexLookupJoinDeadLock(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table t (a int(11) default null,b int(11) default null,key b (b),key ba (b))")
tk.MustExec("create table t1 (a int(11) default null,b int(11) default null,key idx_ab (a,b),key idx_a (a),key idx_b (b))")
tk.MustExec("create table t2 (a int(11) default null,b int(11) default null,key idx_ab (a,b),key idx_a (a),key idx_b (b))")
// The problem reproduces easily within 30 executions of IndexLookUpJoin.
for i := 0; i < 30; i++ {
tk.MustExec("with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;")
}
}
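// TestGetResultRowsCount checks executor.GetResultRowsCount: queries report their result row count while DML
// statements report 0.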
func (s *testSuite) TestGetResultRowsCount(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int)")
for i := 1; i <= 10; i++ {
tk.MustExec(fmt.Sprintf("insert into t values (%v)", i))
}
cases := []struct {
sql string
row int64
}{
{"select * from t", 10},
{"select * from t where a < 0", 0},
{"select * from t where a <= 3", 3},
{"insert into t values (11)", 0},
{"replace into t values (12)", 0},
{"update t set a=13 where a=12", 0},
}
for _, ca := range cases {
if strings.HasPrefix(ca.sql, "select") {
tk.MustQuery(ca.sql)
} else {
tk.MustExec(ca.sql)
}
info := tk.Se.ShowProcess()
c.Assert(info, NotNil)
p, ok := info.Plan.(plannercore.Plan)
c.Assert(ok, IsTrue)
cnt := executor.GetResultRowsCount(tk.Se, p)
c.Assert(ca.row, Equals, cnt, Commentf("sql: %v", ca.sql))
}
}
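// checkFileName reports whether s is one of the file names expected inside a plan replayer dump archive.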
func checkFileName(s string) bool {
files := []string{
"config.toml",
"meta.txt",
"stats/test.t_dump_single.json",
"schema/test.t_dump_single.schema.txt",
"variables.toml",
"sqls.sql",
"session_bindings.sql",
"global_bindings.sql",
"explain.txt",
}
for _, f := range files {
if strings.Compare(f, s) == 0 {
return true
}
}
return false
}
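// TestPlanReplayerDumpSingle checks that `plan replayer dump explain` produces a zip archive containing only
// the files accepted by checkFileName.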
func (s *testSuiteWithData) TestPlanReplayerDumpSingle(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t_dump_single")
tk.MustExec("create table t_dump_single(a int)")
res := tk.MustQuery("plan replayer dump explain select * from t_dump_single")
path := s.testData.ConvertRowsToStrings(res.Rows())
reader, err := zip.OpenReader(filepath.Join(domain.GetPlanReplayerDirName(), path[0]))
c.Assert(err, IsNil)
defer reader.Close()
for _, file := range reader.File {
c.Assert(checkFileName(file.Name), IsTrue)
}
}
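// TestDropColWithPrimaryKey checks the restrictions on dropping columns covered by the primary key or a
// composite index, gated by tidb_enable_change_multi_schema.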
func (s *testSuiteWithData) TestDropColWithPrimaryKey(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id int primary key, c1 int, c2 int, c3 int, index idx1(c1, c2), index idx2(c3))")
tk.MustExec("set global tidb_enable_change_multi_schema = off")
tk.MustGetErrMsg("alter table t drop column id", "[ddl:8200]Unsupported drop integer primary key")
tk.MustGetErrMsg("alter table t drop column c1", "[ddl:8200]can't drop column c1 with composite index covered or Primary Key covered now")
tk.MustGetErrMsg("alter table t drop column c3", "[ddl:8200]can't drop column c3 with tidb_enable_change_multi_schema is disable")
tk.MustExec("set global tidb_enable_change_multi_schema = on")
tk.MustExec("alter table t drop column c3")
}
func (s *testSuiteP1) TestIssue28935(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("set @@tidb_enable_vectorized_expression=true")
tk.MustQuery(`select trim(leading from " a "), trim(both from " a "), trim(trailing from " a ")`).Check(testkit.Rows("a a a"))
tk.MustQuery(`select trim(leading null from " a "), trim(both null from " a "), trim(trailing null from " a ")`).Check(testkit.Rows("<nil> <nil> <nil>"))
tk.MustQuery(`select trim(null from " a ")`).Check(testkit.Rows("<nil>"))
tk.MustExec("set @@tidb_enable_vectorized_expression=false")
tk.MustQuery(`select trim(leading from " a "), trim(both from " a "), trim(trailing from " a ")`).Check(testkit.Rows("a a a"))
tk.MustQuery(`select trim(leading null from " a "), trim(both null from " a "), trim(trailing null from " a ")`).Check(testkit.Rows("<nil> <nil> <nil>"))
tk.MustQuery(`select trim(null from " a ")`).Check(testkit.Rows("<nil>"))
}
func (s *testSuiteP1) TestIssue29412(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t29142_1")
tk.MustExec("drop table if exists t29142_2")
tk.MustExec("create table t29142_1(a int);")
tk.MustExec("create table t29142_2(a double);")
tk.MustExec("insert into t29142_1 value(20);")
tk.MustQuery("select sum(distinct a) as x from t29142_1 having x > some ( select a from t29142_2 where x in (a));").Check(nil)
}
func (s *testSerialSuite) TestIssue28650(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2;")
tk.MustExec("create table t1(a int, index(a));")
tk.MustExec("create table t2(a int, c int, b char(50), index(a,c,b));")
tk.MustExec("set tidb_enable_rate_limit_action=off;")
wg := &sync.WaitGroup{}
sql := `explain analyze
select /*+ stream_agg(@sel_1) stream_agg(@sel_3) %s(@sel_2 t2)*/ count(1) from
(
SELECT t2.a AS t2_external_user_ext_id, t2.b AS t2_t1_ext_id FROM t2 INNER JOIN (SELECT t1.a AS d_t1_ext_id FROM t1 GROUP BY t1.a) AS anon_1 ON anon_1.d_t1_ext_id = t2.a WHERE t2.c = 123 AND t2.b
IN ("%s") ) tmp`
wg.Add(1)
sqls := make([]string, 2)
go func() {
defer wg.Done()
inElems := make([]string, 1000)
for i := 0; i < len(inElems); i++ {
inElems[i] = fmt.Sprintf("wm_%dbDgAAwCD-v1QB%dxky-g_dxxQCw", rand.Intn(100), rand.Intn(100))
}
sqls[0] = fmt.Sprintf(sql, "inl_join", strings.Join(inElems, "\",\""))
sqls[1] = fmt.Sprintf(sql, "inl_hash_join", strings.Join(inElems, "\",\""))
}()
tk.MustExec("insert into t1 select rand()*400;")
for i := 0; i < 10; i++ {
tk.MustExec("insert into t1 select rand()*400 from t1;")
}
config.UpdateGlobal(func(conf *config.Config) {
conf.OOMAction = config.OOMActionCancel
})
defer func() {
config.UpdateGlobal(func(conf *config.Config) {
conf.OOMAction = config.OOMActionLog
})
}()
wg.Wait()
for _, sql := range sqls {
tk.MustExec("set @@tidb_mem_quota_query = 1073741824") // 1GB
c.Assert(tk.QueryToErr(sql), IsNil)
tk.MustExec("set @@tidb_mem_quota_query = 33554432") // 32MB, out of memory during executing
c.Assert(strings.Contains(tk.QueryToErr(sql).Error(), "Out Of Memory Quota!"), IsTrue)
tk.MustExec("set @@tidb_mem_quota_query = 65536") // 64KB, out of memory during building the plan
func() {
defer func() {
r := recover()
c.Assert(r, NotNil)
err := errors.Errorf("%v", r)
c.Assert(strings.Contains(err.Error(), "Out Of Memory Quota!"), IsTrue)
}()
tk.MustExec(sql)
}()
}
}
func (s *testSerialSuite) TestIssue30289(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
fpName := "github.com/pingcap/tidb/executor/issue30289"
c.Assert(failpoint.Enable(fpName, `return(true)`), IsNil)
defer func() {
c.Assert(failpoint.Disable(fpName), IsNil)
}()
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int)")
err := tk.QueryToErr("select /*+ hash_join(t1) */ * from t t1 join t t2 on t1.a=t2.a")
c.Assert(err.Error(), Matches, "issue30289 build return error")
}
func (s *testSerialSuite) TestIssue29498(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("DROP TABLE IF EXISTS t1;")
tk.MustExec("CREATE TABLE t1 (t3 TIME(3), d DATE, t TIME);")
tk.MustExec("INSERT INTO t1 VALUES ('00:00:00.567', '2002-01-01', '00:00:02');")
res := tk.MustQuery("SELECT CONCAT(IFNULL(t3, d)) AS col1 FROM t1;")
row := res.Rows()[0][0].(string)
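// mysql.MaxDatetimeWidthNoFsp is 19 (e.g. "2002-01-01 00:00:00"); the extra
// 3+1 below accounts for TIME(3)'s three fractional digits plus the dot.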
c.Assert(len(row), Equals, mysql.MaxDatetimeWidthNoFsp+3+1)
c.Assert(row[len(row)-12:], Equals, "00:00:00.567")
res = tk.MustQuery("SELECT IFNULL(t3, d) AS col1 FROM t1;")
row = res.Rows()[0][0].(string)
c.Assert(len(row), Equals, mysql.MaxDatetimeWidthNoFsp+3+1)
c.Assert(row[len(row)-12:], Equals, "00:00:00.567")
res = tk.MustQuery("SELECT CONCAT(IFNULL(t, d)) AS col1 FROM t1;")
row = res.Rows()[0][0].(string)
c.Assert(len(row), Equals, mysql.MaxDatetimeWidthNoFsp)
c.Assert(row[len(row)-8:], Equals, "00:00:02")
res = tk.MustQuery("SELECT IFNULL(t, d) AS col1 FROM t1;")
row = res.Rows()[0][0].(string)
c.Assert(len(row), Equals, mysql.MaxDatetimeWidthNoFsp)
c.Assert(row[len(row)-8:], Equals, "00:00:02")
res = tk.MustQuery("SELECT CONCAT(xx) FROM (SELECT t3 AS xx FROM t1 UNION SELECT d FROM t1) x ORDER BY -xx LIMIT 1;")
row = res.Rows()[0][0].(string)
c.Assert(len(row), Equals, mysql.MaxDatetimeWidthNoFsp+3+1)
c.Assert(row[len(row)-12:], Equals, "00:00:00.567")
res = tk.MustQuery("SELECT CONCAT(CASE WHEN d IS NOT NULL THEN t3 ELSE d END) AS col1 FROM t1;")
row = res.Rows()[0][0].(string)
c.Assert(len(row), Equals, mysql.MaxDatetimeWidthNoFsp+3+1)
c.Assert(row[len(row)-12:], Equals, "00:00:00.567")
}
// Test invoking Close without invoking Open first, for each operator.
func (s *testSerialSuite) TestUnreasonablyClose(c *C) {
defer testleak.AfterTest(c)()
is := infoschema.MockInfoSchema([]*model.TableInfo{plannercore.MockSignedTable(), plannercore.MockUnsignedTable()})
se, err := session.CreateSession4Test(s.store)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "use test")
c.Assert(err, IsNil)
// To enable the shuffleExec operator.
_, err = se.Execute(context.Background(), "set @@tidb_merge_join_concurrency=4")
c.Assert(err, IsNil)
var opsNeedsCovered = []plannercore.PhysicalPlan{
&plannercore.PhysicalHashJoin{},
&plannercore.PhysicalMergeJoin{},
&plannercore.PhysicalIndexJoin{},
&plannercore.PhysicalIndexHashJoin{},
&plannercore.PhysicalTableReader{},
&plannercore.PhysicalIndexReader{},
&plannercore.PhysicalIndexLookUpReader{},
&plannercore.PhysicalIndexMergeReader{},
&plannercore.PhysicalApply{},
&plannercore.PhysicalHashAgg{},
&plannercore.PhysicalStreamAgg{},
&plannercore.PhysicalLimit{},
&plannercore.PhysicalSort{},
&plannercore.PhysicalTopN{},
&plannercore.PhysicalCTE{},
&plannercore.PhysicalCTETable{},
&plannercore.PhysicalMaxOneRow{},
&plannercore.PhysicalProjection{},
&plannercore.PhysicalSelection{},
&plannercore.PhysicalTableDual{},
&plannercore.PhysicalWindow{},
&plannercore.PhysicalShuffle{},
&plannercore.PhysicalUnionAll{},
}
executorBuilder := executor.NewMockExecutorBuilderForTest(se, is, nil, math.MaxUint64, false, "global")
var opsNeedsCoveredMask uint64 = 1<<len(opsNeedsCovered) - 1
opsAlreadyCoveredMask := uint64(0)
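// For example, with the 23 operators registered above, opsNeedsCoveredMask is
// 1<<23 - 1; visiting an operator of kind k below sets bit k in
// opsAlreadyCoveredMask.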
for i, tc := range []string{
"select /*+ hash_join(t1)*/ * from t t1 join t t2 on t1.a = t2.a",
"select /*+ merge_join(t1)*/ * from t t1 join t t2 on t1.f = t2.f",
"select t.f from t use index(f)",
"select /*+ inl_join(t1) */ * from t t1 join t t2 on t1.f=t2.f",
"select /*+ inl_hash_join(t1) */ * from t t1 join t t2 on t1.f=t2.f",
"SELECT count(1) FROM (SELECT (SELECT min(a) FROM t as t2 WHERE t2.a > t1.a) AS a from t as t1) t",
"select /*+ hash_agg() */ count(f) from t group by a",
"select /*+ stream_agg() */ count(f) from t group by a",
"select * from t order by a, f",
"select * from t order by a, f limit 1",
"select * from t limit 1",
"select (select t1.a from t t1 where t1.a > t2.a) as a from t t2;",
"select a + 1 from t",
"select count(*) a from t having a > 1",
"select * from t where a = 1.1",
"with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 0) select * from cte1",
"select /*+use_index_merge(t, c_d_e, f)*/ * from t where c < 1 or f > 2",
"select sum(f) over (partition by f) from t",
"select /*+ merge_join(t1)*/ * from t t1 join t t2 on t1.d = t2.d",
"select a from t union all select a from t",
} {
comment := Commentf("case:%v sql:%s", i, tc)
c.Assert(err, IsNil, comment)
stmt, err := s.ParseOneStmt(tc, "", "")
c.Assert(err, IsNil, comment)
err = se.NewTxn(context.Background())
c.Assert(err, IsNil, comment)
p, _, err := planner.Optimize(context.TODO(), se, stmt, is)
c.Assert(err, IsNil, comment)
// This loop traverses the plan tree level by level to record which operators are covered.
for child := []plannercore.PhysicalPlan{p.(plannercore.PhysicalPlan)}; len(child) != 0; {
newChild := make([]plannercore.PhysicalPlan, 0, len(child))
for _, ch := range child {
found := false
for k, t := range opsNeedsCovered {
if reflect.TypeOf(t) == reflect.TypeOf(ch) {
opsAlreadyCoveredMask |= 1 << k
found = true
break
}
}
c.Assert(found, IsTrue, Commentf("case: %v sql: %s operator %v is not registered in opsNeedsCoveredMask", i, tc, reflect.TypeOf(ch)))
switch x := ch.(type) {
case *plannercore.PhysicalCTE:
newChild = append(newChild, x.RecurPlan)
newChild = append(newChild, x.SeedPlan)
continue
case *plannercore.PhysicalShuffle:
newChild = append(newChild, x.DataSources...)
newChild = append(newChild, x.Tails...)
continue
}
newChild = append(newChild, ch.Children()...)
}
child = newChild
}
e := executorBuilder.Build(p)
func() {
defer func() {
r := recover()
buf := make([]byte, 4096)
stackSize := runtime.Stack(buf, false)
buf = buf[:stackSize]
c.Assert(r, IsNil, Commentf("case: %v\n sql: %s\n error stack: %v", i, tc, string(buf)))
}()
c.Assert(e.Close(), IsNil, comment)
}()
}
// The following code is used to make sure all the operators registered
// in opsNeedsCoveredMask are covered.
commentBuf := strings.Builder{}
if opsAlreadyCoveredMask != opsNeedsCoveredMask {
for i := range opsNeedsCovered {
if opsAlreadyCoveredMask&(1<<i) != 1<<i {
commentBuf.WriteString(fmt.Sprintf(" %v", reflect.TypeOf(opsNeedsCovered[i])))
}
}
}
c.Assert(opsAlreadyCoveredMask, Equals, opsNeedsCoveredMask, Commentf("these operators are not covered %s", commentBuf.String()))
}
func (s *testSerialSuite) TestIssue30971(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1 (id int);")
tk.MustExec("create table t2 (id int, c int);")
testCases := []struct {
sql string
fields int
}{
// Fix a bug where the column length field returned to the client was incorrect when using the MySQL prepare protocol.
{"select * from t1 union select 1 from t1", 1},
{"select c from t2 union select * from t1", 1},
{"select * from t1", 1},
{"select * from t2 where c in (select * from t1)", 2},
{"insert into t1 values (?)", 0},
{"update t1 set id = ?", 0},
}
for _, test := range testCases {
_, _, fields, err := tk.Se.PrepareStmt(test.sql)
c.Assert(err, IsNil)
c.Assert(fields, HasLen, test.fields)
}
}
|
txn kv.Tran
|
environment_network_azure_v1_params.go
|
// Code generated by go-swagger; DO NOT EDIT.
package model
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// EnvironmentNetworkAzureV1Params environment network azure v1 params
// swagger:model EnvironmentNetworkAzureV1Params
type EnvironmentNetworkAzureV1Params struct {
// Azure Network ID of the specified network
// Required: true
// Max Length: 255
// Min Length: 0
NetworkID *string `json:"networkId"`
// Azure Network is private if this flag is true
// Required: true
NoPublicIP *bool `json:"noPublicIp"`
// Full resource id of an existing azure private DNS zone
// Max Length: 255
// Min Length: 0
PrivateDNSZoneID *string `json:"privateDnsZoneId,omitempty"`
// Azure Resource Group Name of the specified network
// Required: true
// Max Length: 255
// Min Length: 0
ResourceGroupName *string `json:"resourceGroupName"`
}
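// An example request body for this model (illustrative values only):
//
//	{"networkId": "my-vnet", "noPublicIp": false, "resourceGroupName": "my-rg"}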
// Validate validates this environment network azure v1 params
func (m *EnvironmentNetworkAzureV1Params) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateNetworkID(formats); err != nil {
res = append(res, err)
}
if err := m.validateNoPublicIP(formats); err != nil {
res = append(res, err)
}
if err := m.validatePrivateDNSZoneID(formats); err != nil {
res = append(res, err)
}
if err := m.validateResourceGroupName(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EnvironmentNetworkAzureV1Params) validateNetworkID(formats strfmt.Registry) error {
if err := validate.Required("networkId", "body", m.NetworkID); err != nil {
return err
}
if err := validate.MinLength("networkId", "body", string(*m.NetworkID), 0); err != nil {
return err
}
if err := validate.MaxLength("networkId", "body", string(*m.NetworkID), 255); err != nil
|
return nil
}
func (m *EnvironmentNetworkAzureV1Params) validateNoPublicIP(formats strfmt.Registry) error {
if err := validate.Required("noPublicIp", "body", m.NoPublicIP); err != nil {
return err
}
return nil
}
func (m *EnvironmentNetworkAzureV1Params) validatePrivateDNSZoneID(formats strfmt.Registry) error {
if swag.IsZero(m.PrivateDNSZoneID) { // not required
return nil
}
if err := validate.MinLength("privateDnsZoneId", "body", string(*m.PrivateDNSZoneID), 0); err != nil {
return err
}
if err := validate.MaxLength("privateDnsZoneId", "body", string(*m.PrivateDNSZoneID), 255); err != nil {
return err
}
return nil
}
func (m *EnvironmentNetworkAzureV1Params) validateResourceGroupName(formats strfmt.Registry) error {
if err := validate.Required("resourceGroupName", "body", m.ResourceGroupName); err != nil {
return err
}
if err := validate.MinLength("resourceGroupName", "body", string(*m.ResourceGroupName), 0); err != nil {
return err
}
if err := validate.MaxLength("resourceGroupName", "body", string(*m.ResourceGroupName), 255); err != nil {
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *EnvironmentNetworkAzureV1Params) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EnvironmentNetworkAzureV1Params) UnmarshalBinary(b []byte) error {
var res EnvironmentNetworkAzureV1Params
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
|
{
return err
}
|
mod.rs
|
//! Component storage types, implementations for component joins, etc.
pub use self::{
data::{ReadStorage, WriteStorage},
entry::{Entries, OccupiedEntry, StorageEntry, VacantEntry},
flagged::FlaggedStorage,
generic::{GenericReadStorage, GenericWriteStorage},
restrict::{
ImmutableParallelRestriction, MutableParallelRestriction, RestrictedStorage,
SequentialRestriction,
},
storages::{
BTreeStorage, DefaultVecStorage, DenseVecStorage, HashMapStorage, NullStorage, VecStorage,
},
track::{ComponentEvent, Tracked},
};
use self::storages::SliceAccess;
use std::{
self,
marker::PhantomData,
ops::{Deref, DerefMut, Not},
};
use hibitset::{BitSet, BitSetLike, BitSetNot};
use shred::{CastFrom, Fetch};
#[cfg(feature = "parallel")]
use crate::join::ParJoin;
use crate::{
error::{Error, WrongGeneration},
join::Join,
world::{Component, EntitiesRes, Entity, Generation, Index},
};
use self::drain::Drain;
mod data;
mod drain;
mod entry;
mod flagged;
mod generic;
mod restrict;
mod storages;
#[cfg(test)]
mod tests;
mod track;
/// An inverted storage type, only useful to iterate entities
/// that do not have a particular component type.
pub struct AntiStorage<'a>(pub &'a BitSet);
impl<'a> Join for AntiStorage<'a> {
type Mask = BitSetNot<&'a BitSet>;
type Type = ();
type Value = ();
// SAFETY: No invariants to meet and no unsafe code.
unsafe fn open(self) -> (Self::Mask, ()) {
(BitSetNot(self.0), ())
}
// SAFETY: No invariants to meet and no unsafe code.
unsafe fn get(_: &mut (), _: Index) {}
}
// SAFETY: Since `get` does not do any memory access, this is safe to implement.
unsafe impl<'a> DistinctStorage for AntiStorage<'a> {}
// SAFETY: Since `get` does not do any memory access, this is safe to implement.
#[cfg(feature = "parallel")]
unsafe impl<'a> ParJoin for AntiStorage<'a> {}
/// A dynamic storage.
pub trait AnyStorage {
/// Drop components of given entities.
fn drop(&mut self, entities: &[Entity]);
}
unsafe impl<T> CastFrom<T> for dyn AnyStorage
where
T: AnyStorage + 'static,
{
fn cast(t: &T) -> &Self {
t
}
fn cast_mut(t: &mut T) -> &mut Self {
t
}
}
impl<T> AnyStorage for MaskedStorage<T>
where
T: Component,
{
fn drop(&mut self, entities: &[Entity]) {
for entity in entities {
MaskedStorage::drop(self, entity.id());
}
}
}
/// This is a marker trait which requires you to uphold the following guarantee:
///
/// > Multiple threads may call `get_mut()` with distinct indices without
/// > causing undefined behavior.
///
/// This is for example valid for `Vec`:
///
/// ```rust
/// vec![1, 2, 3];
/// ```
///
/// We may modify both element 1 and 2 at the same time; indexing the vector
/// mutably does not modify anything other than the respective elements.
///
/// As a counter example, we may have some kind of cached storage; it caches
/// elements when they're retrieved, so it pushes a new element to some
/// cache-vector. This storage is not allowed to implement `DistinctStorage`.
///
/// Implementing this trait marks the storage safe for concurrent mutation (of
/// distinct elements), thus allows `join_par()`.
pub unsafe trait DistinctStorage {}
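// A minimal sketch of opting a storage into `DistinctStorage`. The
// `ExampleVecBacked` wrapper below is illustrative and not part of this
// crate's API; it forwards to `VecStorage`, whose `get_mut` only touches the
// slot at the given index, so the guarantee above holds. The caching storage
// from the counter example must *not* receive such an impl.
#[cfg(test)]
struct ExampleVecBacked<T>(VecStorage<T>);
// SAFETY: `VecStorage` indexes disjoint memory for distinct indices.
#[cfg(test)]
unsafe impl<T> DistinctStorage for ExampleVecBacked<T> {}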
/// The status of an `insert()`ion into a storage.
/// If the insertion was successful then the Ok value will
/// contain the component that was replaced (if any).
pub type InsertResult<T> = Result<Option<T>, Error>;
/// The `UnprotectedStorage` together with the `BitSet` that knows
/// about which elements are stored, and which are not.
pub struct MaskedStorage<T: Component> {
mask: BitSet,
inner: T::Storage,
}
impl<T: Component> Default for MaskedStorage<T>
where
T::Storage: Default,
{
fn default() -> Self {
Self {
mask: Default::default(),
inner: Default::default(),
}
}
}
impl<T: Component> MaskedStorage<T> {
/// Creates a new `MaskedStorage`. This is called when you register
/// a new component type within the world.
pub fn new(inner: T::Storage) -> MaskedStorage<T> {
MaskedStorage {
mask: BitSet::new(),
inner,
}
}
fn open_mut(&mut self) -> (&BitSet, &mut T::Storage) {
(&self.mask, &mut self.inner)
}
/// Clear the contents of this storage.
pub fn clear(&mut self) {
// SAFETY: `self.mask` is the correct mask as specified.
unsafe {
self.inner.clean(&self.mask);
}
self.mask.clear();
}
/// Remove an element by a given index.
pub fn remove(&mut self, id: Index) -> Option<T> {
if self.mask.remove(id) {
// SAFETY: We checked the mask (`remove` returned `true`)
Some(unsafe { self.inner.remove(id) })
} else {
None
}
}
/// Drop an element by a given index.
pub fn drop(&mut self, id: Index) {
if self.mask.remove(id) {
// SAFETY: We checked the mask (`remove` returned `true`)
unsafe {
self.inner.drop(id);
}
}
}
}
impl<T: Component> Drop for MaskedStorage<T> {
fn drop(&mut self) {
self.clear();
}
}
/// A wrapper around the masked storage and the generations vector.
/// Can be used for safe lookup of components, insertions and removes.
/// This is what `World::read/write` fetches for the user.
pub struct Storage<'e, T, D> {
data: D,
entities: Fetch<'e, EntitiesRes>,
phantom: PhantomData<T>,
}
impl<'e, T, D> Storage<'e, T, D> {
/// Creates a new `Storage` from a fetched allocator and an immutable or
/// mutable `MaskedStorage`, named `data`.
pub fn new(entities: Fetch<'e, EntitiesRes>, data: D) -> Storage<'e, T, D> {
Storage {
data,
entities,
phantom: PhantomData,
}
}
}
impl<'e, T, D> Storage<'e, T, D>
where
T: Component,
D: Deref<Target = MaskedStorage<T>>,
{
/// Gets the wrapped storage.
pub fn unprotected_storage(&self) -> &T::Storage {
&self.data.inner
}
/// Returns the `EntitiesRes` resource fetched by this storage.
/// **This does not have anything to do with the components inside.**
/// You only want to use this when implementing additional methods
/// for `Storage` via an extension trait.
pub fn fetched_entities(&self) -> &EntitiesRes {
&self.entities
}
/// Tries to read the data associated with an `Entity`.
pub fn get(&self, e: Entity) -> Option<&T> {
if self.data.mask.contains(e.id()) && self.entities.is_alive(e) {
// SAFETY: We checked the mask, so all invariants are met.
Some(unsafe { self.data.inner.get(e.id()) })
} else {
None
}
}
/// Computes the number of elements this `Storage` contains by counting the
/// bits in the bit set. This operation will never be performed in
/// constant time.
pub fn count(&self) -> usize {
self.mask().iter().count()
}
/// Checks whether this `Storage` is empty. This operation is very cheap.
pub fn is_empty(&self) -> bool {
self.mask().is_empty()
}
/// Returns true if the storage has a component for this entity, and that
/// entity is alive.
pub fn contains(&self, e: Entity) -> bool {
self.data.mask.contains(e.id()) && self.entities.is_alive(e)
}
/// Returns a reference to the bitset of this storage which allows filtering
/// by the component type without actually getting the component.
pub fn mask(&self) -> &BitSet {
&self.data.mask
}
}
impl<'e, T, D> Storage<'e, T, D>
where
T: Component,
D: Deref<Target = MaskedStorage<T>>,
T::Storage: SliceAccess<T>,
{
/// Returns the component data as a slice.
///
/// The indices of this slice may not correspond to anything in particular.
/// Check the underlying storage documentation for details.
pub fn as_slice(&self) -> &[<T::Storage as SliceAccess<T>>::Element] {
self.data.inner.as_slice()
}
}
impl<'e, T, D> Storage<'e, T, D>
where
T: Component,
D: DerefMut<Target = MaskedStorage<T>>,
T::Storage: SliceAccess<T>,
{
/// Returns the component data as a slice.
///
/// The indices of this slice may not correspond to anything in particular.
/// Check the underlying storage documentation for details.
pub fn as_mut_slice(&mut self) -> &mut [<T::Storage as SliceAccess<T>>::Element] {
self.data.inner.as_mut_slice()
}
}
impl<'e, T, D> Storage<'e, T, D>
where
T: Component,
D: DerefMut<Target = MaskedStorage<T>>,
{
/// Gets mutable access to the wrapped storage.
///
/// # Safety
///
/// This is unsafe because modifying the wrapped storage without also
/// updating the mask bitset accordingly can result in illegal memory
/// access.
pub unsafe fn unprotected_storage_mut(&mut self) -> &mut T::Storage {
&mut self.data.inner
}
/// Tries to mutate the data associated with an `Entity`.
pub fn get_mut(&mut self, e: Entity) -> Option<&mut T> {
if self.data.mask.contains(e.id()) && self.entities.is_alive(e) {
// SAFETY: We checked the mask, so all invariants are met.
Some(unsafe { self.data.inner.get_mut(e.id()) })
} else {
None
}
}
/// Inserts new data for a given `Entity`.
/// Returns the result of the operation as a `InsertResult<T>`
///
/// If a component already existed for the given `Entity`, then it will
/// be overwritten with the new component. If it did overwrite, then the
/// result will contain `Some(T)` where `T` is the previous component.
pub fn insert(&mut self, e: Entity, mut v: T) -> InsertResult<T> {
if self.entities.is_alive(e) {
let id = e.id();
if self.data.mask.contains(id) {
// SAFETY: We checked the mask, so all invariants are met.
std::mem::swap(&mut v, unsafe { self.data.inner.get_mut(id) });
Ok(Some(v))
} else {
self.data.mask.add(id);
// SAFETY: The mask was previously empty, so it is safe to insert.
unsafe { self.data.inner.insert(id, v) };
Ok(None)
}
} else {
Err(Error::WrongGeneration(WrongGeneration {
action: "insert component for entity",
actual_gen: self.entities.entity(e.id()).gen(),
entity: e,
}))
}
}
/// Removes the data associated with an `Entity`.
pub fn remove(&mut self, e: Entity) -> Option<T>
|
/// Clears the contents of the storage.
pub fn clear(&mut self) {
self.data.clear();
}
/// Creates a draining storage wrapper which can be `.join`ed
/// to get a draining iterator.
pub fn drain(&mut self) -> Drain<T> {
Drain {
data: &mut self.data,
}
}
}
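// A hedged sketch (component and test names are illustrative, not from the
// upstream test suite) of the `InsertResult` semantics documented on
// `Storage::insert`: the `Ok` value carries the replaced component, if any.
#[cfg(test)]
mod insert_result_sketch {
    use crate::{Builder, Component, VecStorage, World, WorldExt};

    #[derive(Debug, PartialEq)]
    struct Pos(u32);
    impl Component for Pos {
        type Storage = VecStorage<Self>;
    }

    #[test]
    fn insert_reports_replaced_component() {
        let mut world = World::new();
        world.register::<Pos>();
        let e = world.create_entity().build();
        let mut storage = world.write_storage::<Pos>();
        assert_eq!(storage.insert(e, Pos(1)).unwrap(), None);
        assert_eq!(storage.insert(e, Pos(2)).unwrap(), Some(Pos(1)));
        assert_eq!(storage.remove(e), Some(Pos(2)));
    }
}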
impl<'a, T, D: Clone> Clone for Storage<'a, T, D> {
fn clone(&self) -> Self {
Storage::new(self.entities.clone(), self.data.clone())
}
}
// SAFETY: This is safe, since `T::Storage` is `DistinctStorage` and `Join::get`
// only accesses the storage and nothing else.
unsafe impl<'a, T: Component, D> DistinctStorage for Storage<'a, T, D> where
T::Storage: DistinctStorage
{
}
impl<'a, 'e, T, D> Join for &'a Storage<'e, T, D>
where
T: Component,
D: Deref<Target = MaskedStorage<T>>,
{
type Mask = &'a BitSet;
type Type = &'a T;
type Value = &'a T::Storage;
// SAFETY: No unsafe code and no invariants.
unsafe fn open(self) -> (Self::Mask, Self::Value) {
(&self.data.mask, &self.data.inner)
}
// SAFETY: Since we require that the mask was checked, an element for `i` must
// have been inserted without being removed.
unsafe fn get(v: &mut Self::Value, i: Index) -> &'a T {
v.get(i)
}
}
impl<'a, 'e, T, D> Not for &'a Storage<'e, T, D>
where
T: Component,
D: Deref<Target = MaskedStorage<T>>,
{
type Output = AntiStorage<'a>;
fn not(self) -> Self::Output {
AntiStorage(&self.data.mask)
}
}
// SAFETY: This is always safe because immutable access can in no case cause
// memory issues, even if access to common memory occurs.
#[cfg(feature = "parallel")]
unsafe impl<'a, 'e, T, D> ParJoin for &'a Storage<'e, T, D>
where
T: Component,
D: Deref<Target = MaskedStorage<T>>,
T::Storage: Sync,
{
}
impl<'a, 'e, T, D> Join for &'a mut Storage<'e, T, D>
where
T: Component,
D: DerefMut<Target = MaskedStorage<T>>,
{
type Mask = &'a BitSet;
type Type = &'a mut T;
type Value = &'a mut T::Storage;
// SAFETY: No unsafe code and no invariants to fulfill.
unsafe fn open(self) -> (Self::Mask, Self::Value) {
self.data.open_mut()
}
// TODO: audit unsafe
unsafe fn get(v: &mut Self::Value, i: Index) -> &'a mut T {
// This is horribly unsafe. Unfortunately, Rust doesn't provide a way
// to abstract mutable/immutable state at the moment, so we have to hack
// our way through it.
let value: *mut Self::Value = v as *mut Self::Value;
(*value).get_mut(i)
}
}
// SAFETY: This is safe because of the `DistinctStorage` guarantees.
#[cfg(feature = "parallel")]
unsafe impl<'a, 'e, T, D> ParJoin for &'a mut Storage<'e, T, D>
where
T: Component,
D: DerefMut<Target = MaskedStorage<T>>,
T::Storage: Sync + DistinctStorage,
{
}
/// Tries to create a default value, returns an `Err` with the name of the
/// storage and/or component if there's no default.
pub trait TryDefault: Sized {
/// Tries to create the default.
fn try_default() -> Result<Self, String>;
/// Calls `try_default` and panics on an error case.
fn unwrap_default() -> Self {
match Self::try_default() {
Ok(x) => x,
Err(e) => panic!("Failed to create a default value for storage ({:?})", e),
}
}
}
impl<T> TryDefault for T
where
T: Default,
{
fn try_default() -> Result<Self, String> {
Ok(T::default())
}
}
/// Used by the framework to quickly join components.
pub trait UnprotectedStorage<T>: TryDefault {
/// Clean the storage given a bitset with bits set for valid indices.
/// Allows us to safely drop the storage.
///
/// # Safety
///
/// May only be called with the mask which keeps track of the elements
/// existing in this storage.
unsafe fn clean<B>(&mut self, has: B)
where
B: BitSetLike;
/// Tries reading the data associated with an `Index`.
/// This is unsafe because the external set used
/// to protect this storage is absent.
///
/// # Safety
///
/// May only be called after a call to `insert` with `id` and
/// no following call to `remove` with `id`.
///
/// A mask should keep track of those states, and an `id` being contained
/// in the tracking mask is sufficient to call this method.
unsafe fn get(&self, id: Index) -> &T;
/// Tries mutating the data associated with an `Index`.
/// This is unsafe because the external set used
/// to protect this storage is absent.
///
/// # Safety
///
/// May only be called after a call to `insert` with `id` and
/// no following call to `remove` with `id`.
///
/// A mask should keep track of those states, and an `id` being contained
/// in the tracking mask is sufficient to call this method.
unsafe fn get_mut(&mut self, id: Index) -> &mut T;
/// Inserts new data for a given `Index`.
///
/// # Safety
///
/// May only be called if `insert` was not called with `id` before, or
/// was reverted by a call to `remove` with `id`.
///
/// A mask should keep track of those states, and an `id` missing from the
/// mask is sufficient to call `insert`.
unsafe fn insert(&mut self, id: Index, value: T);
/// Removes the data associated with an `Index`.
///
/// # Safety
///
/// May only be called if an element with `id` was `insert`ed and not yet
/// removed / dropped.
unsafe fn remove(&mut self, id: Index) -> T;
/// Drops the data associated with an `Index`.
/// This is simply more efficient than `remove` and can be used if the data
/// is no longer needed.
///
/// # Safety
///
/// May only be called if an element with `id` was `insert`ed and not yet
/// removed / dropped.
unsafe fn drop(&mut self, id: Index) {
self.remove(id);
}
}
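// Hedged sketch: a didactic re-derivation of what the crate's own
// `HashMapStorage` already provides, showing the smallest possible
// `UnprotectedStorage` implementation. The mask kept by `MaskedStorage` is
// what upholds the safety contracts above, so every method may assume `id`
// is present (or absent, for `insert`).
#[cfg(test)]
mod unprotected_storage_sketch {
    use super::{BitSetLike, Index, UnprotectedStorage};
    use std::collections::HashMap;

    struct MapStorage<T>(HashMap<Index, T>);

    // Manual impl avoids the unnecessary `T: Default` bound a derive adds,
    // so the blanket `TryDefault` impl applies for every `T`.
    impl<T> Default for MapStorage<T> {
        fn default() -> Self {
            MapStorage(HashMap::new())
        }
    }

    impl<T> UnprotectedStorage<T> for MapStorage<T> {
        unsafe fn clean<B>(&mut self, _has: B)
        where
            B: BitSetLike,
        {
            // Dropping the map drops every element; the caller clears the mask.
            self.0.clear();
        }

        unsafe fn get(&self, id: Index) -> &T {
            // Contract: `id` was inserted and not yet removed.
            &self.0[&id]
        }

        unsafe fn get_mut(&mut self, id: Index) -> &mut T {
            self.0.get_mut(&id).unwrap()
        }

        unsafe fn insert(&mut self, id: Index, value: T) {
            self.0.insert(id, value);
        }

        unsafe fn remove(&mut self, id: Index) -> T {
            self.0.remove(&id).unwrap()
        }
    }
}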
#[cfg(test)]
#[cfg(feature = "parallel")]
mod tests_inline {
use crate::{
Builder, Component, DenseVecStorage, Entities, ParJoin, ReadStorage, World, WorldExt,
};
use rayon::iter::ParallelIterator;
struct Pos;
impl Component for Pos {
type Storage = DenseVecStorage<Self>;
}
#[test]
fn test_anti_par_join() {
let mut world = World::new();
world.create_entity().build();
world.exec(|(entities, pos): (Entities, ReadStorage<Pos>)| {
(&entities, !&pos).par_join().for_each(|(ent, ())| {
println!("Processing entity: {:?}", ent);
});
});
}
}
|
{
if self.entities.is_alive(e) {
self.data.remove(e.id())
} else {
None
}
}
|
struct_cluster_type_white_user_list.go
|
package emr
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
|
// ClusterTypeWhiteUserList is a nested struct in emr response
type ClusterTypeWhiteUserList struct {
ClusterTypeWhiteUser []ClusterTypeWhiteUser `json:"ClusterTypeWhiteUser" xml:"ClusterTypeWhiteUser"`
}
|
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
workload.go
|
// Copyright 2017 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package contextgraph
import (
"fmt"
"net/url"
"strings"
"time"
"istio.io/istio/mixer/pkg/adapter"
)
const (
membershipTypeName = "google.cloud.contextgraph.Membership"
grpcComm = "google.cloud.contextgraph.Communication.Grpc"
httpComm = "google.cloud.contextgraph.Communication.Http"
httpsComm = "google.cloud.contextgraph.Communication.Https"
tcpComm = "google.cloud.contextgraph.Communication.Tcp"
)
var (
protocolMap = map[string]string{
"http": httpComm,
"https": httpsComm,
"tcp": tcpComm,
"grpc": grpcComm,
}
)
type workloadInstance struct {
// N.B. The projects can potentially be different for each workload.
meshUID string
istioProject string
clusterProject, clusterLocation, clusterName string
uid string
owner string
workloadName, workloadNamespace string
}
type service struct {
meshUID string
namespace string
name string
istioProject string
}
// Reify turns wi into a set of Context API entities and edges.
func (wi workloadInstance) Reify(logger adapter.Logger) ([]entity, []edge) {
gcpContainer := fmt.Sprintf("//cloudresourcemanager.googleapis.com/projects/%s", wi.istioProject)
// N.B. Project names can contain ":" which needs to /not/ be escaped.
istioContainer := fmt.Sprintf("//istio.io/projects/%s", wi.istioProject)
meshUID := url.QueryEscape(wi.meshUID)
clusterProject := wi.clusterProject
// TODO: Figure out if locations should be URL-escaped or not ("aws:us-east-1" is a valid region).
clusterLocation := url.QueryEscape(wi.clusterLocation)
clusterName := url.QueryEscape(wi.clusterName)
uid := url.QueryEscape(wi.uid)
ownerUID := url.QueryEscape(wi.owner)
workloadName := url.QueryEscape(wi.workloadName)
workloadNamespace := url.QueryEscape(wi.workloadNamespace)
wiFullName := fmt.Sprintf(
"%s/meshes/%s/clusterProjects/%s/locations/%s/clusters/%s/workloadInstances/%s",
istioContainer, meshUID, clusterProject, clusterLocation, clusterName, uid,
)
wiEntity := entity{
gcpContainer,
"io.istio.WorkloadInstance",
wiFullName,
clusterLocation,
[4]string{meshUID, clusterProject, clusterName, uid},
}
ownerFullName := fmt.Sprintf(
"%s/meshes/%s/clusterProjects/%s/locations/%s/clusters/%s/owners/%s",
istioContainer, meshUID, clusterProject, clusterLocation, clusterName, ownerUID,
)
owner := entity{
gcpContainer,
"io.istio.Owner",
ownerFullName,
clusterLocation,
[4]string{meshUID, clusterProject, clusterName, ownerUID},
}
workloadFullName := fmt.Sprintf(
"%s/meshes/%s/workloads/%s/%s",
istioContainer, meshUID, workloadNamespace, workloadName,
)
workload := entity{
gcpContainer,
"io.istio.Workload",
workloadFullName,
"global",
[4]string{meshUID, workloadNamespace, workloadName, ""},
}
clusterContainer := clusterContainer(wi.clusterProject, wi.clusterLocation, wi.clusterName)
var ownerK8sFullName string
t := strings.Split(wi.owner, "/")
if len(t) >= 3 && t[0] == "kubernetes:" {
var name, namespace, typeName string
t = t[2:]
switch {
case len(t) >= 6 && t[0] == "apis" && t[1] == "v1":
// pods, RC
namespace = t[3]
name = t[5]
typeName = t[4]
case len(t) >= 7 && t[0] == "apis" && (t[1] == "extensions" || t[1] == "apps" || t[1] == "batch"):
// cronjobs, jobs, daemonsets, deployments, replicasets, statefulsets
namespace = t[4]
name = t[6]
typeName = t[1] + "/" + t[5]
}
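// For illustration (hypothetical owner strings): an owner of
// "kubernetes://apis/v1/namespaces/default/replicationcontrollers/rc1" parses
// to namespace "default", typeName "replicationcontrollers", name "rc1";
// "kubernetes://apis/apps/v1/namespaces/prod/deployments/web" parses to
// namespace "prod", typeName "apps/deployments", name "web".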
if name != "" {
ownerK8sFullName = fmt.Sprintf("%s/k8s/namespaces/%s/%s/%s",
clusterContainer, namespace, typeName, name)
} else {
logger.Warningf("Couldn't parse owner into k8s obj: %s", wi.owner)
}
} else if wi.owner != "unknown" {
|
// TODO: Non-k8s owners.
// "kubernetes://istio-pilot-65d79b966c-xnbx8.istio-system"
var wiK8sFullName string
t = strings.Split(wi.uid, "/")
if len(t) == 3 && t[0] == "kubernetes:" {
nameNs := strings.Split(t[2], ".")
if len(nameNs) == 2 {
namespace, name := nameNs[1], nameNs[0]
wiK8sFullName = fmt.Sprintf("%s/k8s/namespaces/%s/pods/%s", clusterContainer, namespace, name)
} else {
logger.Warningf("Unknown workload instance type: %s", wi.uid)
}
} else if wi.uid != "Unknown" {
logger.Warningf("Unknown workload instance type: %s", wi.uid)
}
// TODO: Non-k8s workload instances
edges := []edge{
{workloadFullName, ownerFullName, membershipTypeName},
{ownerFullName, wiFullName, membershipTypeName},
}
if ownerK8sFullName != "" {
edges = append(edges, edge{ownerFullName, ownerK8sFullName, membershipTypeName})
}
if wiK8sFullName != "" {
edges = append(edges, edge{wiFullName, wiK8sFullName, membershipTypeName})
}
return []entity{wiEntity, owner, workload}, edges
}
func (s service) Reify() entity {
return entity{
containerFullName: fmt.Sprintf("//cloudresourcemanager.googleapis.com/projects/%s",
s.istioProject),
typeName: "io.istio.Service",
fullName: fmt.Sprintf("//istio.io/projects/%s/meshes/%s/services/%s/%s",
s.istioProject,
url.QueryEscape(s.meshUID),
url.QueryEscape(s.namespace),
url.QueryEscape(s.name)),
location: "global",
shortNames: [4]string{
url.QueryEscape(s.meshUID),
url.QueryEscape(s.namespace),
url.QueryEscape(s.name),
"",
},
}
}
type trafficAssertion struct {
source, destination workloadInstance
contextProtocol, apiProtocol string
destinationService service
timestamp time.Time
}
func (t trafficAssertion) Reify(logger adapter.Logger) ([]entity, []edge) {
commType, ok := protocolMap[t.contextProtocol]
if !ok {
if commType, ok = protocolMap[t.apiProtocol]; !ok {
logger.Warningf("Unknown type of protocol: %s", t.apiProtocol)
}
}
serviceEntity := t.destinationService.Reify()
entities, edges := t.source.Reify(logger)
var sourceFullNames []string
for _, entity := range entities {
sourceFullNames = append(sourceFullNames, entity.fullName)
if len(commType) > 0 {
edges = append(edges, edge{entity.fullName, serviceEntity.fullName, commType})
}
}
destEntities, destEdges := t.destination.Reify(logger)
entities = append(entities, destEntities...)
edges = append(edges, destEdges...)
var k8sSvc string
for _, entity := range destEntities {
if len(commType) > 0 {
for _, s := range sourceFullNames {
edges = append(edges, edge{s, entity.fullName, commType})
}
edges = append(edges, edge{serviceEntity.fullName, entity.fullName, commType})
if k8sSvc == "" {
k8sSvc = k8sSvcFullname(entity.shortNames[1], entity.location, entity.shortNames[2], t.destinationService.namespace, t.destinationService.name)
}
}
}
entities = append(entities, serviceEntity)
if len(k8sSvc) > 0 {
edges = append(edges, edge{serviceEntity.fullName, k8sSvc, membershipTypeName})
}
return entities, edges
}
// example: //container.googleapis.com/projects/<project>/locations/us-central1-a/clusters/<cluster>/k8s/namespaces/default/services/<service>
func k8sSvcFullname(project, location, cluster, namespace, name string) string {
return fmt.Sprintf("%s/k8s/namespaces/%s/services/%s", clusterContainer(project, location, cluster), namespace, name)
}
func clusterContainer(project, location, cluster string) string {
// TODO: Figure out what the container is for non-GCE clusters.
locType := "locations"
if strings.Count(location, "-") == 2 {
locType = "zones"
}
return fmt.Sprintf("//container.googleapis.com/projects/%s/%s/%s/clusters/%s", project, locType, location, cluster)
}
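// For illustration (hypothetical names): a zone like "us-central1-a" contains
// two dashes, so:
//
//	clusterContainer("proj", "us-central1-a", "c1")
//	// => "//container.googleapis.com/projects/proj/zones/us-central1-a/clusters/c1"
//	clusterContainer("proj", "us-central1", "c1")
//	// => "//container.googleapis.com/projects/proj/locations/us-central1/clusters/c1"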
type entity struct {
containerFullName string
typeName string
fullName string
location string
// N.B. map keys can only have arrays, not slices.
// 4 is enough for all our entity types.
shortNames [4]string
}
type edge struct {
sourceFullName, destinationFullName string
typeName string
}
|
logger.Warningf("Unknown owner type: %s", wi.owner)
}
|
relative.d.ts
|
export declare function load(): void;
| ||
func_graph.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FuncGraph and related functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import weakref
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.eager.graph_only_ops import graph_placeholder
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework.auto_control_deps import AutomaticControlDependencies
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.lazy_loader import LazyLoader
# This is to avoid a circular dependency:
# function -> func_graph
function = LazyLoader("function", globals(),
"tensorflow.python.eager.function")
def_function = LazyLoader(
"def_function", globals(),
"tensorflow.python.eager.def_function")
WHITELIST_COLLECTIONS = [
ops.GraphKeys.GLOBAL_VARIABLES,
ops.GraphKeys.LOCAL_VARIABLES,
ops.GraphKeys.TRAINABLE_VARIABLES,
variable_scope._VARSTORE_KEY, # pylint: disable=protected-access
variable_scope._VARSCOPESTORE_KEY # pylint: disable=protected-access
]
class FuncGraph(ops.Graph):
"""Graph representing a function body.
Attributes:
name: The name of the function.
inputs: Placeholder tensors representing the inputs to this function. The
tensors are in this FuncGraph. This represents "regular" inputs as well as
captured inputs (i.e. the values of self.captures), with the regular
inputs coming first.
outputs: Tensors that will be returned by this function. The tensors are in
this FuncGraph.
structured_outputs: A possibly-nested python object which will be returned
by this function. The Tensors in this structure are the same as those of
self.outputs. Note that this structure might contain Python `None`s.
variables: Variables that should be watched during function execution.
outer_graph: The graph this function is defined in. May be another FuncGraph
or the global default Graph.
captures: Maps external tensor -> internal tensor (i.e. input placeholder).
The entries are in the order they were captured.
seed: The graph-level random seed.
"""
def
|
(self, name, read_only_collections=True):
"""Construct a new FuncGraph.
The graph will inherit its graph key, collections, seed, and distribution
strategy stack from the current context or graph.
Args:
name: the name of the function.
read_only_collections: whether to not write function graph collections
back to default graph. Defaults to True.
"""
super(FuncGraph, self).__init__()
self.name = name
self.inputs = []
self.outputs = []
self.structured_outputs = None
self._read_only_collections = read_only_collections
self._weak_variables = []
self.outer_graph = ops.get_default_graph()
self.captures = collections.OrderedDict()
self._building_function = True
# Map from resource tensor name to last op (in program order) which uses
# this tensor. Used to enforce that execution order matches program order
# for resource tensors.
self._last_op_using_resource_tensor = {}
graph = self.outer_graph
# pylint: disable=protected-access
# TODO(b/112906995, nareshmodi): distribution strategy depends on inheriting
# this stack from the default graph even in eager mode. Maybe it should be
# part of the eager context? This would also allow us to remove a
# get_default_graph() call from the function cache lookup.
self._distribution_strategy_stack = list(graph._distribution_strategy_stack)
# We ignore device placements from any outer scopes while tracing the
# function when possible, to avoid hard-coding them in the function
# graph. "Default" placements come from the PartitionedCallOp's placement,
# so that the same trace of the Python function may be placed on several
# different devices and saved functions may be placed on new devices when
# restored.
if context.executing_eagerly():
self.seed = context.global_seed()
self._xla_compile = (context.context().device_spec.device_type == "TPU")
if self._distribution_strategy_stack or self._xla_compile:
self._add_device_to_stack(context.context().device_name)
else:
self.seed = graph.seed
self._xla_compile = getattr(graph, "_xla_compile", False)
# TODO(allenl): Figure out if we can remove colocation stack
# specialization (currently used in cond_v2), here and in the cache key.
self._colocation_stack = graph._colocation_stack.copy()
if (self._distribution_strategy_stack
or self._xla_compile
or device_stack_has_callable(graph._device_function_stack)):
# Hard-code devices from device functions in the function body
self._device_function_stack = graph._device_function_stack.copy()
if not self._read_only_collections:
self._collections = graph._collections
else:
for collection_name in graph.get_all_collection_keys():
if collection_name not in WHITELIST_COLLECTIONS:
self._collections[collection_name] = graph.get_collection(
collection_name)
for collection_name in WHITELIST_COLLECTIONS:
self._collections[collection_name] = graph.get_collection_ref(
collection_name)
self._variable_creator_stack = graph._variable_creator_stack
# Inherit the graph key, since this is used for matching variables in
# optimizers.
self._graph_key = graph._graph_key
# pylint: enable=protected-access
@property
def variables(self):
"""A list of variables accessed by this FuncGraph.
Note that functions keep only weak references to variables. Calling the
function after a variable it accesses has been deleted is an error.
Yields:
Strong references to variables accessed by this FuncGraph.
"""
for weak_v in self._weak_variables:
v = weak_v()
if v is None:
raise AssertionError(
"Called a function referencing variables which have been deleted. "
"This likely means that function-local variables were created and "
"not referenced elsewhere in the program. This is generally a "
"mistake; consider storing variables in an object attribute on "
"first call.")
yield v
@variables.setter
def variables(self, var_list):
self._weak_variables = [weakref.ref(v) for v in var_list]
def create_op(
self,
op_type,
inputs,
dtypes,
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_shapes=True,
compute_device=True):
"""Like Graph.create_op, except handles external input tensors.
This overload adds functionality to create_op to "capture" any external
input tensors, i.e. tensors from the eager context or outer function graphs
if this is a nested function. See `capture` for more information.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: A list of `DType` objects that will be the types of the tensors
that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of
the tensors that the operation consumes. By default, uses the base
`DType` of each input in `inputs`. Operations that expect
reference-typed inputs must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always
computed).
compute_device: (Optional.) If True, device functions will be executed
to compute the device property of the Operation.
Returns:
An `Operation` object.
"""
# This capturing logic interacts poorly with control flow contexts which
# want to replace inputs of ops far too late in the process. This can lead
# the context to get confused and try to create an Enter for an Enter. We
# can detect this here and skip the additional Enter which can confuse loop
# validation logic.
if op_type == "Enter" and inputs[0].op.type == "Enter":
if inputs[0].op.get_attr("frame_name") == attrs["frame_name"].s:
return inputs[0].op
# Calling AddValue on the control flow contexts to force creation of the
# backward accumulators in the original graph before we create placeholders
# to capture the inputs.
ctxt = ops.get_default_graph()._control_flow_context # pylint: disable=protected-access
for i, inp in enumerate(inputs):
# TPU Estimator defines a control flow context with no AddValue method.
if ctxt is not None and hasattr(ctxt, "AddValue"):
inp = ctxt.AddValue(inp)
inp = self.capture(inp)
inputs[i] = inp
return super(FuncGraph, self).create_op(
op_type, inputs, dtypes, input_types, name, attrs, op_def,
compute_device=compute_device)
def capture(self, tensor, name=None):
"""Captures `tensor` if it's external to this graph.
If `tensor` is from a different graph, returns a placeholder for it.
`tensor` and the placeholder will appear in self.captures, and the
placeholder will appear in self.inputs. Multiple calls to this method with
the same `tensor` argument will return the same placeholder. If `tensor` is
from this graph, returns `tensor`.
Args:
tensor: Tensor. May be from this FuncGraph or a different graph.
name: Optional name if a placeholder is created.
Returns:
Tensor from this FuncGraph.
"""
if isinstance(tensor, ops.EagerTensor):
if name is None:
name = str(ops.uid())
return self._capture_helper(tensor, name)
if tensor.graph is not self:
if name is None:
name = tensor.op.name
return self._capture_helper(tensor, name)
return tensor
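# Illustrative note (not from the original source): capturing the same
# external tensor twice returns the identical placeholder, so
#   graph.capture(t) is graph.capture(t)
# holds for any external tensor t.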
def _capture_helper(self, tensor, name):
captured_tensor = self.captures.get(tensor, None)
if captured_tensor is None:
captured_tensor = _create_substitute_placeholder(tensor, name=name,
dtype=tensor.dtype)
self.captures[tensor] = captured_tensor
self.inputs.append(captured_tensor)
tape.record_operation("captured_value", [captured_tensor], [tensor],
lambda x: [x])
return captured_tensor
@property
def external_captures(self):
"""External tensors captured by this function."""
return list(self.captures.keys())
@property
def internal_captures(self):
"""Placeholders in this function corresponding captured tensors."""
return list(self.captures.values())
def func_graph_from_py_func(name,
python_func,
args,
kwargs,
signature=None,
func_graph=None,
autograph=False,
add_control_dependencies=True,
arg_names=None,
op_return_value=None):
"""Returns a `FuncGraph` generated from `python_func`.
Args:
name: an identifier for the function.
python_func: the Python function to trace.
args: the positional args with which the Python function should be called;
ignored if a signature is provided.
kwargs: the keyword args with which the Python function should be called;
ignored if a signature is provided.
signature: a possibly nested sequence of `TensorSpecs` specifying the shapes
and dtypes of the arguments. When a signature is provided, `args` and
`kwargs` are ignored, and `python_func` is traced with Tensors conforming
to `signature`. If `None`, the shapes and dtypes are inferred from the
inputs.
func_graph: Optional. An instance of FuncGraph. If provided, we will use
this graph else a new one is built and returned.
autograph: whether to use autograph to compile `python_func`.
See https://www.tensorflow.org/guide/autograph for more information.
add_control_dependencies: If True, automatically adds control dependencies
to ensure program order matches execution order and stateful ops always
execute.
arg_names: Optional list of argument names, used to give input placeholders
recognizable names.
op_return_value: Optional. A Tensor. If set and `python_func` returns
Operations, those return values will be replaced with this value. If not
set, returning an Operation triggers an error.
Returns:
A FuncGraph.
Raises:
TypeError: If any of `python_func`'s return values is neither `None` nor a
`Tensor`.
"""
if op_return_value is not None:
assert isinstance(op_return_value, ops.Tensor), op_return_value
if func_graph is None:
func_graph = FuncGraph(name)
assert isinstance(func_graph, FuncGraph)
if add_control_dependencies:
control_manager = AutomaticControlDependencies
else:
control_manager = ops.NullContextmanager
with func_graph.as_default(), control_manager() as a:
current_scope = variable_scope.get_variable_scope()
default_use_resource = current_scope.use_resource
current_scope.set_use_resource(True)
if signature is not None:
args = signature
kwargs = {}
# Creates and names placeholders for all arguments.
func_args = _get_defun_inputs_from_args(args, arg_names)
func_kwargs = _get_defun_inputs_from_kwargs(kwargs)
# Note: `nest.flatten` sorts by keys, as does `_deterministic_dict_values`.
# Variables to help check whether mutation happens in calling the function
# Copy the recursive list, tuple and map structure, but not base objects
func_args_before = nest.pack_sequence_as(func_args, nest.flatten(func_args))
func_kwargs_before = nest.pack_sequence_as(
func_kwargs, nest.flatten(func_kwargs))
def convert(x):
"""Converts a function output to a Tensor."""
if x is None:
return None
if op_return_value is not None and isinstance(x, ops.Operation):
# TODO(b/79881896): we currently can't capture external control deps, so
# this won't work if x needs to be captured (i.e. if python_func returns
# captured Operations).
with ops.control_dependencies([x]):
x = array_ops.identity(op_return_value)
elif not isinstance(x, tensor_array_ops.TensorArray):
try:
x = ops.convert_to_tensor_or_indexed_slices(x)
except (ValueError, TypeError):
raise TypeError(
"To be compatible with tf.contrib.eager.defun, Python functions "
"must return zero or more Tensors; in compilation of %s, found "
"return value of type %s, which is not a Tensor." %
(str(python_func), type(x)))
if add_control_dependencies:
x = a.mark_as_return(x)
return x
this_tape = tape.push_new_tape()
try:
if autograph:
from tensorflow.python import autograph # pylint: disable=g-import-not-at-top
_, original_func = tf_decorator.unwrap(python_func)
def wrapper(*args, **kwargs):
return autograph.converted_call(
original_func, None,
autograph.ConversionOptions(
verbose=autograph.Verbosity.BRIEF,
recursive=True,
strip_decorators=(def_function.function,),
optional_features=(),
), *args, **kwargs)
# Wrapping around a decorator allows checks like tf_inspect.getargspec
# to be accurate.
converted_func = tf_decorator.make_decorator(original_func, wrapper)
tf_decorator.rewrap(python_func, original_func, converted_func)
func_outputs = python_func(*func_args, **func_kwargs)
# invariant: `func_outputs` contains only Tensors, IndexedSlices,
# SparseTensors, TensorArrays and `None`s.
func_outputs = nest.map_structure(convert, func_outputs)
check_mutation(func_args_before, func_args)
check_mutation(func_kwargs_before, func_kwargs)
finally:
tape.pop_tape(this_tape)
current_scope.set_use_resource(default_use_resource)
# Variables in `func_args`, `func_kwargs` should be explicit inputs
# to the function, not captured inputs.
tape_variables = this_tape.watched_variables()
arg_variables = set()
inputs = []
for arg in nest.flatten(func_args) + nest.flatten(func_kwargs):
if isinstance(arg, resource_variable_ops.ResourceVariable):
# Even if an argument variable was not used in the function, we've
# already manually captured the resource Tensor when creating argument
# placeholders.
resource_placeholder = func_graph.captures.pop(arg.handle)
arg_variables.add(arg)
inputs.append(resource_placeholder)
elif isinstance(arg, ops.Tensor):
inputs.append(arg)
variables = [v for v in tape_variables if v not in arg_variables]
func_graph.inputs = inputs + list(func_graph.captures.values())
func_graph.structured_outputs = func_outputs
# Returning a closed-over tensor does not trigger convert_to_tensor.
func_graph.outputs.extend(
func_graph.capture(x)
for x in flatten(func_graph.structured_outputs)
if x is not None)
func_graph.variables = variables
# Register any other functions defined in the graph.
with ops.init_scope():
if context.executing_eagerly():
for f in func_graph._functions.values(): # pylint: disable=protected-access
# TODO(ashankar): What about the gradient registry?
context.add_function(f._c_func.func) # pylint: disable=protected-access
return func_graph
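# Hedged usage sketch (commented out; `constant_op` is assumed to come from
# tensorflow.python.framework and is not imported by this module):
#
#   def two_x_plus_y(x, y):
#     return 2 * x + y
#
#   graph = func_graph_from_py_func(
#       "two_x_plus_y", two_x_plus_y,
#       args=(constant_op.constant(1.0), constant_op.constant(2.0)), kwargs={})
#   # graph.inputs holds the argument placeholders; graph.outputs holds the
#   # traced result tensor.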
def maybe_captured(tensor):
"""If t is a captured value placeholder, returns the original captured value.
Args:
tensor: Tensor.
Returns:
A tensor, potentially from a different Graph/FuncGraph.
"""
if (not isinstance(tensor, ops.EagerTensor) and
tensor.op.graph.building_function and tensor.op.type == "Placeholder"):
for input_t, placeholder_t in tensor.op.graph.captures.items():
if tensor == placeholder_t:
return maybe_captured(input_t)
# pylint: enable=protected-access
return tensor
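# Capture-chain sketch (hypothetical names): if a FuncGraph captured an outer
# tensor `t`, the graph records {t: placeholder_t}; maybe_captured(placeholder_t)
# follows that mapping back (recursively, across nested FuncGraphs) to `t`.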
def device_stack_has_callable(device_stack):
"""Checks whether a device stack contains a callable."""
return any(callable(spec._device_name_or_function) # pylint: disable=protected-access
for spec in device_stack.peek_objs())
def check_mutation(n1, n2):
"""Check if two list of arguments are exactly the same."""
errmsg = ("Function to be traced should not modify structure of input "
"arguments. Check if your function has list and dictionary "
"operations that alter input arguments, "
"such as `list.pop`, `list.append`")
try:
nest.assert_same_structure(n1, n2)
except ValueError:
raise ValueError(errmsg)
for arg1, arg2 in zip(nest.flatten(n1), nest.flatten(n2)):
if arg1 is not arg2:
raise ValueError(errmsg)
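# Sketch of a mutation this guards against (hypothetical python_func): a
# function doing `args[0].append(x)` changes the flattened structure of its
# inputs between the snapshot taken before tracing and the one taken after,
# so check_mutation(func_args_before, func_args) raises the ValueError above.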
def flatten(sequence):
"""Like `nest.flatten` but also unpacks other Tensor-like objects.
Flattens non-tensor objects into their constituent tensors.
Args:
sequence: A nested structure of Tensors, IndexedSlices, SparseTensors and
TensorArrays.
Returns:
A list of tensors.
"""
# TODO(akshayka): Support `SparseTensor` in a similar fashion.
flat_sequence = nest.flatten(sequence)
outputs = []
for item in flat_sequence:
if isinstance(item, ops.IndexedSlices):
if item.dense_shape is not None:
outputs.extend([item.values, item.indices, item.dense_shape])
else:
outputs.extend([item.values, item.indices])
elif isinstance(item, sparse_tensor.SparseTensor):
outputs.extend([item.indices, item.values, item.dense_shape])
elif isinstance(item, tensor_array_ops.TensorArray):
outputs.append(item.flow)
else:
outputs.append(item)
return outputs
def pack_sequence_as(structure, flat_sequence):
"""Like `nest.pack_sequence_as` but also packs other Tensor-like objects.
Args:
structure: The structure to pack into. May contain Tensors, IndexedSlices,
TensorArrays or SparseTensors.
flat_sequence: An iterable containing tensors.
Returns:
A nested structure.
Raises:
AssertionError if `structure` and `flat_sequence` are not compatible.
"""
flattened_structure = nest.flatten(structure)
flat_sequence_with_slices_and_tas = []
index = 0
for t in flattened_structure:
if isinstance(t, ops.IndexedSlices):
if t.dense_shape is not None:
flat_sequence_with_slices_and_tas.append(
ops.IndexedSlices(*flat_sequence[index:index + 3]))
index += 3
else:
flat_sequence_with_slices_and_tas.append(
ops.IndexedSlices(*flat_sequence[index:index + 2]))
index += 2
elif isinstance(t, sparse_tensor.SparseTensor):
flat_sequence_with_slices_and_tas.append(
sparse_tensor.SparseTensor(*flat_sequence[index:index + 3]))
index += 3
elif isinstance(t, tensor_array_ops.TensorArray):
flow = flat_sequence[index]
ta = tensor_array_ops.build_ta_with_new_flow(t, flow)
flat_sequence_with_slices_and_tas.append(ta)
index += 1
else:
flat_sequence_with_slices_and_tas.append(flat_sequence[index])
index += 1
assert len(flattened_structure) == len(flat_sequence_with_slices_and_tas)
return nest.pack_sequence_as(structure, flat_sequence_with_slices_and_tas)
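# Round-trip sketch: an IndexedSlices with a dense_shape flattens to three
# tensors, and pack_sequence_as rebuilds it from the same flat list
# (hypothetical tensors):
#   slices = ops.IndexedSlices(values, indices, dense_shape)
#   flat = flatten([slices])                   # [values, indices, dense_shape]
#   [restored] = pack_sequence_as([slices], flat)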
def _create_substitute_placeholder(value, name=None, dtype=None):
"""Creates a placeholder for `value` and propagates shape info to it."""
# Note: setting ops.control_dependencies(None) ensures we always put
# capturing placeholders outside of any control flow context.
with ops.control_dependencies(None):
placeholder = graph_placeholder(
dtype=dtype or value.dtype, shape=value.shape, name=name)
custom_gradient.copy_handle_data(value, placeholder)
return placeholder
def _get_defun_inputs_from_args(args, names):
"""Maps Python function positional args to graph-construction inputs."""
return _get_defun_inputs(args, names, structure=args)
def _get_defun_inputs(flat_args, names, structure):
"""Maps python function args to graph-construction inputs.
Args:
flat_args: A flat list of user-specified arguments.
names: A list of strings with user-specified argument names, same length as
`flat_args`. May be `None`, in which case a generic name is used.
structure: The original argument list or dictionary.
Returns:
Placeholders with the same structure as `structure`.
"""
func_graph = ops.get_default_graph()
function_inputs = []
if names is None:
names = [None] * len(flat_args)
for arg_value, name in zip(flat_args, names):
for arg in nest.flatten(arg_value):
if isinstance(arg, (ops.Tensor, tensor_spec.TensorSpec)):
if isinstance(arg, tensor_spec.TensorSpec) and arg.name:
requested_name = arg.name
else:
requested_name = name
placeholder = graph_placeholder(
arg.dtype, arg.shape,
name=requested_name)
if name is not None:
# Record the requested/user-specified name in case it's different than
# the uniquified name, for validation when exporting signatures.
placeholder.op._set_attr( # pylint: disable=protected-access
"_user_specified_name",
attr_value_pb2.AttrValue(s=compat.as_bytes(requested_name)))
function_inputs.append(placeholder)
elif isinstance(arg, resource_variable_ops.ResourceVariable):
# Capture arg variables to create placeholders for them. These will be
# removed as captures after the function is traced (since otherwise we'd
# just add it back with a new placeholder when the variable was
# referenced).
placeholder = func_graph.capture(arg.handle, name=name)
placeholder.op._set_attr( # pylint: disable=protected-access
"_user_specified_name",
attr_value_pb2.AttrValue(s=compat.as_bytes(name)))
function_inputs.append(arg)
else:
function_inputs.append(arg)
return nest.pack_sequence_as(structure, function_inputs)
def _get_defun_inputs_from_kwargs(kwargs):
"""Maps Python function keyword args to graph-construction inputs."""
if kwargs:
names, flat_args = zip(*sorted(kwargs.items()))
else:
names = []
flat_args = []
return _get_defun_inputs(flat_args, names, structure=kwargs)
|
__init__
|
v1beta1_stateful_set_status.py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.10.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
from aiokubernetes.models.v1beta1_stateful_set_condition import V1beta1StatefulSetCondition # noqa: F401,E501
class V1beta1StatefulSetStatus(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'collision_count': 'int',
'conditions': 'list[V1beta1StatefulSetCondition]',
'current_replicas': 'int',
'current_revision': 'str',
'observed_generation': 'int',
'ready_replicas': 'int',
'replicas': 'int',
'update_revision': 'str',
'updated_replicas': 'int'
}
attribute_map = {
'collision_count': 'collisionCount',
'conditions': 'conditions',
'current_replicas': 'currentReplicas',
'current_revision': 'currentRevision',
'observed_generation': 'observedGeneration',
'ready_replicas': 'readyReplicas',
'replicas': 'replicas',
'update_revision': 'updateRevision',
'updated_replicas': 'updatedReplicas'
}
def __init__(self, collision_count=None, conditions=None, current_replicas=None, current_revision=None, observed_generation=None, ready_replicas=None, replicas=None, update_revision=None, updated_replicas=None): # noqa: E501
"""V1beta1StatefulSetStatus - a model defined in Swagger""" # noqa: E501
self._collision_count = None
self._conditions = None
self._current_replicas = None
self._current_revision = None
self._observed_generation = None
self._ready_replicas = None
self._replicas = None
self._update_revision = None
self._updated_replicas = None
self.discriminator = None
if collision_count is not None:
self.collision_count = collision_count
if conditions is not None:
self.conditions = conditions
if current_replicas is not None:
self.current_replicas = current_replicas
if current_revision is not None:
self.current_revision = current_revision
if observed_generation is not None:
self.observed_generation = observed_generation
if ready_replicas is not None:
self.ready_replicas = ready_replicas
self.replicas = replicas
if update_revision is not None:
self.update_revision = update_revision
if updated_replicas is not None:
self.updated_replicas = updated_replicas
@property
def collision_count(self):
"""Gets the collision_count of this V1beta1StatefulSetStatus. # noqa: E501
collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision. # noqa: E501
:return: The collision_count of this V1beta1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._collision_count
@collision_count.setter
def collision_count(self, collision_count):
"""Sets the collision_count of this V1beta1StatefulSetStatus.
collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision. # noqa: E501
:param collision_count: The collision_count of this V1beta1StatefulSetStatus. # noqa: E501
:type: int
"""
self._collision_count = collision_count
@property
def conditions(self):
"""Gets the conditions of this V1beta1StatefulSetStatus. # noqa: E501
Represents the latest available observations of a statefulset's current state. # noqa: E501
:return: The conditions of this V1beta1StatefulSetStatus. # noqa: E501
:rtype: list[V1beta1StatefulSetCondition]
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""Sets the conditions of this V1beta1StatefulSetStatus.
Represents the latest available observations of a statefulset's current state. # noqa: E501
:param conditions: The conditions of this V1beta1StatefulSetStatus. # noqa: E501
:type: list[V1beta1StatefulSetCondition]
"""
self._conditions = conditions
@property
def current_replicas(self):
"""Gets the current_replicas of this V1beta1StatefulSetStatus. # noqa: E501
currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision. # noqa: E501
:return: The current_replicas of this V1beta1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._current_replicas
@current_replicas.setter
def current_replicas(self, current_replicas):
"""Sets the current_replicas of this V1beta1StatefulSetStatus.
currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision. # noqa: E501
:param current_replicas: The current_replicas of this V1beta1StatefulSetStatus. # noqa: E501
:type: int
"""
self._current_replicas = current_replicas
@property
def current_revision(self):
"""Gets the current_revision of this V1beta1StatefulSetStatus. # noqa: E501
currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas). # noqa: E501
:return: The current_revision of this V1beta1StatefulSetStatus. # noqa: E501
:rtype: str
"""
return self._current_revision
@current_revision.setter
def current_revision(self, current_revision):
"""Sets the current_revision of this V1beta1StatefulSetStatus.
currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas). # noqa: E501
:param current_revision: The current_revision of this V1beta1StatefulSetStatus. # noqa: E501
:type: str
"""
self._current_revision = current_revision
@property
def observed_generation(self):
"""Gets the observed_generation of this V1beta1StatefulSetStatus. # noqa: E501
observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server. # noqa: E501
:return: The observed_generation of this V1beta1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._observed_generation
@observed_generation.setter
def observed_generation(self, observed_generation):
"""Sets the observed_generation of this V1beta1StatefulSetStatus.
observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server. # noqa: E501
:param observed_generation: The observed_generation of this V1beta1StatefulSetStatus. # noqa: E501
:type: int
"""
self._observed_generation = observed_generation
@property
def ready_replicas(self):
"""Gets the ready_replicas of this V1beta1StatefulSetStatus. # noqa: E501
readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. # noqa: E501
:return: The ready_replicas of this V1beta1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._ready_replicas
@ready_replicas.setter
def ready_replicas(self, ready_replicas):
"""Sets the ready_replicas of this V1beta1StatefulSetStatus.
readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. # noqa: E501
:param ready_replicas: The ready_replicas of this V1beta1StatefulSetStatus. # noqa: E501
:type: int
"""
self._ready_replicas = ready_replicas
@property
def replicas(self):
"""Gets the replicas of this V1beta1StatefulSetStatus. # noqa: E501
replicas is the number of Pods created by the StatefulSet controller. # noqa: E501
:return: The replicas of this V1beta1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""Sets the replicas of this V1beta1StatefulSetStatus.
replicas is the number of Pods created by the StatefulSet controller. # noqa: E501
:param replicas: The replicas of this V1beta1StatefulSetStatus. # noqa: E501
:type: int
"""
if replicas is None:
raise ValueError("Invalid value for `replicas`, must not be `None`") # noqa: E501
self._replicas = replicas
@property
def update_revision(self):
"""Gets the update_revision of this V1beta1StatefulSetStatus. # noqa: E501
updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas) # noqa: E501
:return: The update_revision of this V1beta1StatefulSetStatus. # noqa: E501
:rtype: str
"""
return self._update_revision
@update_revision.setter
def update_revision(self, update_revision):
"""Sets the update_revision of this V1beta1StatefulSetStatus.
updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas) # noqa: E501
:param update_revision: The update_revision of this V1beta1StatefulSetStatus. # noqa: E501
:type: str
"""
self._update_revision = update_revision
@property
def updated_replicas(self):
"""Gets the updated_replicas of this V1beta1StatefulSetStatus. # noqa: E501
updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision. # noqa: E501
:return: The updated_replicas of this V1beta1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._updated_replicas
@updated_replicas.setter
def updated_replicas(self, updated_replicas):
"""Sets the updated_replicas of this V1beta1StatefulSetStatus.
updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision. # noqa: E501
:param updated_replicas: The updated_replicas of this V1beta1StatefulSetStatus. # noqa: E501
:type: int
"""
self._updated_replicas = updated_replicas
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in self.swagger_types.items():
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1StatefulSetStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
tests1.rs
|
// tests1.rs
// Tests are important to ensure that your code does what you think it should do.
// Tests can be run on this file with the following command:
// rustlings run tests1
// This test has a problem with it -- make the test compile! Make the test
// pass! Make the test fail! Execute `rustlings hint tests1` for hints :)
// I AM NOT DONE
#[cfg(test)]
mod tests {
#[test]
fn you_can_assert() {
assert!();
}
}
|
win_handler.rs
|
// Copyright 2019 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The implementation of the WinHandler trait (druid-shell integration).
use std::any::{Any, TypeId};
use std::cell::RefCell;
use std::collections::{HashMap, VecDeque};
use std::rc::Rc;
use crate::kurbo::{Rect, Size};
use crate::piet::Piet;
use crate::shell::{Application, IdleToken, MouseEvent, Scale, WinHandler, WindowHandle};
use crate::app_delegate::{AppDelegate, DelegateCtx};
use crate::core::CommandQueue;
use crate::ext_event::ExtEventHost;
use crate::menu::ContextMenu;
use crate::window::Window;
use crate::{
Command, Data, Env, Event, InternalEvent, KeyEvent, MenuDesc, Target, TimerToken, WindowDesc,
WindowId,
};
use crate::command::sys as sys_cmd;
pub(crate) const RUN_COMMANDS_TOKEN: IdleToken = IdleToken::new(1);
/// A token we are called back with if an external event was submitted.
pub(crate) const EXT_EVENT_IDLE_TOKEN: IdleToken = IdleToken::new(2);
/// The struct implements the druid-shell `WinHandler` trait.
///
/// One `DruidHandler` exists per window.
///
/// This is something of an internal detail and possibly we don't want to surface
/// it publicly.
pub struct DruidHandler<T> {
/// The shared app state.
app_state: AppState<T>,
/// The id for the current window.
window_id: WindowId,
}
/// The top level event handler.
///
/// This corresponds to the `AppHandler` trait in druid-shell, which is only
/// used to handle events that are not associated with a window.
///
/// Currently, this means only menu items on macOS when no window is open.
pub(crate) struct AppHandler<T> {
app_state: AppState<T>,
}
/// State shared by all windows in the UI.
#[derive(Clone)]
pub(crate) struct AppState<T> {
inner: Rc<RefCell<Inner<T>>>,
}
struct Inner<T> {
app: Application,
delegate: Option<Box<dyn AppDelegate<T>>>,
command_queue: CommandQueue,
ext_event_host: ExtEventHost,
windows: Windows<T>,
/// the application-level menu, only set on macos and only if there
/// are no open windows.
root_menu: Option<MenuDesc<T>>,
pub(crate) env: Env,
pub(crate) data: T,
}
/// All active windows.
struct Windows<T> {
pending: HashMap<WindowId, WindowDesc<T>>,
windows: HashMap<WindowId, Window<T>>,
}
impl<T> Windows<T> {
fn connect(&mut self, id: WindowId, handle: WindowHandle) {
if let Some(pending) = self.pending.remove(&id) {
let win = Window::new(id, handle, pending);
assert!(self.windows.insert(id, win).is_none(), "duplicate window");
} else {
log::error!("no window for connecting handle {:?}", id);
}
}
fn add(&mut self, id: WindowId, win: WindowDesc<T>) {
assert!(self.pending.insert(id, win).is_none(), "duplicate pending");
}
fn remove(&mut self, id: WindowId) -> Option<Window<T>> {
self.windows.remove(&id)
}
fn iter_mut(&mut self) -> impl Iterator<Item = &'_ mut Window<T>> {
self.windows.values_mut()
}
fn get(&self, id: WindowId) -> Option<&Window<T>> {
self.windows.get(&id)
}
fn get_mut(&mut self, id: WindowId) -> Option<&mut Window<T>> {
self.windows.get_mut(&id)
}
fn count(&self) -> usize {
self.windows.len() + self.pending.len()
}
}
impl<T> AppHandler<T> {
pub(crate) fn new(app_state: AppState<T>) -> Self {
Self { app_state }
}
}
impl<T> AppState<T> {
pub(crate) fn new(
app: Application,
data: T,
env: Env,
delegate: Option<Box<dyn AppDelegate<T>>>,
ext_event_host: ExtEventHost,
) -> Self {
let inner = Rc::new(RefCell::new(Inner {
app,
delegate,
command_queue: VecDeque::new(),
root_menu: None,
ext_event_host,
data,
env,
windows: Windows::default(),
}));
AppState { inner }
}
pub(crate) fn app(&self) -> Application {
self.inner.borrow().app.clone()
}
}
impl<T: Data> Inner<T> {
fn get_menu_cmd(&self, window_id: Option<WindowId>, cmd_id: u32) -> Option<Command> {
match window_id {
Some(id) => self.windows.get(id).and_then(|w| w.get_menu_cmd(cmd_id)),
None => self
.root_menu
.as_ref()
.and_then(|m| m.command_for_id(cmd_id)),
}
}
fn append_command(&mut self, target: Target, cmd: Command) {
self.command_queue.push_back((target, cmd));
}
/// A helper fn for setting up the `DelegateCtx`. Takes a closure with
/// an arbitrary return type `R`, and returns `Some(R)` if an `AppDelegate`
/// is configured.
fn with_delegate<R, F>(&mut self, f: F) -> Option<R>
where
F: FnOnce(&mut Box<dyn AppDelegate<T>>, &mut T, &Env, &mut DelegateCtx) -> R,
{
let Inner {
ref mut delegate,
ref mut command_queue,
ref mut data,
ref env,
..
} = self;
let mut ctx = DelegateCtx {
command_queue,
app_data_type: TypeId::of::<T>(),
};
if let Some(delegate) = delegate {
Some(f(delegate, data, env, &mut ctx))
} else {
None
}
}
fn delegate_event(&mut self, id: WindowId, event: Event) -> Option<Event> {
if self.delegate.is_some() {
self.with_delegate(|del, data, env, ctx| del.event(ctx, id, event, data, env))
.unwrap()
} else {
Some(event)
}
}
fn delegate_cmd(&mut self, target: Target, cmd: &Command) -> bool {
self.with_delegate(|del, data, env, ctx| del.command(ctx, target, cmd, data, env))
.unwrap_or(true)
}
fn connect(&mut self, id: WindowId, handle: WindowHandle) {
self.windows.connect(id, handle);
// If the external event host has no handle, it cannot wake us
// when an event arrives.
if self.ext_event_host.handle_window_id.is_none() {
self.set_ext_event_idle_handler(id);
}
self.with_delegate(|del, data, env, ctx| del.window_added(id, data, env, ctx));
}
/// Called after this window has been closed by the platform.
///
    /// We clean up resources and notify the delegate, if necessary.
fn remove_window(&mut self, window_id: WindowId) {
self.with_delegate(|del, data, env, ctx| del.window_removed(window_id, data, env, ctx));
// when closing the last window:
if let Some(mut win) = self.windows.remove(window_id) {
if self.windows.windows.is_empty() {
// on mac we need to keep the menu around
self.root_menu = win.menu.take();
// If there are even no pending windows, we quit the run loop.
if self.windows.count() == 0 {
#[cfg(any(target_os = "windows", feature = "x11"))]
self.app.quit();
}
}
}
// if we are closing the window that is currently responsible for
// waking us when external events arrive, we want to pass that responsibility
// to another window.
if self.ext_event_host.handle_window_id == Some(window_id) {
self.ext_event_host.handle_window_id = None;
// find any other live window
let win_id = self.windows.windows.keys().find(|k| *k != &window_id);
if let Some(any_other_window) = win_id.cloned() {
self.set_ext_event_idle_handler(any_other_window);
}
}
}
/// Set the idle handle that will be used to wake us when external events arrive.
fn set_ext_event_idle_handler(&mut self, id: WindowId) {
if let Some(mut idle) = self
.windows
.get_mut(id)
.and_then(|win| win.handle.get_idle_handle())
{
if self.ext_event_host.has_pending_items() {
idle.schedule_idle(EXT_EVENT_IDLE_TOKEN);
}
self.ext_event_host.set_idle(idle, id);
}
}
    /// Requests that a window be closed; triggered by a menu item or other command.
    ///
    /// This doesn't close the window directly; it calls the close method on the
    /// platform window handle. The platform should close the window, and then
    /// call our handler's `destroy()` method, at which point we can do our cleanup.
fn request_close_window(&mut self, window_id: WindowId) {
if let Some(win) = self.windows.get_mut(window_id) {
win.handle.close();
}
}
/// Requests the platform to close all windows.
fn request_close_all_windows(&mut self) {
for win in self.windows.iter_mut() {
win.handle.close();
}
}
fn show_window(&mut self, id: WindowId) {
if let Some(win) = self.windows.get_mut(id) {
win.handle.bring_to_front_and_focus();
}
}
/// Returns `true` if an animation frame was requested.
fn paint(&mut self, window_id: WindowId, piet: &mut Piet, rect: Rect) -> bool {
if let Some(win) = self.windows.get_mut(window_id) {
win.do_paint(piet, rect, &mut self.command_queue, &self.data, &self.env);
if win.wants_animation_frame() {
win.handle.invalidate();
true
} else {
false
}
} else {
false
}
}
fn dispatch_cmd(&mut self, target: Target, cmd: Command) {
if !self.delegate_cmd(target, &cmd) {
return;
}
match target {
Target::Window(id) => {
// first handle special window-level events
if cmd.is(sys_cmd::SET_MENU) {
return self.set_menu(id, &cmd);
}
if cmd.is(sys_cmd::SHOW_CONTEXT_MENU) {
return self.show_context_menu(id, &cmd);
}
if let Some(w) = self.windows.get_mut(id) {
let event = Event::Command(cmd);
w.event(&mut self.command_queue, event, &mut self.data, &self.env);
}
}
// in this case we send it to every window that might contain
// this widget, breaking if the event is handled.
Target::Widget(id) => {
for w in self.windows.iter_mut().filter(|w| w.may_contain_widget(id)) {
let event =
Event::Internal(InternalEvent::TargetedCommand(id.into(), cmd.clone()));
if w.event(&mut self.command_queue, event, &mut self.data, &self.env) {
break;
}
}
}
Target::Global => {
for w in self.windows.iter_mut() {
let event = Event::Command(cmd.clone());
if w.event(&mut self.command_queue, event, &mut self.data, &self.env) {
break;
}
}
}
}
}
fn do_window_event(&mut self, source_id: WindowId, event: Event) -> bool {
match event {
Event::Command(..) | Event::Internal(InternalEvent::TargetedCommand(..)) => {
panic!("commands should be dispatched via dispatch_cmd");
}
_ => (),
}
// if the event was swallowed by the delegate we consider it handled?
let event = match self.delegate_event(source_id, event) {
Some(event) => event,
None => return true,
};
if let Some(win) = self.windows.get_mut(source_id) {
win.event(&mut self.command_queue, event, &mut self.data, &self.env)
} else {
false
}
}
fn set_menu(&mut self, window_id: WindowId, cmd: &Command) {
if let Some(win) = self.windows.get_mut(window_id) {
match cmd
.get_unchecked(sys_cmd::SET_MENU)
.downcast_ref::<MenuDesc<T>>()
{
Some(menu) => win.set_menu(menu.clone(), &self.data, &self.env),
None => panic!(
"{} command must carry a MenuDesc<application state>.",
sys_cmd::SET_MENU
),
}
}
}
fn show_context_menu(&mut self, window_id: WindowId, cmd: &Command) {
if let Some(win) = self.windows.get_mut(window_id) {
match cmd
.get_unchecked(sys_cmd::SHOW_CONTEXT_MENU)
.downcast_ref::<ContextMenu<T>>()
{
Some(ContextMenu { menu, location }) => {
win.show_context_menu(menu.to_owned(), *location, &self.data, &self.env)
}
None => panic!(
"{} command must carry a ContextMenu<application state>.",
sys_cmd::SHOW_CONTEXT_MENU
),
}
}
}
fn do_update(&mut self) {
// we send `update` to all windows, not just the active one:
for window in self.windows.iter_mut() {
window.update(&mut self.command_queue, &self.data, &self.env);
}
self.invalidate_and_finalize();
}
/// invalidate any window handles that need it.
///
/// This should always be called at the end of an event update cycle,
/// including for lifecycle events.
fn invalidate_and_finalize(&mut self) {
for win in self.windows.iter_mut() {
win.invalidate_and_finalize();
}
}
#[cfg(target_os = "macos")]
fn window_got_focus(&mut self, window_id: WindowId) {
if let Some(win) = self.windows.get_mut(window_id) {
win.macos_update_app_menu(&self.data, &self.env)
}
}
#[cfg(not(target_os = "macos"))]
fn window_got_focus(&mut self, _: WindowId) {}
}
impl<T: Data> DruidHandler<T> {
/// Note: the root widget doesn't go in here, because it gets added to the
/// app state.
pub(crate) fn new_shared(app_state: AppState<T>, window_id: WindowId) -> DruidHandler<T> {
DruidHandler {
app_state,
window_id,
}
}
}
impl<T: Data> AppState<T> {
pub(crate) fn data(&self) -> T {
self.inner.borrow().data.clone()
}
pub(crate) fn env(&self) -> Env {
self.inner.borrow().env.clone()
}
pub(crate) fn add_window(&self, id: WindowId, window: WindowDesc<T>) {
self.inner.borrow_mut().windows.add(id, window);
}
fn connect_window(&mut self, window_id: WindowId, handle: WindowHandle) {
self.inner.borrow_mut().connect(window_id, handle)
}
fn remove_window(&mut self, window_id: WindowId) {
self.inner.borrow_mut().remove_window(window_id)
}
fn window_got_focus(&mut self, window_id: WindowId) {
self.inner.borrow_mut().window_got_focus(window_id)
}
/// Send an event to the widget hierarchy.
///
/// Returns `true` if the event produced an action.
///
/// This is principally because in certain cases (such as keydown on Windows)
/// the OS needs to know if an event was handled.
fn do_window_event(&mut self, event: Event, window_id: WindowId) -> bool {
let result = self.inner.borrow_mut().do_window_event(window_id, event);
self.process_commands();
self.inner.borrow_mut().do_update();
result
}
fn paint_window(&mut self, window_id: WindowId, piet: &mut Piet, rect: Rect) -> bool {
self.inner.borrow_mut().paint(window_id, piet, rect)
}
fn idle(&mut self, token: IdleToken) {
match token {
RUN_COMMANDS_TOKEN => {
self.process_commands();
self.inner.borrow_mut().invalidate_and_finalize();
}
EXT_EVENT_IDLE_TOKEN => {
self.process_ext_events();
self.process_commands();
self.inner.borrow_mut().do_update();
}
other => log::warn!("unexpected idle token {:?}", other),
}
}
fn process_commands(&mut self) {
loop {
let next_cmd = self.inner.borrow_mut().command_queue.pop_front();
match next_cmd {
Some((target, cmd)) => self.handle_cmd(target, cmd),
None => break,
}
}
}
fn process_ext_events(&mut self) {
loop {
let ext_cmd = self.inner.borrow_mut().ext_event_host.recv();
match ext_cmd {
Some((targ, cmd)) => self.handle_cmd(targ.unwrap_or(Target::Global), cmd),
None => break,
}
}
}
/// Handle a 'command' message from druid-shell. These map to an item
/// in an application, window, or context (right-click) menu.
///
/// If the menu is associated with a window (the general case) then
/// the `window_id` will be `Some(_)`, otherwise (such as if no window
/// is open but a menu exists, as on macOS) it will be `None`.
fn handle_system_cmd(&mut self, cmd_id: u32, window_id: Option<WindowId>) {
let cmd = self.inner.borrow().get_menu_cmd(window_id, cmd_id);
let target = window_id.map(Into::into).unwrap_or(Target::Global);
match cmd {
Some(cmd) => self.inner.borrow_mut().append_command(target, cmd),
None => log::warn!("No command for menu id {}", cmd_id),
}
self.process_commands();
self.inner.borrow_mut().do_update();
}
/// Handle a command. Top level commands (e.g. for creating and destroying
/// windows) have their logic here; other commands are passed to the window.
fn handle_cmd(&mut self, target: Target, cmd: Command) {
use Target as T;
match target {
// these are handled the same no matter where they come from
_ if cmd.is(sys_cmd::QUIT_APP) => self.quit(),
_ if cmd.is(sys_cmd::HIDE_APPLICATION) => self.hide_app(),
_ if cmd.is(sys_cmd::HIDE_OTHERS) => self.hide_others(),
_ if cmd.is(sys_cmd::NEW_WINDOW) => {
if let Err(e) = self.new_window(cmd) {
log::error!("failed to create window: '{}'", e);
}
}
_ if cmd.is(sys_cmd::CLOSE_ALL_WINDOWS) => self.request_close_all_windows(),
// these should come from a window
// FIXME: we need to be able to open a file without a window handle
T::Window(id) if cmd.is(sys_cmd::SHOW_OPEN_PANEL) => self.show_open_panel(cmd, id),
T::Window(id) if cmd.is(sys_cmd::SHOW_SAVE_PANEL) => self.show_save_panel(cmd, id),
T::Window(id) if cmd.is(sys_cmd::CLOSE_WINDOW) => self.request_close_window(id),
T::Window(id) if cmd.is(sys_cmd::SHOW_WINDOW) => self.show_window(id),
T::Window(id) if cmd.is(sys_cmd::PASTE) => self.do_paste(id),
_ if cmd.is(sys_cmd::CLOSE_WINDOW) => {
log::warn!("CLOSE_WINDOW command must target a window.")
}
_ if cmd.is(sys_cmd::SHOW_WINDOW) => {
log::warn!("SHOW_WINDOW command must target a window.")
}
_ => self.inner.borrow_mut().dispatch_cmd(target, cmd),
}
}
fn show_open_panel(&mut self, cmd: Command, window_id: WindowId) {
let options = cmd.get_unchecked(sys_cmd::SHOW_OPEN_PANEL).to_owned();
//FIXME: this is blocking; if we hold `borrow_mut` we are likely to cause
//a crash. as a workaround we take a clone of the window handle.
//it's less clear what the better solution would be.
let handle = self
.inner
.borrow_mut()
.windows
.get_mut(window_id)
            .map(|w| w.handle.clone());
let result = handle.and_then(|mut handle| handle.open_file_sync(options));
if let Some(info) = result {
let cmd = Command::new(sys_cmd::OPEN_FILE, info);
self.inner.borrow_mut().dispatch_cmd(window_id.into(), cmd);
}
}
fn show_save_panel(&mut self, cmd: Command, window_id: WindowId) {
let options = cmd.get_unchecked(sys_cmd::SHOW_SAVE_PANEL).to_owned();
let handle = self
.inner
.borrow_mut()
.windows
.get_mut(window_id)
.map(|w| w.handle.clone());
let result = handle.and_then(|mut handle| handle.save_as_sync(options));
if let Some(info) = result {
let cmd = Command::new(sys_cmd::SAVE_FILE, Some(info));
self.inner.borrow_mut().dispatch_cmd(window_id.into(), cmd);
}
}
fn new_window(&mut self, cmd: Command) -> Result<(), Box<dyn std::error::Error>> {
let desc = cmd.get_unchecked(sys_cmd::NEW_WINDOW);
        // The NEW_WINDOW command is private and only druid can receive it by
        // normal means, so unwrapping is safe here; a failure deserves a panic.
let desc = desc.take().unwrap().downcast::<WindowDesc<T>>().unwrap();
let window = desc.build_native(self)?;
window.show();
Ok(())
}
fn request_close_window(&mut self, id: WindowId) {
self.inner.borrow_mut().request_close_window(id);
}
fn request_close_all_windows(&mut self) {
self.inner.borrow_mut().request_close_all_windows();
}
fn show_window(&mut self, id: WindowId) {
self.inner.borrow_mut().show_window(id);
}
fn do_paste(&mut self, window_id: WindowId) {
let event = Event::Paste(self.inner.borrow().app.clipboard());
self.inner.borrow_mut().do_window_event(window_id, event);
}
fn quit(&self) {
self.inner.borrow().app.quit()
}
fn hide_app(&self) {
#[cfg(target_os = "macos")]
self.inner.borrow().app.hide()
}
fn hide_others(&mut self) {
#[cfg(target_os = "macos")]
self.inner.borrow().app.hide_others()
}
}
impl<T: Data> crate::shell::AppHandler for AppHandler<T> {
fn command(&mut self, id: u32) {
self.app_state.handle_system_cmd(id, None)
}
}
impl<T: Data> WinHandler for DruidHandler<T> {
fn connect(&mut self, handle: &WindowHandle) {
self.app_state
.connect_window(self.window_id, handle.clone());
let event = Event::WindowConnected;
self.app_state.do_window_event(event, self.window_id);
}
fn paint(&mut self, piet: &mut Piet, rect: Rect) -> bool {
self.app_state.paint_window(self.window_id, piet, rect)
}
fn size(&mut self, size: Size) {
let event = Event::WindowSize(size);
self.app_state.do_window_event(event, self.window_id);
}
fn scale(&mut self, _scale: Scale) {
// TODO: Do something with the scale
}
fn command(&mut self, id: u32) {
self.app_state.handle_system_cmd(id, Some(self.window_id));
}
fn mouse_down(&mut self, event: &MouseEvent) {
// TODO: double-click detection (or is this done in druid-shell?)
let event = Event::MouseDown(event.clone().into());
self.app_state.do_window_event(event, self.window_id);
}
fn mouse_up(&mut self, event: &MouseEvent) {
let event = Event::MouseUp(event.clone().into());
self.app_state.do_window_event(event, self.window_id);
}
fn mouse_move(&mut self, event: &MouseEvent) {
let event = Event::MouseMove(event.clone().into());
self.app_state.do_window_event(event, self.window_id);
}
fn mouse_leave(&mut self) {
self.app_state
.do_window_event(Event::Internal(InternalEvent::MouseLeave), self.window_id);
}
fn key_down(&mut self, event: KeyEvent) -> bool {
self.app_state
.do_window_event(Event::KeyDown(event), self.window_id)
}
fn key_up(&mut self, event: KeyEvent) {
self.app_state
.do_window_event(Event::KeyUp(event), self.window_id);
}
fn wheel(&mut self, event: &MouseEvent) {
self.app_state
.do_window_event(Event::Wheel(event.clone().into()), self.window_id);
}
fn zoom(&mut self, delta: f64) {
let event = Event::Zoom(delta);
self.app_state.do_window_event(event, self.window_id);
}
fn got_focus(&mut self) {
self.app_state.window_got_focus(self.window_id);
}
fn timer(&mut self, token: TimerToken) {
self.app_state
.do_window_event(Event::Timer(token), self.window_id);
}
fn idle(&mut self, token: IdleToken) {
self.app_state.idle(token);
}
fn as_any(&mut self) -> &mut dyn Any {
self
}
fn destroy(&mut self) {
self.app_state.remove_window(self.window_id);
}
}
impl<T> Default for Windows<T> {
fn default() -> Self {
Windows {
windows: HashMap::new(),
pending: HashMap::new(),
}
}
}
|
valuation.reducer.ts
|
import { ImmutableState, PayloadAction, ValuationState } from 'app/app.state';
import { Valuation } from 'app/model/valuation';
import { ValuationHistory } from 'app/model/valuationHistory';
import { ValuationActions } from 'app/services/valuation/valuation.actions';
import * as Immutable from 'seamless-immutable';
const INITIAL_STATE = Immutable<ValuationState>({
type: '',
valuation: new Valuation(),
valuationHistory: new ValuationHistory()
});
/**
* Valuation reducer.
* @param state The valuation state.
* @param action The action type.
*/
export function ValuationReducer(state: ImmutableState<ValuationState> = INITIAL_STATE, action: PayloadAction): ImmutableState<ValuationState> {
switch (action.type) {
case ValuationActions.LIST_LOAD:
case ValuationActions.HISTORY_LOAD: {
return valuationLoad(state, action);
}
case ValuationActions.LIST_FAILED:
case ValuationActions.HISTORY_FAILED: {
return valuationFailed(state, action);
}
case ValuationActions.LIST_SUCCESS: {
return valuationListSuccess(state, action);
}
case ValuationActions.HISTORY_SUCCESS: {
return valuationHistorySuccess(state, action);
}
default: {
return state;
}
}
}
/**
* Returns new state after valuation list was successfully loaded.
* @param state The valuation state.
* @param action The payload action.
*/
function valuationListSuccess(state: ImmutableState<ValuationState>, action: PayloadAction): ImmutableState<ValuationState> {
return state.merge({ type: action.type, valuation: action.payload });
}
/**
* Returns new state after valuation history was successfully loaded.
* @param state The valuation state.
* @param action The payload action.
*/
function valuationHistorySuccess(state: ImmutableState<ValuationState>, action: PayloadAction): ImmutableState<ValuationState> {
return state.merge({ type: action.type, valuationHistory: action.payload });
}
/**
* Valuation load reducer.
* @param state The valuation state.
* @param action The payload action.
*/
function valuationLoad(state: ImmutableState<ValuationState>, action: PayloadAction): ImmutableState<ValuationState> {
return state.merge({ type: action.type });
}
/**
* Valuation failed reducer.
* @param state The valuation state.
* @param action The payload action.
*/
function valuationFailed(state: ImmutableState<ValuationState>, action: PayloadAction): ImmutableState<ValuationState> {
return state.merge({ type: action.type });
}
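// Usage sketch (assumed PayloadAction shape { type, payload }, hypothetical value):
//   const next = ValuationReducer(undefined, {
//     type: ValuationActions.LIST_SUCCESS,
//     payload: someValuation,
//   });
//   // next.type === ValuationActions.LIST_SUCCESS, next.valuation === someValuation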
|
SegmentationSettings.js
|
import React, { useState, useEffect } from 'react';
import PropTypes from 'prop-types';
import { Range } from '@ohif/ui';
import './SegmentationSettings.css';
const SegmentationSettings = ({ configuration, onBack, onChange }) => {
const [state, setState] = useState({
renderFill: configuration.renderFill,
renderOutline: configuration.renderOutline,
shouldRenderInactiveLabelmaps: configuration.shouldRenderInactiveLabelmaps,
fillAlpha: configuration.fillAlpha,
outlineAlpha: configuration.outlineAlpha,
outlineWidth: configuration.outlineWidth,
fillAlphaInactive: configuration.fillAlphaInactive,
outlineAlphaInactive: configuration.outlineAlphaInactive
});
useEffect(() => {
onChange(state);
}, [state]);
const check = field => {
setState(state => ({ ...state, [field]: !state[field] }));
};
const save = (field, value) => {
setState(state => ({ ...state, [field]: value }));
};
const toFloat = value => parseFloat(value / 100).toFixed(2);
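  // Note: toFixed returns a string, so values saved through toFloat (fillAlpha,
  // outlineAlpha, fillAlphaInactive, ...) end up as strings downstream; see the
  // propTypes TODOs below.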
return (
<div className="dcmseg-segmentation-settings">
<div className="settings-title">
<h3>Segmentations Settings</h3>
<button className="return-button" onClick={onBack}>
Back
</button>
</div>
<div
className="settings-group"
style={{ marginBottom: state.renderFill ? 15 : 0 }}
>
<CustomCheck
label="Segment Fill"
checked={state.renderFill}
onChange={() => check('renderFill')}
/>
{state.renderFill && (
<CustomRange
label="Opacity"
step={1}
min={0}
max={100}
value={state.fillAlpha * 100}
onChange={event => save('fillAlpha', toFloat(event.target.value))}
showPercentage
/>
)}
</div>
<div
className="settings-group"
style={{ marginBottom: state.renderOutline ? 15 : 0 }}
>
<CustomCheck
label="Segment Outline"
checked={state.renderOutline}
onChange={() => check('renderOutline')}
/>
{state.renderOutline && (
<>
<CustomRange
value={state.outlineAlpha * 100}
label="Opacity"
showPercentage
step={1}
min={0}
max={100}
onChange={event => save('outlineAlpha', toFloat(event.target.value))}
/>
<CustomRange
value={state.outlineWidth}
label="Width"
showValue
step={1}
min={0}
max={5}
            onChange={event => save('outlineWidth', parseInt(event.target.value, 10))}
/>
</>
)}
</div>
{(state.renderFill || state.renderOutline) && (
<div
className="settings-group"
style={{ marginBottom: state.shouldRenderInactiveLabelmaps ? 15 : 0 }}
>
<CustomCheck
label="Render inactive segmentations"
checked={state.shouldRenderInactiveLabelmaps}
onChange={() => check('shouldRenderInactiveLabelmaps')}
/>
{state.shouldRenderInactiveLabelmaps && (
<>
{state.renderFill && (
<CustomRange
label="Fill Opacity"
showPercentage
step={1}
min={0}
                  max={100}
                  value={state.fillAlphaInactive * 100}
                  onChange={event => save('fillAlphaInactive', toFloat(event.target.value))}
/>
)}
{state.renderOutline && (
<CustomRange
label="Outline Opacity"
showPercentage
step={1}
min={0}
max={100}
value={state.outlineAlphaInactive * 100}
onChange={event => save('outlineAlphaInactive', toFloat(event.target.value))}
/>
)}
</>
)}
</div>
)}
</div>
);
};
const CustomCheck = ({ label, checked, onChange }) => {
return (
<div className="custom-check">
<label>
<span>{label}</span>
<input type="checkbox" checked={checked} onChange={onChange} />
</label>
</div>
);
};
const CustomRange = props => {
const { label, onChange } = props;
return (
<div className="range">
<label htmlFor="range">{label}</label>
<Range
{...props}
onChange={event => {
event.persist();
onChange(event);
}}
/>
</div>
);
};
SegmentationSettings.propTypes = {
configuration: PropTypes.shape({
renderFill: PropTypes.bool.isRequired,
renderOutline: PropTypes.bool.isRequired,
shouldRenderInactiveLabelmaps: PropTypes.bool.isRequired,
    fillAlpha: PropTypes.oneOfType([PropTypes.string, PropTypes.number]).isRequired, /* TODO: why is fillAlpha a string? */
    outlineAlpha: PropTypes.oneOfType([PropTypes.string, PropTypes.number]).isRequired, /* TODO: why is outlineAlpha a string? */
outlineWidth: PropTypes.number.isRequired,
fillAlphaInactive: PropTypes.number.isRequired,
outlineAlphaInactive: PropTypes.number.isRequired,
}).isRequired,
onBack: PropTypes.func.isRequired,
onChange: PropTypes.func.isRequired,
};
export default SegmentationSettings;
|
config_creator.py
|
from collections import OrderedDict
import tldextract
import re
from . import helpers
from . import helpdesk_helper
from urllib.parse import urlparse
def extract_root_from_input(input_string):
    # We can't parse the URL reliably since the user might not have entered a
    # proper link; we assume a string that already ends with '/' is the root.
    if input_string.endswith('/'):
        return input_string
    # Extract the substring before the first isolated / (not //)
    domain = re.match(".+?([^/]/(?!/))",
                      input_string)
    try:
        url_parsed = urlparse(input_string)
        # Remove unused parameters. _replace returns a new result rather than
        # mutating in place, so the result must be reassigned.
        url_parsed = url_parsed._replace(params='', query='', fragment='')
        path_split = url_parsed.path.split('/')
        # The path points at a page; drop the page component
        if 'html' in path_split[-1]:
            url_parsed = url_parsed._replace(path='/'.join(path_split[:-1]))
        return url_parsed.geturl() + '/'
    except ValueError:
        return domain.group() if domain else input_string
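# Behaviour sketch (hypothetical URLs):
#   extract_root_from_input("https://doc.example.com/")  -> unchanged
#   extract_root_from_input("https://doc.example.com/guide/intro.html")
#       -> "https://doc.example.com/guide/"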
def to_docusaurus_config(config, urls=None):
if urls:
config["sitemap_urls"] = [
extract_root_from_input(urls[0]) + "sitemap.xml"]
config["sitemap_alternate_links"] = True
config["custom_settings"] = {"attributesForFaceting": ["language",
"version"]
}
config["selectors"]["lvl0"] = OrderedDict((
("selector",
"//*[contains(@class,'navGroups')]//*[contains(@class,'navListItemActive')]/preceding::h3[1]"),
("type", "xpath"),
("global", True),
("default_value", "Documentation")
))
config["selectors"]["lvl1"] = ".post h1"
config["selectors"]["lvl2"] = ".post h2"
config["selectors"]["lvl3"] = ".post h3"
config["selectors"]["lvl4"] = ".post h4"
config["selectors"]["lvl5"] = ".post h5"
config["selectors"]["text"] = ".post article p, .post article li"
config["selectors_exclude"] = [".hash-link"]
return config
def to_gitbook_config(config):
config["selectors"]["lvl0"] = ".markdown-section h1"
config["selectors"]["lvl1"] = ".markdown-section h2"
config["selectors"]["lvl2"] = ".markdown-section h3"
config["selectors"]["lvl3"] = ".markdown-section h4"
config["selectors"]["lvl4"] = ".markdown-section h4"
config["selectors"]["lvl5"] = ".markdown-section h5"
config["selectors"]["text"] = ".markdown-section p, .markdown-section li"
return config
def to_pkgdown_config(config, urls=None):
if urls:
root = extract_root_from_input(urls[0])
config["start_urls"] = [{
"url": root + "index.html",
"selectors_key": "homepage",
"tags": [
"homepage"
]
},
{
"url": root + "reference",
"selectors_key": "reference",
"tags": [
"reference"
]
},
{
"url": root + "articles",
"selectors_key": "articles",
"tags": [
"articles"
]
}]
config["sitemap_urls"] = [
root + "sitemap.xml"]
config["selectors"] = OrderedDict((
("homepage", OrderedDict((
("lvl0", OrderedDict((
("selector", ".contents h1"),
("default_value", "pkgdown Home page")
))),
("lvl1", ".contents h2"),
("lvl2", OrderedDict((
("selector", ".contents h3"),
("default_value", "Context")
))),
("lvl3", ".ref-arguments td, .ref-description"),
("text", ".contents p, .contents li, .contents .pre")
))),
("reference", OrderedDict((
("lvl0", ".contents h1"),
("lvl1", OrderedDict((
("selector", ".contents .name"),
("default_value", "Argument")
))),
("lvl2", OrderedDict((
("selector", ".ref-arguments th"),
("default_value", "Description")
))),
("lvl3", ".ref-arguments td, .ref-description"),
("text", ".contents p, .contents li")
))),
("articles", OrderedDict((
("lvl0", ".contents h1"),
("lvl1", ".contents .name"),
("lvl2", OrderedDict((
("selector", ".contents h2, .contents h3"),
("default_value", "Context")
))),
("text",
".contents p, .contents li")
))),
("default", OrderedDict((
("lvl1", ".contents h2"),
("lvl2", ".contents h3, .contents th"),
("lvl3", ".contents h4"),
("lvl4", ".contents h5"),
("text",
".contents p, .contents li, .usage, .template-article .contents .pre")
)))
))
config["selectors_exclude"] = [".dont-index"]
config["stop_urls"] = ["/reference/$",
"/reference/index.html",
"/articles/$",
"/articles/index.html"]
config["custom_settings"] = {
"separatorsToIndex": "_",
"attributesToRetrieve": ["hierarchy",
"content", "anchor", "url",
"url_without_anchor"]
}
config["min_indexed_level"] = 2
return config
def to_vuepress_config(config):
config["selectors"]["lvl0"] = OrderedDict((
("selector", "p.sidebar-heading.open"),
("global", True),
("default_value", "Documentation")
))
config["custom_settings"] = {"attributesForFaceting": ["lang"]
}
config["selectors"]["lvl1"] = ".content h1"
config["selectors"]["lvl2"] = ".content h2"
config["selectors"]["lvl3"] = ".content h3"
config["selectors"]["lvl4"] = ".content h4"
config["selectors"]["lvl5"] = ".content h5"
config["selectors"]["text"] = ".content p, .content li"
config["selectors"]["lang"] = OrderedDict((
("selector", "/html/@lang"),
("type", "xpath"),
("global", True),
("default_value", "en-US")
))
config["scrap_start_urls"] = False
config["strip_chars"] = " .,;:#"
return config
def to_larecipe_config(config, urls=None):
if urls:
config["sitemap_urls"] = [
extract_root_from_input(urls[0]) + "sitemap.xml"]
config["selectors"]["lvl0"] = OrderedDict((
("selector",
"//div[contains(@class, 'sidebar')]//li/a[text()=//div[contains(@class, 'article')]//h1[1]/text()]"),
("global", True),
("type", "xpath"),
("default_value", "Documentation")
))
config["selectors"]["lvl1"] = "div.article h1"
config["selectors"]["lvl2"] = "div.article h2"
config["selectors"]["lvl3"] = "div.article h3"
config["selectors"]["lvl4"] = "div.article h4"
config["selectors"]["lvl5"] = "div.article h5"
config["selectors"]["text"] = "div.article p, div.article li"
return config
def to_publii_config(config, urls=None):
if urls:
config["sitemap_urls"] = [
extract_root_from_input(urls[0]) + "sitemap.xml"]
config["selectors"]["lvl0"] = OrderedDict((
("selector", ".active-parent > span"),
("global", True),
("default_value", "Documentation")
))
config["selectors"]["lvl1"] = ".content h1"
config["selectors"]["lvl2"] = ".content h2"
config["selectors"]["lvl3"] = ".content h3"
config["selectors"]["lvl4"] = ".content h4"
config["selectors"]["lvl5"] = ".content h5"
config["selectors"]["text"] = ".content p, .content li"
config["only_content_level"] = True
return config
def to_jsdoc_config(config, urls=None):
config["stop_urls"] = ["\\.js\\.html",
"/index\\.html$"]
config["selectors"]["lvl0"] = OrderedDict((
("selector", "#main .page-title"),
("global", True),
("default_value", "Documentation")
))
config["selectors"]["lvl1"] = "#main h3"
config["selectors"]["lvl2"] = "#main h4"
config["selectors"]["lvl3"] = "#main h5"
config["selectors"]["lvl4"] = "#main h6, #main td.name"
del config["selectors"]["lvl5"]
config["selectors"]["text"] = "#main p, #main li"
config["selectors_exclude"] = [".signature",
".type-signature",
".details"]
return config
def create_config(u=None):
config = OrderedDict((
("index_name", ""),
("start_urls", []),
("stop_urls", []),
("selectors", OrderedDict((
("lvl0", "FIXME h1"),
("lvl1", "FIXME h2"),
("lvl2", "FIXME h3"),
("lvl3", "FIXME h4"),
("lvl4", "FIXME h5"),
("lvl5", "FIXME h6"),
("text", "FIXME p, FIXME li")
)))
))
if u is None:
u = helpers.get_user_value("start url: ")
urls = [u]
if helpdesk_helper.is_helpdesk_url(u):
cuid = helpdesk_helper.get_conversation_ID_from_url(u)
conversation = helpdesk_helper.get_conversation(cuid)
url_from_conversation = helpdesk_helper.get_start_url_from_conversation(
conversation)
urls = [url_from_conversation]
u = url_from_conversation
if helpdesk_helper.is_docusaurus_conversation(conversation):
config = to_docusaurus_config(config, urls)
elif helpdesk_helper.is_gitbook_conversation(conversation):
config = to_gitbook_config(config)
elif helpdesk_helper.is_pkgdown_conversation(conversation):
config = to_pkgdown_config(config, urls)
elif helpdesk_helper.is_vuepress_conversation(conversation):
config = to_vuepress_config(config)
elif helpdesk_helper.is_larecipe_conversation(conversation):
config = to_larecipe_config(config, urls)
elif helpdesk_helper.is_publii_conversation(conversation):
config = to_publii_config(config, urls)
elif helpdesk_helper.is_jsdoc_conversation(conversation):
config = to_jsdoc_config(config, urls)
config["conversation_id"] = [cuid]
if '.html' in u:
urls.append(u.rsplit('/', 1)[0])
    # Use subdomain for github websites https://<subdomain>.github.io/
    extracted = tldextract.extract(u)
    config['index_name'] = (extracted.subdomain
                            if extracted.domain == 'github'
                            else extracted.domain)
if len(config['start_urls']) == 0:
config['start_urls'] = urls
user_index_name = helpers.get_user_value(
'index_name is \033[1;33m{}\033[0m [enter to confirm]: '.format(config[
"index_name"]))
if user_index_name != "":
config['index_name'] = user_index_name
print('index_name is now \033[1;33m{}\033[0m'.format(config[
"index_name"]))
return config
|
SignOutPage.tsx
|
import React, { FC } from 'react'
import { useSnackbar } from 'notistack'
import { useHistory } from 'react-router-dom'
import { useMountEffect } from '../../hooks/utils/MountEffectHook'
import { useLocale } from '../../hooks/LocaleContextHook'
import { useUserContext } from '../../hooks/UserContextHook'
import { useUIPaths } from '../../hooks/UIPathHook'
import { useApi } from '../../hooks/ApiHook'
export const SignOutPage: FC = () => {
const l = useLocale()
const api = useApi()
const uiPaths = useUIPaths()
const { enqueueSnackbar } = useSnackbar()
const { reset: resetUser } = useUserContext()
const history = useHistory()
useMountEffect(() => {
resetUser()
void api
.signOut()
.then(() => {
enqueueSnackbar(l.snackbars.signOutComplete)
})
.finally(() => {
history.push(uiPaths.signInPath())
})
})
return <div></div>
}
|
main.rs
|
use std::str::FromStr;
use std::env;
mod lib;
fn main() {
let mut numbers = Vec::new();
for arg in env::args().skip(1) {
numbers.push(u64::from_str(&arg)
.expect("error parsing argument"));
}
if numbers.len() == 0 {
eprintln!("Usage: gcd NUMBER ...");
std::process::exit(1);
}
    let mut d = numbers[0];
    for m in &numbers[1..] {
        d = lib::gcd(d, *m);
}
println!("The greatest common divison of {:?} is {}",
numbers, d);
}
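// Example run (sketch): `cargo run 42 56` prints
// "The greatest common divisor of [42, 56] is 14".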
|
primes.rs
|
use bit_vec::BitVec;
use num::{PrimInt, FromPrimitive, Unsigned};
pub struct Sieve<T: PrimInt + Unsigned + FromPrimitive> {
/// is_prime only deals with odd numbers, so e.g. the 0th index is 3, etc.
prime_bits: BitVec,
primes: Vec<T>,
}
impl<T> Sieve<T>
where T: PrimInt + Unsigned + FromPrimitive
{
/// Creates a new prime sieve and finds all primes up to and including the given limit
pub fn sieve_to(limit: T) -> Sieve<T> {
let is_prime_size = limit.to_usize().unwrap() / 2 - 1;
// The prime_bits vector and the vector of primes which will make up the sieve
let mut prime_bits = BitVec::from_elem(is_prime_size, true);
let mut primes: Vec<T> = Vec::new();
primes.push(T::one() + T::one());
// Implement sieve of Eratosthenes
// Recall that the number represented by index k in is_prime is 2k+3
// Helpful identities:
// (2k+3)^2 = 4k^2 + 12k + 9 = 2(2k^2 + 6k + 3) + 3
// 2j+3 + 2(2k+3) = 2(j + 2k + 3) + 3
// The index of the current prime in the process
let mut idx = 0;
while 2 * idx * idx + 6 * idx + 3 < prime_bits.len() {
// The number at the current index is prime; cross off all its multiples
// Until I can use step_by, this will have to do
let mut i = 2 * idx * idx + 6 * idx + 3;
while i < prime_bits.len() {
prime_bits.set(i, false);
i += 2 * idx + 3;
}
// Find the next prime
idx += 1;
while idx < prime_bits.len() && !prime_bits[idx] {
idx += 1;
}
}
// Now, populate the primes vector
for i in 0..prime_bits.len() {
if prime_bits[i] {
// We are guaranteed not to have an error here, since
// 2i + 3 <= limit, which is a valid number of type T
primes.push(FromPrimitive::from_usize(2 * i + 3).unwrap());
}
}
        Sieve {
            prime_bits,
            primes,
        }
}
    /// Returns the nth prime (1-indexed), if available (otherwise returns `None`)
    pub fn nth(&self, n: usize) -> Option<T> {
        // Guard against usize underflow for n == 0
        if n == 0 {
            return None;
        }
        if let Some(&p) = self.primes.get(n - 1) {
            Some(p)
        } else {
            None
        }
    }
/// Checks whether the given number is prime
pub fn is_prime(&self, n: T) -> bool {
// Helpful constants
let two = T::one() + T::one();
let three = two + T::one();
// If n is less than two, not prime
if n <= T::one() {
return false;
}
// If n is even and not two, not prime
if n % two == T::zero() {
return n == two;
}
// If n is within this sieve's range, just check directly
let n_index = (n - three) / two;
if n_index < FromPrimitive::from_usize(self.prime_bits.len()).unwrap() {
return self.prime_bits[n_index.to_usize().unwrap()];
}
// Otherwise, we'll actually have to do trial division
let mut iter = self.primes.iter();
let mut d = *iter.next().unwrap();
while d * d <= n {
if n % d == T::zero() {
return false;
}
if let Some(p) = iter.next() {
d = *p;
} else {
// Check all odd numbers if we run out of primes
d = d + two;
}
}
true
}
    /// Counts the divisors of a number (including 1 and n itself)
pub fn count_divisors(&self, n: T) -> u32 {
// Try primes in sieve first, and then move on by counting odd numbers
// This is similar to the is_prime method, except we're counting them
let mut divisors = 1;
let mut n = n;
let mut iter = self.primes.iter();
let mut d = *iter.next().unwrap();
while n > T::one() {
if n % d == T::zero() {
// This is the power of the prime + 1
let mut choices = 2;
// Divide out all powers of this prime
n = n / d;
while n % d == T::zero() {
choices += 1;
n = n / d;
}
// Update number of divisors given the "choices"
divisors *= choices;
}
// Get next divisor
if let Some(p) = iter.next() {
d = *p;
} else {
d = d + T::one() + T::one();
}
}
divisors
}
    /// Sums all divisors of a number (the sigma function σ(n))
pub fn
|
(&self, n: T) -> T {
// This is just the count_divisors method, except we
// keep track of the sum instead of the number
// Use the fact that the sum of divisors function is
// multiplicative
let mut sum = T::one();
let mut n = n;
let mut iter = self.primes.iter();
let mut d = *iter.next().unwrap();
while n > T::one() {
if n % d == T::zero() {
// The sum of divisors of this prime power
let mut sum_power = T::one() + d;
// Divide out all powers of this prime
n = n / d;
while n % d == T::zero() {
sum_power = d * sum_power + T::one();
n = n / d;
}
// Update the sum of divisors with this multiplicative factor
sum = sum * sum_power;
}
// Get next divisor
if let Some(p) = iter.next() {
d = *p;
} else {
d = d + T::one() + T::one();
}
}
sum
}
/// Sums only proper divisors
pub fn sum_proper_divisors(&self, n: T) -> T {
self.sum_divisors(n) - n
}
/// Returns a vector containing all the primes which were sieved
pub fn primes(&self) -> Vec<T> {
self.primes.clone()
}
}
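// A minimal usage sketch (illustrative only, assuming the `bit-vec` and `num`
// crates imported above):
//
//     let sieve = Sieve::<u64>::sieve_to(100);
//     assert_eq!(sieve.nth(1), Some(2));
//     assert_eq!(sieve.nth(4), Some(7));
//     assert!(sieve.is_prime(97));
//     assert_eq!(sieve.count_divisors(12), 6); // 1, 2, 3, 4, 6, 12
//     assert_eq!(sieve.sum_proper_divisors(28), 28); // 28 is perfect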
|
sum_divisors
|
connect_4_options2.py
|
#-------------------------------------------------------------------------------
# Name: CONNECT 4 GAME
# Purpose: PROJECT FOR GAME DEVELOPMENT IN OOP
#
# Author: GROUP CATACUTAN, PASCUAL, LAURENT, VENERACION
#
# Created: 30/10/2019
# Copyright: (c) XENON_XEIN_XENLY 2019
# Licence: <your licence>
#-------------------------------------------------------------------------------
import pygame as pg
from pygame.locals import *
from connect_4_images import * # BLIT, RECT, SCREEN
from connect_4_scene_switch import *
from _connect4_logic import *
class OPTIONS2():
def __init__(self):
self.gear_bottom_right = ROTATE(image.GEAR,(1336,610),1.5)
self.gear_bottom = ROTATE(image.GEAR,(1030,740),-1.5)
self.__counter1 = 0
self.options_panelX = 400
self.SOUND = False
self.FULL_SCREEN = False
self.HOME = False
self.HOME_DISABLED = False
self.BACK = False
self.CURSOR_AVAILABLE = False
self.STARTING = True
self.ENDING = False
self.soundX = 400
self.fullX = 400
self.homeX = 400
self.played_once = 0
self.GEAR_START = False
self.pause = False
def reset(self):
self.HOME_DISABLED = False
self.CURSOR_AVAILABLE = False
self.STARTING = True
self.ENDING = False
self.__counter1 = 0
self.options_panelX = 400
self.soundX = 400
self.fullX = 400
self.homeX = 400
self.played_once = 0
self.GEAR_START = False
def
|
(self,n):
image.OPTIONS_BLUR2.set_alpha(n)
BLIT(image.OPTIONS_BLUR2,image.ORIGIN)
def OPTIONS2_event_handler(self):
if self.CURSOR_AVAILABLE:
##----------------------------------------------- checking mouse position
if MOUSE_inside((1098,1205),(246,274)):
self.HOME = True
else: self.HOME = False
if MOUSE_inside((1085,1217),(308,335)):
self.SOUND = True
else: self.SOUND = False
if MOUSE_inside((1100,1207),(365,440)):
self.FULL_SCREEN = True
else: self.FULL_SCREEN = False
if MOUSE_inside((0,965),(0,718)):
self.BACK = True
else: self.BACK = False
##------------------------------------------------- events
for event in pg.event.get():
if event.type == QUIT:
scenes.create_scene('EXIT DIALOG')
elif event.type == MOUSEBUTTONDOWN:
click = get_MOUSECLICK()
if click[0]:
if self.SOUND: ##------------------------------------- SOUND
sounds.toggle_mute('INGAME')
elif self.FULL_SCREEN: ##----------------------- FULL SCREEN
game_window.toggle_fullscreen()
elif self.HOME: ##------------------------------------- HOME
self.HOME_DISABLED = True
self.CURSOR_AVAILABLE = False
self.GEAR_START = False
self.STARTING = False; self.ENDING = True
self.HOME = False
elif self.BACK: ##------------------------------------- BACK
self.CURSOR_AVAILABLE = False
self.GEAR_START = False
self.STARTING = False; self.ENDING = True
'''elif click[2]:
self.start_OPTIONS2()'''
elif event.type == KEYDOWN:
if event.key == K_ESCAPE:
self.CURSOR_AVAILABLE = False
self.GEAR_START = False
self.STARTING = False; self.ENDING = True
def start_OPTIONS2(self):
self.reset()
while scenes.scene == 'OPTIONS2':
if self.__counter1 < 300:
if self.STARTING:
self.__counter1 += 10
self.SHOW_OPTIONS2_BLUR(self.__counter1)
if self.__counter1 == 300: #---------------- PANEL MOVE
self.GEAR_START= True
if self.STARTING:
if sounds.GLOBAL_SOUND:
if self.played_once == 1: pass
else: sounds.MOVING_PANEL_SOUND(); self.played_once = 1
else: self.played_once = 1
if self.options_panelX > 0:
self.options_panelX -= 10
if self.soundX > 0:
self.soundX -= 10
if self.fullX > 0:
self.fullX -= 10
if self.homeX > 0:
self.homeX -= 10
if self.ENDING:
if sounds.GLOBAL_SOUND:
if self.played_once == 1: sounds.MOVING_PANEL_SOUND(); self.played_once = 0
else: pass
if self.options_panelX < 400:
self.options_panelX += 10
else:
if self.HOME_DISABLED: #--- if home is clicked go to HOME ELSE GOTO PREVIOUS SCENE
fade_out.start_fade_out(); scenes.create_scene('HOME'); game_window.MAIN_WINDOW.fill(BLACK)
else: fade_out.start_fade_out(); scenes.create_scene('INGAME'); game_window.MAIN_WINDOW.fill(BLACK)
if self.options_panelX == 0: self.CURSOR_AVAILABLE = True
if self.pause: BLIT(image.GAME_PAUSED_PANEL,(self.options_panelX,image.ORIGIN[1]))
else: BLIT(image.OPTIONS2_PANEL,(self.options_panelX,image.ORIGIN[1]))
if self.SOUND:
BLIT(image.SOUND_GLOW2,image.ORIGIN)
if self.FULL_SCREEN:
BLIT(image.FULL_SCREEN_GLOW2,image.ORIGIN)
if self.HOME:
BLIT(image.HOME_GLOW2,image.ORIGIN)
##------------------------------------------- NON ACTIVE
if sounds.get_sound_condition():
if self.ENDING:
if self.soundX < 400:
self.soundX += 10
BLIT(image.SOUND_NON_ACTIVE2,(self.soundX,image.ORIGIN[1]))
if game_window.get_screen_condition():
if self.ENDING:
if self.fullX < 400:
self.fullX += 10
BLIT(image.FULL_SCREEN_NON_ACTIVE2,(self.fullX,image.ORIGIN[1]))
if self.HOME_DISABLED:
if self.ENDING:
if self.homeX < 400:
self.homeX += 10
BLIT(image.HOME_NON_ACTIVE2,(self.homeX,image.ORIGIN[1]))
##---------------------------------------- GEARS
if self.GEAR_START:
self.gear_bottom_right.show_rotation()
self.gear_bottom.show_rotation()
image._SHADOW()
##------------------------------------------- CURSOR
if self.BACK:
if self.CURSOR_AVAILABLE:
image._CURSOR_BACK()
else:
if self.CURSOR_AVAILABLE:
image._CURSOR_MAIN()
self.OPTIONS2_event_handler()
#print_current_mouse_position()
UPDATE()
options2 = OPTIONS2()
if __name__ == '__main__':
scenes.scene = 'OPTIONS2'
options2.start_OPTIONS2()
|
SHOW_OPTIONS2_BLUR
|
json_test.go
|
package formatters_test
import (
"errors"
"regexp"
"sort"
"strings"
"testing"
"time"
"github.com/flywave/logr/v2"
"github.com/flywave/logr/v2/formatters"
"github.com/flywave/logr/v2/targets"
"github.com/flywave/logr/v2/test"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type Props struct {
Key string
Blap int
}
type User struct {
Name string
Age int
Props *Props
}
func TestJSONFieldTypes(t *testing.T) {
lgr, _ := logr.New()
filter := &logr.StdFilter{Lvl: logr.Error, Stacktrace: logr.Error}
formatter := &formatters.JSON{
DisableTimestamp: true,
DisableStacktrace: true,
}
t.Run("basic types", func(t *testing.T) {
buf := &test.Buffer{}
target := targets.NewWriterTarget(buf)
err := lgr.AddTarget(target, "basicTest", filter, formatter, 1000)
if err != nil {
t.Error(err)
}
logger := lgr.NewLogger()
logger.Error("Basic types test",
logr.String("f1", "one"),
logr.Int("f2", 77),
logr.Bool("f3", true),
logr.Float64("f4", 3.14),
logr.Err(errors.New("test error")),
)
err = lgr.Flush()
require.NoError(t, err)
want := NL(`{"level":"error","msg":"Basic types test","f1":"one","f2":77,"f3":true,"f4":3.14,"error":"test error"}`)
if strings.Compare(want, buf.String()) != 0 {
t.Errorf("JSON does not match: expected %s got %s", want, buf.String())
}
})
t.Run("time types", func(t *testing.T) {
buf := &test.Buffer{}
target := targets.NewWriterTarget(buf)
err := lgr.AddTarget(target, "timeTest", filter, formatter, 1000)
if err != nil {
t.Error(err)
}
logger := lgr.NewLogger()
now, _ := time.Parse(logr.DefTimestampFormat, "2021-05-16 22:23:10.989 -04:00")
millis := int64(1621218819966) // May 16, 2021 22:33:39.966
dur := (time.Hour * 1) + (time.Minute * 34) + (time.Second * 17) + (time.Millisecond * 230)
logger.Error("Time types test",
logr.Time("f1", now),
logr.Millis("f2", millis),
logr.Duration("f3", dur),
)
err = lgr.Flush()
require.NoError(t, err)
want := NL(`{"level":"error","msg":"Time types test","f1":"2021-05-16 22:23:10.989 -04:00","f2":"May 17 02:33:39.966","f3":"1h34m17.23s"}`)
if strings.Compare(want, buf.String()) != 0 {
t.Errorf("JSON does not match: expected %s got %s", want, buf.String())
}
})
t.Run("struct types", func(t *testing.T) {
buf := &test.Buffer{}
target := targets.NewWriterTarget(buf)
err := lgr.AddTarget(target, "structTest", filter, formatter, 1000)
if err != nil {
t.Error(err)
}
logger := lgr.NewLogger()
user := User{Name: "wiggin", Age: 13, Props: &Props{Key: "foo", Blap: 77}}
logger.Error("Struct types test",
logr.Any("f1", user),
logr.Any("f2", &user),
)
err = lgr.Flush()
require.NoError(t, err)
want := NL(`{"level":"error","msg":"Struct types test","f1":{"Name":"wiggin","Age":13,"Props":{"Key":"foo","Blap":77}},"f2":{"Name":"wiggin","Age":13,"Props":{"Key":"foo","Blap":77}}}`)
if strings.Compare(want, buf.String()) != 0 {
t.Errorf("JSON does not match: expected %s got %s", want, buf.String())
}
})
t.Run("array type", func(t *testing.T) {
buf := &test.Buffer{}
target := targets.NewWriterTarget(buf)
err := lgr.AddTarget(target, "arrayTest", filter, formatter, 1000)
if err != nil {
t.Error(err)
}
logger := lgr.NewLogger()
f1 := []int{2, 4, 6, 8}
f2 := []*User{
{Name: "wiggin", Age: 13, Props: &Props{Key: "foo", Blap: 77}},
{Name: "Jude", Age: 44, Props: &Props{Key: "foo", Blap: 78}},
}
logger.Error("Array test",
logr.Array("f1", f1),
logr.Array("f2", f2),
)
err = lgr.Flush()
require.NoError(t, err)
want := NL(`{"level":"error","msg":"Array test","f1":[2,4,6,8],"f2":[{"Name":"wiggin","Age":13,"Props":{"Key":"foo","Blap":77}},{"Name":"Jude","Age":44,"Props":{"Key":"foo","Blap":78}}]}`)
if strings.Compare(want, buf.String()) != 0 {
t.Errorf("JSON does not match: expected %s got %s", want, buf.String())
}
})
t.Run("map type", func(t *testing.T) {
buf := &test.Buffer{}
target := targets.NewWriterTarget(buf)
err := lgr.AddTarget(target, "mapTest", filter, formatter, 1000)
if err != nil {
t.Error(err)
}
logger := lgr.NewLogger()
f1 := map[string]int{"two": 2, "four": 4, "six": 6, "eight": 8}
f2 := map[string]*User{
"one": {Name: "wiggin", Age: 13, Props: &Props{Key: "foo", Blap: 77}},
"two": {Name: "Jude", Age: 44, Props: &Props{Key: "foo", Blap: 78}},
}
logger.Error("Array test",
logr.Map("f1", f1),
logr.Map("f2", f2),
)
err = lgr.Flush()
require.NoError(t, err)
		want := NL(`{"level":"error","msg":"Map test","f1":{"eight":8,"four":4,"six":6,"two":2},"f2":{"one":{"Name":"wiggin","Age":13,"Props":{"Key":"foo","Blap":77}},"two":{"Name":"Jude","Age":44,"Props":{"Key":"foo","Blap":78}}}}`)
if strings.Compare(want, buf.String()) != 0 {
t.Errorf("JSON does not match: expected %s got %s", want, buf.String())
}
})
err := lgr.Shutdown()
require.NoError(t, err)
}
func TestJSON(t *testing.T) {
lgr, _ := logr.New()
filter := &logr.StdFilter{Lvl: logr.Error, Stacktrace: logr.Error}
formatter := &formatters.JSON{
DisableTimestamp: true,
DisableStacktrace: true,
FieldSorter: sorter,
}
t.Run("sorted, one field", func(t *testing.T) {
buf := &test.Buffer{}
target := targets.NewWriterTarget(buf)
err := lgr.AddTarget(target, "jsonTest", filter, formatter, 1000)
if err != nil {
t.Error(err)
}
logger := lgr.NewLogger().With(logr.String("name", "wiggin"))
logger.Error("This is an error.")
err = lgr.Flush()
require.NoError(t, err)
want := NL(`{"level":"error","msg":"This is an error.","name":"wiggin"}`)
if strings.Compare(want, buf.String()) != 0 {
t.Errorf("JSON does not match: expected %s got %s", want, buf.String())
}
})
t.Run("sorted, zero fields", func(t *testing.T) {
buf := &test.Buffer{}
target := targets.NewWriterTarget(buf)
err := lgr.AddTarget(target, "jsonTest", filter, formatter, 1000)
if err != nil {
t.Error(err)
}
logger := lgr.NewLogger()
logger.Error("This is an error.")
err = lgr.Flush()
require.NoError(t, err)
want := NL(`{"level":"error","msg":"This is an error."}`)
if strings.Compare(want, buf.String()) != 0 {
t.Errorf("JSON does not match: expected %s got %s", want, buf.String())
}
})
t.Run("sorted, three fields", func(t *testing.T) {
buf := &test.Buffer{}
target := targets.NewWriterTarget(buf)
err := lgr.AddTarget(target, "jsonTest", filter, formatter, 1000)
require.NoError(t, err)
logger := lgr.NewLogger().With(
logr.String("middle_name", "Thomas"),
logr.String("last_name", "Wiggin"),
logr.String("first_name", "Ender"),
)
logger.Error("This is an error.")
err = lgr.Flush()
require.NoError(t, err)
want := NL(`{"level":"error","msg":"This is an error.","first_name":"Ender","last_name":"Wiggin","middle_name":"Thomas"}`)
if strings.Compare(want, buf.String()) != 0 {
t.Errorf("JSON does not match: expected %s got %s", want, buf.String())
}
})
t.Run("sorted, three fields, grouped", func(t *testing.T) {
formatter := &formatters.JSON{
DisableTimestamp: true,
DisableStacktrace: true,
KeyGroupFields: "group",
FieldSorter: sorter,
}
buf := &test.Buffer{}
target := targets.NewWriterTarget(buf)
err := lgr.AddTarget(target, "jsonTest", filter, formatter, 1000)
require.NoError(t, err)
logger := lgr.NewLogger().With(
logr.String("middle_name", "Thomas"),
logr.String("last_name", "Wiggin"),
logr.String("first_name", "Ender"),
)
logger.Error("This is an error.")
err = lgr.Flush()
require.NoError(t, err)
want := NL(`{"level":"error","msg":"This is an error.","group":{"first_name":"Ender","last_name":"Wiggin","middle_name":"Thomas"}}`)
if strings.Compare(want, buf.String()) != 0 {
t.Errorf("JSON does not match: expected %s got %s", want, buf.String())
}
})
t.Run("reverse sorted, three fields", func(t *testing.T) {
formatterWithReverseSort := &formatters.JSON{DisableTimestamp: true, DisableStacktrace: true, FieldSorter: reverseSorter}
buf := &test.Buffer{}
target := targets.NewWriterTarget(buf)
err := lgr.AddTarget(target, "jsonTest", filter, formatterWithReverseSort, 1000)
require.NoError(t, err)
logger := lgr.NewLogger().With(
logr.String("middle_name", "Thomas"),
logr.String("last_name", "Wiggin"),
logr.String("first_name", "Ender"),
)
logger.Error("This is an error.")
err = lgr.Flush()
require.NoError(t, err)
want := NL(`{"level":"error","msg":"This is an error.","middle_name":"Thomas","last_name":"Wiggin","first_name":"Ender"}`)
if strings.Compare(want, buf.String()) != 0 {
t.Errorf("JSON does not match: expected %s got %s", want, buf.String())
}
})
t.Run("reverse sorted, three fields, grouped", func(t *testing.T) {
formatter := &formatters.JSON{
DisableTimestamp: true,
DisableStacktrace: true,
FieldSorter: reverseSorter,
KeyGroupFields: "group",
}
buf := &test.Buffer{}
target := targets.NewWriterTarget(buf)
err := lgr.AddTarget(target, "jsonTest", filter, formatter, 1000)
require.NoError(t, err)
logger := lgr.NewLogger().With(
logr.String("middle_name", "Thomas"),
logr.String("last_name", "Wiggin"),
logr.String("first_name", "Ender"),
)
logger.Error("This is an error.")
err = lgr.Flush()
require.NoError(t, err)
want := NL(`{"level":"error","msg":"This is an error.","group":{"middle_name":"Thomas","last_name":"Wiggin","first_name":"Ender"}}`)
if strings.Compare(want, buf.String()) != 0 {
t.Errorf("JSON does not match: expected %s got %s", want, buf.String())
}
})
t.Run("sorted, three fields, grouped, with caller", func(t *testing.T) {
formatter := &formatters.JSON{
DisableTimestamp: true,
DisableStacktrace: true,
EnableCaller: true,
KeyGroupFields: "group",
FieldSorter: sorter,
}
buf := &test.Buffer{}
target := targets.NewWriterTarget(buf)
err := lgr.AddTarget(target, "jsonTest", filter, formatter, 1000)
require.NoError(t, err)
logger := lgr.NewLogger().With(
logr.String("middle_name", "Thomas"),
logr.String("last_name", "Wiggin"),
logr.String("first_name", "Ender"),
)
logger.Error("This is an error.")
err = lgr.Flush()
require.NoError(t, err)
// {"level":"error","msg":"This is an error.","caller":"formatters/json_test.go:357","group":{"first_name":"Ender","last_name":"Wiggin","middle_name":"Thomas"}}
want := regexp.MustCompile(`^{\"level\":\"error\",\"msg\":\"This is an error\.\",\"caller\":\"formatters/json_test.go:[0-9]+\",\"group\":{\"first_name\":\"Ender\",\"last_name\":\"Wiggin\",\"middle_name\":\"Thomas\"}}`)
assert.Regexp(t, want, buf.String(), "JSON does not match")
})
err := lgr.Shutdown()
require.NoError(t, err)
}
func sorter(fields []logr.Field) []logr.Field {
cf := make([]logr.Field, len(fields))
copy(cf, fields)
sort.Sort(logr.FieldSorter(cf))
return cf
}
func
|
(fields []logr.Field) []logr.Field {
cf := make([]logr.Field, len(fields))
copy(cf, fields)
sort.Sort(sort.Reverse(logr.FieldSorter(cf)))
return cf
}
func NL(s string) string {
return s + "\n"
}
|
reverseSorter
|
ons-search-input.d.ts
|
import { ElementRef, EventEmitter, OnChanges, OnDestroy, SimpleChange } from '@angular/core';
/**
* @element ons-search-input
* @directive OnsSearchInput
* @selector ons-search-input
* @description
* [en]Angular directive for `<ons-search-input>` component.[/en]
* [ja]`<ons-search-input>`要素のAngularディレクティブです。[/ja]
* @example
* <ons-search-input [(value)]="value"></ons-search-input>
*/
export declare class OnsSearchInput implements
|
estroy {
private _elementRef;
private _element;
private _boundOnChange;
/**
* @input value
* @type {string}
* @desc
* [en]Input value for the internal `<input>` element.[/en]
* [ja]内部の`input`要素に対する入力値を設定します。[/ja]
*/
_value: string;
/**
* @output valueChange
* @type {string}
* @desc
* [en]Triggers when the value is changed.[/en]
* [ja]内部の`input`要素の値が変更された時に発火します。[/ja]
*/
_valueChange: EventEmitter<string>;
constructor(_elementRef: ElementRef);
_onChange(event: any): void;
ngOnChanges(changeRecord: {
[key: string]: SimpleChange;
}): void;
readonly element: any;
readonly nativeElement: any;
ngOnDestroy(): void;
}
|
OnChanges, OnD
|
check_tum_user_state.py
|
import ldap3
from allauth.socialaccount.models import SocialAccount
from django.conf import settings
from base.management import BaseCommand
def init_ldap():
server = ldap3.Server("ldap://ads.mwn.de")
connection = ldap3.Connection(
server,
"CN=%s,OU=Users,ou=TU,ou=IAM,dc=ads,dc=mwn,dc=de" % settings.LDAP_USER,
settings.LDAP_PASSWORD,
)
return connection
def check_user_in_ldap(connection, uid):
return connection.search(
"ou=Users,ou=TU,ou=IAM,dc=ads,dc=mwn,dc=de", "(uid=%s)" % uid
)
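# For example, check_user_in_ldap(connection, "ab12cde") (a hypothetical TUM
# uid) searches the user subtree with the filter "(uid=ab12cde)"; a truthy
# result is treated below as "the account still exists in Active Directory".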
class Command(BaseCommand):
help = "Verify state of users in Active Directory. Deactivate old users."
def add_arguments(self, parser):
parser.add_argument(
"--delete",
action="store_true",
dest="delete",
default=False,
help="Whether to delete instead of deactivate old users.",
)
def handle(self, *args, **options):
if not (
hasattr(settings, "LDAP_USER")
and hasattr(settings, "LDAP_PASSWORD")
):
self.stdout.write(
"Please set LDAP_USER and LDAP_PASSWORD in configuration.py."
)
return
connection = init_ldap()
connection.bind()
activate_count = 0
delete_count = 0
deactivate_count = 0
for sa in SocialAccount.objects.all():
if check_user_in_ldap(connection, sa.uid):
if not sa.user.is_active:
sa.user.is_active = True
sa.user.save()
activate_count += 1
elif options["delete"]:
|
elif sa.user.is_active:
sa.user.is_active = False
sa.user.save()
deactivate_count += 1
connection.unbind()
self.stdout.write(
"Activated: %s, Deactivated: %s, Deleted: %s, Verified: %s"
% (
activate_count,
deactivate_count,
delete_count,
len(SocialAccount.objects.all()),
)
)
|
sa.user.delete()
delete_count += 1
|
listening_for_updates.rs
|
extern crate eventific;
extern crate futures;
extern crate tokio;
extern crate sloggers;
#[macro_use]
extern crate slog;
#[macro_use]
extern crate strum_macros;
use eventific::{EventificBuilder, Eventific};
use futures::future::Future;
use eventific::store::MemoryStore;
use sloggers::terminal::TerminalLoggerBuilder;
use sloggers::Build;
use uuid::Uuid;
use futures::Stream;
use sloggers::types::Format;
#[derive(Default, Debug)]
struct SimpleState;
#[derive(Debug, Clone, EnumIter, AsRefStr)]
enum EventData {
TitleChanged(String)
}
/// This example showcases how you can use eventific to store and retrieve events. In a real-world use case this would
/// probably not happen in the same service; you would instead have one service for persisting and another for reading.
fn main() {
let logger = TerminalLoggerBuilder::new().format(Format::Compact).build().unwrap();
let run_future = EventificBuilder::new()
.logger(&logger)
.start()
|
info!(logger, "Received aggregate {:#?}", aggregate);
})
.take(3)
.collect()
.map_err(|err| eprintln!("{}", err))
.map(|_|());
tokio::spawn(listen_stream);
futures::future::join_all(vec![
eventific.create_aggregate(Uuid::new_v4(), vec![EventData::TitleChanged("HelloWorld".to_owned())], None),
eventific.create_aggregate(Uuid::new_v4(), vec![EventData::TitleChanged("HelloWorld".to_owned())], None),
eventific.create_aggregate(Uuid::new_v4(), vec![EventData::TitleChanged("HelloWorld".to_owned())], None),
])
.map(|_|())
})
.map_err(|err| eprintln!("{}", err));
    // We always start eventific by scheduling it on an executor; Tokio is one of the simplest options.
tokio::run(run_future);
}
|
.and_then(move |eventific: Eventific<SimpleState, EventData>| {
// Setup listener
let listen_stream = eventific.updated_aggregates().inspect(move |aggregate| {
|
implied_outlives_bounds.rs
|
//! Provider for the `implied_outlives_bounds` query.
//! Do not call this query directly. See
//! [`rustc_trait_selection::traits::query::type_op::implied_outlives_bounds`].
use rustc_hir as hir;
use rustc_infer::infer::canonical::{self, Canonical};
use rustc_infer::infer::{InferCtxt, TyCtxtInferExt};
use rustc_infer::traits::TraitEngineExt as _;
use rustc_middle::ty::outlives::Component;
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable};
use rustc_span::source_map::DUMMY_SP;
use rustc_trait_selection::infer::InferCtxtBuilderExt;
use rustc_trait_selection::traits::query::outlives_bounds::OutlivesBound;
use rustc_trait_selection::traits::query::{CanonicalTyGoal, Fallible, NoSolution};
use rustc_trait_selection::traits::wf;
use rustc_trait_selection::traits::FulfillmentContext;
use rustc_trait_selection::traits::TraitEngine;
use smallvec::{smallvec, SmallVec};
crate fn provide(p: &mut Providers) {
*p = Providers { implied_outlives_bounds, ..*p };
}
fn implied_outlives_bounds<'tcx>(
tcx: TyCtxt<'tcx>,
goal: CanonicalTyGoal<'tcx>,
) -> Result<
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, Vec<OutlivesBound<'tcx>>>>,
NoSolution,
> {
tcx.infer_ctxt().enter_canonical_trait_query(&goal, |infcx, _fulfill_cx, key| {
let (param_env, ty) = key.into_parts();
compute_implied_outlives_bounds(&infcx, param_env, ty)
})
}
fn compute_implied_outlives_bounds<'tcx>(
infcx: &InferCtxt<'_, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
ty: Ty<'tcx>,
) -> Fallible<Vec<OutlivesBound<'tcx>>> {
let tcx = infcx.tcx;
// Sometimes when we ask what it takes for T: WF, we get back that
// U: WF is required; in that case, we push U onto this stack and
// process it next. Currently (at least) these resulting
// predicates are always guaranteed to be a subset of the original
// type, so we need not fear non-termination.
let mut wf_args = vec![ty.into()];
let mut implied_bounds = vec![];
let mut fulfill_cx = FulfillmentContext::new();
while let Some(arg) = wf_args.pop() {
// Compute the obligations for `arg` to be well-formed. If `arg` is
        // an unresolved inference variable, just substitute an empty set
// -- because the return type here is going to be things we *add*
// to the environment, it's always ok for this set to be smaller
// than the ultimate set. (Note: normally there won't be
// unresolved inference variables here anyway, but there might be
// during typeck under some circumstances.)
let obligations =
wf::obligations(infcx, param_env, hir::CRATE_HIR_ID, arg, DUMMY_SP).unwrap_or(vec![]);
// N.B., all of these predicates *ought* to be easily proven
// true. In fact, their correctness is (mostly) implied by
|
//
// - Some `T::Foo` gets normalized, resulting in a
// variable `_1` and a `T: Trait<Foo=_1>` constraint
// (not sure why it couldn't immediately get
// solved). This result of `_1` got cached.
// - These obligations were dropped on the floor here,
// rather than being registered.
// - Then later we would get a request to normalize
// `T::Foo` which would result in `_1` being used from
// the cache, but hence without the `T: Trait<Foo=_1>`
// constraint. As a result, `_1` never gets resolved,
// and we get an ICE (in dropck).
//
// Therefore, we register any predicates involving
// inference variables. We restrict ourselves to those
// involving inference variables both for efficiency and
        // to avoid duplicate errors that would otherwise show up.
fulfill_cx.register_predicate_obligations(
infcx,
obligations.iter().filter(|o| o.predicate.has_infer_types_or_consts()).cloned(),
);
// From the full set of obligations, just filter down to the
// region relationships.
implied_bounds.extend(obligations.into_iter().flat_map(|obligation| {
assert!(!obligation.has_escaping_bound_vars());
match obligation.predicate.kind() {
&ty::PredicateKind::ForAll(..) => vec![],
&ty::PredicateKind::Atom(atom) => match atom {
ty::PredicateAtom::Trait(..)
| ty::PredicateAtom::Subtype(..)
| ty::PredicateAtom::Projection(..)
| ty::PredicateAtom::ClosureKind(..)
| ty::PredicateAtom::ObjectSafe(..)
| ty::PredicateAtom::ConstEvaluatable(..)
| ty::PredicateAtom::ConstEquate(..)
| ty::PredicateAtom::TypeWellFormedFromEnv(..) => vec![],
ty::PredicateAtom::WellFormed(arg) => {
wf_args.push(arg);
vec![]
}
ty::PredicateAtom::RegionOutlives(ty::OutlivesPredicate(r_a, r_b)) => {
vec![OutlivesBound::RegionSubRegion(r_b, r_a)]
}
ty::PredicateAtom::TypeOutlives(ty::OutlivesPredicate(ty_a, r_b)) => {
let ty_a = infcx.resolve_vars_if_possible(&ty_a);
let mut components = smallvec![];
tcx.push_outlives_components(ty_a, &mut components);
implied_bounds_from_components(r_b, components)
}
},
}
}));
}
// Ensure that those obligations that we had to solve
// get solved *here*.
match fulfill_cx.select_all_or_error(infcx) {
Ok(()) => Ok(implied_bounds),
Err(_) => Err(NoSolution),
}
}
/// When we have an implied bound that `T: 'a`, we can further break
/// this down to determine what relationships would have to hold for
/// `T: 'a` to hold. We get to assume that the caller has validated
/// those relationships.
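/// For example, if `T = &'b U`, then assuming `T: 'a` lets us record both
/// `'b: 'a` (a region-sub-region bound) and `U: 'a` for the component `U`.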
fn implied_bounds_from_components(
sub_region: ty::Region<'tcx>,
sup_components: SmallVec<[Component<'tcx>; 4]>,
) -> Vec<OutlivesBound<'tcx>> {
sup_components
.into_iter()
.filter_map(|component| {
match component {
Component::Region(r) => Some(OutlivesBound::RegionSubRegion(sub_region, r)),
Component::Param(p) => Some(OutlivesBound::RegionSubParam(sub_region, p)),
Component::Projection(p) => Some(OutlivesBound::RegionSubProjection(sub_region, p)),
Component::EscapingProjection(_) =>
// If the projection has escaping regions, don't
// try to infer any implied bounds even for its
// free components. This is conservative, because
// the caller will still have to prove that those
// free components outlive `sub_region`. But the
// idea is that the WAY that the caller proves
// that may change in the future and we want to
// give ourselves room to get smarter here.
{
None
}
Component::UnresolvedInferenceVariable(..) => None,
}
})
.collect()
}
|
// other parts of the program. However, in #42552, we had
// an annoying scenario where:
|
uploadImg.go
|
package media
import (
"io"
"os"
"path/filepath"
"github.com/chanxuehong/wechat/work/core"
)
// UploadImg uploads a multimedia image and returns the image URL.
func UploadImg(clt *core.Client, filepath string) (link string, err error) {
return uploadImg(clt, filepath)
}
// UploadImgFromReader uploads a multimedia image from a reader.
// NOTE: the filename parameter is not a file path; it is the value of the filename field in the multipart/form-data body.
func UploadImgFromReader(clt *core.Client, filename string, reader io.Reader) (link string, err error) {
return uploadImgFromReader(clt, filename, reader)
}
// =====================================================================================================================
func uploadImg(clt *core.Client, _filepath string) (link st
|
error) {
file, err := os.Open(_filepath)
if err != nil {
return
}
defer file.Close()
return uploadImgFromReader(clt, filepath.Base(_filepath), file)
}
func uploadImgFromReader(clt *core.Client, filename string, reader io.Reader) (link string, err error) {
const incompleteURL = "https://qyapi.weixin.qq.com/cgi-bin/media/uploadimg?access_token="
var fields = []core.MultipartFormField{
{
IsFile: true,
Name: "media",
FileName: filename,
Value: reader,
},
}
var result struct {
core.Error
Url string `json:"url"`
}
if err = clt.PostMultipartForm(incompleteURL, fields, &result); err != nil {
return
}
if result.ErrCode != core.ErrCodeOK {
err = &result.Error
return
}
link = result.Url
return
}
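// Usage sketch (assumes a configured *core.Client named clt; client
// construction is SDK-specific and omitted here):
//
//	link, err := media.UploadImg(clt, "/tmp/banner.jpg")
//	if err != nil {
//		// handle error
//	}
//	// link now holds the URL returned by the uploadimg endpoint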
|
ring, err
|
component.template.rs
|
#![allow(dead_code)]
enum_from_primitive! {
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum ComponentType {
{% for _, component in components %}
{{ component.name }} = {{ component.index }},
{% endfor %}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum
|
{
{% for _, component in components %}
{% if component.type %}
{{ component.name }}({{ component.type }}),
{% else %}
{{ component.name }},
{% endif %}
{% endfor %}
}
pub enum ComponentRef<'a> {
{% for _, component in components %}
{% if component.type %}
{{ component.name }}(&'a {{ component.type }}),
{% else %}
{{ component.name }},
{% endif %}
{% endfor %}
}
impl ComponentValue {
pub fn typ(&self) -> ComponentType {
match self {
{% for _, component in components %}
{% if component.type %}
&ComponentValue::{{ component.name }}(_) => ComponentType::{{ component.name }},
{% else %}
&ComponentValue::{{ component.name }} => ComponentType::{{ component.name }},
{% endif %}
{% endfor %}
}
}
}
impl<'a> ComponentRef<'a> {
pub fn typ(&self) -> ComponentType {
match self {
{% for _, component in components %}
{% if component.type %}
&ComponentRef::{{ component.name }}(_) => ComponentType::{{ component.name }},
{% else %}
&ComponentRef::{{ component.name }} => ComponentType::{{ component.name }},
{% endif %}
{% endfor %}
}
}
}
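// Rendered sketch for a hypothetical manifest entry
// `position = { name = "Position", type = "Vec2", index = 0 }`:
//
//     pub enum ComponentType { Position = 0 }
//     pub enum ComponentValue { Position(Vec2) }
//     pub enum ComponentRef<'a> { Position(&'a Vec2) }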
|
ComponentValue
|
gen_reading_correction_data.py
|
# -*- coding: utf-8 -*-
# Copyright 2010-2020, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Converter of reading correction data from TSV to binary format.
Usage:
python gen_reading_correction_data.py
--input=input.tsv
--output_value_array=value_array.data
--output_error_array=error_array.data
--output_correction_array=correction_array.data
"""
__author__ = "komatsu"
import codecs
import optparse
from build_tools import code_generator_util
from build_tools import serialized_string_array_builder
def ParseOptions():
"""Parse command line options."""
parser = optparse.OptionParser()
parser.add_option('--input', dest='input', help='Input TSV file path.')
parser.add_option('--output_value_array', dest='output_value_array',
help='Output serialized string array for values.')
parser.add_option('--output_error_array', dest='output_error_array',
help='Output serialized string array for errors.')
parser.add_option('--output_correction_array', dest='output_correction_array',
help='Output serialized string array for corrections.')
return parser.parse_args()[0]
def WriteData(input_path, output_value_array_path, output_error_array_path,
output_correction_array_path):
outputs = []
with codecs.open(input_path, 'r', encoding='utf-8') as input_stream:
|
num_column=3)
# ex. (value, error, correction) = ("雰囲気", "ふいんき", "ふんいき")
for value, error, correction in input_stream:
outputs.append([value, error, correction])
# In order to lookup the entries via |error| with binary search,
# sort outputs here.
outputs.sort(key=lambda x: (x[1], x[0]))
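  # Lookup sketch (illustrative only; the real lookup happens at runtime over
  # the serialized arrays, but the same ordering works with Python's bisect):
  #   import bisect
  #   errors = [e for (_, e, _) in outputs]
  #   i = bisect.bisect_left(errors, error_query)  # error_query is hypothetical
  #   while i < len(outputs) and outputs[i][1] == error_query:
  #       value, _, correction = outputs[i]; i += 1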
serialized_string_array_builder.SerializeToFile(
[value for (value, _, _) in outputs], output_value_array_path)
serialized_string_array_builder.SerializeToFile(
[error for (_, error, _) in outputs], output_error_array_path)
serialized_string_array_builder.SerializeToFile(
[correction for (_, _, correction) in outputs],
output_correction_array_path)
def main():
options = ParseOptions()
WriteData(options.input, options.output_value_array,
options.output_error_array, options.output_correction_array)
if __name__ == '__main__':
main()
|
input_stream = code_generator_util.SkipLineComment(input_stream)
input_stream = code_generator_util.ParseColumnStream(input_stream,
|
ctrl_or_command.py
|
import platform
def
|
():
if platform.system() == 'Darwin':
return 'COMMAND'
return 'CONTROL'
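# Usage sketch (the binding-string format is hypothetical; only
# ctrl_or_command_key itself is defined in this module):
#   shortcut = '%s+S' % ctrl_or_command_key()  # 'COMMAND+S' on macOS, 'CONTROL+S' elsewhere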
|
ctrl_or_command_key
|
channel_client_pvt_test.go
|
// +build !prev
/*
Copyright SecureKey Technologies Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package channel
import (
"fmt"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/hyperledger/fabric-sdk-go/pkg/common/errors/retry"
"github.com/hyperledger/fabric-sdk-go/pkg/common/errors/status"
"github.com/hyperledger/fabric-sdk-go/pkg/common/errors/multi"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/hyperledger/fabric-sdk-go/pkg/client/channel"
"github.com/hyperledger/fabric-sdk-go/pkg/common/providers/fab"
"github.com/hyperledger/fabric-sdk-go/third_party/github.com/hyperledger/fabric/common/cauthdsl"
cb "github.com/hyperledger/fabric-protos-go/common"
"github.com/hyperledger/fabric-sdk-go/pkg/fabsdk"
"github.com/hyperledger/fabric-sdk-go/test/integration"
)
// TestPrivateDataPutAndGet tests put and get for private data
func TestPrivateDataPutAndGet(t *testing.T) {
sdk := mainSDK
orgsContext := setupMultiOrgContext(t, sdk)
err := integration.EnsureChannelCreatedAndPeersJoined(t, sdk, orgChannelID, "orgchannel.tx", orgsContext)
require.NoError(t, err)
coll1 := "collection1"
ccID := integration.GenerateExamplePvtID(true)
collConfig, err := newCollectionConfig(coll1, "OR('Org1MSP.member','Org2MSP.member')", 0, 2, 1000)
require.NoError(t, err)
err = integration.InstallExamplePvtChaincode(orgsContext, ccID)
require.NoError(t, err)
err = integration.InstantiateExamplePvtChaincode(orgsContext, orgChannelID, ccID, "OR('Org1MSP.member','Org2MSP.member')", collConfig)
require.NoError(t, err)
ctxProvider := sdk.ChannelContext(orgChannelID, fabsdk.WithUser(org1User), fabsdk.WithOrg(org1Name))
chClient, err := channel.New(ctxProvider)
require.NoError(t, err)
key1 := "key1"
key2 := "key2"
key3 := "key3"
value1 := "pvtValue1"
value2 := "pvtValue2"
value3 := "pvtValue3"
response, err := chClient.Query(
channel.Request{
ChaincodeID: ccID,
Fcn: "getprivate",
Args: [][]byte{[]byte(coll1), []byte(key1)},
},
channel.WithRetry(retry.DefaultChannelOpts),
)
require.NoError(t, err)
t.Logf("Got response payload: [%s]", string(response.Payload))
require.Nil(t, response.Payload)
response, err = chClient.Query(
channel.Request{
ChaincodeID: ccID,
Fcn: "getprivatebyrange",
Args: [][]byte{[]byte(coll1), []byte(key1), []byte(key3)},
},
channel.WithRetry(retry.DefaultChannelOpts),
)
require.NoError(t, err)
t.Logf("Got response payload: [%s]", string(response.Payload))
require.Empty(t, string(response.Payload))
response, err = chClient.Execute(
channel.Request{
ChaincodeID: ccID,
Fcn: "putprivate",
Args: [][]byte{[]byte(coll1), []byte(key1), []byte(value1)},
},
channel.WithRetry(retry.DefaultChannelOpts),
)
require.NoError(t, err)
require.NotEmptyf(t, response.Responses, "expecting at least one response")
response, err = chClient.Execute(
channel.Request{
ChaincodeID: ccID,
Fcn: "putprivate",
Args: [][]byte{[]byte(coll1), []byte(key2), []byte(value2)},
},
channel.WithRetry(retry.DefaultChannelOpts),
)
require.NoError(t, err)
require.NotEmptyf(t, response.Responses, "expecting at least one response")
response, err = chClient.Execute(
channel.Request{
ChaincodeID: ccID,
Fcn: "putprivate",
Args: [][]byte{[]byte(coll1), []byte(key3), []byte(value3)},
},
channel.WithRetry(retry.TestRetryOpts),
)
require.NoError(t, err)
require.NotEmptyf(t, response.Responses, "expecting at least one response")
response, err = chClient.Query(
channel.Request{
ChaincodeID: ccID,
Fcn: "getprivate",
Args: [][]byte{[]byte(coll1), []byte(key1)},
},
channel.WithRetry(retry.TestRetryOpts),
)
require.NoError(t, err)
t.Logf("Got response payload: %s", string(response.Payload))
require.Equal(t, value1, string(response.Payload))
response, err = chClient.Query(
channel.Request{
ChaincodeID: ccID,
Fcn: "getprivatebyrange",
Args: [][]byte{[]byte(coll1), []byte(key1), []byte(key3)},
},
channel.WithRetry(retry.DefaultChannelOpts),
)
require.NoError(t, err)
t.Logf("Got response payload: [%s]", string(response.Payload))
require.NotEmpty(t, string(response.Payload))
}
// TestPrivateData tests selection of endorsers in the case where the chaincode policy contains a different
// set of MSPs than that of the collection policy. The chaincode policy is defined as (Org1MSP OR Org2MSP) and the
// collection policy is defined as (Org2MSP).
func TestPrivateData(t *testing.T)
|
// TestPrivateDataWithOrgDown tests selection of endorsers in the case where a chaincode endorsement can succeed with
// none of the peers of a private collection's org being available. The chaincode policy is defined as (Org1MSP OR Org2MSP)
// and the collection policy is defined as (Org2MSP).
func TestPrivateDataWithOrgDown(t *testing.T) {
sdk := mainSDK
orgsContext := setupMultiOrgContext(t, sdk)
// Just join peers in org1 for now
err := integration.EnsureChannelCreatedAndPeersJoined(t, sdk, orgChannelID, "orgchannel.tx", orgsContext)
require.NoError(t, err)
coll1 := "collection1"
ccID := integration.GenerateExamplePvtID(true)
collConfig, err := newCollectionConfig(coll1, "OR('Org3MSP.member')", 0, 2, 1000)
require.NoError(t, err)
err = integration.InstallExamplePvtChaincode(orgsContext, ccID)
require.NoError(t, err)
err = integration.InstantiateExamplePvtChaincode(orgsContext, orgChannelID, ccID, "OR('Org1MSP.member','Org2MSP.member','Org3MSP.member')", collConfig)
require.NoError(t, err)
ctxProvider := sdk.ChannelContext(orgChannelID, fabsdk.WithUser(org1User), fabsdk.WithOrg(org1Name))
chClient, err := channel.New(ctxProvider)
require.NoError(t, err)
t.Run("Specified Invocation Chain", func(t *testing.T) {
_, err := chClient.Execute(
channel.Request{
ChaincodeID: ccID,
Fcn: "putprivate",
Args: [][]byte{[]byte(coll1), []byte("key"), []byte("value")},
InvocationChain: []*fab.ChaincodeCall{
{ID: ccID, Collections: []string{coll1}},
},
},
channel.WithRetry(retry.TestRetryOpts),
)
require.Errorf(t, err, "expecting error due to all Org2MSP peers down")
})
t.Run("Automatic Invocation Chain", func(t *testing.T) {
response, err := chClient.Execute(
channel.Request{
ChaincodeID: ccID,
Fcn: "putprivate",
Args: [][]byte{[]byte(coll1), []byte("key"), []byte("value")},
},
channel.WithRetry(retry.TestRetryOpts),
)
require.NoError(t, err)
t.Logf("Got %d response(s)", len(response.Responses))
require.NotEmptyf(t, response.Responses, "expecting at least one response")
})
}
// Data in a private data collection must be left untouched if the client receives an MVCC_READ_CONFLICT error.
// We test this by submitting two cumulative changes to a private data collection, ensuring that the MVCC_READ_CONFLICT error
// is reproduced, then asserting that only one of the changes was applied.
func TestChannelClientRollsBackPvtDataIfMvccReadConflict(t *testing.T) {
orgsContext := setupMultiOrgContext(t, mainSDK)
require.NoError(t, integration.EnsureChannelCreatedAndPeersJoined(t, mainSDK, orgChannelID, "orgchannel.tx", orgsContext))
// private data collection used for test
const coll = "collection1"
// collection key used for test
const key = "collection_key"
ccID := integration.GenerateExamplePvtID(true)
collConfig, err := newCollectionConfig(coll, "OR('Org1MSP.member','Org2MSP.member','Org3MSP.member')", 0, 2, 1000)
require.NoError(t, err)
require.NoError(t, integration.InstallExamplePvtChaincode(orgsContext, ccID))
require.NoError(t, integration.InstantiateExamplePvtChaincode(orgsContext, orgChannelID, ccID, "OR('Org1MSP.member','Org2MSP.member','Org3MSP.member')", collConfig))
ctxProvider := mainSDK.ChannelContext(orgChannelID, fabsdk.WithUser(org1User), fabsdk.WithOrg(org1Name))
chClient, err := channel.New(ctxProvider)
require.NoError(t, err)
var errMtx sync.Mutex
errs := multi.Errors{}
var wg sync.WaitGroup
// test function; invokes a CC function that mutates the private data collection
changePvtData := func(amount int) {
defer wg.Done()
_, err := chClient.Execute(
channel.Request{
ChaincodeID: ccID,
Fcn: "addToInt",
Args: [][]byte{[]byte(coll), []byte(key), []byte(strconv.Itoa(amount))},
},
)
if err != nil {
errMtx.Lock()
errs = append(errs, err)
errMtx.Unlock()
return
}
}
// expected value at the end of the test
const expected = 10
wg.Add(2)
go changePvtData(expected)
go changePvtData(expected)
wg.Wait()
// ensure the MVCC_READ_CONFLICT was reproduced
require.Truef(t, len(errs) > 0 && strings.Contains(errs[0].Error(), "MVCC_READ_CONFLICT"), "could not reproduce MVCC_READ_CONFLICT")
// read current value of private data collection
//resp, err := chClient.Query(
// channel.Request{
// ChaincodeID: ccID,
// Fcn: "getprivate",
// Args: [][]byte{[]byte(coll), []byte(key)},
// },
// channel.WithRetry(retry.TestRetryOpts),
//)
resp, err := retry.NewInvoker(retry.New(retry.TestRetryOpts)).Invoke(
func() (interface{}, error) {
b, e := chClient.Query(
channel.Request{
ChaincodeID: ccID,
Fcn: "getprivate",
Args: [][]byte{[]byte(coll), []byte(key)},
},
channel.WithRetry(retry.TestRetryOpts),
)
if e != nil || strings.TrimSpace(string(b.Payload)) == "" {
return nil, status.New(status.TestStatus, status.GenericTransient.ToInt32(), fmt.Sprintf("getprivate data returned error: %v", e), nil)
}
return b, e
},
)
require.NoErrorf(t, err, "error attempting to read private data")
require.NotEmptyf(t, strings.TrimSpace(string(resp.(channel.Response).Payload)), "reading private data returned empty response")
actual, err := strconv.Atoi(string(resp.(channel.Response).Payload))
require.NoError(t, err)
assert.Truef(t, actual == expected, "Private data not rolled back during MVCC_READ_CONFLICT")
}
func newCollectionConfig(colName, policy string, reqPeerCount, maxPeerCount int32, blockToLive uint64) (*cb.CollectionConfig, error) {
p, err := cauthdsl.FromString(policy)
if err != nil {
return nil, err
}
cpc := &cb.CollectionPolicyConfig{
Payload: &cb.CollectionPolicyConfig_SignaturePolicy{
SignaturePolicy: p,
},
}
return &cb.CollectionConfig{
Payload: &cb.CollectionConfig_StaticCollectionConfig{
StaticCollectionConfig: &cb.StaticCollectionConfig{
Name: colName,
MemberOrgsPolicy: cpc,
RequiredPeerCount: reqPeerCount,
MaximumPeerCount: maxPeerCount,
BlockToLive: blockToLive,
},
},
}, nil
}
// TestPrivateDataReconcilePutAndGet tests put and get for private data with reconciliation of missing eligible data on
// some peers (org2's peers). The idea is to set up a test collection with a single-org member policy, put/get private
// data, then update the collection config to a two-org policy; the private data should then be reconciled on the peers
// of the newly added org.
func TestPrivateDataReconcilePutAndGet(t *testing.T) {
sdk := mainSDK
singleOrgPolicy := "AND('Org1MSP.member')"
multiOrgsPolicy := "OR('Org1MSP.member','Org2MSP.member')"
coll1 := "collectionx"
ccID := integration.GenerateExamplePvtID(true)
orgsContext := setupMultiOrgContext(t, sdk)
// instantiate and install CC on all peers using collection policy for org1 only then put/get some pvt data
runPvtDataPreReconcilePutAndGet(t, sdk, orgsContext, singleOrgPolicy, ccID, coll1)
// now verify pvt data is not available on org2 peers
verifyPvtDataPreReconcileGet(t, sdk, ccID, coll1)
// upgrade CC to include org2 in collection policy then verify pvt data is available on org2's peers
runPvtDataPostReconcileGet(t, sdk, orgsContext, multiOrgsPolicy, ccID, coll1)
}
func runPvtDataPreReconcilePutAndGet(t *testing.T, sdk *fabsdk.FabricSDK, orgsContext []*integration.OrgContext, policy, ccID, coll1 string) {
err := integration.EnsureChannelCreatedAndPeersJoined(t, sdk, orgChannelID, "orgchannel.tx", orgsContext)
require.NoError(t, err)
collConfig, err := newCollectionConfig(coll1, policy, 0, 2, 1000)
require.NoError(t, err)
err = integration.InstallExamplePvtChaincode(orgsContext, ccID)
require.NoError(t, err)
err = integration.InstantiateExamplePvtChaincode(orgsContext, orgChannelID, ccID, policy, collConfig)
require.NoError(t, err)
ctxProvider := sdk.ChannelContext(orgChannelID, fabsdk.WithUser(org1User), fabsdk.WithOrg(org1Name))
chClient, err := channel.New(ctxProvider)
require.NoError(t, err)
key1 := "key1"
key2 := "key2"
key3 := "key3"
value1 := "pvtValue1"
value2 := "pvtValue2"
value3 := "pvtValue3"
response, err := chClient.Query(
channel.Request{
ChaincodeID: ccID,
Fcn: "getprivate",
Args: [][]byte{[]byte(coll1), []byte(key1)},
},
channel.WithRetry(retry.DefaultChannelOpts),
)
require.NoError(t, err)
t.Logf("Got response payload: [%s]", string(response.Payload))
require.Nil(t, response.Payload)
response, err = chClient.Query(
channel.Request{
ChaincodeID: ccID,
Fcn: "getprivatebyrange",
Args: [][]byte{[]byte(coll1), []byte(key1), []byte(key3)},
},
channel.WithRetry(retry.DefaultChannelOpts),
)
require.NoError(t, err)
t.Logf("Got response payload: [%s]", string(response.Payload))
require.Empty(t, string(response.Payload))
response, err = chClient.Execute(
channel.Request{
ChaincodeID: ccID,
Fcn: "putprivate",
Args: [][]byte{[]byte(coll1), []byte(key1), []byte(value1)},
},
channel.WithRetry(retry.DefaultChannelOpts),
)
require.NoError(t, err)
require.NotEmptyf(t, response.Responses, "expecting at least one response")
response, err = chClient.Execute(
channel.Request{
ChaincodeID: ccID,
Fcn: "putprivate",
Args: [][]byte{[]byte(coll1), []byte(key2), []byte(value2)},
},
channel.WithRetry(retry.DefaultChannelOpts),
)
require.NoError(t, err)
require.NotEmptyf(t, response.Responses, "expecting at least one response")
response, err = chClient.Execute(
channel.Request{
ChaincodeID: ccID,
Fcn: "putprivate",
Args: [][]byte{[]byte(coll1), []byte(key3), []byte(value3)},
},
channel.WithRetry(retry.TestRetryOpts),
)
require.NoError(t, err)
require.NotEmptyf(t, response.Responses, "expecting at least one response")
response, err = chClient.Query(
channel.Request{
ChaincodeID: ccID,
Fcn: "getprivate",
Args: [][]byte{[]byte(coll1), []byte(key1)},
},
channel.WithRetry(retry.TestRetryOpts),
)
require.NoError(t, err)
t.Logf("Got response payload for getprivate: %s", string(response.Payload))
require.Equal(t, value1, string(response.Payload))
response, err = chClient.Query(
channel.Request{
ChaincodeID: ccID,
Fcn: "getprivatebyrange",
Args: [][]byte{[]byte(coll1), []byte(key1), []byte(key3)},
},
channel.WithRetry(retry.DefaultChannelOpts),
)
require.NoError(t, err)
t.Logf("Got response payload for getprivatebyrange: [%s]", string(response.Payload))
require.NotEmpty(t, string(response.Payload))
}
func verifyPvtDataPreReconcileGet(t *testing.T, sdk *fabsdk.FabricSDK, ccID, coll1 string) {
// create ctxProvider for org2 to query org2 peers for pvt data (should be empty)
ctxProvider := sdk.ChannelContext(orgChannelID, fabsdk.WithUser(org1User), fabsdk.WithOrg(org2Name))
// org2 peers are the only targets to test pre reconciliation as they should not have the pvt data as per the collection policy (singleOrgPolicy)
org2TargetOpts := channel.WithTargetEndpoints("peer0.org2.example.com", "peer1.org2.example.com")
chClient, err := channel.New(ctxProvider)
require.NoError(t, err)
key1 := "key1"
key3 := "key3"
response, err := chClient.Query(
channel.Request{
ChaincodeID: ccID,
Fcn: "getprivate",
Args: [][]byte{[]byte(coll1), []byte(key1)},
},
channel.WithRetry(retry.TestRetryOpts),
org2TargetOpts, // query org2 peers to ensure they don't have pvt data
)
require.Error(t, err)
t.Logf("Got response payload for getprivate: %s", string(response.Payload))
require.Empty(t, response.Payload)
response, err = chClient.Query(
channel.Request{
ChaincodeID: ccID,
Fcn: "getprivatebyrange",
Args: [][]byte{[]byte(coll1), []byte(key1), []byte(key3)},
},
channel.WithRetry(retry.DefaultChannelOpts),
org2TargetOpts, // query org2 peers to ensure they don't have pvt data
)
	// For some reason the peer returns an error for the getprivate invocation ("Failed to handle GET_STATE. error:
	// private data matching public hash version is not available.") but not for getprivatebyrange, which just returns an empty payload.
require.NoError(t, err)
t.Logf("Got response payload for getprivatebyrange: [%s]", string(response.Payload))
require.Empty(t, string(response.Payload))
}
func runPvtDataPostReconcileGet(t *testing.T, sdk *fabsdk.FabricSDK, orgsContext []*integration.OrgContext, policy, ccID, coll1 string) {
collConfig, err := newCollectionConfig(coll1, policy, 0, 2, 1000)
require.NoError(t, err)
// org2 peers are the only targets to test post reconciliation as they should have the pvt data after cc upgrade as per the new collection policy (multiOrgsPolicy)
org2TargetOpts := channel.WithTargetEndpoints("peer0.org2.example.com", "peer1.org2.example.com")
err = integration.UpgradeExamplePvtChaincode(orgsContext, orgChannelID, ccID, policy, collConfig)
require.NoError(t, err)
	// wait for pvt data reconciliation to occur on org2's peers
time.Sleep(2 * time.Second)
// create ctxProvider for org2 to query org2 peers for pvt data (should be not empty/reconciled)
ctxProvider := sdk.ChannelContext(orgChannelID, fabsdk.WithUser(org1User), fabsdk.WithOrg(org2Name))
chClient, err := channel.New(ctxProvider)
require.NoError(t, err)
key1 := "key1"
key3 := "key3"
value1 := "pvtValue1"
response, err := chClient.Query(
channel.Request{
ChaincodeID: ccID,
Fcn: "getprivate",
Args: [][]byte{[]byte(coll1), []byte(key1)},
},
channel.WithRetry(retry.TestRetryOpts),
org2TargetOpts,
)
require.NoError(t, err)
t.Logf("Got response payload for getprivate: %s", string(response.Payload))
require.Equal(t, value1, string(response.Payload))
response, err = chClient.Query(
channel.Request{
ChaincodeID: ccID,
Fcn: "getprivatebyrange",
Args: [][]byte{[]byte(coll1), []byte(key1), []byte(key3)},
},
channel.WithRetry(retry.DefaultChannelOpts),
org2TargetOpts,
)
require.NoError(t, err)
t.Logf("Got response payload for getprivatebyrange: [%s]", string(response.Payload))
require.NotEmpty(t, string(response.Payload))
}
|
{
sdk := mainSDK
orgsContext := setupMultiOrgContext(t, sdk)
err := integration.EnsureChannelCreatedAndPeersJoined(t, sdk, orgChannelID, "orgchannel.tx", orgsContext)
require.NoError(t, err)
coll1 := "collection1"
ccID := integration.GenerateExamplePvtID(true)
collConfig, err := newCollectionConfig(coll1, "OR('Org2MSP.member')", 0, 2, 1000)
require.NoError(t, err)
err = integration.InstallExamplePvtChaincode(orgsContext, ccID)
require.NoError(t, err)
err = integration.InstantiateExamplePvtChaincode(orgsContext, orgChannelID, ccID, "OR('Org1MSP.member','Org2MSP.member')", collConfig)
require.NoError(t, err)
ctxProvider := sdk.ChannelContext(orgChannelID, fabsdk.WithUser(org1User), fabsdk.WithOrg(org1Name))
chClient, err := channel.New(ctxProvider)
require.NoError(t, err)
t.Run("Specified Invocation Chain", func(t *testing.T) {
response, err := chClient.Execute(
channel.Request{
ChaincodeID: ccID,
Fcn: "putprivate",
Args: [][]byte{[]byte(coll1), []byte("key"), []byte("value")},
InvocationChain: []*fab.ChaincodeCall{
{ID: ccID, Collections: []string{coll1}},
},
},
channel.WithRetry(retry.TestRetryOpts),
)
require.NoError(t, err)
t.Logf("Got %d response(s)", len(response.Responses))
require.NotEmptyf(t, response.Responses, "expecting at least one response")
})
t.Run("Auto-detect Invocation Chain", func(t *testing.T) {
response, err := chClient.Execute(
channel.Request{
ChaincodeID: ccID,
Fcn: "putprivate",
Args: [][]byte{[]byte(coll1), []byte("key"), []byte("value")},
},
channel.WithRetry(retry.TestRetryOpts),
)
require.NoError(t, err)
t.Logf("Got %d response(s)", len(response.Responses))
require.NotEmptyf(t, response.Responses, "expecting at least one response")
})
}
|
test_IPuzzleLedger.rs
|
use super::*;
use crate::AtoFinanceLedger;
#[test]
fn test_do_bonus() {
new_test_ext().execute_with(|| {
System::set_block_number(5);
const ACCOUNT_ID_1: u64 = 1;
const ACCOUNT_ID_2: u64 = 2;
// Dispatch a signed extrinsic.
assert_eq!(Balances::free_balance(ACCOUNT_ID_1), 100_000_000_000_000);
assert_eq!(Balances::free_balance(ACCOUNT_ID_2), 200_000_000_000_000);
//
let puzzle_hash = toVec("TEST_PUZZLE_HASH");
// assert_noop!(
// AtochaPot::do_bonus(puzzle_hash.clone(), ACCOUNT_ID_1, 150000000000000),
// Error::<Test>::InsufficientBalance
// );
// Get Error::<Test>::InsufficientBalance
let res = AtochaPot::do_bonus(
puzzle_hash.clone(),
ACCOUNT_ID_1,
150_000_000_000_000,
5u32.into(),
);
assert!(res.is_err());
// pid: PuzzleSubjectHash,
// who: T::AccountId,
// amount: BalanceOf<T>,
assert_ok!(AtochaPot::do_bonus(
puzzle_hash.clone(),
ACCOUNT_ID_1,
50_000_000_000_000,
5u32.into()
));
assert_eq!(Balances::free_balance(ACCOUNT_ID_1), 50_000_000_000_000);
let pot_ledger = AtoFinanceLedger::<Test>::get(&puzzle_hash);
assert_eq!(pot_ledger.funds, 50_000_000_000_000);
assert_eq!(pot_ledger.total, 50_000_000_000_000);
// Change owner not allowed.
assert_noop!(
AtochaPot::do_bonus(puzzle_hash.clone(), ACCOUNT_ID_2, 50_000_000_000_000, 5u32.into()),
Error::<Test>::LedgerOwnerNotMatch
);
		// Additional bonus from the same owner is allowed.
assert_ok!(AtochaPot::do_bonus(
puzzle_hash.clone(),
ACCOUNT_ID_1,
10_000_000_000_000,
5u32.into()
));
assert_eq!(Balances::free_balance(ACCOUNT_ID_1), 40_000_000_000_000);
let pot_ledger = AtoFinanceLedger::<Test>::get(&puzzle_hash);
assert_eq!(pot_ledger.funds, 60_000_000_000_000);
assert_eq!(pot_ledger.total, 60_000_000_000_000);
});
}
#[test]
fn
|
() {
new_test_ext().execute_with(|| {
const ACCOUNT_ID_1: u64 = 1;
const ACCOUNT_ID_2: u64 = 2;
// Dispatch a signed extrinsic.
assert_eq!(Balances::free_balance(ACCOUNT_ID_1), 100_000_000_000_000);
assert_eq!(Balances::free_balance(ACCOUNT_ID_2), 200_000_000_000_000);
//
let puzzle_hash = toVec("TEST_PUZZLE_HASH");
// puzzle must exists.
assert_noop!(
AtochaPot::do_sponsorship(
puzzle_hash.clone(),
ACCOUNT_ID_1,
20_000_000_000_000,
1u32.into(),
"Some-Things".as_bytes().to_vec()
),
Error::<Test>::PuzzleNotExists
);
assert_ok!(AtochaPot::do_bonus(
puzzle_hash.clone(),
ACCOUNT_ID_1,
10_000_000_000_000,
5u32.into()
));
assert_eq!(Balances::free_balance(ACCOUNT_ID_1), 90_000_000_000_000);
let pot_ledger = AtoFinanceLedger::<Test>::get(&puzzle_hash);
assert_eq!(pot_ledger.funds, 10_000_000_000_000);
assert_eq!(pot_ledger.total, 10_000_000_000_000);
assert_ok!(AtochaPot::do_sponsorship(
puzzle_hash.clone(),
ACCOUNT_ID_1,
20_000_000_000_000,
5u32.into(), // block number
"Some-Things-1".as_bytes().to_vec()
));
let pot_ledger = AtoFinanceLedger::<Test>::get(&puzzle_hash);
assert_eq!(pot_ledger.funds, 10_000_000_000_000);
assert_eq!(pot_ledger.total, 30_000_000_000_000);
assert_eq!(pot_ledger.sponsor_list.len(), 2);
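        // Note: the assertions below suggest sponsor_list is ordered newest-first,
        // with the latest sponsorship at index 0 and the initial bonus entry last.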
assert_eq!(
pot_ledger.sponsor_list[0],
SponsorData {
sponsor: ACCOUNT_ID_1,
funds: 20_000_000_000_000,
create_bn: 5,
reason: toVec("Some-Things-1")
}
);
assert_eq!(
pot_ledger.sponsor_list[1],
SponsorData {
sponsor: ACCOUNT_ID_1,
funds: 10_000_000_000_000,
create_bn: 5,
reason: Vec::new(),
}
);
assert_ok!(AtochaPot::do_sponsorship(
puzzle_hash.clone(),
ACCOUNT_ID_2,
30_000_000_000_000,
6u32.into(), // block number
"Some-Things-2".as_bytes().to_vec()
));
let pot_ledger = AtoFinanceLedger::<Test>::get(&puzzle_hash);
assert_eq!(pot_ledger.funds, 10_000_000_000_000);
assert_eq!(pot_ledger.total, 60_000_000_000_000);
assert_eq!(pot_ledger.sponsor_list.len(), 3);
assert_eq!(
pot_ledger.sponsor_list[0],
SponsorData {
sponsor: ACCOUNT_ID_2,
funds: 30_000_000_000_000,
create_bn: 6,
reason: toVec("Some-Things-2")
}
);
assert_eq!(
pot_ledger.sponsor_list[1],
SponsorData {
sponsor: ACCOUNT_ID_1,
funds: 20_000_000_000_000,
create_bn: 5,
reason: toVec("Some-Things-1")
}
);
assert_eq!(
pot_ledger.sponsor_list[2],
SponsorData {
sponsor: ACCOUNT_ID_1,
funds: 10_000_000_000_000,
create_bn: 5,
reason: Vec::new(),
}
);
});
}
|
job.go
|
// Code generated by informer-gen. DO NOT EDIT.
package v1
import (
time "time"
lattice_v1 "github.com/mlab-lattice/lattice/pkg/backend/kubernetes/customresource/apis/lattice/v1"
versioned "github.com/mlab-lattice/lattice/pkg/backend/kubernetes/customresource/generated/clientset/versioned"
internalinterfaces "github.com/mlab-lattice/lattice/pkg/backend/kubernetes/customresource/generated/informers/externalversions/internalinterfaces"
v1 "github.com/mlab-lattice/lattice/pkg/backend/kubernetes/customresource/generated/listers/lattice/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// JobInformer provides access to a shared informer and lister for
// Jobs.
type JobInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1.JobLister
}
type jobInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewJobInformer constructs a new informer for Job type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewJobInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredJobInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredJobInformer constructs a new informer for Job type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredJobInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.LatticeV1().Jobs(namespace).List(options)
},
WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) {
			if tweakListOptions != nil {
				tweakListOptions(&options)
			}
return client.LatticeV1().Jobs(namespace).Watch(options)
},
},
&lattice_v1.Job{},
resyncPeriod,
indexers,
)
}
func (f *jobInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredJobInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
func (f *jobInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&lattice_v1.Job{}, f.defaultInformer)
}
func (f *jobInformer) Lister() v1.JobLister {
return v1.NewJobLister(f.Informer().GetIndexer())
}
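// Usage sketch (not part of the generated file): per the comments above, prefer
// obtaining the shared Job informer through the generated factory rather than
// calling NewJobInformer directly. Assuming the generated externalversions
// package, that looks roughly like:
//
//	factory := externalversions.NewSharedInformerFactory(client, 30*time.Second)
//	jobs := factory.Lattice().V1().Jobs()
//	jobs.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
//		AddFunc: func(obj interface{}) { /* react to newly added Jobs */ },
//	})
//	factory.Start(stopCh)
//
// The Lattice().V1() accessor names and the stopCh channel are assumptions;
// check the generated factory for the exact identifiers.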
|
fusioncharts.somaliregion.js
|
(function(factory){if(typeof module==="object"&&typeof module.exports!=="undefined"){module.exports=factory}else{factory(FusionCharts)}})(function(FusionCharts){(function(modules){var installedModules={};function __webpack_require__(moduleId){if(installedModules[moduleId]){return installedModules[moduleId].exports}var module=installedModules[moduleId]={i:moduleId,l:false,exports:{}};modules[moduleId].call(module.exports,module,module.exports,__webpack_require__);module.l=true;return module.exports}__webpack_require__.m=modules;__webpack_require__.c=installedModules;__webpack_require__.d=function(exports,name,getter){if(!__webpack_require__.o(exports,name)){Object.defineProperty(exports,name,{configurable:false,enumerable:true,get:getter})}};__webpack_require__.r=function(exports){Object.defineProperty(exports,"__esModule",{value:true})};__webpack_require__.n=function(module){var getter=module&&module.__esModule?function getDefault(){return module["default"]}:function getModuleExports(){return module};__webpack_require__.d(getter,"a",getter);return getter};__webpack_require__.o=function(object,property){return Object.prototype.hasOwnProperty.call(object,property)};__webpack_require__.p="";return __webpack_require__(__webpack_require__.s=164)})({164:function(module,exports,__webpack_require__){"use strict";var _fusioncharts=__webpack_require__(165);var _fusioncharts2=_interopRequireDefault(_fusioncharts);function
_interopRequireDefault
(obj){return obj&&obj.__esModule?obj:{"default":obj}}FusionCharts.addDep(_fusioncharts2["default"])},165:function(module,exports,__webpack_require__){"use strict";exports.__esModule=true;
/**!
* @license FusionCharts JavaScript Library
* Copyright FusionCharts, Inc.
* License Information at <http://www.fusioncharts.com/license>
*
* @author FusionCharts, Inc.
* @meta package_map_pack
* @id fusionmaps.SomaliRegion.1.05-03-2017 12:10:12
*/var M="M",L="L",Z="Z",Q="Q",LFT="left",RGT="right",CEN="center",MID="middle",TOP="top",BTM="bottom",geodefinitions=[{name:"SomaliRegion",revision:1,standaloneInit:true,baseWidth:600,baseHeight:588,baseScaleFactor:10,entities:{"ET.SO.SH":{outlines:[[M,1520,76,Q,1502,83,1495,85,1481,88,1472,90,1457,94,1448,99,1432,107,1379,108,1322,108,1307,108,1263,108,1253,111,1234,116,1225,118,1213,121,1205,122,1203,122,1201,122,1176,121,1172,123,1159,130,1154,133,1146,137,1137,137,L,1115,136,Q,1102,136,1091,141,1080,145,1052,145,1022,145,1017,144,1007,139,999,136,994,134,990,134,985,132,984,132,975,125,972,125,969,125,954,125,953,125,949,121,944,115,943,114,939,111,926,111,918,111,910,121,901,131,896,133,891,134,885,134,879,134,876,134,864,134,855,137,L,521,137,Q,516,138,504,153,492,168,492,172,491,180,483,197,476,211,477,224,477,233,470,248,462,263,453,270,452,272,442,276,437,279,435,284,428,296,417,308,411,312,382,331,378,334,367,348,355,362,356,365,356,367,363,375,370,381,369,393,367,404,367,414,366,423,366,438,365,454,361,459,357,464,356,470,355,478,353,482,352,485,347,491,343,497,343,502,343,516,339,527,336,534,326,551,318,567,319,576,320,590,311,629,L,309,629,Q,308,632,308,641,307,651,304,658,300,663,299,672,299,677,299,686,299,696,294,705,287,716,286,723,285,743,279,757,274,767,273,775,273,779,273,790,267,798,265,807,264,812,263,822,262,826,255,838,250,847,251,853,249,859,245,870,244,877,241,883,240,897,249,906,253,909,253,912,253,918,254,922,254,924,262,931,264,933,279,948,282,951,308,951,308,954,312,968,312,972,306,975,303,978,299,980,295,984,288,996,286,998,259,1021,254,1026,242,1034,229,1042,221,1049,211,1057,184,1081,151,1106,133,1149,123,1171,111,1182,100,1192,87,1216,86,1220,87,1227,86,1234,82,1238,78,1242,74,1248,70,1257,68,1259,64,1262,64,1270,64,1278,63,1279,61,1282,54,1289,47,1295,47,1302,47,1302,49,1325,49,1348,49,1358,47,1369,47,1371,47,1372,43,1381,L,92,1381,Q,92,1380,96,1380,100,1379,102,1377,104,1374,106,1372,108,1369,111,1369,L,137,1369,Q,143,1370,147,1367,152,1362,155,1359,159,1355,175,1355,182,1355,183,1356,L,191,1364,Q,193,1366,200,1367,207,1368,210,1371,L,216,1380,Q,217,1381,217,1386,217,1388,221,1391,L,229,1396,Q,232,1397,233,1399,234,1403,235,1404,L,235,1408,Q,194,1407,166,1410,141,1408,133,1408,119,1409,119,1425,119,1432,120,1433,L,124,1434,Q,126,1435,127,1437,127,1452,129,1455,L,141,1469,Q,145,1474,143,1488,L,177,1488,Q,181,1485,184,1483,190,1480,193,1479,200,1478,211,1479,230,1480,248,1471,L,267,1457,Q,271,1453,273,1451,277,1447,282,1447,296,1449,306,1443,312,1440,322,1430,328,1423,343,1416,352,1413,362,1409,385,1394,398,1389,424,1382,433,1376,436,1373,442,1371,445,1370,449,1369,455,1366,459,1362,463,1357,466,1355,466,1351,468,1343,470,1335,470,1331,472,1331,501,1330,505,1329,509,1323,513,1317,521,1317,541,1317,547,1320,549,1321,556,1327,561,1333,566,1334,569,1334,575,1333,582,1334,585,1337,589,1342,593,1343,596,1344,605,1344,605,1343,606,1343,L,606,1343,Q,609,1343,620,1343,628,1343,637,1343,652,1343,668,1357,684,1372,689,1372,699,1372,711,1349,713,1346,724,1337,730,1326,735,1322,740,1318,755,1319,763,1319,777,1321,785,1321,792,1317,801,1311,807,1307,L,821,1307,824,1327,Q,824,1331,823,1340,823,1346,828,1347,834,1347,837,1349,840,1352,843,1352,L,861,1352,Q,862,1352,885,1351,889,1351,888,1343,887,1333,887,1329,887,1312,887,1300,887,1278,890,1274,899,1265,899,1251,899,1235,902,1228,902,1227,903,1215,903,1207,909,1206,918,1202,936,1202,943,1202,943,1209,942,1216,947,1216,950,1216,954,1212,959,1208,963,1206,964,1205,982,1198,994,1191,1001,1181,1004,1176,1017,1176,1025,1176,10
28,1179,1038,1187,1044,1191,1050,1194,1059,1198,1068,1202,1075,1206,1077,1207,1102,1224,1124,1238,1126,1238,1134,1238,1138,1231,1146,1219,1149,1216,1168,1200,1172,1195,1183,1182,1196,1170,1203,1161,1203,1146,1203,1139,1202,1129,L,1203,1121,Q,1209,1118,1210,1113,1209,1108,1209,1105,1209,1101,1214,1101,1219,1101,1219,1104,1219,1105,1219,1113,1219,1121,1219,1121,1217,1121,1212,1124,1210,1126,1209,1131,1209,1135,1210,1140,1210,1150,1212,1153,L,1213,1152,Q,1216,1146,1223,1143,1233,1139,1235,1138,1240,1133,1242,1127,1245,1120,1247,1117,1250,1113,1257,1109,1263,1105,1265,1101,1268,1094,1276,1093,1278,1092,1286,1092,1295,1092,1298,1093,1302,1094,1310,1098,1332,1109,1337,1113,1344,1118,1361,1129,1362,1130,1366,1140,1371,1149,1378,1149,L,1408,1150,Q,1415,1150,1418,1158,1421,1164,1420,1172,1419,1177,1420,1191,1420,1193,1427,1204,L,1432,1219,Q,1432,1221,1432,1232,L,1432,1241,Q,1432,1244,1431,1246,1428,1248,1426,1252,1425,1255,1422,1262,1420,1270,1420,1272,1420,1277,1434,1277,1449,1278,1452,1280,1458,1285,1464,1294,L,1464,1203,Q,1464,1196,1474,1197,1487,1198,1494,1191,1501,1184,1512,1184,1518,1184,1531,1186,1539,1186,1568,1178,1579,1176,1606,1175,1630,1173,1639,1161,1641,1160,1647,1156,1653,1154,1656,1151,1658,1148,1657,1143,1657,1136,1657,1135,L,1657,1103,Q,1654,1086,1659,1073,1663,1065,1671,1054,1672,1053,1679,1049,1685,1044,1687,1042,1700,1024,1702,1022,1709,1014,1710,1006,1709,998,1709,994,1710,990,1716,984,1722,976,1722,941,L,1722,917,Q,1722,913,1719,908,1716,901,1714,900,1704,895,1699,883,1695,873,1688,872,L,1688,866,Q,1689,857,1690,854,1691,848,1698,848,1707,846,1716,835,1720,829,1733,826,1749,823,1753,821,1760,816,1774,811,1777,810,1790,797,1794,793,1807,784,1819,776,1826,769,1821,764,1819,759,1814,755,1809,751,1801,744,1800,729,1797,710,1774,689,1760,677,1753,647,1746,615,1743,605,1740,596,1733,585,1724,569,1722,563,1718,556,1715,543,1712,530,1710,525,1680,467,1680,441,1680,431,1689,405,1699,372,1713,363,1737,316,1749,295,1770,260,1788,241,1878,128,1878,126,1878,120,1871,115,1864,110,1853,111,1844,111,1835,106,1828,102,1825,101,1805,98,1797,97,1784,94,1773,87,1725,50,1716,45,1709,41,1655,41,1603,41,1602,42,1587,49,1545,62,1540,64,1533,69,Q,1526,74,1520,76,Z]],label:"Shinile",shortLabel:"SH",labelPosition:[96,63.9],labelAlignment:[CEN,MID]},"ET.SO.JJ":{outlines:[[M,1923,862,Q,1921,855,1899,840,1875,824,1869,815,1860,801,1843,785,1832,776,1826,769,1819,776,1807,784,1794,793,1790,797,1777,810,1774,811,1760,816,1753,821,1749,823,1733,826,1720,829,1716,835,1707,846,1698,848,1691,848,1690,854,1689,857,1688,866,L,1688,872,Q,1695,873,1699,883,1704,895,1714,900,1716,901,1719,908,1722,913,1722,917,L,1722,941,Q,1722,976,1716,984,1710,990,1709,994,1709,998,1710,1006,1709,1014,1702,1022,1700,1024,1687,1042,1685,1044,1679,1049,1672,1053,1671,1054,1663,1065,1659,1073,1654,1086,1657,1103,L,1657,1135,Q,1657,1136,1657,1143,1658,1148,1656,1151,1653,1154,1647,1156,1641,1160,1639,1161,1630,1173,1606,1175,1579,1176,1568,1178,1539,1186,1531,1186,1518,1184,1512,1184,1501,1184,1494,1191,1487,1198,1474,1197,1464,1196,1464,1203,L,1464,1294,Q,1468,1299,1472,1305,1479,1315,1494,1334,1507,1351,1515,1367,L,1517,1367,Q,1520,1373,1524,1382,1528,1391,1533,1396,1538,1401,1541,1413,1543,1424,1550,1434,1555,1444,1557,1463,1557,1474,1557,1497,1558,1506,1570,1522,1571,1524,1577,1529,1580,1533,1582,1538,1585,1547,1585,1559,1585,1563,1587,1577,1591,1583,1599,1589,1601,1592,1615,1597,1624,1601,1624,1612,1624,1614,1612,1630,1601,1646,1601,1648,1601,1654,1602,1656,1603,1658,1611,1664,1620,1670,1629,1677,1633,1680,1635,1684,1640,1689,1644
,1695,1647,1699,1652,1703,1653,1706,1654,1711,1656,1715,1658,1719,1661,1724,1668,1732,1677,1747,1690,1760,1697,1767,1713,1781,1722,1790,1727,1794,1730,1797,1740,1812,1745,1818,1751,1828,1756,1836,1764,1839,1768,1841,1792,1858,1804,1867,1806,1871,1810,1877,1810,1889,1810,1900,1807,1905,1806,1906,1806,1908,L,1830,1908,Q,1835,1912,1837,1916,1837,1921,1841,1927,1843,1929,1867,1929,1876,1929,1878,1934,1879,1938,1879,1945,1879,1950,1889,1967,1892,1970,1898,1976,1906,1983,1908,1987,1908,1988,1912,1997,1914,2006,1918,2010,1922,2016,1931,2028,1939,2038,1948,2038,1951,2038,1955,2033,1960,2028,1963,2027,1968,2025,1982,2022,1997,2016,2027,2002,2110,1964,2113,1964,2118,1964,2129,1976,L,2132,1977,Q,2135,1978,2135,1980,L,2135,1988,Q,2135,1991,2139,1999,2139,2002,2140,2010,2142,2016,2145,2018,2152,2021,2164,2021,2172,2021,2175,2016,2179,2007,2189,2e3,2192,1998,2192,1987,2191,1974,2191,1969,L,2191,1941,Q,2190,1937,2186,1934,2182,1932,2181,1930,2180,1928,2180,1924,2180,1920,2180,1918,2178,1915,2167,1894,2161,1882,2161,1870,L,2161,1869,Q,2161,1866,2167,1861,2172,1857,2176,1855,L,2191,1845,Q,2194,1844,2208,1837,2222,1831,2228,1826,2238,1817,2260,1800,2279,1785,2288,1770,2291,1765,2298,1757,2305,1749,2310,1739,2315,1730,2333,1706,2360,1669,2375,1638,L,2379,1638,Q,2390,1651,2409,1675,2413,1680,2420,1683,2423,1684,2431,1687,2436,1689,2440,1693,2442,1695,2450,1703,L,2457,1710,Q,2459,1714,2459,1721,2459,1725,2470,1740,2474,1743,2487,1756,2489,1758,2494,1763,2497,1767,2498,1767,2499,1768,2503,1768,2508,1768,2510,1765,2513,1761,2520,1761,2525,1761,2530,1760,2534,1757,2539,1757,2587,1760,2587,1758,2598,1758,2608,1760,2617,1760,2624,1754,2628,1749,2633,1747,2635,1746,2640,1745,2661,1735,2662,1735,2669,1735,2679,1733,2682,1733,2686,1728,2690,1724,2692,1724,2708,1719,2718,1702,2710,1700,2708,1695,2701,1685,2696,1682,2685,1677,2680,1672,2673,1665,2666,1658,2662,1655,2654,1643,2648,1633,2641,1631,2626,1624,2616,1610,2611,1605,2599,1597,2589,1588,2580,1578,2568,1565,2551,1559,2543,1555,2535,1549,2513,1533,2502,1525,2491,1518,2486,1515,2477,1508,2475,1503,2472,1493,2454,1475,2446,1467,2426,1452,2406,1436,2392,1427,2385,1421,2381,1418,2374,1413,2362,1413,2347,1412,2326,1397,2323,1395,2298,1379,2283,1369,2268,1356,2265,1353,2265,1348,2265,1341,2264,1339,2263,1336,2252,1323,2232,1303,2226,1294,2207,1276,2203,1264,2198,1251,2185,1229,2170,1202,2163,1191,2153,1173,2148,1160,2152,1145,2142,1127,2130,1108,2127,1099,2122,1079,2119,1070,2115,1052,2100,1039,2093,1029,2089,1025,2084,1018,2075,1017,2065,1017,2046,1004,2037,997,2032,994,2024,988,2019,987,1995,985,1992,976,1987,959,1977,941,1966,923,1962,916,1956,905,1941,889,Q,1927,874,1923,862,Z]],label:"Jigjiga",shortLabel:"JJ",labelPosition:[189.5,140.3],labelAlignment:[CEN,MID]},"ET.SO.FQ":{outlines:[[M,1534,2164,Q,1532,2160,1524,2156,1516,2152,1514,2149,1504,2137,1491,2136,1473,2137,1461,2136,1418,2138,1406,2133,L,1398,2128,Q,1395,2127,1385,2122,1381,2119,1376,2114,1371,2109,1369,2106,1362,2099,1355,2082,1351,2062,1344,2059,L,1334,2055,Q,1333,2054,1329,2044,1321,2024,1315,2024,1313,2024,1304,2029,1295,2033,1292,2036,1292,2036,1280,2060,1279,2062,1274,2064,1269,2066,1268,2067,1265,2070,1264,2073,L,1262,2081,Q,1260,2084,1254,2091,1250,2098,1250,2103,1250,2108,1254,2113,1259,2119,1260,2123,1261,2129,1268,2136,1274,2144,1274,2174,1274,2197,1268,2205,1263,2210,1263,2220,L,1263,2239,Q,1262,2269,1262,2272,1260,2285,1252,2298,1251,2299,1250,2300,1246,2305,1236,2315,1231,2319,1230,2326,1229,2330,1228,2334,1216,2348,1198,2368,1188,2378,1185,2386,L,1177,2405,Q,1176,2406,1173,2427,L,1166,2440,Q
,1163,2447,1163,2452,1163,2454,1166,2460,1169,2466,1169,2470,1169,2473,1154,2507,1151,2513,1151,2526,1151,2529,1140,2542,1139,2548,1137,2561,1136,2571,1134,2577,1131,2585,1125,2594,1119,2603,1116,2610,1119,2652,1118,2685,1117,2718,1122,2732,1126,2745,1127,2753,1128,2763,1132,2774,1134,2779,1134,2794,L,1133,2819,Q,1133,2823,1130,2832,1127,2840,1127,2844,1129,2855,1128,2882,1129,2907,1147,2920,1180,2944,1208,2991,1216,3004,1235,3025,1247,3043,1240,3063,1232,3078,1247,3098,1256,3109,1274,3129,L,1273,3194,1327,3194,Q,1340,3194,1353,3205,1372,3220,1381,3225,1391,3230,1408,3240,1425,3249,1440,3250,1456,3251,1495,3272,1507,3279,1523,3284,1543,3289,1547,3291,1554,3293,1579,3295,1598,3296,1608,3307,1616,3316,1637,3317,1648,3317,1668,3317,1686,3318,1697,3323,1707,3327,1722,3327,1733,3327,1749,3324,1766,3322,1781,3322,1793,3322,1800,3324,1800,3303,1801,3293,1804,3277,1810,3262,1812,3257,1828,3233,1839,3214,1842,3199,L,1858,3161,Q,1863,3151,1878,3135,1897,3115,1902,3108,1904,3104,1903,3094,1903,3084,1903,3079,1903,3077,1903,3076,1911,3060,1926,3038,1927,3036,1940,3027,1946,3022,1948,3004,1949,2984,1964,2970,1973,2961,1983,2933,1986,2924,1997,2898,2004,2882,2005,2880,2010,2870,2019,2865,2024,2863,2023,2847,2023,2832,2025,2829,2035,2822,2037,2816,2039,2805,2039,2799,L,2045,2799,2045,2790,2018,2790,Q,2011,2780,2004,2779,L,1985,2777,Q,1978,2777,1975,2781,1969,2787,1967,2788,1964,2789,1958,2789,1949,2789,1948,2789,1934,2790,1929,2794,1924,2799,1912,2799,1900,2799,1896,2797,1884,2791,1879,2790,1879,2790,1860,2788,1858,2787,1846,2781,1830,2774,1780,2759,1778,2758,1763,2755,1751,2753,1746,2749,1736,2741,1726,2722,1722,2716,1712,2710,1702,2705,1701,2701,1700,2696,1700,2682,1700,2670,1700,2669,L,1700,2641,Q,1700,2636,1706,2629,1710,2621,1710,2617,1710,2610,1707,2606,1703,2603,1700,2594,1699,2589,1700,2580,1702,2572,1699,2568,1689,2557,1687,2545,1688,2533,1687,2526,1688,2513,1686,2507,1686,2505,1681,2499,1677,2494,1677,2492,1677,2491,1677,2479,1677,2469,1671,2464,1667,2460,1665,2448,1664,2441,1663,2437,1663,2438,1662,2439,1658,2446,1644,2446,1633,2446,1622,2433,1612,2422,1611,2415,1611,2411,1602,2404,1593,2397,1591,2393,1582,2374,1579,2368,L,1572,2346,Q,1565,2329,1564,2322,1563,2318,1557,2295,1554,2284,1554,2277,1554,2275,1554,2273,1554,2272,1554,2272,1555,2243,1554,2233,1554,2224,1553,2220,1553,2213,1548,2208,1545,2204,1542,2196,1540,2188,1540,2184,L,1540,2174,Z]],label:"Fik",shortLabel:"FQ",labelPosition:[149.6,284.3],labelAlignment:[CEN,MID]},"ET.SO.DE":{outlines:[[M,2679,1733,Q,2669,1735,2662,1735,2661,1735,2640,1745,2635,1746,2633,1747,2628,1749,2624,1754,2617,1760,2608,1760,2598,1758,2587,1758,2587,1760,2539,1757,2534,1757,2530,1760,2525,1761,2520,1761,2513,1761,2510,1765,2508,1768,2503,1768,2499,1768,2498,1767,2497,1767,2494,1763,2489,1758,2487,1756,2474,1743,2470,1740,2459,1725,2459,1721,2459,1714,2457,1710,L,2450,1703,Q,2442,1695,2440,1693,2436,1689,2431,1687,2423,1684,2420,1683,2413,1680,2409,1675,2390,1651,2379,1638,L,2375,1638,Q,2360,1669,2333,1706,2315,1730,2310,1739,2305,1749,2298,1757,2291,1765,2288,1770,2279,1785,2260,1800,2238,1817,2228,1826,2222,1831,2208,1837,2194,1844,2191,1845,L,2176,1855,Q,2172,1857,2167,1861,2161,1866,2161,1869,L,2161,1870,Q,2161,1882,2167,1894,2178,1915,2180,1918,2180,1920,2180,1924,2180,1928,2181,1930,2182,1932,2186,1934,2190,1937,2191,1941,L,2191,1969,Q,2191,1974,2192,1987,2192,1998,2189,2e3,2179,2007,2175,2016,2172,2021,2164,2021,2152,2021,2145,2018,2142,2016,2140,2010,2139,2002,2139,1999,2135,1991,2135,1988,L,2135,1980,Q,2135,1978,2132,1977,L,2129,1976,Q,2118,1964,2113
,1964,2110,1964,2027,2002,1997,2016,1982,2022,1968,2025,1963,2027,1960,2028,1955,2033,1951,2038,1948,2038,1939,2038,1931,2028,1922,2016,1918,2010,1914,2006,1912,1997,1908,1988,1908,1987,1906,1983,1898,1976,1892,1970,1889,1967,1879,1950,1879,1945,1879,1938,1878,1934,1876,1929,1867,1929,1843,1929,1841,1927,1837,1921,1837,1916,1835,1912,1830,1908,L,1806,1908,Q,1801,1914,1801,1919,L,1796,1932,Q,1786,1942,1786,1946,L,1787,1970,Q,1787,1984,1786,1994,1785,2002,1790,2007,1797,2015,1798,2019,1801,2027,1807,2031,1810,2034,1818,2039,1821,2041,1828,2044,1834,2047,1837,2052,1838,2054,1844,2075,1846,2081,1848,2102,1848,2105,1853,2110,1856,2115,1856,2124,1856,2130,1851,2137,1846,2144,1847,2154,1847,2160,1842,2168,L,1833,2182,Q,1833,2184,1833,2189,1833,2197,1837,2198,L,1847,2203,Q,1849,2205,1849,2211,1849,2218,1847,2219,1841,2221,1833,2223,1830,2223,1824,2227,1816,2230,1814,2231,1808,2232,1793,2237,1777,2243,1773,2246,1765,2252,1753,2252,1740,2251,1733,2255,L,1713,2296,Q,1707,2302,1704,2316,1702,2324,1698,2340,1691,2364,1690,2378,L,1685,2401,Q,1684,2405,1679,2410,1672,2417,1671,2420,1668,2427,1665,2434,1664,2435,1663,2437,1664,2441,1665,2448,1667,2460,1671,2464,1677,2469,1677,2479,1677,2491,1677,2492,1677,2494,1681,2499,1686,2505,1686,2507,1688,2513,1687,2526,1688,2533,1687,2545,1689,2557,1699,2568,1702,2572,1700,2580,1699,2589,1700,2594,1703,2603,1707,2606,1710,2610,1710,2617,1710,2621,1706,2629,1700,2636,1700,2641,L,1700,2669,Q,1700,2670,1700,2682,1700,2696,1701,2701,1702,2705,1712,2710,1722,2716,1726,2722,1736,2741,1746,2749,1751,2753,1763,2755,1778,2758,1780,2759,1830,2774,1846,2781,1858,2787,1860,2788,1879,2790,1879,2790,1884,2791,1896,2797,1900,2799,1912,2799,1924,2799,1929,2794,1934,2790,1948,2789,1949,2789,1958,2789,1964,2789,1967,2788,1969,2787,1975,2781,1978,2777,1985,2777,L,2004,2779,Q,2011,2780,2018,2790,L,2045,2790,2045,2799,2039,2799,Q,2039,2805,2037,2816,2035,2822,2025,2829,2023,2832,2023,2847,2024,2863,2019,2865,2010,2870,2005,2880,2004,2882,1997,2898,1986,2924,1983,2933,1973,2961,1964,2970,1949,2984,1948,3004,1946,3022,1940,3027,1927,3036,1926,3038,1911,3060,1903,3076,1903,3077,1903,3079,1903,3084,1903,3094,1904,3104,1902,3108,1897,3115,1878,3135,1863,3151,1858,3161,L,1842,3199,Q,1839,3214,1828,3233,1812,3257,1810,3262,1804,3277,1801,3293,1800,3303,1800,3324,1805,3324,1808,3326,1815,3330,1825,3330,1830,3330,1838,3316,1846,3302,1852,3299,1860,3295,1873,3287,1884,3281,1890,3282,1909,3283,1928,3270,1955,3252,1962,3248,L,1962,3249,Q,1979,3238,2009,3215,2045,3189,2057,3181,2090,3156,2105,3146,2127,3131,2153,3119,2179,3109,2198,3099,2200,3097,2201,3096,2220,3085,2223,3083,L,2243,3073,Q,2252,3067,2256,3065,2263,3060,2267,3059,L,2288,3052,Q,2293,3050,2300,3041,2306,3034,2309,3032,L,2327,3018,2328,3018,Q,2333,3010,2340,3004,2343,3002,2348,3002,2354,3003,2358,3001,L,2385,2985,Q,2390,2982,2400,2979,2409,2976,2418,2969,2434,2956,2450,2946,2472,2930,2481,2923,2497,2912,2513,2907,L,2542,2882,Q,2545,2882,2572,2871,2581,2867,2635,2846,2675,2831,2698,2817,2703,2814,2710,2813,2714,2812,2720,2812,L,2738,2806,Q,2741,2804,2745,2802,2748,2800,2753,2801,L,2769,2801,Q,2779,2801,2802,2794,2821,2789,2839,2790,L,2840,2790,Q,2854,2790,2877,2789,2897,2789,2911,2792,2931,2797,2939,2798,2955,2800,2974,2799,3e3,2797,3037,2808,3046,2810,3071,2813,3082,2814,3107,2823,3138,2833,3170,2835,3172,2835,3175,2835,3182,2835,3194,2840,3208,2845,3210,2846,3213,2846,3221,2846,3228,2846,3229,2847,3243,2856,3245,2856,3252,2858,3277,2868,3282,2869,3294,2870,L,3310,2877,Q,3333,2887,3386,2881,3444,2881,3455,2880,L,3462,2880,Q,3473,2872,34
93,2853,3496,2849,3504,2847,3511,2844,3515,2841,3521,2836,3536,2832,3546,2829,3557,2819,3568,2809,3581,2804,3595,2799,3623,2781,3633,2774,3662,2750,3686,2730,3703,2720,3728,2705,3770,2659,3795,2638,3850,2600,3863,2590,3889,2569,3901,2559,3905,2554,3908,2550,3910,2541,3913,2533,3914,2532,3923,2517,3945,2492,3967,2467,3977,2451,3983,2441,3989,2425,3994,2413,4002,2404,4007,2397,4007,2381,4006,2365,4006,2359,4006,2357,4007,2356,4006,2339,4009,2334,4007,2330,4010,2325,4015,2317,4016,2314,4017,2311,4020,2290,4021,2277,4029,2266,4038,2252,4043,2246,4050,2237,4050,2226,4050,2219,4052,2205,4053,2195,4054,2189,L,4053,2189,Q,4019,2176,3974,2160,3859,2118,3814,2107,3768,2095,3639,2048,3510,2001,3435,1979,3435,1978,3297,1935,3159,1891,3034,1844,2908,1796,2882,1792,2856,1788,2848,1781,2841,1773,2836,1769,2830,1763,2820,1762,2809,1761,2802,1758,2779,1746,2765,1731,L,2746,1711,Q,2737,1704,2726,1703,2721,1703,2718,1702,2708,1719,2692,1724,2690,1724,2686,1728,Q,2682,1733,2679,1733,Z]],label:"Degehabur",shortLabel:"DE",labelPosition:[263,231.1],labelAlignment:[CEN,MID]},"ET.SO.WA":{outlines:[[M,4788,2435,Q,4785,2434,4783,2433,4764,2428,4722,2415,4670,2398,4639,2390,4611,2382,4556,2360,4500,2337,4473,2330,4422,2316,4394,2307,4343,2291,4294,2272,4259,2258,4210,2243,4158,2226,4143,2221,4140,2220,4139,2219,4111,2210,4054,2189,4053,2195,4052,2205,4050,2219,4050,2226,4050,2237,4043,2246,4038,2252,4029,2266,4021,2277,4020,2290,4017,2311,4016,2314,4015,2317,4010,2325,4007,2330,4009,2334,4006,2339,4007,2356,4007,2358,4007,2359,L,4006,2359,Q,4006,2365,4007,2381,4007,2397,4002,2404,3994,2413,3989,2425,3983,2441,3977,2451,3967,2467,3945,2492,3923,2517,3914,2532,3913,2533,3910,2541,3908,2550,3905,2554,3901,2559,3889,2569,3863,2590,3850,2600,3795,2638,3770,2659,3728,2705,3703,2720,3686,2730,3662,2750,3633,2774,3623,2781,3595,2799,3581,2804,3568,2809,3557,2819,3546,2829,3536,2832,3521,2836,3515,2841,3511,2844,3504,2847,3496,2849,3493,2853,3473,2872,3462,2880,L,3455,2880,Q,3444,2881,3386,2881,3333,2887,3310,2877,L,3294,2870,Q,3282,2869,3277,2868,3252,2858,3245,2856,3243,2856,3229,2847,3228,2846,3221,2846,3213,2846,3210,2846,3208,2845,3194,2840,3182,2835,3175,2835,3172,2835,3170,2835,3170,2849,3173,2881,3173,2884,3182,2906,3191,2927,3191,2936,3199,2965,3217,3013,3239,3072,3244,3087,3248,3100,3282,3150,3312,3196,3322,3225,3322,3226,3323,3228,3321,3254,3351,3290,3368,3311,3402,3350,3445,3412,3493,3476,3497,3495,3517,3518,3529,3531,3550,3554,3581,3595,3583,3597,3592,3609,3596,3615,3597,3617,3601,3627,3604,3637,3608,3641,3618,3649,3627,3684,3634,3709,3637,3736,3637,3739,3636,3763,3635,3782,3638,3790,3642,3800,3650,3816,3655,3828,3657,3839,3658,3841,3658,3842,3657,3854,3663,3869,3672,3890,3672,3907,L,3671,3928,Q,3671,3941,3672,3949,3674,3957,3678,3966,3681,3972,3681,3980,3681,3988,3677,3992,3672,3995,3672,4016,3672,4023,3678,4055,3685,4087,3685,4124,L,3685,4139,Q,3696,4136,3709,4131,3733,4122,3739,4122,3768,4128,3861,4088,3875,4082,3894,4074,3909,4068,3929,4068,3936,4068,3958,4059,3981,4048,3984,4048,3996,4045,4011,4043,4024,4040,4042,4030,4056,4022,4075,4017,4081,4015,4112,4008,4123,4006,4142,4003,4157,4e3,4166,3995,4174,3990,4221,3981,4311,3953,4336,3935,4344,3929,4360,3924,4369,3921,4387,3915,4390,3914,4397,3907,4404,3902,4411,3902,4440,3902,4447,3897,4451,3895,4458,3886,4464,3879,4471,3877,4484,3873,4505,3872,4538,3861,4558,3848,4561,3846,4573,3845,4580,3845,4591,3846,4598,3845,4603,3841,4609,3835,4614,3835,4617,3834,4629,3835,4641,3834,4645,3829,4650,3822,4659,3820,4663,3819,4676,3819,4693,3819,4701,3821,4706,3821,4710,3826,4
715,3836,4716,3837,5089,3449,5108,3433,5127,3417,5130,3404,5133,3391,5155,3374,5179,3356,5265,3261,5266,3260,5266,3260,5276,3250,5298,3235,5304,3229,5309,3217,5314,3205,5318,3201,5348,3168,5387,3137,5387,3136,5389,3135,5400,3119,5439,3082,5472,3049,5485,3029,5486,3026,5487,3025,5835,2674,5880,2623,5932,2561,5945,2549,L,5949,2543,Q,5932,2543,5926,2543,5920,2542,5891,2541,L,5665,2541,Q,5657,2543,5631,2548,5614,2552,5604,2553,5600,2554,5597,2554,5375,2552,5354,2552,5340,2552,5312,2557,5293,2561,5261,2562,L,5207,2562,Q,5176,2562,5165,2560,5160,2559,5141,2550,5122,2541,5111,2538,5103,2535,5060,2525,5028,2518,5005,2508,4963,2489,4892,2467,Q,4821,2446,4788,2435,Z]],label:"Warder",shortLabel:"WA",labelPosition:[428.4,316.4],labelAlignment:[CEN,MID]},"ET.SO.KO":{outlines:[[M,3191,2936,Q,3191,2927,3182,2906,3173,2884,3173,2881,3170,2849,3170,2835,3138,2833,3107,2823,3082,2814,3071,2813,3046,2810,3037,2808,3e3,2797,2974,2799,2955,2800,2939,2798,2931,2797,2911,2792,2897,2789,2877,2789,2854,2790,2840,2790,L,2839,2790,Q,2821,2789,2802,2794,2779,2801,2769,2801,L,2753,2801,Q,2748,2800,2745,2802,2741,2804,2738,2806,L,2720,2812,Q,2714,2812,2710,2813,2703,2814,2698,2817,2675,2831,2635,2846,2581,2867,2572,2871,2545,2882,2542,2882,L,2513,2907,Q,2497,2912,2481,2923,2472,2930,2450,2946,2434,2956,2418,2969,2409,2976,2400,2979,2390,2982,2385,2985,L,2358,3001,Q,2354,3003,2348,3002,2343,3002,2340,3004,2333,3010,2328,3018,L,2327,3018,2309,3032,Q,2306,3034,2300,3041,2293,3050,2288,3052,L,2267,3059,Q,2263,3060,2256,3065,2252,3067,2243,3073,L,2223,3083,Q,2220,3085,2201,3096,2200,3097,2198,3099,2200,3105,2208,3126,2215,3146,2215,3157,2215,3157,2212,3175,2207,3184,2205,3187,2204,3189,2204,3194,2203,3206,2204,3234,2204,3235,2204,3236,2205,3257,2203,3276,2201,3291,2198,3299,2195,3305,2195,3317,2195,3326,2204,3354,2212,3381,2215,3386,2226,3399,2238,3423,2248,3441,2264,3456,2271,3462,2288,3484,2306,3508,2315,3516,2317,3518,2344,3543,2360,3559,2370,3570,2371,3571,2372,3572,2372,3586,2379,3595,2383,3600,2400,3615,2410,3623,2418,3639,2429,3660,2432,3663,2442,3676,2447,3690,2451,3706,2460,3711,2481,3721,2503,3750,2504,3752,2508,3764,2511,3776,2513,3780,2519,3792,2520,3806,2521,3821,2533,3835,2539,3843,2552,3857,2558,3865,2567,3881,2569,3883,2580,3891,2589,3897,2591,3902,2594,3910,2600,3932,2604,3937,2620,3958,2622,3963,2624,3972,2626,3983,2628,3986,2631,3994,2638,4004,2644,4013,2646,4019,2657,4046,2654,4076,2650,4116,2675,4132,2689,4141,2726,4156,2733,4160,2756,4172,2774,4182,2783,4188,2786,4190,2837,4221,2843,4225,2862,4238,2881,4249,2893,4253,2902,4257,2921,4271,2937,4281,2949,4281,L,2986,4280,Q,2993,4280,3e3,4286,3006,4292,3012,4292,L,3128,4292,Q,3144,4298,3149,4298,3155,4298,3181,4293,3206,4287,3214,4287,3237,4287,3250,4284,3255,4282,3265,4281,3274,4279,3282,4276,3298,4268,3319,4264,3358,4255,3362,4254,3375,4250,3398,4244,3400,4243,3437,4236,3465,4230,3517,4209,3545,4197,3592,4178,3604,4173,3637,4156,3662,4143,3673,4142,3678,4141,3685,4139,L,3685,4124,Q,3685,4087,3678,4055,3672,4023,3672,4016,3672,3995,3677,3992,3681,3988,3681,3980,3681,3972,3678,3966,3674,3957,3672,3949,3671,3941,3671,3928,L,3672,3907,Q,3672,3890,3663,3869,3657,3854,3658,3842,3658,3841,3657,3839,3655,3828,3650,3816,3642,3800,3638,3790,3635,3782,3636,3763,3637,3739,3637,3736,3634,3709,3627,3684,3618,3649,3608,3641,3604,3637,3601,3627,3597,3617,3596,3615,3592,3609,3583,3597,3581,3595,3550,3554,3529,3531,3517,3518,3497,3495,3493,3476,3445,3412,3402,3350,3368,3311,3351,3290,3321,3254,3323,3228,3322,3226,3322,3225,3312,3196,3282,3150,3248,3100,3244,3087,3239,3072,
3217,3013,Q,3199,2965,3191,2936,Z]],label:"Korahe",shortLabel:"KO",labelPosition:[294,354.3],labelAlignment:[CEN,MID]},"ET.SO.GD":{outlines:[[M,2009,3215,Q,1979,3238,1962,3249,L,1962,3248,Q,1955,3252,1928,3270,1909,3283,1890,3282,1884,3281,1873,3287,1860,3295,1852,3299,1846,3302,1838,3316,1830,3330,1825,3330,1815,3330,1808,3326,1805,3324,1800,3324,1793,3322,1781,3322,1766,3322,1749,3324,1733,3327,1722,3327,1707,3327,1697,3323,1686,3318,1668,3317,1648,3317,1637,3317,1616,3316,1608,3307,1598,3296,1579,3295,1554,3293,1547,3291,1543,3289,1523,3284,1507,3279,1495,3272,1456,3251,1440,3250,1425,3249,1408,3240,1391,3230,1381,3225,1372,3220,1353,3205,1340,3194,1327,3194,L,1273,3194,1272,3226,Q,1272,3235,1272,3253,1272,3270,1267,3279,1263,3285,1251,3298,1241,3310,1242,3320,1242,3330,1244,3349,1244,3365,1239,3374,1226,3395,1204,3420,L,1188,3436,Q,1175,3449,1170,3458,1166,3466,1165,3483,1163,3496,1161,3504,1161,3506,1160,3508,L,1160,3544,Q,1159,3554,1158,3566,1156,3574,1148,3585,1145,3589,1134,3601,1125,3613,1125,3622,1126,3638,1117,3647,1109,3656,1109,3665,1109,3670,1113,3675,1117,3679,1117,3687,1117,3690,1115,3694,1131,3694,1144,3705,1168,3723,1173,3726,1191,3736,1195,3740,1202,3746,1203,3754,1204,3759,1204,3768,1210,3800,1243,3819,1263,3830,1312,3854,1319,3859,1320,3866,1322,3881,1322,3881,1323,3886,1332,3892,1345,3901,1347,3903,1360,3915,1380,3916,1402,3917,1411,3921,1416,3924,1444,3923,1476,3921,1485,3923,1496,3925,1515,3934,1533,3943,1546,3944,1547,3944,1548,3945,1566,3949,1593,3959,1620,3967,1649,3962,1654,3961,1673,3967,1696,3974,1701,3975,1705,3975,1712,3975,1717,3975,1721,3980,L,1737,4e3,Q,1760,4031,1764,4042,1767,4053,1782,4062,1788,4065,1806,4073,1843,4088,1852,4091,1877,4100,1896,4101,1902,4102,1912,4101,1925,4099,1937,4103,L,1972,4136,Q,1982,4145,2006,4166,2028,4185,2044,4194,2051,4199,2064,4207,2074,4214,2087,4213,2091,4213,2107,4220,2122,4226,2128,4226,2144,4225,2173,4234,2194,4237,2221,4237,2226,4240,2263,4234,2298,4228,2319,4233,2340,4238,2347,4238,2366,4238,2371,4239,2384,4240,2394,4248,2400,4252,2418,4258,2434,4264,2439,4269,2448,4277,2459,4284,2467,4289,2484,4299,2485,4299,2505,4315,2520,4327,2529,4329,2538,4330,2552,4340,2569,4351,2571,4352,2594,4363,2658,4417,L,2659,4417,Q,2678,4442,2681,4446,2698,4465,2713,4469,2721,4471,2752,4482,2774,4490,2792,4494,2804,4495,2817,4503,2824,4507,2832,4512,2839,4516,2855,4521,2864,4523,2874,4526,2889,4531,2909,4534,2927,4537,2946,4549,2949,4551,2955,4565,2962,4579,2966,4583,2968,4585,2982,4595,2993,4602,2999,4609,3012,4625,3029,4641,3051,4661,3066,4668,3074,4671,3108,4692,3132,4706,3153,4711,3163,4713,3171,4720,3176,4725,3187,4735,3194,4741,3207,4743,3223,4744,3228,4745,3269,4768,3315,4805,3362,4845,3396,4886,3439,4939,3468,4965,3497,4992,3509,5007,3513,5012,3530,5027,3547,5043,3552,5049,3674,4924,3692,4904,3711,4883,3720,4874,3729,4864,3748,4843,3766,4821,3810,4778,3854,4735,3904,4680,3948,4631,3975,4608,L,3975,4607,Q,4054,4522,4086,4488,4156,4415,4210,4364,4412,4155,4455,4109,4498,4063,4507,4057,4516,4050,4533,4028,4548,4008,4557,4e3,4559,3998,4586,3975,4600,3964,4608,3953,4609,3950,4610,3948,4666,3889,4716,3837,4715,3836,4710,3826,4706,3821,4701,3821,4693,3819,4676,3819,4663,3819,4659,3820,4650,3822,4645,3829,4641,3834,4629,3835,4617,3834,4614,3835,4609,3835,4603,3841,4598,3845,4591,3846,4580,3845,4573,3845,4561,3846,4558,3848,4538,3861,4505,3872,4484,3873,4471,3877,4464,3879,4458,3886,4451,3895,4447,3897,4440,3902,4411,3902,4404,3902,4397,3907,4390,3914,4387,3915,4369,3921,4360,3924,4344,3929,4336,3935,4311,3953,4221,3981,4174,3990,4166,3995
,4157,4e3,4142,4003,4123,4006,4112,4008,4081,4015,4075,4017,4056,4022,4042,4030,4024,4040,4011,4043,3996,4045,3984,4048,3981,4048,3958,4059,3936,4068,3929,4068,3909,4068,3894,4074,3875,4082,3861,4088,3768,4128,3739,4122,3733,4122,3709,4131,3696,4136,3685,4139,3678,4141,3673,4142,3662,4143,3637,4156,3604,4173,3592,4178,3545,4197,3517,4209,3465,4230,3437,4236,3400,4243,3398,4244,3375,4250,3362,4254,3358,4255,3319,4264,3298,4268,3282,4276,3274,4279,3265,4281,3255,4282,3250,4284,3237,4287,3214,4287,3206,4287,3181,4293,3155,4298,3149,4298,3144,4298,3128,4292,L,3012,4292,Q,3006,4292,3e3,4286,2993,4280,2986,4280,L,2949,4281,Q,2937,4281,2921,4271,2902,4257,2893,4253,2881,4249,2862,4238,2843,4225,2837,4221,2786,4190,2783,4188,2774,4182,2756,4172,2733,4160,2726,4156,2689,4141,2675,4132,2650,4116,2654,4076,2657,4046,2646,4019,2644,4013,2638,4004,2631,3994,2628,3986,2626,3983,2624,3972,2622,3963,2620,3958,2604,3937,2600,3932,2594,3910,2591,3902,2589,3897,2580,3891,2569,3883,2567,3881,2558,3865,2552,3857,2539,3843,2533,3835,2521,3821,2520,3806,2519,3792,2513,3780,2511,3776,2508,3764,2504,3752,2503,3750,2481,3721,2460,3711,2451,3706,2447,3690,2442,3676,2432,3663,2429,3660,2418,3639,2410,3623,2400,3615,2383,3600,2379,3595,2372,3586,2372,3572,2371,3571,2370,3570,2360,3559,2344,3543,2317,3518,2315,3516,2306,3508,2288,3484,2271,3462,2264,3456,2248,3441,2238,3423,2226,3399,2215,3386,2212,3381,2204,3354,2195,3326,2195,3317,2195,3305,2198,3299,2201,3291,2203,3276,2205,3257,2204,3236,2204,3235,2204,3234,2203,3206,2204,3194,2204,3189,2205,3187,2207,3184,2212,3175,2215,3157,2215,3157,2215,3146,2208,3126,2200,3105,2198,3099,2179,3109,2153,3119,2127,3131,2105,3146,2090,3156,2057,3181,Q,2045,3189,2009,3215,Z]],label:"Gode",shortLabel:"GD",labelPosition:[189.5,371.6],labelAlignment:[CEN,MID]},"ET.SO.AF":{outlines:[[M,1972,4136,L,1937,4103,Q,1925,4099,1912,4101,1902,4102,1896,4101,1877,4100,1852,4091,1843,4088,1806,4073,1788,4065,1782,4062,1767,4053,1764,4042,1760,4031,1737,4e3,L,1721,3980,Q,1717,3975,1712,3975,1705,3975,1701,3975,1696,3974,1673,3967,1654,3961,1649,3962,1620,3967,1593,3959,1566,3949,1548,3945,1547,3944,1546,3944,1533,3943,1515,3934,1496,3925,1485,3923,1476,3921,1444,3923,1416,3924,1411,3921,1402,3917,1380,3916,1360,3915,1347,3903,1345,3901,1332,3892,1323,3886,1322,3881,1322,3881,1320,3866,1319,3859,1312,3854,1263,3830,1243,3819,1210,3800,1204,3768,1204,3759,1203,3754,1202,3746,1195,3740,1191,3736,1173,3726,1168,3723,1144,3705,1131,3694,1115,3694,1107,3705,1077,3722,1037,3745,1019,3745,1008,3745,1006,3745,998,3742,996,3731,994,3716,990,3712,986,3709,975,3709,967,3709,948,3718,929,3726,923,3731,904,3747,880,3780,875,3786,863,3809,847,3830,847,3836,846,3844,835,3846,822,3846,818,3848,812,3850,810,3858,807,3865,804,3869,803,3870,802,3871,L,801,3883,Q,801,3901,825,3946,850,3990,850,3999,850,4022,826,4035,812,4042,774,4056,764,4060,760,4072,758,4077,756,4087,749,4104,719,4120,L,705,4120,Q,704,4097,700,4083,695,4059,681,4051,663,4042,643,4028,619,4011,610,4e3,L,610,4e3,Q,603,3993,555,3916,523,3866,500,3817,494,3804,437,3715,382,3629,381,3629,372,3629,372,3647,371,3669,371,3669,367,3675,359,3686,353,3692,353,3702,353,3709,356,3723,359,3738,359,3750,359,3764,356,3772,352,3785,350,3793,L,325,3838,Q,320,3848,306,3856,295,3862,286,3864,275,3865,232,3866,230,3866,221,3870,212,3874,209,3874,192,3874,185,3876,L,185,3916,Q,192,3928,194,3944,194,3951,194,3984,L,194,4025,Q,194,4026,190,4044,185,4062,185,4067,185,4120,223,4114,L,228,4114,Q,235,4114,241,4119,244,4123,250,4129,253,4132,263,4138,271,4142,273,4148,274,4150,2
73,4166,273,4179,277,4185,283,4192,291,4191,302,4190,307,4191,328,4196,338,4201,347,4204,359,4221,366,4230,378,4247,413,4288,438,4346,444,4359,454,4364,463,4369,481,4369,495,4369,514,4366,527,4367,527,4384,527,4398,515,4409,510,4412,507,4416,L,503,4425,504,4454,Q,504,4467,503,4471,500,4493,487,4511,486,4513,486,4522,485,4532,482,4536,L,470,4549,Q,458,4560,454,4561,448,4560,444,4560,437,4560,437,4566,437,4585,445,4601,452,4614,462,4620,467,4623,475,4630,481,4637,484,4642,486,4647,487,4657,489,4665,493,4669,523,4690,532,4716,539,4738,544,4749,546,4754,546,4778,L,547,4816,Q,547,4824,547,4838,548,4854,557,4862,567,4871,573,4880,576,4886,584,4903,591,4916,607,4952,624,4984,639,4990,644,4992,652,4991,658,4991,663,4995,675,5007,677,5013,683,5026,691,5036,696,5042,707,5056,732,5089,739,5102,751,5123,758,5152,L,758,5165,Q,759,5169,758,5174,758,5178,763,5184,764,5186,766,5194,768,5200,772,5202,780,5206,782,5208,785,5211,791,5221,796,5231,801,5233,L,801,5244,Q,802,5254,802,5270,804,5282,813,5291,824,5301,848,5319,L,862,5333,Q,867,5338,869,5344,871,5352,872,5353,874,5356,884,5365,895,5374,900,5382,903,5388,910,5403,916,5415,921,5422,928,5434,935,5444,940,5452,952,5461,972,5475,982,5486,993,5499,1001,5521,1015,5561,1048,5571,1128,5594,1137,5600,1162,5615,1162,5660,L,1161,5703,Q,1165,5701,1167,5699,1172,5694,1188,5693,1195,5692,1205,5693,1214,5693,1226,5692,1235,5691,1242,5684,1244,5681,1258,5676,1270,5670,1276,5670,1291,5670,1304,5671,1313,5672,1329,5672,L,1341,5672,Q,1341,5672,1342,5672,L,1342,5672,1361,5672,Q,1369,5671,1381,5672,1383,5672,1384,5672,L,1418,5672,Q,1449,5667,1530,5652,1604,5639,1646,5629,1700,5615,1763,5579,1816,5548,1841,5519,1859,5511,1885,5475,1914,5432,1924,5416,1926,5388,1953,5357,1974,5332,2008,5309,2119,5234,2253,5167,2270,5162,2320,5142,2366,5124,2390,5121,2414,5118,2468,5103,2526,5087,2544,5083,2565,5079,2616,5062,2664,5048,2693,5052,2703,5053,2729,5049,2758,5045,2771,5045,2806,5046,2824,5040,2842,5034,2852,5036,2863,5037,2874,5041,2885,5044,2927,5045,2965,5045,3039,5045,3058,5045,3078,5049,3089,5051,3114,5054,3139,5056,3159,5056,3180,5057,3215,5055,3355,5061,3427,5059,3500,5056,3511,5058,3522,5060,3544,5056,3548,5053,3552,5049,3547,5043,3530,5027,3513,5012,3509,5007,3497,4992,3468,4965,3439,4939,3396,4886,3362,4845,3315,4805,3269,4768,3228,4745,3223,4744,3207,4743,3194,4741,3187,4735,3176,4725,3171,4720,3163,4713,3153,4711,3132,4706,3108,4692,3074,4671,3066,4668,3051,4661,3029,4641,3012,4625,2999,4609,2993,4602,2982,4595,2968,4585,2966,4583,2962,4579,2955,4565,2949,4551,2946,4549,2927,4537,2909,4534,2889,4531,2874,4526,2864,4523,2855,4521,2839,4516,2832,4512,2824,4507,2817,4503,2804,4495,2792,4494,2774,4490,2752,4482,2721,4471,2713,4469,2698,4465,2681,4446,2678,4442,2659,4417,L,2658,4417,Q,2594,4363,2571,4352,2569,4351,2552,4340,2538,4330,2529,4329,2520,4327,2505,4315,2485,4299,2484,4299,2467,4289,2459,4284,2448,4277,2439,4269,2434,4264,2418,4258,2400,4252,2394,4248,2384,4240,2371,4239,2366,4238,2347,4238,2340,4238,2319,4233,2298,4228,2263,4234,2226,4240,2221,4237,2194,4237,2173,4234,2144,4225,2128,4226,2122,4226,2107,4220,2091,4213,2087,4213,2074,4214,2064,4207,2051,4199,2044,4194,2028,4185,2006,4166,Q,1982,4145,1972,4136,Z]],label:"Afder",shortLabel:"AF",labelPosition:[140.1,477.8],labelAlignment:[CEN,MID]},"ET.SO.LI":{outlines:[[M,616,5143,Q,598,5154,589,5154,582,5154,577,5150,573,5146,563,5146,552,5146,537,5155,521,5165,519,5165,499,5153,478,5142,438,5119,430,5119,407,5119,403,5121,397,5125,397,5150,397,5152,405,5162,413,5173,413,5175,415,5212,414,5217,407,5241,390,5244,379
,5245,371,5245,368,5245,361,5250,352,5255,351,5255,345,5257,333,5263,323,5268,311,5268,309,5289,283,5296,255,5304,249,5315,249,5316,245,5332,242,5341,233,5347,230,5348,218,5361,211,5368,199,5368,192,5368,181,5359,167,5347,163,5345,145,5335,140,5333,127,5327,127,5333,L,126,5333,Q,105,5334,81,5350,54,5368,55,5385,L,54,5390,54,5419,Q,52,5434,68,5443,75,5447,101,5455,110,5458,110,5478,110,5494,110,5495,109,5502,104,5507,87,5522,87,5530,87,5535,96,5544,106,5553,108,5559,110,5565,114,5570,121,5578,124,5583,L,133,5596,Q,135,5600,147,5606,150,5609,161,5616,171,5623,174,5627,178,5631,185,5643,192,5653,198,5656,207,5660,221,5666,232,5672,241,5679,264,5696,272,5713,276,5724,304,5744,306,5746,311,5749,317,5751,319,5754,321,5755,336,5768,338,5770,340,5776,342,5781,347,5785,L,412,5822,Q,416,5824,422,5826,427,5828,430,5830,434,5832,439,5837,444,5840,448,5840,454,5840,460,5830,465,5819,469,5819,471,5819,481,5824,490,5828,500,5827,511,5825,531,5838,L,806,5838,Q,819,5827,859,5828,899,5830,912,5819,L,1022,5819,Q,1037,5813,1042,5810,1051,5806,1056,5797,1064,5780,1087,5771,1100,5766,1106,5740,1106,5739,1112,5735,1120,5731,1120,5730,1130,5717,1137,5715,L,1151,5707,Q,1154,5704,1159,5703,1160,5703,1161,5703,L,1162,5660,Q,1162,5615,1137,5600,1128,5594,1048,5571,1015,5561,1001,5521,993,5499,982,5486,972,5475,952,5461,940,5452,935,5444,928,5434,921,5422,916,5415,910,5403,903,5388,900,5382,895,5374,884,5365,874,5356,872,5353,871,5352,869,5344,867,5338,862,5333,L,848,5319,Q,824,5301,813,5291,804,5282,802,5270,802,5254,801,5244,L,801,5233,Q,796,5231,791,5221,785,5211,782,5208,780,5206,772,5202,768,5200,766,5194,764,5186,763,5184,758,5178,758,5174,759,5169,758,5165,L,721,5165,Q,712,5158,684,5151,656,5143,643,5143,Z]],label:"Liben",shortLabel:"LI",labelPosition:[60.8,552.2],labelAlignment:[CEN,MID]}}}];exports["default"]={extension:geodefinitions,name:"somaliregion",type:"maps"}}})});
|
user.go
|
package modules
import (
"fmt"
"github.com/make-os/kit/config"
"github.com/make-os/kit/crypto/ed25519"
"github.com/make-os/kit/keystore"
kstypes "github.com/make-os/kit/keystore/types"
"github.com/make-os/kit/modules/types"
"github.com/make-os/kit/node/services"
types2 "github.com/make-os/kit/rpc/types"
"github.com/make-os/kit/types/api"
"github.com/make-os/kit/types/constants"
"github.com/make-os/kit/types/core"
"github.com/make-os/kit/types/txns"
"github.com/make-os/kit/util"
"github.com/make-os/kit/util/errors"
address2 "github.com/make-os/kit/util/identifier"
"github.com/spf13/cast"
"github.com/c-bata/go-prompt"
at "github.com/make-os/kit/types"
"github.com/robertkrimen/otto"
)
// UserModule provides account management functionalities
// that are accessed through the JavaScript console environment
type UserModule struct {
types.ModuleCommon
cfg *config.AppConfig
keystore kstypes.Keystore
service services.Service
logic core.Logic
}
// NewAttachableUserModule creates an instance of UserModule suitable for attach mode
func NewAttachableUserModule(cfg *config.AppConfig, client types2.Client, ks *keystore.Keystore) *UserModule {
return &UserModule{ModuleCommon: types.ModuleCommon{Client: client}, cfg: cfg, keystore: ks}
}
// NewUserModule creates an instance of UserModule
func NewUserModule(
cfg *config.AppConfig,
keystore kstypes.Keystore,
service services.Service,
	logic core.Logic) *UserModule {
	return &UserModule{
		cfg:      cfg,
		keystore: keystore,
		service:  service,
		logic:    logic,
	}
}
// methods are functions exposed in the special namespace of this module.
func (m *UserModule) methods() []*types.VMMember {
return []*types.VMMember{
{Name: "getKeys", Value: m.GetKeys, Description: "Get address of keys on the keystore"},
{Name: "getPrivKey", Value: m.GetPrivKey, Description: "Get the private key of a key (supports interactive mode)"},
{Name: "getPubKey", Value: m.GetPublicKey, Description: "Get the public key of an account (supports interactive mode)"},
{Name: "getNonce", Value: m.GetNonce, Description: "Get the nonce of an account"},
{Name: "get", Value: m.GetAccount, Description: "Get the account of a given address"},
{Name: "getBalance", Value: m.GetAvailableBalance, Description: "Get the spendable coin balance of an account"},
{Name: "getStakedBalance", Value: m.GetStakedBalance, Description: "Get the total staked coins of an account"},
{Name: "getValidator", Value: m.GetValidator, Description: "Get the validator information"},
{Name: "setCommission", Value: m.SetCommission, Description: "Set the percentage of reward to share with a delegator"},
{Name: "send", Value: m.SendCoin, Description: "Send coins to another user account or a repository"},
}
}
// globals are functions exposed in the VM's global namespace
func (m *UserModule) globals() []*types.VMMember {
defer func() {
if err := recover(); err != nil {
m.cfg.G().Log.Error(fmt.Sprint(err))
}
}()
return []*types.VMMember{
{
Name: "accounts",
Value: m.GetKeys(),
Description: "Get the list of accounts that exist on this node",
},
}
}
// ConfigureVM configures the JS context and returns
// any number of console prompt suggestions
func (m *UserModule) ConfigureVM(vm *otto.Otto) prompt.Completer {
// Set the namespace object
nsMap := map[string]interface{}{}
util.VMSet(vm, constants.NamespaceUser, nsMap)
// Add the module's methods to the namespace object
for _, f := range m.methods() {
nsMap[f.Name] = f.Value
funcFullName := fmt.Sprintf("%s.%s", constants.NamespaceUser, f.Name)
m.Suggestions = append(m.Suggestions, prompt.Suggest{Text: funcFullName, Description: f.Description})
}
// Register global functions
for _, f := range m.globals() {
_ = vm.Set(f.Name, f.Value)
m.Suggestions = append(m.Suggestions, prompt.Suggest{Text: f.Name, Description: f.Description})
}
return m.Completer
}
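// Console usage sketch (hypothetical session): once ConfigureVM has run, the
// methods above are reachable from the JS console under the user namespace, e.g.
//
//	user.getKeys()
//	user.getNonce("os1...")
//
// The "user" prefix and the address format are assumptions; the actual prefix
// comes from constants.NamespaceUser.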
// GetKeys returns the addresses of the keys in the keystore
func (m *UserModule) GetKeys() []string {
if m.IsAttached() {
res, err := m.Client.User().GetKeys()
if err != nil {
panic(err)
}
return res
}
accounts, err := m.keystore.List()
if err != nil {
panic(errors.ReqErr(500, StatusCodeServerErr, "", err.Error()))
}
var resp []string
for _, a := range accounts {
resp = append(resp, a.GetUserAddress())
}
return resp
}
// getKey returns the private key of an account.
//
// The passphrase argument is used to unlock the account.
// If passphrase is not set, an interactive prompt will be started
// to collect the passphrase without revealing it in the terminal.
//
// - address: The address corresponding to the local key
// - [passphrase]: The passphrase of the local key
func (m *UserModule) getKey(address string, passphrase string) *ed25519.Key {
if address == "" {
panic(errors.ReqErr(400, StatusCodeAddressRequire, "address", "address is required"))
}
// Get the address
acct, err := m.keystore.GetByIndexOrAddress(address)
if err != nil {
if err != at.ErrAccountUnknown {
panic(errors.ReqErr(500, StatusCodeServerErr, "address", err.Error()))
}
panic(errors.ReqErr(404, StatusCodeAccountNotFound, "address", err.Error()))
}
	if acct.IsUnprotected() {
		passphrase = keystore.DefaultPassphrase
	}
// Unlock the key using the passphrase
if err := acct.Unlock(passphrase); err != nil {
if err == at.ErrInvalidPassphrase {
panic(errors.ReqErr(401, StatusCodeInvalidPass, "passphrase", err.Error()))
}
panic(errors.ReqErr(500, StatusCodeServerErr, "passphrase", err.Error()))
}
return acct.GetKey()
}
// GetPrivKey returns the private key of an account.
//
// The passphrase argument is used to unlock the account.
// If passphrase is not set, an interactive prompt will be started
// to collect the passphrase without revealing it in the terminal.
//
// - address: The address corresponding to the local key
// - [passphrase]: The passphrase of the local key
func (m *UserModule) GetPrivKey(address string, passphrase ...string) string {
// If passphrase is not set, start interactive mode
var pass string
if len(passphrase) == 0 {
pass, _ = m.keystore.AskForPasswordOnce()
} else {
pass = passphrase[0]
}
if m.IsAttached() {
res, err := m.Client.User().GetPrivateKey(address, pass)
if err != nil {
panic(err)
}
return res
}
return m.getKey(address, pass).PrivKey().Base58()
}
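// Console examples (illustrative address): user.getPrivKey("os1abc...") starts an
// interactive passphrase prompt, while user.getPrivKey("os1abc...", "my-pass")
// unlocks the key non-interactively.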
// GetPublicKey returns the public key of a key.
//
// The passphrase argument is used to unlock the key.
// If passphrase is not set, an interactive prompt will be started
// to collect the passphrase without revealing it in the terminal.
//
// - address: The address corresponding to the local key
// - [passphrase]: The passphrase of the local key
func (m *UserModule) GetPublicKey(address string, passphrase ...string) string {
// If passphrase is not set, start interactive mode
var pass string
if len(passphrase) == 0 {
pass, _ = m.keystore.AskForPasswordOnce()
} else {
pass = passphrase[0]
}
if m.IsAttached() {
res, err := m.Client.User().GetPublicKey(address, pass)
if err != nil {
panic(err)
}
return res
}
return m.getKey(address, pass).PubKey().Base58()
}
// GetNonce returns the current nonce of a network account
// - address: The address corresponding to the account
// - [height]: The target block height to query (default: latest)
func (m *UserModule) GetNonce(address string, height ...uint64) string {
if m.IsAttached() {
nonce, err := m.Client.User().GetNonce(address, height...)
if err != nil {
panic(err)
}
return cast.ToString(nonce)
}
acct := m.logic.AccountKeeper().Get(address2.Address(address), height...)
if acct.IsNil() {
panic(errors.ReqErr(404, StatusCodeAccountNotFound, "address", at.ErrAccountUnknown.Error()))
}
return cast.ToString(acct.Nonce.UInt64())
}
// GetAccount returns the account of the given address.
// - address: The address corresponding to the account
// - [height]: The target block height to query (default: latest)
func (m *UserModule) GetAccount(address string, height ...uint64) util.Map {
if m.IsAttached() {
tx, err := m.Client.User().Get(address, height...)
if err != nil {
panic(err)
}
return util.ToMap(tx)
}
acct := m.logic.AccountKeeper().Get(address2.Address(address), height...)
if acct.IsNil() {
panic(errors.ReqErr(404, StatusCodeAccountNotFound, "address", at.ErrAccountUnknown.Error()))
}
if len(acct.Stakes) == 0 {
acct.Stakes = nil
}
return util.ToMap(acct)
}
// GetAvailableBalance returns the spendable balance of an account.
// - address: The address corresponding to the account
// - [height]: The target block height to query (default: latest)
func (m *UserModule) GetAvailableBalance(address string, height ...uint64) string {
if m.IsAttached() {
bal, err := m.Client.User().GetBalance(address, height...)
if err != nil {
panic(err)
}
return cast.ToString(bal)
}
acct := m.logic.AccountKeeper().Get(address2.Address(address), height...)
if acct.IsNil() {
panic(errors.ReqErr(404, StatusCodeAccountNotFound, "address", at.ErrAccountUnknown.Error()))
}
curBlockInfo, err := m.logic.SysKeeper().GetLastBlockInfo()
if err != nil {
panic(errors.ReqErr(500, StatusCodeServerErr, "", err.Error()))
}
return acct.GetAvailableBalance(uint64(curBlockInfo.Height)).String()
}
// GetStakedBalance returns the total staked coins of an account
// - address: The address corresponding to the account
// - [height]: The target block height to query (default: latest)
func (m *UserModule) GetStakedBalance(address string, height ...uint64) string {
if m.IsAttached() {
bal, err := m.Client.User().GetStakedBalance(address, height...)
if err != nil {
panic(err)
}
return cast.ToString(bal)
}
acct := m.logic.AccountKeeper().Get(address2.Address(address), height...)
if acct.IsNil() {
panic(errors.ReqErr(404, StatusCodeAccountNotFound, "address", at.ErrAccountUnknown.Error()))
}
curBlockInfo, err := m.logic.SysKeeper().GetLastBlockInfo()
if err != nil {
panic(errors.ReqErr(500, StatusCodeServerErr, "", err.Error()))
}
return acct.Stakes.TotalStaked(uint64(curBlockInfo.Height)).String()
}
// GetValidator returns the address, public key and (optionally) private key of the validator.
//
// - includePrivKey: Indicates that the private key of the validator should be included in the result
//
// RETURNS object <map>:
// - pubkey <string>: The validator's base58 public key
// - address <string>: The validator's bech32 address
// - tmAddr <string>: The tendermint address
// - privkey <string>: The validator's base58 private key (included only when includePrivKey is true)
func (m *UserModule) GetValidator(includePrivKey ...bool) util.Map {
inclPrivKey := false
if len(includePrivKey) > 0 {
inclPrivKey = includePrivKey[0]
}
if m.IsAttached() {
res, err := m.Client.User().GetValidator(inclPrivKey)
if err != nil {
panic(err)
}
return util.ToMap(res)
}
key, _ := m.cfg.G().PrivVal.GetKey()
info := map[string]interface{}{
"pubkey": key.PubKey().Base58(),
"address": key.Addr().String(),
"tmAddr": m.cfg.G().PrivVal.Key.Address.String(),
}
if inclPrivKey {
info["privkey"] = key.PrivKey().Base58()
}
return info
}
// SetCommission sets the delegator commission for an account
//
// params <map>
// - nonce <number|string>: The sender's next account nonce
// - fee <number|string>: The transaction fee to pay
// - commission <number|string>: The network commission value
// - timestamp <number>: The unix timestamp
//
// options <[]interface{}>
// - [0] key <string>: The signer's private key
// - [1] payloadOnly <bool>: When true, returns the payload only, without sending the tx.
//
// RETURNS object <map>:
// - hash <string>: The transaction hash
func (m *UserModule) SetCommission(params map[string]interface{}, options ...interface{}) util.Map {
var err error
var tx = txns.NewBareTxSetDelegateCommission()
if err = tx.FromMap(params); err != nil {
panic(errors.ReqErr(400, StatusCodeInvalidParam, "", err.Error()))
}
retPayload, signingKey := finalizeTx(tx, m.logic, m.Client, options...)
if retPayload {
return tx.ToMap()
}
if m.IsAttached() {
resp, err := m.Client.User().SetCommission(&api.BodySetCommission{
Commission: tx.Commission.Float(),
Nonce: tx.Nonce,
Fee: cast.ToFloat64(tx.Fee.String()),
SigningKey: ed25519.NewKeyFromPrivKey(signingKey),
})
if err != nil {
panic(err)
}
return util.ToMap(resp)
}
hash, err := m.logic.GetMempoolReactor().AddTx(tx)
if err != nil {
panic(errors.ReqErr(400, StatusCodeMempoolAddFail, "", err.Error()))
}
return map[string]interface{}{
"hash": hash,
}
}
// SendCoin sends the native coin from a source account to a destination account.
//
// params <map>
// - value <string>: The amount of coin to send
// - to <string>: The address of the recipient
// - nonce <number|string>: The sender's next account nonce
// - fee <number|string>: The transaction fee to pay
// - timestamp <number>: The unix timestamp
//
// options <[]interface{}>
// - [0] key <string>: The signer's private key
// - [1] payloadOnly <bool>: When true, returns the payload only, without sending the tx.
//
// RETURNS object <map>
// - hash <string>: The transaction hash
func (m *UserModule) SendCoin(params map[string]interface{}, options ...interface{}) util.Map {
var err error
var tx = txns.NewBareTxCoinTransfer()
if err = tx.FromMap(params); err != nil {
panic(errors.ReqErr(400, StatusCodeInvalidParam, "params", err.Error()))
}
retPayload, signingKey := finalizeTx(tx, m.logic, m.Client, options...)
if retPayload {
return tx.ToMap()
}
if m.IsAttached() {
resp, err := m.Client.User().Send(&api.BodySendCoin{
To: tx.To,
Nonce: tx.Nonce,
Value: cast.ToFloat64(tx.Value.String()),
Fee: cast.ToFloat64(tx.Fee.String()),
SigningKey: ed25519.NewKeyFromPrivKey(signingKey),
})
if err != nil {
panic(err)
}
return util.ToMap(resp)
}
hash, err := m.logic.GetMempoolReactor().AddTx(tx)
if err != nil {
panic(errors.ReqErr(400, StatusCodeMempoolAddFail, "", err.Error()))
}
return map[string]interface{}{
"hash": hash,
}
}
// NewUserModule creates an instance of UserModule.
// NOTE: the signature below is reconstructed from the struct literal that
// follows; the parameter types are assumptions, not taken from the original source.
func NewUserModule(cfg *config.AppConfig, keystore keystore.Keystore, service services.Service, logic core.Logic) *UserModule {
return &UserModule{
cfg: cfg,
keystore: keystore,
service: service,
logic: logic,
}
}
lib.rs
pub mod colibri;
mod conference;
mod jingle;
mod pinger;
mod source;
mod stanza_filter;
mod util;
mod xmpp;
pub use xmpp_parsers;
pub use crate::{
conference::{Feature, JitsiConference, JitsiConferenceConfig, Participant},
source::MediaType,
stanza_filter::StanzaFilter,
xmpp::connection::{Authentication, Connection},
};
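/// Initializes a `tracing` subscriber for the library's log output at the
/// given level. Only compiled when the `tracing-subscriber` feature is enabled.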
#[cfg(feature = "tracing-subscriber")]
pub fn init_tracing(level: tracing::Level) {
tracing_subscriber::fmt()
.with_max_level(level)
.with_span_events(tracing_subscriber::fmt::format::FmtSpan::CLOSE)
.with_target(false)
.init();
}
monad.py
"""
``fn.monad.Option`` represents optional values; each instance of
``Option`` is either an instance of ``Full`` or ``Empty``.
It provides you with a simple way to write long computation sequences
and get rid of many ``if/else`` blocks. See usage examples below.
Assume that you have a ``Request`` class that gives you a parameter
value by its name. To get the uppercase notation for a non-empty stripped value::
class Request(dict):
def parameter(self, name):
return self.get(name, None)
r = Request(testing="Fixed", empty=" ")
param = r.parameter("testing")
if param is None:
fixed = ""
else:
param = param.strip()
if len(param) == 0:
fixed = ""
else:
fixed = param.upper()
Hmm, looks ugly... Let's update the code with ``fn.monad.Option``::
from operator import methodcaller
from fn.monad import optionable
class Request(dict):
@optionable
def parameter(self, name):
return self.get(name, None)
r = Request(testing="Fixed", empty=" ")
fixed = r.parameter("testing")
.map(methodcaller("strip"))
.filter(len)
.map(methodcaller("upper"))
.get_or("")
``fn.monad.Option.or_call`` is a good method for trying several
variants to end a computation. E.g., you have a ``Request`` class
with optional attributes ``type``, ``mimetype``, ``url``.
You need to evaluate the "request type" using at least one attribute::
from fn.monad import Option
request = dict(url="face.png", mimetype="PNG")
tp = (Option(request.get("type", None)) # check "type" key first
.or_call(from_mimetype, request) # or.. check "mimetype" key
.or_call(from_extension, request) # or... get "url" and check extension
.get_or("application/undefined"))
"""
from functools import partial, wraps
from operator import eq, is_not
class Option(object):
def __new__(tp, value, checker=partial(is_not, None)):
if isinstance(value, Option):
# Option(Full) -> Full
# Option(Empty) -> Empty
return value
return Full(value) if checker(value) else Empty()
@staticmethod
def from_value(value):
return Option(value)
@staticmethod
def from_call(callback, *args, **kwargs):
"""Execute callback and catch possible (all by default)
exceptions. If exception is raised Empty will be returned.
"""
exc = kwargs.pop("exc", Exception)
try:
return Option(callback(*args, **kwargs))
except exc:
return Empty()
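    # Illustrative usage (an assumption, not from the original docs):
    #   Option.from_call(int, "10")   -> Full(10)
    #   Option.from_call(int, "oops") -> Empty()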
def map(self, callback):
raise NotImplementedError()
def filter(self, callback):
raise NotImplementedError()
def get_or(self, default):
raise NotImplementedError()
def get_or_call(self, callback, *args, **kwargs):
raise NotImplementedError()
def or_else(self, default):
raise NotImplementedError()
def or_call(self, callback, *args, **kwargs):
raise NotImplementedError()
class Full(Option):
"""Represents value that is ready for further computations"""
__slots__ = "x",
empty = False
def __new__(tp, value, *args, **kwargs):
# Full(Empty) -> Full
if isinstance(value, Empty):
return Empty()
return object.__new__(tp)
def __init__(self, value, *args):
# Option(Full) -> Full
self.x = value.get_or("") if isinstance(value, Full) else value
def map(self, callback):
return Option.from_value(callback(self.x))
def filter(self, callback):
return self if callback(self.x) else Empty()
def get_or(self, default):
return self.x
def get_or_call(self, callback, *args, **kwargs):
return self.x
def or_else(self, default):
return self
def or_call(self, callback, *args, **kwargs):
return self
def __str__(self):
return "Full(%s)" % self.x
__repr__ = __str__
def __eq__(self, other):
if not isinstance(other, Full):
return False
return eq(self.x, other.x)
class Empty(Option):
"""Represents empty option (without value)"""
__object = None
empty = True
def __new__(tp, *args, **kwargs):
if Empty.__object is None:
Empty.__object = object.__new__(tp)
return Empty.__object
def map(self, callback):
return Empty()
def filter(self, callback):
return Empty()
def get_or(self, default):
return default
def get_or_call(self, callback, *args, **kwargs):
return callback(*args, **kwargs)
def or_else(self, default):
return Option(default)
def or_call(self, callback, *args, **kwargs):
return Option(callback(*args, **kwargs))
def __str__(self):
return "Empty()"
__repr__ = __str__
def __eq__(self, other):
return isinstance(other, Empty)
def optionable(f):
    @wraps(f)
    def wrapper(*args, **kwargs):
        return Option(f(*args, **kwargs))
    return wrapper
patch.py
'''API functions for partial updates of existing data in CKAN'''
import logging
from ckan.logic import get_action
from ckanext.harvest.utils import (
DATASET_TYPE_NAME
)
log = logging.getLogger(__name__)
def harvest_source_patch(context, data_dict):
'''
Patch an existing harvest source
This method just proxies the request to package_patch, which will update a
harvest_source dataset type and the HarvestSource object. All auth checks
and validation will be done there. We only make sure to set the dataset
type.
Note that the harvest source type (ckan, waf, csw, etc) is now set via the
source_type field.
All fields that are not provided will stay as they were before.
:param id: the name or id of the harvest source to update
:type id: string
:param url: the URL for the harvest source
:type url: string
:param name: the name of the new harvest source, must be between 2 and 100
characters long and contain only lowercase alphanumeric characters
:type name: string
:param title: the title of the dataset (optional, default: same as
``name``)
:type title: string
:param notes: a description of the harvest source (optional)
:type notes: string
:param source_type: the harvester type for this source. This must be one
    of the registered harvesters, e.g. 'ckan', 'csw', etc.
:type source_type: string
:param frequency: the frequency in which this harvester should run. See
``ckanext.harvest.model`` source for possible values. Default is
'MANUAL'
:type frequency: string
:param config: extra configuration options for the particular harvester
    type. Should be serialized as JSON. (optional)
:type config: string
:returns: the updated harvest source
:rtype: dictionary
'''
log.info('Patch harvest source: %r', data_dict)
data_dict['type'] = DATASET_TYPE_NAME
context['extras_as_string'] = True
try:
source = get_action('package_patch')(context, data_dict)
except KeyError:
raise Exception('The harvest_source_patch action is not available on '
'this version of CKAN')
return source
session.rs
//! `Session` is the main object used in the driver.\
//! It manages all connections to the cluster and allows performing queries.
use crate::frame::types::LegacyConsistency;
use bytes::Bytes;
use futures::future::join_all;
use futures::future::try_join_all;
use std::future::Future;
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
use tokio::net::lookup_host;
use tokio::time::timeout;
use tracing::{debug, trace, trace_span, Instrument};
use uuid::Uuid;
use super::connection::QueryResponse;
use super::errors::{BadQuery, NewSessionError, QueryError};
use crate::cql_to_rust::FromRow;
use crate::frame::response::cql_to_rust::FromRowError;
use crate::frame::response::result;
use crate::frame::value::{BatchValues, SerializedValues, ValueList};
use crate::prepared_statement::{PartitionKeyError, PreparedStatement};
use crate::query::Query;
use crate::routing::{murmur3_token, Token};
use crate::statement::{Consistency, SerialConsistency};
use crate::tracing::{GetTracingConfig, TracingEvent, TracingInfo};
use crate::transport::cluster::{Cluster, ClusterData};
use crate::transport::connection::{
BatchResult, Connection, ConnectionConfig, VerifiedKeyspaceName,
};
use crate::transport::connection_pool::PoolConfig;
use crate::transport::iterator::{PreparedIteratorConfig, RowIterator};
use crate::transport::load_balancing::{
LoadBalancingPolicy, RoundRobinPolicy, Statement, TokenAwarePolicy,
};
use crate::transport::metrics::Metrics;
use crate::transport::node::Node;
use crate::transport::query_result::QueryResult;
use crate::transport::retry_policy::{
DefaultRetryPolicy, QueryInfo, RetryDecision, RetryPolicy, RetrySession,
};
use crate::transport::speculative_execution;
use crate::transport::speculative_execution::SpeculativeExecutionPolicy;
use crate::transport::Compression;
use crate::{batch::Batch, statement::StatementConfig};
pub use crate::transport::connection_pool::PoolSize;
#[cfg(feature = "ssl")]
use openssl::ssl::SslContext;
/// `Session` manages connections to the cluster and allows performing queries
pub struct Session {
cluster: Cluster,
load_balancer: Arc<dyn LoadBalancingPolicy>,
schema_agreement_interval: Duration,
retry_policy: Box<dyn RetryPolicy>,
speculative_execution_policy: Option<Arc<dyn SpeculativeExecutionPolicy>>,
metrics: Arc<Metrics>,
default_consistency: Consistency,
}
/// Configuration options for [`Session`].
/// Can be created manually, but usually it's easier to use
/// [SessionBuilder](super::session_builder::SessionBuilder)
#[derive(Clone)]
pub struct SessionConfig {
/// List of database servers known on Session startup.
/// Session will connect to these nodes to retrieve information about other nodes in the cluster.
/// Each node can be represented as a hostname or an IP address.
pub known_nodes: Vec<KnownNode>,
/// Preferred compression algorithm to use on connections.
/// If it's not supported by the database server, Session will fall back to no compression.
pub compression: Option<Compression>,
pub tcp_nodelay: bool,
/// Load balancing policy used by Session
pub load_balancing: Arc<dyn LoadBalancingPolicy>,
pub used_keyspace: Option<String>,
pub keyspace_case_sensitive: bool,
pub retry_policy: Box<dyn RetryPolicy>,
pub speculative_execution_policy: Option<Arc<dyn SpeculativeExecutionPolicy>>,
/// Provide our Session with TLS
#[cfg(feature = "ssl")]
pub ssl_context: Option<SslContext>,
pub auth_username: Option<String>,
pub auth_password: Option<String>,
pub schema_agreement_interval: Duration,
pub connect_timeout: std::time::Duration,
/// Size of the per-node connection pool, i.e. how many connections the driver should keep to each node.
/// The default is `PerShard(1)`, which is the recommended setting for Scylla clusters.
pub connection_pool_size: PoolSize,
/// If true, prevents the driver from connecting to the shard-aware port, even if the node supports it.
/// Generally, this option is best left at its default (false).
pub disallow_shard_aware_port: bool,
pub default_consistency: Consistency,
/// If true, full schema is fetched with every metadata refresh.
pub fetch_schema_metadata: bool,
/*
These configuration options will be added in the future:
pub tcp_keepalive: bool,
*/
}
/// Describes a database server known on Session startup.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub enum KnownNode {
Hostname(String),
Address(SocketAddr),
}
impl SessionConfig {
/// Creates a [`SessionConfig`] with default configuration
/// # Default configuration
/// * Compression: None
/// * Load balancing policy: Token-aware Round-robin
///
/// # Example
/// ```
/// # use scylla::SessionConfig;
/// let config = SessionConfig::new();
/// ```
pub fn new() -> Self {
SessionConfig {
known_nodes: Vec::new(),
compression: None,
tcp_nodelay: true,
schema_agreement_interval: Duration::from_millis(200),
load_balancing: Arc::new(TokenAwarePolicy::new(Box::new(RoundRobinPolicy::new()))),
used_keyspace: None,
keyspace_case_sensitive: false,
retry_policy: Box::new(DefaultRetryPolicy),
speculative_execution_policy: None,
#[cfg(feature = "ssl")]
ssl_context: None,
auth_username: None,
auth_password: None,
connect_timeout: std::time::Duration::from_secs(5),
connection_pool_size: Default::default(),
disallow_shard_aware_port: false,
default_consistency: Consistency::LocalQuorum,
fetch_schema_metadata: true,
}
}
/// Adds a known database server with a hostname.
/// If the port is not explicitly specified, 9042 is used as default
/// # Example
/// ```
/// # use scylla::SessionConfig;
/// let mut config = SessionConfig::new();
/// config.add_known_node("127.0.0.1");
/// config.add_known_node("db1.example.com:9042");
/// ```
pub fn add_known_node(&mut self, hostname: impl AsRef<str>) {
self.known_nodes
.push(KnownNode::Hostname(hostname.as_ref().to_string()));
}
/// Adds a known database server with an IP address
/// # Example
/// ```
/// # use scylla::SessionConfig;
/// # use std::net::{SocketAddr, IpAddr, Ipv4Addr};
/// let mut config = SessionConfig::new();
/// config.add_known_node_addr(SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 9042));
/// ```
pub fn add_known_node_addr(&mut self, node_addr: SocketAddr) {
self.known_nodes.push(KnownNode::Address(node_addr));
}
/// Adds a list of known database servers with hostnames.
/// If the port is not explicitly specified, 9042 is used as default
/// # Example
/// ```
/// # use scylla::SessionConfig;
/// # use std::net::{SocketAddr, IpAddr, Ipv4Addr};
/// let mut config = SessionConfig::new();
/// config.add_known_nodes(&["127.0.0.1:9042", "db1.example.com"]);
/// ```
pub fn add_known_nodes(&mut self, hostnames: &[impl AsRef<str>]) {
for hostname in hostnames {
self.add_known_node(hostname);
}
}
/// Adds a list of known database servers with IP addresses
/// # Example
/// ```
/// # use scylla::SessionConfig;
/// # use std::net::{SocketAddr, IpAddr, Ipv4Addr};
/// let addr1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(172, 17, 0, 3)), 9042);
/// let addr2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(172, 17, 0, 4)), 9042);
///
/// let mut config = SessionConfig::new();
/// config.add_known_nodes_addr(&[addr1, addr2]);
/// ```
pub fn add_known_nodes_addr(&mut self, node_addrs: &[SocketAddr]) {
for address in node_addrs {
self.add_known_node_addr(*address);
}
}
/// Creates a PoolConfig which can be used to create NodeConnectionPools
fn get_pool_config(&self) -> PoolConfig {
PoolConfig {
connection_config: self.get_connection_config(),
pool_size: self.connection_pool_size.clone(),
can_use_shard_aware_port: !self.disallow_shard_aware_port,
}
}
/// Makes a config that should be used in Connection
fn get_connection_config(&self) -> ConnectionConfig {
ConnectionConfig {
compression: self.compression,
tcp_nodelay: self.tcp_nodelay,
#[cfg(feature = "ssl")]
ssl_context: self.ssl_context.clone(),
auth_username: self.auth_username.to_owned(),
auth_password: self.auth_password.to_owned(),
connect_timeout: self.connect_timeout,
event_sender: None,
default_consistency: self.default_consistency,
}
}
}
/// Creates default [`SessionConfig`], same as [`SessionConfig::new`]
impl Default for SessionConfig {
fn default() -> Self {
Self::new()
}
}
/// Trait used to implement `Vec<result::Row>::into_typed<RowT>`
// This is the only way to add custom method to Vec
pub trait IntoTypedRows {
fn into_typed<RowT: FromRow>(self) -> TypedRowIter<RowT>;
}
// Adds method Vec<result::Row>::into_typed<RowT>(self)
// It transforms the Vec into iterator mapping to custom row type
impl IntoTypedRows for Vec<result::Row> {
fn into_typed<RowT: FromRow>(self) -> TypedRowIter<RowT> {
TypedRowIter {
row_iter: self.into_iter(),
phantom_data: Default::default(),
}
}
}
/// Iterator over rows parsed as the given type\
/// Returned by `rows.into_typed::<(...)>()`
pub struct TypedRowIter<RowT: FromRow> {
row_iter: std::vec::IntoIter<result::Row>,
phantom_data: std::marker::PhantomData<RowT>,
}
impl<RowT: FromRow> Iterator for TypedRowIter<RowT> {
type Item = Result<RowT, FromRowError>;
fn next(&mut self) -> Option<Self::Item> {
self.row_iter.next().map(RowT::from_row)
}
}
/// Represents a CQL session, which can be used to communicate
/// with the database
impl Session {
    /// Establishes a CQL session with the database
///
/// Usually it's easier to use [SessionBuilder](crate::transport::session_builder::SessionBuilder)
/// instead of calling `Session::connect` directly
/// # Arguments
/// * `config` - Connection configuration - known nodes, Compression, etc.
/// Must contain at least one known node.
///
/// # Example
/// ```rust
/// # use std::error::Error;
/// # async fn check_only_compiles() -> Result<(), Box<dyn Error>> {
/// use scylla::{Session, SessionConfig};
/// use scylla::transport::session::KnownNode;
///
/// let mut config = SessionConfig::new();
/// config.known_nodes.push(KnownNode::Hostname("127.0.0.1:9042".to_string()));
///
/// let session: Session = Session::connect(config).await?;
/// # Ok(())
/// # }
/// ```
pub async fn connect(config: SessionConfig) -> Result<Session, NewSessionError> {
// Ensure there is at least one known node
if config.known_nodes.is_empty() {
return Err(NewSessionError::EmptyKnownNodesList);
}
// Find IP addresses of all known nodes passed in the config
let mut node_addresses: Vec<SocketAddr> = Vec::with_capacity(config.known_nodes.len());
let mut to_resolve: Vec<&str> = Vec::new();
for node in &config.known_nodes {
match node {
KnownNode::Hostname(hostname) => to_resolve.push(hostname),
KnownNode::Address(address) => node_addresses.push(*address),
};
}
let resolve_futures = to_resolve.into_iter().map(resolve_hostname);
let resolved: Vec<SocketAddr> = futures::future::try_join_all(resolve_futures).await?;
node_addresses.extend(resolved);
let cluster = Cluster::new(
&node_addresses,
config.get_pool_config(),
config.fetch_schema_metadata,
)
.await?;
let session = Session {
cluster,
load_balancer: config.load_balancing,
retry_policy: config.retry_policy,
schema_agreement_interval: config.schema_agreement_interval,
speculative_execution_policy: config.speculative_execution_policy,
metrics: Arc::new(Metrics::new()),
default_consistency: config.default_consistency,
};
        if let Some(keyspace_name) = config.used_keyspace {
            session
                .use_keyspace(keyspace_name, config.keyspace_case_sensitive)
                .await?;
        }
Ok(session)
}
/// Sends a query to the database and receives a response.\
/// Returns only a single page of results, to receive multiple pages use [query_iter](Session::query_iter)
///
/// This is the easiest way to make a query, but performance is worse than that of prepared queries.
///
/// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/simple.html) for more information
/// # Arguments
/// * `query` - query to perform, can be just a `&str` or the [Query](crate::query::Query) struct.
/// * `values` - values bound to the query, easiest way is to use a tuple of bound values
///
/// # Examples
/// ```rust
/// # use scylla::Session;
/// # use std::error::Error;
/// # async fn check_only_compiles(session: &Session) -> Result<(), Box<dyn Error>> {
/// // Insert an int and text into a table
/// session
/// .query(
/// "INSERT INTO ks.tab (a, b) VALUES(?, ?)",
/// (2_i32, "some text")
/// )
/// .await?;
/// # Ok(())
/// # }
/// ```
/// ```rust
/// # use scylla::Session;
/// # use std::error::Error;
/// # async fn check_only_compiles(session: &Session) -> Result<(), Box<dyn Error>> {
/// use scylla::IntoTypedRows;
///
/// // Read rows containing an int and text
/// let rows_opt = session
/// .query("SELECT a, b FROM ks.tab", &[])
/// .await?
/// .rows;
///
/// if let Some(rows) = rows_opt {
/// for row in rows.into_typed::<(i32, String)>() {
    /// // Parse row as int and text
/// let (int_val, text_val): (i32, String) = row?;
/// }
/// }
/// # Ok(())
/// # }
/// ```
pub async fn query(
&self,
query: impl Into<Query>,
values: impl ValueList,
) -> Result<QueryResult, QueryError> {
self.query_paged(query, values, None).await
}
/// Queries the database with a custom paging state.
/// # Arguments
///
/// * `query` - query to be performed
/// * `values` - values bound to the query
/// * `paging_state` - previously received paging state or None
pub async fn query_paged(
&self,
query: impl Into<Query>,
values: impl ValueList,
paging_state: Option<Bytes>,
) -> Result<QueryResult, QueryError> {
let query: Query = query.into();
let serialized_values = values.serialized()?;
let span = trace_span!("Request", query = query.contents.as_str());
let response = self
.run_query(
Statement::default(),
&query.config,
|node: Arc<Node>| async move { node.random_connection().await },
|connection: Arc<Connection>| {
// Needed to avoid moving query and values into async move block
let query_ref = &query;
let values_ref = &serialized_values;
let paging_state_ref = &paging_state;
async move {
connection
.query(query_ref, values_ref, paging_state_ref.clone())
.await
}
},
)
.instrument(span)
.await?;
self.handle_set_keyspace_response(&response).await?;
response.into_query_result()
}
async fn handle_set_keyspace_response(
&self,
response: &QueryResponse,
) -> Result<(), QueryError> {
if let Some(set_keyspace) = response.as_set_keyspace() {
debug!(
"Detected USE KEYSPACE query, setting session's keyspace to {}",
set_keyspace.keyspace_name
);
self.use_keyspace(set_keyspace.keyspace_name.clone(), true)
.await?;
}
Ok(())
}
/// Run a simple query with paging\
/// This method will query all pages of the result\
///
/// Returns an async iterator (stream) over all received rows\
/// Page size can be specified in the [Query](crate::query::Query) passed to the function
///
/// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/paged.html) for more information
///
/// # Arguments
/// * `query` - query to perform, can be just a `&str` or the [Query](crate::query::Query) struct.
/// * `values` - values bound to the query, easiest way is to use a tuple of bound values
///
/// # Example
///
/// ```rust
/// # use scylla::Session;
/// # use std::error::Error;
/// # async fn check_only_compiles(session: &Session) -> Result<(), Box<dyn Error>> {
/// use scylla::IntoTypedRows;
/// use futures::stream::StreamExt;
///
/// let mut rows_stream = session
/// .query_iter("SELECT a, b FROM ks.t", &[])
/// .await?
/// .into_typed::<(i32, i32)>();
///
/// while let Some(next_row_res) = rows_stream.next().await {
/// let (a, b): (i32, i32) = next_row_res?;
/// println!("a, b: {}, {}", a, b);
/// }
/// # Ok(())
/// # }
/// ```
pub async fn query_iter(
&self,
query: impl Into<Query>,
values: impl ValueList,
) -> Result<RowIterator, QueryError> {
let query: Query = query.into();
let serialized_values = values.serialized()?;
let retry_session = match &query.config.retry_policy {
Some(policy) => policy.new_session(),
None => self.retry_policy.new_session(),
};
let span = trace_span!("Request", query = query.contents.as_str());
RowIterator::new_for_query(
query,
serialized_values.into_owned(),
self.default_consistency,
retry_session,
self.load_balancer.clone(),
self.cluster.get_data(),
self.metrics.clone(),
)
.instrument(span)
.await
}
/// Prepares a statement on the server side and returns a prepared statement,
/// which can later be used to perform more efficient queries
///
/// Prepared queries are much faster than simple queries:
/// * Database doesn't need to parse the query
/// * They are properly load balanced using token aware routing
///
/// > ***Warning***\
/// > For token/shard aware load balancing to work properly, all partition key values
/// > must be sent as bound values
/// > (see [performance section](https://rust-driver.docs.scylladb.com/stable/queries/prepared.html#performance))
///
/// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/prepared.html) for more information
///
/// # Arguments
/// * `query` - query to prepare, can be just a `&str` or the [Query](crate::query::Query) struct.
///
/// # Example
/// ```rust
/// # use scylla::Session;
/// # use std::error::Error;
/// # async fn check_only_compiles(session: &Session) -> Result<(), Box<dyn Error>> {
/// use scylla::prepared_statement::PreparedStatement;
///
/// // Prepare the query for later execution
/// let prepared: PreparedStatement = session
/// .prepare("INSERT INTO ks.tab (a) VALUES(?)")
/// .await?;
///
/// // Run the prepared query with some values, just like a simple query
/// let to_insert: i32 = 12345;
/// session.execute(&prepared, (to_insert,)).await?;
/// # Ok(())
/// # }
/// ```
pub async fn prepare(&self, query: impl Into<Query>) -> Result<PreparedStatement, QueryError> {
let query = query.into();
let connections = self.cluster.get_working_connections().await?;
// Prepare statements on all connections concurrently
let handles = connections.iter().map(|c| c.prepare(&query));
let mut results = join_all(handles).await;
// If at least one prepare was successful prepare returns Ok
// Find first result that is Ok, or Err if all failed
let mut first_ok: Option<Result<PreparedStatement, QueryError>> = None;
while let Some(res) = results.pop() {
let is_ok: bool = res.is_ok();
first_ok = Some(res);
if is_ok {
break;
}
}
let mut prepared: PreparedStatement = first_ok.unwrap()?;
// Validate prepared ids equality
for statement in results.into_iter().flatten() {
if prepared.get_id() != statement.get_id() {
return Err(QueryError::ProtocolError(
"Prepared statement Ids differ, all should be equal",
));
}
// Collect all tracing ids from prepare() queries in the final result
prepared
.prepare_tracing_ids
.extend(statement.prepare_tracing_ids);
}
Ok(prepared)
}
/// Execute a prepared query. Requires a [PreparedStatement](crate::prepared_statement::PreparedStatement)
/// generated using [`Session::prepare`](Session::prepare)\
/// Returns only a single page of results, to receive multiple pages use [execute_iter](Session::execute_iter)
///
/// Prepared queries are much faster than simple queries:
/// * Database doesn't need to parse the query
/// * They are properly load balanced using token aware routing
///
/// > ***Warning***\
/// > For token/shard aware load balancing to work properly, all partition key values
/// > must be sent as bound values
/// > (see [performance section](https://rust-driver.docs.scylladb.com/stable/queries/prepared.html#performance))
///
/// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/prepared.html) for more information
///
/// # Arguments
/// * `prepared` - the prepared statement to execute, generated using [`Session::prepare`](Session::prepare)
/// * `values` - values bound to the query, easiest way is to use a tuple of bound values
///
/// # Example
/// ```rust
/// # use scylla::Session;
/// # use std::error::Error;
/// # async fn check_only_compiles(session: &Session) -> Result<(), Box<dyn Error>> {
/// use scylla::prepared_statement::PreparedStatement;
///
/// // Prepare the query for later execution
/// let prepared: PreparedStatement = session
/// .prepare("INSERT INTO ks.tab (a) VALUES(?)")
/// .await?;
///
/// // Run the prepared query with some values, just like a simple query
/// let to_insert: i32 = 12345;
/// session.execute(&prepared, (to_insert,)).await?;
/// # Ok(())
/// # }
/// ```
pub async fn execute(
&self,
prepared: &PreparedStatement,
values: impl ValueList,
) -> Result<QueryResult, QueryError> {
self.execute_paged(prepared, values, None).await
}
/// Executes a previously prepared statement with previously received paging state
/// # Arguments
///
/// * `prepared` - a statement prepared with [prepare](crate::transport::session::Session::prepare)
/// * `values` - values bound to the query
/// * `paging_state` - paging state from the previous query or None
pub async fn execute_paged(
&self,
prepared: &PreparedStatement,
values: impl ValueList,
paging_state: Option<Bytes>,
) -> Result<QueryResult, QueryError> {
let serialized_values = values.serialized()?;
let values_ref = &serialized_values;
let paging_state_ref = &paging_state;
let token = calculate_token(prepared, &serialized_values)?;
let statement_info = Statement {
token: Some(token),
keyspace: prepared.get_keyspace_name(),
};
let span = trace_span!(
"Request",
prepared_id = format!("{:X}", prepared.get_id()).as_str()
);
let response = self
.run_query(
statement_info,
&prepared.config,
|node: Arc<Node>| async move { node.connection_for_token(token).await },
|connection: Arc<Connection>| async move {
connection
.execute(prepared, values_ref, paging_state_ref.clone())
.await
},
)
.instrument(span)
.await?;
self.handle_set_keyspace_response(&response).await?;
response.into_query_result()
}
/// Run a prepared query with paging\
/// This method will query all pages of the result\
///
/// Returns an async iterator (stream) over all received rows\
/// Page size can be specified in the [PreparedStatement](crate::prepared_statement::PreparedStatement)
/// passed to the function
///
/// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/paged.html) for more information
///
/// # Arguments
/// * `prepared` - the prepared statement to execute, generated using [`Session::prepare`](Session::prepare)
/// * `values` - values bound to the query, easiest way is to use a tuple of bound values
///
/// # Example
///
/// ```rust
/// # use scylla::Session;
/// # use std::error::Error;
/// # async fn check_only_compiles(session: &Session) -> Result<(), Box<dyn Error>> {
/// use scylla::prepared_statement::PreparedStatement;
/// use scylla::IntoTypedRows;
/// use futures::stream::StreamExt;
///
/// // Prepare the query for later execution
/// let prepared: PreparedStatement = session
/// .prepare("SELECT a, b FROM ks.t")
/// .await?;
///
/// // Execute the query and receive all pages
/// let mut rows_stream = session
/// .execute_iter(prepared, &[])
/// .await?
/// .into_typed::<(i32, i32)>();
///
/// while let Some(next_row_res) = rows_stream.next().await {
/// let (a, b): (i32, i32) = next_row_res?;
/// println!("a, b: {}, {}", a, b);
/// }
/// # Ok(())
/// # }
/// ```
pub async fn execute_iter(
&self,
prepared: impl Into<PreparedStatement>,
values: impl ValueList,
) -> Result<RowIterator, QueryError> {
let prepared = prepared.into();
let serialized_values = values.serialized()?;
let token = calculate_token(&prepared, &serialized_values)?;
let retry_session = match &prepared.config.retry_policy {
Some(policy) => policy.new_session(),
None => self.retry_policy.new_session(),
};
let span = trace_span!(
"Request",
prepared_id = format!("{:X}", prepared.get_id()).as_str()
);
RowIterator::new_for_prepared_statement(PreparedIteratorConfig {
prepared,
values: serialized_values.into_owned(),
default_consistency: self.default_consistency,
token,
retry_session,
load_balancer: self.load_balancer.clone(),
cluster_data: self.cluster.get_data(),
metrics: self.metrics.clone(),
})
.instrument(span)
.await
}
/// Perform a batch query\
/// Batch contains many `simple` or `prepared` queries which are executed at once\
/// Batch doesn't return any rows
///
/// Batch values must contain values for each of the queries
///
/// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/batch.html) for more information
///
/// # Arguments
/// * `batch` - [Batch](crate::batch::Batch) to be performed
/// * `values` - List of values for each query, it's the easiest to use a tuple of tuples
///
/// # Example
/// ```rust
/// # use scylla::Session;
/// # use std::error::Error;
/// # async fn check_only_compiles(session: &Session) -> Result<(), Box<dyn Error>> {
/// use scylla::batch::Batch;
///
/// let mut batch: Batch = Default::default();
///
/// // A query with two bound values
/// batch.append_statement("INSERT INTO ks.tab(a, b) VALUES(?, ?)");
///
/// // A query with one bound value
/// batch.append_statement("INSERT INTO ks.tab(a, b) VALUES(3, ?)");
///
/// // A query with no bound values
/// batch.append_statement("INSERT INTO ks.tab(a, b) VALUES(5, 6)");
///
/// // Batch values is a tuple of 3 tuples containing values for each query
/// let batch_values = ((1_i32, 2_i32), // Tuple with two values for the first query
/// (4_i32,), // Tuple with one value for the second query
/// ()); // Empty tuple/unit for the third query
///
/// // Run the batch
/// session.batch(&batch, batch_values).await?;
/// # Ok(())
/// # }
/// ```
pub async fn batch(
&self,
batch: &Batch,
values: impl BatchValues,
) -> Result<BatchResult, QueryError> {
let values_ref = &values;
self.run_query(
Statement::default(),
&batch.config,
|node: Arc<Node>| async move { node.random_connection().await },
|connection: Arc<Connection>| async move { connection.batch(batch, values_ref).await },
)
.instrument(trace_span!("Batch"))
.await
}
/// Sends `USE <keyspace_name>` request on all connections\
    /// This allows writing `SELECT * FROM table` instead of `SELECT * FROM keyspace.table`\
///
    /// Note that even a failed `use_keyspace` can change the currently used keyspace - the request is sent on all connections and
/// can overwrite previously used keyspace.
///
/// Call only one `use_keyspace` at a time.\
/// Trying to do two `use_keyspace` requests simultaneously with different names
/// can end with some connections using one keyspace and the rest using the other.
///
/// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/usekeyspace.html) for more information
///
/// # Arguments
///
/// * `keyspace_name` - keyspace name to use,
/// keyspace names can have up to 48 alphanumeric characters and contain underscores
/// * `case_sensitive` - if set to true the generated query will put keyspace name in quotes
/// # Example
/// ```rust
/// # use scylla::{Session, SessionBuilder};
/// # use scylla::transport::Compression;
/// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
/// # let session = SessionBuilder::new().known_node("127.0.0.1:9042").build().await?;
/// session
/// .query("INSERT INTO my_keyspace.tab (a) VALUES ('test1')", &[])
/// .await?;
///
/// session.use_keyspace("my_keyspace", false).await?;
///
/// // Now we can omit keyspace name in the query
/// session
/// .query("INSERT INTO tab (a) VALUES ('test2')", &[])
/// .await?;
/// # Ok(())
/// # }
/// ```
pub async fn use_keyspace(
&self,
keyspace_name: impl Into<String>,
case_sensitive: bool,
) -> Result<(), QueryError> {
// Trying to pass keyspace as bound value in "USE ?" doesn't work
// So we have to create a string for query: "USE " + new_keyspace
// To avoid any possible CQL injections it's good to verify that the name is valid
let verified_ks_name = VerifiedKeyspaceName::new(keyspace_name.into(), case_sensitive)?;
self.cluster.use_keyspace(verified_ks_name).await?;
Ok(())
}
/// Manually trigger a metadata refresh\
/// The driver will fetch current nodes in the cluster and update its metadata
///
/// Normally this is not needed,
/// the driver should automatically detect all metadata changes in the cluster
pub async fn refresh_metadata(&self) -> Result<(), QueryError> {
self.cluster.refresh_metadata().await
}
/// Access metrics collected by the driver\
/// Driver collects various metrics like number of queries or query latencies.
/// They can be read using this method
pub fn get_metrics(&self) -> Arc<Metrics> {
self.metrics.clone()
}
/// Access cluster data collected by the driver\
/// Driver collects various information about network topology or schema.
/// They can be read using this method
pub fn get_cluster_data(&self) -> Arc<ClusterData> {
self.cluster.get_data()
}
/// Get [`TracingInfo`] of a traced query performed earlier
///
/// See [the book](https://rust-driver.docs.scylladb.com/stable/tracing/tracing.html)
/// for more information about query tracing
pub async fn get_tracing_info(&self, tracing_id: &Uuid) -> Result<TracingInfo, QueryError> {
self.get_tracing_info_custom(tracing_id, &GetTracingConfig::default())
.await
}
    /// Queries tracing info with custom retry settings.\
    /// Tracing info might not be available immediately on the queried node -
    /// that's why the driver performs a few attempts with sleeps in between.
    /// [`GetTracingConfig`] allows specifying a custom querying strategy.
pub async fn get_tracing_info_custom(
&self,
tracing_id: &Uuid,
config: &GetTracingConfig,
) -> Result<TracingInfo, QueryError> {
// config.attempts is NonZeroU32 so at least one attempt will be made
for _ in 0..config.attempts.get() {
let current_try: Option<TracingInfo> = self
.try_getting_tracing_info(tracing_id, Some(config.consistency))
.await?;
match current_try {
Some(tracing_info) => return Ok(tracing_info),
None => tokio::time::sleep(config.interval).await,
};
}
Err(QueryError::ProtocolError(
"All tracing queries returned an empty result, \
maybe information didn't reach this node yet. \
Consider using get_tracing_info_custom with \
bigger interval in GetTracingConfig",
))
}
// Tries getting the tracing info
// If the queries return 0 rows then returns None - the information didn't reach this node yet
// If there is some other error returns this error
async fn try_getting_tracing_info(
&self,
tracing_id: &Uuid,
consistency: Option<Consistency>,
) -> Result<Option<TracingInfo>, QueryError> {
// Query system_traces.sessions for TracingInfo
let mut traces_session_query = Query::new(crate::tracing::TRACES_SESSION_QUERY_STR);
traces_session_query.config.consistency = consistency;
traces_session_query.set_page_size(1024);
// Query system_traces.events for TracingEvents
let mut traces_events_query = Query::new(crate::tracing::TRACES_EVENTS_QUERY_STR);
traces_events_query.config.consistency = consistency;
traces_events_query.set_page_size(1024);
let (traces_session_res, traces_events_res) = tokio::try_join!(
self.query(traces_session_query, (tracing_id,)),
self.query(traces_events_query, (tracing_id,))
)?;
// Get tracing info
let tracing_info_row_res: Option<Result<TracingInfo, _>> = traces_session_res
.rows
.ok_or(QueryError::ProtocolError(
"Response to system_traces.sessions query was not Rows",
))?
.into_typed::<TracingInfo>()
.next();
let mut tracing_info: TracingInfo = match tracing_info_row_res {
Some(tracing_info_row_res) => tracing_info_row_res.map_err(|_| {
QueryError::ProtocolError(
"Columns from system_traces.session have an unexpected type",
)
})?,
None => return Ok(None),
};
// Get tracing events
let tracing_event_rows = traces_events_res
.rows
.ok_or(QueryError::ProtocolError(
"Response to system_traces.events query was not Rows",
))?
.into_typed::<TracingEvent>();
for event in tracing_event_rows {
let tracing_event: TracingEvent = event.map_err(|_| {
QueryError::ProtocolError(
"Columns from system_traces.events have an unexpected type",
)
})?;
tracing_info.events.push(tracing_event);
}
if tracing_info.events.is_empty() {
return Ok(None);
}
Ok(Some(tracing_info))
}
    // This method makes it easy to run a query using load balancing, retry policy etc.
// Requires some information about the query and two closures
// First closure is used to choose a connection
// - query will use node.random_connection()
// - execute will use node.connection_for_token()
// The second closure is used to do the query itself on a connection
// - query will use connection.query()
// - execute will use connection.execute()
// If this query closure fails with some errors retry policy is used to perform retries
// On success this query's result is returned
    // I tried to make these closures take a reference instead of an Arc but failed,
    // maybe once async closures get stabilized this can be fixed
async fn run_query<'a, ConnFut, QueryFut, ResT>(
&'a self,
statement_info: Statement<'a>,
statement_config: &StatementConfig,
choose_connection: impl Fn(Arc<Node>) -> ConnFut,
do_query: impl Fn(Arc<Connection>) -> QueryFut,
) -> Result<ResT, QueryError>
where
ConnFut: Future<Output = Result<Arc<Connection>, QueryError>>,
QueryFut: Future<Output = Result<ResT, QueryError>>,
{
let cluster_data = self.cluster.get_data();
let query_plan = self.load_balancer.plan(&statement_info, &cluster_data);
// If a speculative execution policy is used to run query, query_plan has to be shared
// between different async functions. This struct helps to wrap query_plan in mutex so it
// can be shared safely.
struct SharedPlan<I>
where
I: Iterator<Item = Arc<Node>>,
{
iter: std::sync::Mutex<I>,
}
impl<I> Iterator for &SharedPlan<I>
where
I: Iterator<Item = Arc<Node>>,
{
type Item = Arc<Node>;
fn next(&mut self) -> Option<Self::Item> {
self.iter.lock().unwrap().next()
}
}
let retry_policy = match &statement_config.retry_policy {
Some(policy) => policy,
None => &self.retry_policy,
};
#[allow(clippy::unnecessary_lazy_evaluations)]
let speculative_policy = statement_config
.speculative_execution_policy
.as_ref()
.or_else(|| self.speculative_execution_policy.as_ref());
match speculative_policy {
Some(speculative) if statement_config.is_idempotent => {
let shared_query_plan = SharedPlan {
iter: std::sync::Mutex::new(query_plan),
};
let execute_query_generator = || {
self.execute_query(
&shared_query_plan,
statement_config.is_idempotent,
statement_config.consistency,
retry_policy.new_session(),
&choose_connection,
&do_query,
)
};
let context = speculative_execution::Context {
metrics: self.metrics.clone(),
};
speculative_execution::execute(
speculative.as_ref(),
&context,
execute_query_generator,
)
.await
}
_ => self
.execute_query(
query_plan,
statement_config.is_idempotent,
statement_config.consistency,
retry_policy.new_session(),
&choose_connection,
&do_query,
)
.await
.unwrap_or(Err(QueryError::ProtocolError(
"Empty query plan - driver bug!",
))),
}
}
async fn execute_query<ConnFut, QueryFut, ResT>(
&self,
query_plan: impl Iterator<Item = Arc<Node>>,
is_idempotent: bool,
consistency: Option<Consistency>,
mut retry_session: Box<dyn RetrySession>,
choose_connection: impl Fn(Arc<Node>) -> ConnFut,
do_query: impl Fn(Arc<Connection>) -> QueryFut,
) -> Option<Result<ResT, QueryError>>
where
ConnFut: Future<Output = Result<Arc<Connection>, QueryError>>,
QueryFut: Future<Output = Result<ResT, QueryError>>,
{
let mut last_error: Option<QueryError> = None;
'nodes_in_plan: for node in query_plan {
let span = trace_span!("Executing query", node = node.address.to_string().as_str());
'same_node_retries: loop {
trace!(parent: &span, "Execution started");
let connection: Arc<Connection> = match choose_connection(node.clone())
.instrument(span.clone())
.await
{
Ok(connection) => connection,
Err(e) => {
trace!(
parent: &span,
error = e.to_string().as_str(),
"Choosing connection failed"
);
last_error = Some(e);
// Broken connection doesn't count as a failed query, don't log in metrics
continue 'nodes_in_plan;
}
};
self.metrics.inc_total_nonpaged_queries();
let query_start = std::time::Instant::now();
trace!(
parent: &span,
connection = connection.get_connect_address().to_string().as_str(),
"Sending"
);
let query_result: Result<ResT, QueryError> =
do_query(connection).instrument(span.clone()).await;
last_error = match query_result {
Ok(response) => {
trace!(parent: &span, "Query succeeded");
let _ = self
.metrics
.log_query_latency(query_start.elapsed().as_millis() as u64);
return Some(Ok(response));
}
Err(e) => {
trace!(
parent: &span,
last_error = e.to_string().as_str(),
"Query failed"
);
self.metrics.inc_failed_nonpaged_queries();
Some(e)
}
};
// Use retry policy to decide what to do next
let query_info = QueryInfo {
error: last_error.as_ref().unwrap(),
is_idempotent,
consistency: LegacyConsistency::Regular(
consistency.unwrap_or(self.default_consistency),
),
};
let retry_decision = retry_session.decide_should_retry(query_info);
trace!(
parent: &span,
retry_decision = format!("{:?}", retry_decision).as_str()
);
match retry_decision {
RetryDecision::RetrySameNode => {
self.metrics.inc_retries_num();
continue 'same_node_retries;
}
RetryDecision::RetryNextNode => {
self.metrics.inc_retries_num();
continue 'nodes_in_plan;
}
RetryDecision::DontRetry => return last_error.map(Result::Err),
};
}
}
last_error.map(Result::Err)
}
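    /// Waits until all reachable nodes report the same schema version,
    /// polling every `schema_agreement_interval` between checks.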
pub async fn await_schema_agreement(&self) -> Result<(), QueryError> {
while !self.check_schema_agreement().await? {
tokio::time::sleep(self.schema_agreement_interval).await
}
Ok(())
}
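    /// Like [`Session::await_schema_agreement`], but gives up after `timeout_duration`,
    /// returning `Ok(false)` on timeout and `Ok(true)` once agreement is reached.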
pub async fn await_timed_schema_agreement(
&self,
timeout_duration: Duration,
) -> Result<bool, QueryError> {
timeout(timeout_duration, self.await_schema_agreement())
.await
.map_or(Ok(false), |res| res.and(Ok(true)))
}
async fn schema_agreement_auxilary<ResT, QueryFut>(
&self,
do_query: impl Fn(Arc<Connection>) -> QueryFut,
) -> Result<ResT, QueryError>
where
QueryFut: Future<Output = Result<ResT, QueryError>>,
{
let info = Statement::default();
let config = StatementConfig {
is_idempotent: true,
serial_consistency: Some(SerialConsistency::LocalSerial),
..Default::default()
};
self.run_query(
info,
&config,
|node: Arc<Node>| async move { node.random_connection().await },
do_query,
)
.await
}
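    /// Fetches the schema version from every working connection and returns
    /// `true` if all nodes report the same version.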
pub async fn check_schema_agreement(&self) -> Result<bool, QueryError> {
let connections = self.cluster.get_working_connections().await?;
let handles = connections.iter().map(|c| c.fetch_schema_version());
let versions = try_join_all(handles).await?;
let local_version: Uuid = versions[0];
let in_agreement = versions.into_iter().all(|v| v == local_version);
Ok(in_agreement)
}
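    /// Fetches the schema version reported by a single node, chosen through
    /// the regular query path (load balancing and retries).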
pub async fn fetch_schema_version(&self) -> Result<Uuid, QueryError> {
self.schema_agreement_auxilary(|connection: Arc<Connection>| async move {
connection.fetch_schema_version().await
})
.await
}
}
fn calculate_token(
stmt: &PreparedStatement,
values: &SerializedValues,
) -> Result<Token, QueryError> {
// TODO: take the partitioner of the table that is being queried and calculate the token using
// that partitioner. The below logic gives correct token only for murmur3partitioner
let partition_key = match stmt.compute_partition_key(values) {
Ok(key) => key,
Err(PartitionKeyError::NoPkIndexValue(_, _)) => {
return Err(QueryError::ProtocolError(
"No pk indexes - can't calculate token",
))
}
Err(PartitionKeyError::ValueTooLong(values_len)) => {
return Err(QueryError::BadQuery(BadQuery::ValuesTooLongForKey(
values_len,
u16::max_value().into(),
)))
}
};
Ok(murmur3_token(partition_key))
}
// Resolve the given hostname using a DNS lookup if necessary.
// The resolution may return multiple IPs and the function returns one of them.
// It prefers to return IPv4s first, and only if there are none, IPv6s.
async fn resolve_hostname(hostname: &str) -> Result<SocketAddr, NewSessionError> {
let failed_err = NewSessionError::FailedToResolveAddress(hostname.to_string());
let mut ret = None;
let addrs: Vec<SocketAddr> = match lookup_host(hostname).await {
Ok(addrs) => addrs.collect(),
// Use a default port in case of error, but propagate the original error on failure
Err(e) => lookup_host((hostname, 9042)).await.or(Err(e))?.collect(),
};
for a in addrs {
match a {
SocketAddr::V4(_) => return Ok(a),
_ => {
ret = Some(a);
}
}
}
ret.ok_or(failed_err)
}
index.test.ts
import { renderHook, act } from '@testing-library/react-hooks';
import useOptimisticState, { HookResult } from '../src/index';
type Action = {
state: number;
type: 'success' | 'error';
delay: number;
result?: string;
error?: any;
};
type StateData = HookResult<number, string, any>;
function wait(delay: number) {
return new Promise((resolve, reject) => {
setTimeout(() => {
resolve(null);
}, delay);
});
}
function withSuccess(data: any, delay: number) {
return new Promise((resolve, reject) => {
setTimeout(() => {
resolve(data);
}, delay);
});
}
function withError(err: any, delay: number) {
return new Promise((resolve, reject) => {
setTimeout(() => {
reject(err);
}, delay);
});
}
function processActions(actions: Action[], optimisticHandler: (...args: any[]) => void) {
actions.forEach((action) => {
optimisticHandler(action.state, action);
});
}
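// Routine under test: resolves with the action's result or rejects with its
// error after the configured delay, simulating an async side effect.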
function routine(state: number, action: Action) {
if (action.type === 'success') {
return withSuccess(action.result, action.delay);
} else {
return withError(action.error, action.delay);
}
}
describe('Test optimistic-state', () => {
it('should optimistically update state and give result when resolved', async () => {
const actions: Action[] = [{ state: 1, type: 'success', delay: 300, result: 'Result for 1' }];
const { result } = renderHook(() => useOptimisticState(0, routine));
act(() => {
processActions(actions, result.current.updateState);
});
expect(result.current.state).toEqual(1);
expect(result.current.loading).toEqual(true);
await act(async () => {
await wait(400);
});
expect(result.current.state).toEqual(1);
expect(result.current.loading).toEqual(false);
expect(result.current.result).toEqual('Result for 1');
});
it('should rollback and give error when last promise is rejected', async () => {
const actions: Action[] = [
{ state: 1, type: 'success', delay: 500, result: 'Result for 1' },
{ state: 2, type: 'error', delay: 300, error: 'Error for 2' },
];
const { result } = renderHook(() => useOptimisticState(0, routine));
act(() => {
processActions(actions, result.current.updateState);
});
expect(result.current.state).toEqual(2);
expect(result.current.loading).toEqual(true);
await act(async () => {
await wait(500);
});
expect(result.current.state).toEqual(1);
expect(result.current.loading).toEqual(false);
expect(result.current.result).toEqual('Result for 1');
expect(result.current.error).toEqual('Error for 2');
});
it('should reset error when new routine starts but should maintain the last result', async () => {
const { result } = renderHook(() => useOptimisticState(0, routine));
act(() => {
processActions(
[{ state: 1, type: 'success', delay: 300, result: 'Result for 1' }],
result.current.updateState,
);
});
await act(async () => {
await wait(400);
});
act(() => {
processActions(
[{ state: 2, type: 'error', delay: 300, error: 'Error for 2' }],
result.current.updateState,
);
});
await act(async () => {
await wait(400);
});
expect(result.current.state).toEqual(1);
expect(result.current.error).toEqual('Error for 2');
expect(result.current.result).toEqual('Result for 1');
act(() => {
processActions(
[{ state: 3, type: 'success', delay: 300, result: 'Result for 3' }],
result.current.updateState,
);
});
expect(result.current.state).toEqual(3);
expect(result.current.error).toEqual(undefined);
expect(result.current.result).toEqual('Result for 1');
await act(async () => {
await wait(400);
});
expect(result.current.state).toEqual(3);
expect(result.current.error).toEqual(undefined);
expect(result.current.result).toEqual('Result for 3');
});
});
encoding.go
// Package key provides types and functions to encode and decode keys.
//
// Encoding keys
//
// Each type is encoded in a way that allows ordering to be preserved. That way, if vA < vB,
// where vA and vB are two unencoded values of the same type, then eA < eB, where eA and eB
// are the respective encoded values of vA and vB.
package key
import (
"bytes"
"encoding/base64"
"encoding/binary"
"errors"
"math"
"time"
"github.com/genjidb/genji/document"
)
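// The custom alphabet below is sorted by ASCII value ('-' < digits < uppercase
// < '_' < lowercase), so encoded strings compare in the same order as the
// original bytes; the standard base64 alphabet does not have this property.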
const base64encoder = "-0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz"
var base64Encoding = base64.NewEncoding(base64encoder).WithPadding(base64.NoPadding)
const arrayValueDelim = 0x1f
const arrayEnd = 0x1e
const documentValueDelim = 0x1c
const documentEnd = 0x1d
// AppendBool takes a bool and returns its binary representation.
func AppendBool(buf []byte, x bool) []byte {
if x {
return append(buf, 1)
}
return append(buf, 0)
}
// DecodeBool takes a byte slice and decodes it into a boolean.
func DecodeBool(buf []byte) bool {
return buf[0] == 1
}
// AppendUint64 takes a uint64 and returns its binary representation.
func AppendUint64(buf []byte, x uint64) []byte {
var b [8]byte
binary.BigEndian.PutUint64(b[:], x)
return append(buf, b[:]...)
}
// DecodeUint64 takes a byte slice and decodes it into a uint64.
func DecodeUint64(buf []byte) (uint64, error) {
if len(buf) < 8 {
return 0, errors.New("cannot decode buffer to uint64")
}
return binary.BigEndian.Uint64(buf), nil
}
// AppendInt64 takes an int64 and returns its binary representation.
func AppendInt64(buf []byte, x int64) []byte {
var b [8]byte
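	// Adding math.MaxInt64+1 flips the sign bit, mapping negative values below
	// positive ones in unsigned big-endian order, which preserves key ordering.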
binary.BigEndian.PutUint64(b[:], uint64(x)+math.MaxInt64+1)
return append(buf, b[:]...)
}
// DecodeInt64 takes a byte slice and decodes it into an int64.
func DecodeInt64(buf []byte) (int64, error) {
x, err := DecodeUint64(buf)
x -= math.MaxInt64 + 1
return int64(x), err
}
// AppendFloat64 takes a float64 and returns its binary representation.
func AppendFloat64(buf []byte, x float64) []byte {
fb := math.Float64bits(x)
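	// Flip the sign bit of positive floats, and all bits of negative floats,
	// so that the resulting uint64s sort in the same order as the floats.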
if x >= 0 {
fb ^= 1 << 63
} else {
fb ^= 1<<64 - 1
}
return AppendUint64(buf, fb)
}
// DecodeFloat64 takes a byte slice and decodes it into a float64.
func DecodeFloat64(buf []byte) (float64, error) {
x := binary.BigEndian.Uint64(buf)
if (x & (1 << 63)) != 0 {
x ^= 1 << 63
} else {
x ^= 1<<64 - 1
}
return math.Float64frombits(x), nil
}
// AppendBase64 encodes data into a custom base64 encoding. The resulting slice respects
// natural sort-ordering.
func AppendBase64(buf []byte, data []byte) ([]byte, error) {
b := bytes.NewBuffer(buf)
enc := base64.NewEncoder(base64Encoding, b)
_, err := enc.Write(data)
if err != nil {
return nil, err
}
err = enc.Close()
if err != nil {
return nil, err
}
return b.Bytes(), nil
}
// DecodeBase64 decodes a custom base64 encoded byte slice,
// encoded with AppendBase64.
func DecodeBase64(data []byte) ([]byte, error) {
var buf bytes.Buffer
dec := base64.NewDecoder(base64Encoding, bytes.NewReader(data))
_, err := buf.ReadFrom(dec)
if err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// AppendNumber takes a number value, integer or double, and encodes it in 16 bytes
// so that encoded integers and doubles are naturally ordered.
// Integers will first be encoded using AppendInt64 on 8 bytes, then 8 zero-bytes will be
// appended to them.
// Doubles will first be converted to integer, encoded using AppendInt64,
// then AppendFloat64 will be called with the float value.
func AppendNumber(buf []byte, v document.Value) ([]byte, error) {
if !v.Type.IsNumber() {
return nil, errors.New("expected number type")
}
if v.Type == document.IntegerValue {
// appending 8 zero bytes so that the integer has the same size as the double
// but always lower for the same value.
return append(AppendInt64(buf, v.V.(int64)), 0, 0, 0, 0, 0, 0, 0, 0), nil
}
x := v.V.(float64)
if x > math.MaxInt64 {
return AppendFloat64(AppendInt64(buf, math.MaxInt64), x), nil
}
return AppendFloat64(AppendInt64(buf, int64(x)), x), nil
}
// AppendArray encodes an array into a sort-ordered binary representation.
func AppendArray(buf []byte, a document.Array) ([]byte, error) {
err := a.Iterate(func(i int, value document.Value) error {
var err error
if i > 0 {
buf = append(buf, arrayValueDelim)
}
buf, err = AppendValue(buf, value)
if err != nil {
return err
}
return nil
})
if err != nil {
return nil, err
}
buf = append(buf, arrayEnd)
return buf, nil
}
func decodeValue(data []byte, delim, end byte) (document.Value, int, error) {
t := document.ValueType(data[0])
i := 1
switch t {
case document.ArrayValue:
a, n, err := decodeArray(data[i:])
i += n
if err != nil {
return document.Value{}, i, err
}
return document.NewArrayValue(a), i, nil
case document.DocumentValue:
d, n, err := decodeDocument(data[i:])
i += n
if err != nil {
return document.Value{}, i, err
}
return document.NewDocumentValue(d), i, nil
case document.NullValue:
case document.BoolValue:
i++
case document.DoubleValue:
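		// numbers occupy 16 bytes: 8 for the integer part, 8 for the float part
		// (see AppendNumber)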
i += 16
case document.DurationValue:
i += 8
case document.BlobValue, document.TextValue:
for i < len(data) && data[i] != delim && data[i] != end {
i++
}
default:
return document.Value{}, 0, errors.New("invalid type character")
}
v, err := DecodeValue(data[:i])
return v, i, err
}
// DecodeArray decodes an array.
func DecodeArray(data []byte) (document.Array, error) {
a, _, err := decodeArray(data)
return a, err
}
func decodeArray(data []byte) (document.Array, int, error) {
var vb document.ValueBuffer
var readCount int
for len(data) > 0 && data[0] != arrayEnd {
v, i, err := decodeValue(data, arrayValueDelim, arrayEnd)
if err != nil {
return nil, i, err
}
vb = vb.Append(v)
// skip the delimiter
if data[i] == arrayValueDelim {
i++
}
readCount += i
data = data[i:]
}
// skip the array end character
readCount++
return vb, readCount, nil
}
// AppendDocument encodes a document into a sort-ordered binary representation.
func AppendDocument(buf []byte, d document.Document) ([]byte, error) {
var i int
err := d.Iterate(func(field string, value document.Value) error {
var err error
if i > 0 {
buf = append(buf, documentValueDelim)
}
buf, err = AppendBase64(buf, []byte(field))
if err != nil {
return err
}
buf = append(buf, documentValueDelim)
buf, err = AppendValue(buf, value)
if err != nil {
return err
}
i++
return nil
})
if err != nil {
return nil, err
}
buf = append(buf, documentEnd)
return buf, nil
}
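// Encoded layout (illustrative): base64(field1) ++ delim ++ AppendValue(v1) ++
// delim ++ base64(field2) ++ delim ++ AppendValue(v2) ++ ... ++ documentEnd.
// Field names go through AppendBase64 so that their raw bytes cannot be
// mistaken for the documentValueDelim or documentEnd control bytes while
// decodeDocument scans for field boundaries.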
// DecodeDocument decodes a document.
func DecodeDocument(data []byte) (document.Document, error) {
a, _, err := decodeDocument(data)
return a, err
}
func decodeDocument(data []byte) (document.Document, int, error) {
var fb document.FieldBuffer
var readCount int
for len(data) > 0 && data[0] != documentEnd {
i := 0
for i < len(data) && data[i] != documentValueDelim {
i++
}
field, err := DecodeBase64(data[:i])
if err != nil {
return nil, 0, err
}
// skip the delimiter
i++
if i >= len(data) {
return nil, 0, errors.New("invalid end of input")
}
readCount += i
data = data[i:]
v, i, err := decodeValue(data, documentValueDelim, documentEnd)
if err != nil {
return nil, i, err
}
fb.Add(string(field), v)
// skip the delimiter
if data[i] == documentValueDelim {
i++
}
readCount += i
data = data[i:]
}
// skip the document end character
readCount++
return &fb, readCount, nil
}
// AppendValue encodes a value as a key.
func AppendValue(buf []byte, v document.Value) ([]byte, error) {
if v.Type == document.IntegerValue || v.Type == document.DoubleValue {
buf = append(buf, byte(document.DoubleValue))
} else {
buf = append(buf, byte(v.Type))
}
switch v.Type {
case document.BlobValue:
return AppendBase64(buf, v.V.([]byte))
case document.TextValue:
text := v.V.(string)
return AppendBase64(buf, []byte(text))
case document.BoolValue:
return AppendBool(buf, v.V.(bool)), nil
case document.IntegerValue, document.DoubleValue:
return AppendNumber(buf, v)
case document.DurationValue:
return AppendInt64(buf, int64(v.V.(time.Duration))), nil
case document.NullValue:
return buf, nil
case document.ArrayValue:
return AppendArray(buf, v.V.(document.Array))
case document.DocumentValue:
return AppendDocument(buf, v.V.(document.Document))
}
return nil, errors.New("cannot encode type " + v.Type.String() + " as key")
}
// DecodeValue decodes data encoded with AppendValue, reading the value type
// from the first byte.
func DecodeValue(data []byte) (document.Value, error) {
t := document.ValueType(data[0])
data = data[1:]
switch t {
case document.BlobValue:
t, err := DecodeBase64(data)
if err != nil {
return document.Value{}, err
}
return document.NewBlobValue(t), nil
case document.TextValue:
t, err := DecodeBase64(data)
if err != nil {
return document.Value{}, err
}
return document.NewTextValue(string(t)), nil
case document.BoolValue:
return document.NewBoolValue(DecodeBool(data)), nil
case document.DoubleValue:
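		// AppendValue tags both integers and doubles as DoubleValue, and
		// AppendNumber pads integers with 8 trailing zero bytes. If the last
		// 8 bytes are all zero, the original value was an integer; otherwise
		// the float lives in those last 8 bytes.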
if bytes.Equal(data[8:], []byte{0, 0, 0, 0, 0, 0, 0, 0}) {
x, err := DecodeInt64(data[:8])
if err != nil {
return document.Value{}, err
}
return document.NewIntegerValue(x), nil
}
x, err := DecodeFloat64(data[8:])
if err != nil {
return document.Value{}, err
}
return document.NewDoubleValue(x), nil
case document.DurationValue:
x, err := DecodeInt64(data)
if err != nil {
return document.Value{}, err
}
return document.NewDurationValue(time.Duration(x)), nil
case document.NullValue:
return document.NewNullValue(), nil
case document.ArrayValue:
a, err := DecodeArray(data)
		if err != nil {
			return document.Value{}, err
		}
return document.NewArrayValue(a), nil
case document.DocumentValue:
d, err := DecodeDocument(data)
if err != nil {
return document.Value{}, err
}
return document.NewDocumentValue(d), nil
}
return document.Value{}, errors.New("unknown type")
}
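// Round-trip sketch (illustrative):
//
//	buf, _ := AppendValue(nil, document.NewIntegerValue(42))
//	v, _ := DecodeValue(buf) // v is an integer value holding 42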
// Append encodes a value of the type t as a key.
// The encoded key doesn't include type information.
func Append(buf []byte, t document.ValueType, v interface{}) ([]byte, error) {
switch t {
case document.BlobValue:
return append(buf, v.([]byte)...), nil
case document.TextValue:
return append(buf, v.(string)...), nil
case document.BoolValue:
return AppendBool(buf, v.(bool)), nil
case document.IntegerValue:
return AppendInt64(buf, v.(int64)), nil
case document.DoubleValue:
return AppendFloat64(buf, v.(float64)), nil
case document.DurationValue:
return AppendInt64(buf, int64(v.(time.Duration))), nil
case document.NullValue:
return buf, nil
case document.ArrayValue:
return AppendArray(buf, v.(document.Array))
case document.DocumentValue:
return AppendDocument(buf, v.(document.Document))
}
return nil, errors.New("cannot encode type " + t.String() + " as key")
}
// Decode takes some encoded data and decodes it to the target type t.
func Decode(t document.ValueType, data []byte) (document.Value, error) {
switch t {
case document.BlobValue:
return document.NewBlobValue(data), nil
case document.TextValue:
return document.NewTextValue(string(data)), nil
case document.BoolValue:
return document.NewBoolValue(DecodeBool(data)), nil
case document.IntegerValue:
x, err := DecodeInt64(data)
if err != nil {
return document.Value{}, err
}
return document.NewIntegerValue(x), nil
case document.DoubleValue:
x, err := DecodeFloat64(data)
if err != nil {
return document.Value{}, err
}
return document.NewDoubleValue(x), nil
case document.DurationValue:
x, err := DecodeInt64(data)
if err != nil {
return document.Value{}, err
}
return document.NewDurationValue(time.Duration(x)), nil
case document.NullValue:
return document.NewNullValue(), nil
case document.ArrayValue:
a, err := DecodeArray(data)
if err != nil {
return document.Value{}, err
}
return document.NewArrayValue(a), nil
case document.DocumentValue:
d, err := DecodeDocument(data)
if err != nil {
return document.Value{}, err
}
return document.NewDocumentValue(d), nil
}
return document.Value{}, errors.New("unknown type")
}
|
lib.rs
|
//! VirtIO guest drivers.
#![no_std]
#![deny(unused_must_use, missing_docs)]
#![allow(clippy::identity_op)]
#![allow(dead_code)]
// #[macro_use]
extern crate log;
extern crate alloc;
mod blk;
mod console;
mod gpu;
mod hal;
mod header;
mod input;
mod net;
mod queue;
pub use self::blk::VirtIOBlk;
pub use self::console::VirtIOConsole;
pub use self::gpu::VirtIOGpu;
pub use self::header::*;
pub use self::input::{InputConfigSelect, InputEvent, VirtIOInput};
pub use self::net::VirtIONet;
use self::queue::VirtQueue;
use core::mem::size_of;
use hal::*;
const PAGE_SIZE: usize = 0x1000;
const MAX_QUEUE_SIZE: usize = 4096;
/// The type returned by driver methods.
pub type Result<T = ()> = core::result::Result<T, Error>;
// pub struct Error {
// kind: ErrorKind,
// reason: &'static str,
// }
/// The error type of VirtIO drivers.
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub enum Error {
/// The buffer is too small.
BufferTooSmall,
/// The device is not ready.
NotReady,
/// The queue is already in use.
AlreadyUsed,
/// Invalid parameter.
InvalidParam,
/// Failed to alloc DMA memory.
DmaError,
/// I/O Error
IoError,
}
/// Align `size` up to a page.
fn align_up(size: usize) -> usize {
    (size + PAGE_SIZE - 1) & !(PAGE_SIZE - 1)
}
/// The number of pages required to hold `size` bytes.
fn pages(size: usize) -> usize {
(size + PAGE_SIZE - 1) / PAGE_SIZE
}
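// Illustrative values, with PAGE_SIZE == 0x1000:
// align_up(0x0fff) == 0x1000, align_up(0x1000) == 0x1000;
// pages(0x0fff) == 1, pages(0x1001) == 2.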
/// Convert a struct into buffer.
unsafe trait AsBuf: Sized {
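    // Safety contract for this unsafe trait: implementors must be plain old
    // data, with every byte (including padding) valid to read and, for
    // as_buf_mut, to write, since both methods reinterpret the whole struct
    // as a raw byte slice.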
    fn as_buf(&self) -> &[u8] {
unsafe { core::slice::from_raw_parts(self as *const _ as _, size_of::<Self>()) }
}
fn as_buf_mut(&mut self) -> &mut [u8] {
unsafe { core::slice::from_raw_parts_mut(self as *mut _ as _, size_of::<Self>()) }
}
}
|
oc.js
|
// This file was automatically generated. Do not modify.
'use strict';
goog.provide('Blockly.Msg.oc');
goog.require('Blockly.Msg');
Blockly.Msg.ADD_COMMENT = "Apondre un comentari";
Blockly.Msg.CHANGE_VALUE_TITLE = "Modificar la valor :";
Blockly.Msg.CLEAN_UP = "Netejar los blòts";
Blockly.Msg.COLLAPSE_ALL = "Redusir los blòts";
Blockly.Msg.COLLAPSE_BLOCK = "Redusir lo blòt";
Blockly.Msg.COLOUR_BLEND_COLOUR1 = "color 1";
Blockly.Msg.COLOUR_BLEND_COLOUR2 = "color 2";
Blockly.Msg.COLOUR_BLEND_HELPURL = "http://meyerweb.com/eric/tools/color-blend/"; // untranslated
Blockly.Msg.COLOUR_BLEND_RATIO = "ratio";
Blockly.Msg.COLOUR_BLEND_TITLE = "mesclar";
Blockly.Msg.COLOUR_BLEND_TOOLTIP = "Blends two colours together with a given ratio (0.0 - 1.0)."; // untranslated
Blockly.Msg.COLOUR_PICKER_HELPURL = "https://oc.wikipedia.org/wiki/Color";
Blockly.Msg.COLOUR_PICKER_TOOLTIP = "Choose a colour from the palette."; // untranslated
Blockly.Msg.COLOUR_RANDOM_HELPURL = "http://randomcolour.com"; // untranslated
Blockly.Msg.COLOUR_RANDOM_TITLE = "color aleatòria";
Blockly.Msg.COLOUR_RANDOM_TOOLTIP = "Causir una color a l'azard.";
Blockly.Msg.COLOUR_RGB_BLUE = "blau";
Blockly.Msg.COLOUR_RGB_GREEN = "verd";
Blockly.Msg.COLOUR_RGB_HELPURL = "http://www.december.com/html/spec/colorper.html"; // untranslated
Blockly.Msg.COLOUR_RGB_RED = "roge";
Blockly.Msg.COLOUR_RGB_TITLE = "colorar amb";
Blockly.Msg.COLOUR_RGB_TOOLTIP = "Create a colour with the specified amount of red, green, and blue. All values must be between 0 and 100."; // untranslated
Blockly.Msg.CONTROLS_FLOW_STATEMENTS_HELPURL = "https://github.com/google/blockly/wiki/Loops#loop-termination-blocks"; // untranslated
Blockly.Msg.CONTROLS_FLOW_STATEMENTS_OPERATOR_BREAK = "break out of loop"; // untranslated
Blockly.Msg.CONTROLS_FLOW_STATEMENTS_OPERATOR_CONTINUE = "continue with next iteration of loop"; // untranslated
Blockly.Msg.CONTROLS_FLOW_STATEMENTS_TOOLTIP_BREAK = "Break out of the containing loop."; // untranslated
Blockly.Msg.CONTROLS_FLOW_STATEMENTS_TOOLTIP_CONTINUE = "Skip the rest of this loop, and continue with the next iteration."; // untranslated
Blockly.Msg.CONTROLS_FLOW_STATEMENTS_WARNING = "Warning: This block may only be used within a loop."; // untranslated
Blockly.Msg.CONTROLS_FOREACH_HELPURL = "https://github.com/google/blockly/wiki/Loops#for-each"; // untranslated
Blockly.Msg.CONTROLS_FOREACH_TITLE = "per cada element %1 dins la lista %2";
Blockly.Msg.CONTROLS_FOREACH_TOOLTIP = "For each item in a list, set the variable '%1' to the item, and then do some statements."; // untranslated
Blockly.Msg.CONTROLS_FOR_HELPURL = "https://github.com/google/blockly/wiki/Loops#count-with"; // untranslated
Blockly.Msg.CONTROLS_FOR_TITLE = "comptar amb %1 de %2 a %3 per %4";
Blockly.Msg.CONTROLS_FOR_TOOLTIP = "Have the variable '%1' take on the values from the start number to the end number, counting by the specified interval, and do the specified blocks."; // untranslated
Blockly.Msg.CONTROLS_IF_ELSEIF_TOOLTIP = "Add a condition to the if block."; // untranslated
Blockly.Msg.CONTROLS_IF_ELSE_TOOLTIP = "Add a final, catch-all condition to the if block."; // untranslated
Blockly.Msg.CONTROLS_IF_HELPURL = "https://github.com/google/blockly/wiki/IfElse"; // untranslated
Blockly.Msg.CONTROLS_IF_IF_TOOLTIP = "Add, remove, or reorder sections to reconfigure this if block."; // untranslated
Blockly.Msg.CONTROLS_IF_MSG_ELSE = "siquenon";
Blockly.Msg.CONTROLS_IF_MSG_ELSEIF = "siquenon se";
Blockly.Msg.CONTROLS_IF_MSG_IF = "se";
Blockly.Msg.CONTROLS_IF_TOOLTIP_1 = "If a value is true, then do some statements."; // untranslated
Blockly.Msg.CONTROLS_IF_TOOLTIP_2 = "If a value is true, then do the first block of statements. Otherwise, do the second block of statements."; // untranslated
Blockly.Msg.CONTROLS_IF_TOOLTIP_3 = "If the first value is true, then do the first block of statements. Otherwise, if the second value is true, do the second block of statements."; // untranslated
Blockly.Msg.CONTROLS_IF_TOOLTIP_4 = "If the first value is true, then do the first block of statements. Otherwise, if the second value is true, do the second block of statements. If none of the values are true, do the last block of statements."; // untranslated
Blockly.Msg.CONTROLS_REPEAT_HELPURL = "https://oc.wikipedia.org/wiki/For_loop";
Blockly.Msg.CONTROLS_REPEAT_INPUT_DO = "far";
Blockly.Msg.CONTROLS_REPEAT_TITLE = "repetir %1 còps";
Blockly.Msg.CONTROLS_REPEAT_TOOLTIP = "Do some statements several times."; // untranslated
Blockly.Msg.CONTROLS_WHILEUNTIL_HELPURL = "https://github.com/google/blockly/wiki/Loops#repeat"; // untranslated
Blockly.Msg.CONTROLS_WHILEUNTIL_OPERATOR_UNTIL = "repetir fins a";
Blockly.Msg.CONTROLS_WHILEUNTIL_OPERATOR_WHILE = "repetir tant que";
Blockly.Msg.CONTROLS_WHILEUNTIL_TOOLTIP_UNTIL = "While a value is false, then do some statements."; // untranslated
Blockly.Msg.CONTROLS_WHILEUNTIL_TOOLTIP_WHILE = "While a value is true, then do some statements."; // untranslated
Blockly.Msg.DELETE_ALL_BLOCKS = "Suprimir totes los %1 blòts ?";
Blockly.Msg.DELETE_BLOCK = "Suprimir lo blòt";
Blockly.Msg.DELETE_VARIABLE = "Delete the '%1' variable"; // untranslated
Blockly.Msg.DELETE_VARIABLE_CONFIRMATION = "Delete %1 uses of the '%2' variable?"; // untranslated
Blockly.Msg.DELETE_X_BLOCKS = "Suprimir %1 blòts";
Blockly.Msg.DICTS_CREATE_EMPTY_TITLE = "empty dictionary"; // untranslated
Blockly.Msg.DICTS_CREATE_WITH_CONTAINER_TITLE_ADD = "Create Dictionary"; // untranslated
Blockly.Msg.DICTS_CREATE_WITH_CONTAINER_TOOLTIP = ""; // untranslated
Blockly.Msg.DICTS_CREATE_WITH_INPUT_WITH = "create dict with"; // untranslated
Blockly.Msg.DICTS_CREATE_WITH_ITEM_KEY = "key"; // untranslated
Blockly.Msg.DICTS_CREATE_WITH_ITEM_MAPPING = ":"; // untranslated
Blockly.Msg.DICTS_CREATE_WITH_ITEM_TITLE = "key/value"; // untranslated
Blockly.Msg.DICTS_CREATE_WITH_ITEM_TOOLTIP = ""; // untranslated
Blockly.Msg.DICTS_CREATE_WITH_TOOLTIP = ""; // untranslated
Blockly.Msg.DICT_GET = "get key"; // untranslated
Blockly.Msg.DICT_GET_TO = "of"; // untranslated
Blockly.Msg.DICT_KEYS = "get all keys from"; // untranslated
Blockly.Msg.DISABLE_BLOCK = "Desactivar lo blòt";
Blockly.Msg.DUPLICATE_BLOCK = "Duplicar";
Blockly.Msg.ENABLE_BLOCK = "Activar lo blòt";
Blockly.Msg.EXPAND_ALL = "Desvolopar los blòts";
Blockly.Msg.EXPAND_BLOCK = "Desvolopar lo blòt";
Blockly.Msg.EXTERNAL_INPUTS = "Entradas extèrnas";
Blockly.Msg.HELP = "Ajuda";
Blockly.Msg.INLINE_INPUTS = "Entradas en linha";
Blockly.Msg.LISTS_APPEND = "append item"; // untranslated
Blockly.Msg.LISTS_APPEND_HELPURL = "http://google.com"; // untranslated
Blockly.Msg.LISTS_APPEND_TO = "to list"; // untranslated
Blockly.Msg.LISTS_APPEND_TOOLTIP = "Append an element to a list"; // untranslated
Blockly.Msg.LISTS_CREATE_EMPTY_HELPURL = "https://github.com/google/blockly/wiki/Lists#create-empty-list"; // untranslated
Blockly.Msg.LISTS_CREATE_EMPTY_TITLE = "create empty list"; // untranslated
Blockly.Msg.LISTS_CREATE_EMPTY_TOOLTIP = "Returns a list, of length 0, containing no data records"; // untranslated
Blockly.Msg.LISTS_CREATE_WITH_CONTAINER_TITLE_ADD = "lista";
Blockly.Msg.LISTS_CREATE_WITH_CONTAINER_TOOLTIP = "Add, remove, or reorder sections to reconfigure this list block."; // untranslated
Blockly.Msg.LISTS_CREATE_WITH_HELPURL = "https://github.com/google/blockly/wiki/Lists#create-list-with"; // untranslated
Blockly.Msg.LISTS_CREATE_WITH_INPUT_WITH = "crear una lista amb";
Blockly.Msg.LISTS_CREATE_WITH_ITEM_TOOLTIP = "Add an item to the list."; // untranslated
Blockly.Msg.LISTS_CREATE_WITH_TOOLTIP = "Create a list with any number of items."; // untranslated
Blockly.Msg.LISTS_GET_INDEX_FIRST = "primièr";
Blockly.Msg.LISTS_GET_INDEX_FROM_END = "# dempuèi la fin";
Blockly.Msg.LISTS_GET_INDEX_FROM_START = "#"; // untranslated
Blockly.Msg.LISTS_GET_INDEX_GET = "obténer";
Blockly.Msg.LISTS_GET_INDEX_GET_REMOVE = "obténer e suprimir";
Blockly.Msg.LISTS_GET_INDEX_LAST = "darrièr";
Blockly.Msg.LISTS_GET_INDEX_RANDOM = "aleatòri";
Blockly.Msg.LISTS_GET_INDEX_REMOVE = "suprimit";
Blockly.Msg.LISTS_GET_INDEX_TAIL = ""; // untranslated
Blockly.Msg.LISTS_GET_INDEX_TOOLTIP_GET_FIRST = "Returns the first item in a list."; // untranslated
Blockly.Msg.LISTS_GET_INDEX_TOOLTIP_GET_FROM = "Returns the item at the specified position in a list."; // untranslated
Blockly.Msg.LISTS_GET_INDEX_TOOLTIP_GET_LAST = "Returns the last item in a list."; // untranslated
Blockly.Msg.LISTS_GET_INDEX_TOOLTIP_GET_RANDOM = "Returns a random item in a list."; // untranslated
Blockly.Msg.LISTS_GET_INDEX_TOOLTIP_GET_REMOVE_FIRST = "Removes and returns the first item in a list."; // untranslated
Blockly.Msg.LISTS_GET_INDEX_TOOLTIP_GET_REMOVE_FROM = "Removes and returns the item at the specified position in a list."; // untranslated
Blockly.Msg.LISTS_GET_INDEX_TOOLTIP_GET_REMOVE_LAST = "Removes and returns the last item in a list."; // untranslated
Blockly.Msg.LISTS_GET_INDEX_TOOLTIP_GET_REMOVE_RANDOM = "Removes and returns a random item in a list."; // untranslated
Blockly.Msg.LISTS_GET_INDEX_TOOLTIP_REMOVE_FIRST = "Removes the first item in a list."; // untranslated
Blockly.Msg.LISTS_GET_INDEX_TOOLTIP_REMOVE_FROM = "Removes the item at the specified position in a list."; // untranslated
Blockly.Msg.LISTS_GET_INDEX_TOOLTIP_REMOVE_LAST = "Removes the last item in a list."; // untranslated
Blockly.Msg.LISTS_GET_INDEX_TOOLTIP_REMOVE_RANDOM = "Removes a random item in a list."; // untranslated
Blockly.Msg.LISTS_GET_SUBLIST_END_FROM_END = "fins a # dempuèi la fin";
Blockly.Msg.LISTS_GET_SUBLIST_END_FROM_START = "fins a #";
Blockly.Msg.LISTS_GET_SUBLIST_END_LAST = "fins a la fin";
Blockly.Msg.LISTS_GET_SUBLIST_HELPURL = "https://github.com/google/blockly/wiki/Lists#getting-a-sublist"; // untranslated
Blockly.Msg.LISTS_GET_SUBLIST_START_FIRST = "get sub-list from first"; // untranslated
Blockly.Msg.LISTS_GET_SUBLIST_START_FROM_END = "get sub-list from # from end"; // untranslated
Blockly.Msg.LISTS_GET_SUBLIST_START_FROM_START = "get sub-list from #"; // untranslated
Blockly.Msg.LISTS_GET_SUBLIST_TAIL = ""; // untranslated
Blockly.Msg.LISTS_GET_SUBLIST_TOOLTIP = "Creates a copy of the specified portion of a list."; // untranslated
Blockly.Msg.LISTS_INDEX_FROM_END_TOOLTIP = "%1 is the last item."; // untranslated
Blockly.Msg.LISTS_INDEX_FROM_START_TOOLTIP = "%1 is the first item."; // untranslated
Blockly.Msg.LISTS_INDEX_OF_FIRST = "find first occurrence of item"; // untranslated
Blockly.Msg.LISTS_INDEX_OF_HELPURL = "https://github.com/google/blockly/wiki/Lists#getting-items-from-a-list"; // untranslated
Blockly.Msg.LISTS_INDEX_OF_LAST = "find last occurrence of item"; // untranslated
Blockly.Msg.LISTS_INDEX_OF_TOOLTIP = "Returns the index of the first/last occurrence of the item in the list. Returns %1 if item is not found."; // untranslated
Blockly.Msg.LISTS_INLIST = "dins la lista";
Blockly.Msg.LISTS_ISEMPTY_HELPURL = "https://github.com/google/blockly/wiki/Lists#is-empty"; // untranslated
Blockly.Msg.LISTS_ISEMPTY_TITLE = "%1 is empty"; // untranslated
Blockly.Msg.LISTS_ISEMPTY_TOOLTIP = "Returns true if the list is empty."; // untranslated
Blockly.Msg.LISTS_LENGTH_HELPURL = "https://github.com/google/blockly/wiki/Lists#length-of"; // untranslated
Blockly.Msg.LISTS_LENGTH_TITLE = "length of %1"; // untranslated
Blockly.Msg.LISTS_LENGTH_TOOLTIP = "Returns the length of a list."; // untranslated
Blockly.Msg.LISTS_REPEAT_HELPURL = "https://github.com/google/blockly/wiki/Lists#create-list-with"; // untranslated
Blockly.Msg.LISTS_REPEAT_TITLE = "create list with item %1 repeated %2 times"; // untranslated
Blockly.Msg.LISTS_REPEAT_TOOLTIP = "Creates a list consisting of the given value repeated the specified number of times."; // untranslated
Blockly.Msg.LISTS_SET_INDEX_HELPURL = "https://github.com/google/blockly/wiki/Lists#in-list--set"; // untranslated
Blockly.Msg.LISTS_SET_INDEX_INPUT_TO = "coma";
Blockly.Msg.LISTS_SET_INDEX_INSERT = "inserir en";
Blockly.Msg.LISTS_SET_INDEX_SET = "metre";
Blockly.Msg.LISTS_SET_INDEX_TOOLTIP_INSERT_FIRST = "Inserts the item at the start of a list."; // untranslated
Blockly.Msg.LISTS_SET_INDEX_TOOLTIP_INSERT_FROM = "Inserts the item at the specified position in a list."; // untranslated
Blockly.Msg.LISTS_SET_INDEX_TOOLTIP_INSERT_LAST = "Append the item to the end of a list."; // untranslated
Blockly.Msg.LISTS_SET_INDEX_TOOLTIP_INSERT_RANDOM = "Inserts the item randomly in a list."; // untranslated
Blockly.Msg.LISTS_SET_INDEX_TOOLTIP_SET_FIRST = "Sets the first item in a list."; // untranslated
Blockly.Msg.LISTS_SET_INDEX_TOOLTIP_SET_FROM = "Sets the item at the specified position in a list."; // untranslated
Blockly.Msg.LISTS_SET_INDEX_TOOLTIP_SET_LAST = "Sets the last item in a list."; // untranslated
Blockly.Msg.LISTS_SET_INDEX_TOOLTIP_SET_RANDOM = "Sets a random item in a list."; // untranslated
Blockly.Msg.LISTS_SORT_HELPURL = "https://github.com/google/blockly/wiki/Lists#sorting-a-list"; // untranslated
Blockly.Msg.LISTS_SORT_ORDER_ASCENDING = "creissent";
Blockly.Msg.LISTS_SORT_ORDER_DESCENDING = "descreissent";
Blockly.Msg.LISTS_SORT_TITLE = "sort %1 %2 %3"; // untranslated
Blockly.Msg.LISTS_SORT_TOOLTIP = "Sort a copy of a list."; // untranslated
Blockly.Msg.LISTS_SORT_TYPE_IGNORECASE = "alphabetic, ignore case"; // untranslated
Blockly.Msg.LISTS_SORT_TYPE_NUMERIC = "numeric";
Blockly.Msg.LISTS_SORT_TYPE_TEXT = "alfabetic";
Blockly.Msg.LISTS_SPLIT_HELPURL = "https://github.com/google/blockly/wiki/Lists#splitting-strings-and-joining-lists"; // untranslated
Blockly.Msg.LISTS_SPLIT_LIST_FROM_TEXT = "make list from text"; // untranslated
Blockly.Msg.LISTS_SPLIT_TEXT_FROM_LIST = "make text from list"; // untranslated
Blockly.Msg.LISTS_SPLIT_TOOLTIP_JOIN = "Join a list of texts into one text, separated by a delimiter."; // untranslated
Blockly.Msg.LISTS_SPLIT_TOOLTIP_SPLIT = "Split text into a list of texts, breaking at each delimiter."; // untranslated
Blockly.Msg.LISTS_SPLIT_WITH_DELIMITER = "with delimiter"; // untranslated
Blockly.Msg.LOGIC_BOOLEAN_FALSE = "fals";
Blockly.Msg.LOGIC_BOOLEAN_HELPURL = "https://github.com/google/blockly/wiki/Logic#values"; // untranslated
Blockly.Msg.LOGIC_BOOLEAN_TOOLTIP = "Returns either true or false."; // untranslated
Blockly.Msg.LOGIC_BOOLEAN_TRUE = "verai";
Blockly.Msg.LOGIC_COMPARE_HELPURL = "https://en.wikipedia.org/wiki/Inequality_(mathematics)"; // untranslated
Blockly.Msg.LOGIC_COMPARE_TOOLTIP_EQ = "Return true if both inputs equal each other."; // untranslated
Blockly.Msg.LOGIC_COMPARE_TOOLTIP_GT = "Return true if the first input is greater than the second input."; // untranslated
Blockly.Msg.LOGIC_COMPARE_TOOLTIP_GTE = "Return true if the first input is greater than or equal to the second input."; // untranslated
Blockly.Msg.LOGIC_COMPARE_TOOLTIP_LT = "Return true if the first input is smaller than the second input."; // untranslated
Blockly.Msg.LOGIC_COMPARE_TOOLTIP_LTE = "Return true if the first input is smaller than or equal to the second input."; // untranslated
Blockly.Msg.LOGIC_COMPARE_TOOLTIP_NEQ = "Return true if both inputs are not equal to each other."; // untranslated
Blockly.Msg.LOGIC_NEGATE_HELPURL = "https://github.com/google/blockly/wiki/Logic#not"; // untranslated
Blockly.Msg.LOGIC_NEGATE_TITLE = "pas %1";
Blockly.Msg.LOGIC_NEGATE_TOOLTIP = "Returns true if the input is false. Returns false if the input is true."; // untranslated
Blockly.Msg.LOGIC_NULL = "nul";
Blockly.Msg.LOGIC_NULL_HELPURL = "https://en.wikipedia.org/wiki/Nullable_type"; // untranslated
Blockly.Msg.LOGIC_NULL_TOOLTIP = "Renvia nul.";
Blockly.Msg.LOGIC_OPERATION_AND = "e";
Blockly.Msg.LOGIC_OPERATION_HELPURL = "https://github.com/google/blockly/wiki/Logic#logical-operations"; // untranslated
Blockly.Msg.LOGIC_OPERATION_OR = "o";
Blockly.Msg.LOGIC_OPERATION_TOOLTIP_AND = "Return true if both inputs are true."; // untranslated
Blockly.Msg.LOGIC_OPERATION_TOOLTIP_OR = "Return true if at least one of the inputs is true."; // untranslated
Blockly.Msg.LOGIC_TERNARY_CONDITION = "tèst";
Blockly.Msg.LOGIC_TERNARY_HELPURL = "https://en.wikipedia.org/wiki/%3F:"; // untranslated
Blockly.Msg.LOGIC_TERNARY_IF_FALSE = "se fals";
Blockly.Msg.LOGIC_TERNARY_IF_TRUE = "se verai";
Blockly.Msg.LOGIC_TERNARY_TOOLTIP = "Check the condition in 'test'. If the condition is true, returns the 'if true' value; otherwise returns the 'if false' value."; // untranslated
Blockly.Msg.MATH_ADDITION_SYMBOL = "+"; // untranslated
Blockly.Msg.MATH_ARITHMETIC_HELPURL = "https://oc.wikipedia.org/wiki/Aritmetica";
Blockly.Msg.MATH_ARITHMETIC_TOOLTIP_ADD = "Return the sum of the two numbers."; // untranslated
Blockly.Msg.MATH_ARITHMETIC_TOOLTIP_DIVIDE = "Return the quotient of the two numbers."; // untranslated
Blockly.Msg.MATH_ARITHMETIC_TOOLTIP_MINUS = "Return the difference of the two numbers."; // untranslated
Blockly.Msg.MATH_ARITHMETIC_TOOLTIP_MULTIPLY = "Return the product of the two numbers."; // untranslated
Blockly.Msg.MATH_ARITHMETIC_TOOLTIP_POWER = "Return the first number raised to the power of the second number."; // untranslated
Blockly.Msg.MATH_CHANGE_HELPURL = "https://en.wikipedia.org/wiki/Programming_idiom#Incrementing_a_counter"; // untranslated
Blockly.Msg.MATH_CHANGE_TITLE = "incrementar %1 per %2";
Blockly.Msg.MATH_CHANGE_TOOLTIP = "Add a number to variable '%1'."; // untranslated
Blockly.Msg.MATH_CONSTANT_HELPURL = "https://en.wikipedia.org/wiki/Mathematical_constant"; // untranslated
Blockly.Msg.MATH_CONSTANT_TOOLTIP = "Return one of the common constants: π (3.141…), e (2.718…), φ (1.618…), sqrt(2) (1.414…), sqrt(½) (0.707…), or ∞ (infinity)."; // untranslated
Blockly.Msg.MATH_CONSTRAIN_HELPURL = "https://en.wikipedia.org/wiki/Clamping_%28graphics%29"; // untranslated
Blockly.Msg.MATH_CONSTRAIN_TITLE = "constrain %1 low %2 high %3"; // untranslated
Blockly.Msg.MATH_CONSTRAIN_TOOLTIP = "Constrain a number to be between the specified limits (inclusive)."; // untranslated
Blockly.Msg.MATH_DIVISION_SYMBOL = "/"; // untranslated
Blockly.Msg.MATH_IS_DIVISIBLE_BY = "es devesible per";
Blockly.Msg.MATH_IS_EVEN = "es par";
Blockly.Msg.MATH_IS_NEGATIVE = "es negatiu";
Blockly.Msg.MATH_IS_ODD = "es impar";
Blockly.Msg.MATH_IS_POSITIVE = "es positiu";
Blockly.Msg.MATH_IS_PRIME = "es primièr";
Blockly.Msg.MATH_IS_TOOLTIP = "Check if a number is an even, odd, prime, whole, positive, negative, or if it is divisible by certain number. Returns true or false."; // untranslated
Blockly.Msg.MATH_IS_WHOLE = "es entièr";
Blockly.Msg.MATH_MODULO_HELPURL = "https://en.wikipedia.org/wiki/Modulo_operation"; // untranslated
Blockly.Msg.MATH_MODULO_TITLE = "remainder of %1 ÷ %2"; // untranslated
Blockly.Msg.MATH_MODULO_TOOLTIP = "Return the remainder from dividing the two numbers."; // untranslated
Blockly.Msg.MATH_MULTIPLICATION_SYMBOL = "×"; // untranslated
Blockly.Msg.MATH_NUMBER_HELPURL = "https://oc.wikipedia.org/wiki/Nombre";
Blockly.Msg.MATH_NUMBER_TOOLTIP = "Un nombre.";
Blockly.Msg.MATH_ONLIST_HELPURL = ""; // untranslated
Blockly.Msg.MATH_ONLIST_OPERATOR_AVERAGE = "mejana de la lista";
Blockly.Msg.MATH_ONLIST_OPERATOR_MAX = "maximum de la lista";
Blockly.Msg.MATH_ONLIST_OPERATOR_MEDIAN = "mediana de la lista";
Blockly.Msg.MATH_ONLIST_OPERATOR_MIN = "minimum de la lista";
Blockly.Msg.MATH_ONLIST_OPERATOR_MODE = "modes of list"; // untranslated
Blockly.Msg.MATH_ONLIST_OPERATOR_RANDOM = "random item of list"; // untranslated
Blockly.Msg.MATH_ONLIST_OPERATOR_STD_DEV = "standard deviation of list"; // untranslated
Blockly.Msg.MATH_ONLIST_OPERATOR_SUM = "soma de la lista";
Blockly.Msg.MATH_ONLIST_TOOLTIP_AVERAGE = "Return the average (arithmetic mean) of the numeric values in the list."; // untranslated
Blockly.Msg.MATH_ONLIST_TOOLTIP_MAX = "Return the largest number in the list."; // untranslated
Blockly.Msg.MATH_ONLIST_TOOLTIP_MEDIAN = "Return the median number in the list."; // untranslated
Blockly.Msg.MATH_ONLIST_TOOLTIP_MIN = "Return the smallest number in the list."; // untranslated
Blockly.Msg.MATH_ONLIST_TOOLTIP_MODE = "Return a list of the most common item(s) in the list."; // untranslated
Blockly.Msg.MATH_ONLIST_TOOLTIP_RANDOM = "Return a random element from the list."; // untranslated
Blockly.Msg.MATH_ONLIST_TOOLTIP_STD_DEV = "Return the standard deviation of the list."; // untranslated
Blockly.Msg.MATH_ONLIST_TOOLTIP_SUM = "Return the sum of all the numbers in the list."; // untranslated
Blockly.Msg.MATH_POWER_SYMBOL = "^"; // untranslated
Blockly.Msg.MATH_RANDOM_FLOAT_HELPURL = "https://en.wikipedia.org/wiki/Random_number_generation"; // untranslated
Blockly.Msg.MATH_RANDOM_FLOAT_TITLE_RANDOM = "random fraction"; // untranslated
Blockly.Msg.MATH_RANDOM_FLOAT_TOOLTIP = "Return a random fraction between 0.0 (inclusive) and 1.0 (exclusive)."; // untranslated
Blockly.Msg.MATH_RANDOM_INT_HELPURL = "https://en.wikipedia.org/wiki/Random_number_generation"; // untranslated
Blockly.Msg.MATH_RANDOM_INT_TITLE = "random integer from %1 to %2"; // untranslated
Blockly.Msg.MATH_RANDOM_INT_TOOLTIP = "Return a random integer between the two specified limits, inclusive."; // untranslated
Blockly.Msg.MATH_ROUND_HELPURL = "https://en.wikipedia.org/wiki/Rounding"; // untranslated
Blockly.Msg.MATH_ROUND_OPERATOR_ROUND = "arredondir";
Blockly.Msg.MATH_ROUND_OPERATOR_ROUNDDOWN = "arredondir a l’inferior";
Blockly.Msg.MATH_ROUND_OPERATOR_ROUNDUP = "arredondir al superior";
Blockly.Msg.MATH_ROUND_TOOLTIP = "Round a number up or down."; // untranslated
Blockly.Msg.MATH_SINGLE_HELPURL = "https://en.wikipedia.org/wiki/Square_root"; // untranslated
Blockly.Msg.MATH_SINGLE_OP_ABSOLUTE = "absolut";
Blockly.Msg.MATH_SINGLE_OP_ROOT = "raiç carrada";
Blockly.Msg.MATH_SINGLE_TOOLTIP_ABS = "Return the absolute value of a number."; // untranslated
Blockly.Msg.MATH_SINGLE_TOOLTIP_EXP = "Return e to the power of a number."; // untranslated
Blockly.Msg.MATH_SINGLE_TOOLTIP_LN = "Return the natural logarithm of a number."; // untranslated
Blockly.Msg.MATH_SINGLE_TOOLTIP_LOG10 = "Return the base 10 logarithm of a number."; // untranslated
Blockly.Msg.MATH_SINGLE_TOOLTIP_NEG = "Return the negation of a number."; // untranslated
Blockly.Msg.MATH_SINGLE_TOOLTIP_POW10 = "Return 10 to the power of a number."; // untranslated
Blockly.Msg.MATH_SINGLE_TOOLTIP_ROOT = "Return the square root of a number."; // untranslated
Blockly.Msg.MATH_SUBTRACTION_SYMBOL = "-"; // untranslated
Blockly.Msg.MATH_TRIG_ACOS = "acos"; // untranslated
Blockly.Msg.MATH_TRIG_ASIN = "asin"; // untranslated
Blockly.Msg.MATH_TRIG_ATAN = "atan"; // untranslated
Blockly.Msg.MATH_TRIG_COS = "cos"; // untranslated
Blockly.Msg.MATH_TRIG_HELPURL = "https://en.wikipedia.org/wiki/Trigonometric_functions"; // untranslated
Blockly.Msg.MATH_TRIG_SIN = "sin"; // untranslated
Blockly.Msg.MATH_TRIG_TAN = "tan"; // untranslated
Blockly.Msg.MATH_TRIG_TOOLTIP_ACOS = "Return the arccosine of a number."; // untranslated
Blockly.Msg.MATH_TRIG_TOOLTIP_ASIN = "Return the arcsine of a number."; // untranslated
Blockly.Msg.MATH_TRIG_TOOLTIP_ATAN = "Return the arctangent of a number."; // untranslated
Blockly.Msg.MATH_TRIG_TOOLTIP_COS = "Return the cosine of a degree (not radian)."; // untranslated
Blockly.Msg.MATH_TRIG_TOOLTIP_SIN = "Return the sine of a degree (not radian)."; // untranslated
Blockly.Msg.MATH_TRIG_TOOLTIP_TAN = "Return the tangent of a degree (not radian)."; // untranslated
Blockly.Msg.NEW_VARIABLE = "Crear una variabla...";
Blockly.Msg.NEW_VARIABLE_TITLE = "Nom de la novèla variabla :";
Blockly.Msg.ORDINAL_NUMBER_SUFFIX = ""; // untranslated
Blockly.Msg.PROCEDURES_ALLOW_STATEMENTS = "allow statements"; // untranslated
Blockly.Msg.PROCEDURES_BEFORE_PARAMS = "amb :";
Blockly.Msg.PROCEDURES_CALLNORETURN_HELPURL = "https://en.wikipedia.org/wiki/Procedure_%28computer_science%29"; // untranslated
Blockly.Msg.PROCEDURES_CALLNORETURN_TOOLTIP = "Run the user-defined function '%1'."; // untranslated
Blockly.Msg.PROCEDURES_CALLRETURN_HELPURL = "https://en.wikipedia.org/wiki/Procedure_%28computer_science%29"; // untranslated
Blockly.Msg.PROCEDURES_CALLRETURN_TOOLTIP = "Run the user-defined function '%1' and use its output."; // untranslated
Blockly.Msg.PROCEDURES_CALL_BEFORE_PARAMS = "amb :";
Blockly.Msg.PROCEDURES_CREATE_DO = "Crear '%1'";
Blockly.Msg.PROCEDURES_DEFNORETURN_COMMENT = "Describe this function..."; // untranslated
Blockly.Msg.PROCEDURES_DEFNORETURN_DO = ""; // untranslated
Blockly.Msg.PROCEDURES_DEFNORETURN_HELPURL = "https://en.wikipedia.org/wiki/Procedure_%28computer_science%29"; // untranslated
Blockly.Msg.PROCEDURES_DEFNORETURN_PROCEDURE = "far quicòm";
Blockly.Msg.PROCEDURES_DEFNORETURN_TITLE = "a";
Blockly.Msg.PROCEDURES_DEFNORETURN_TOOLTIP = "Creates a function with no output."; // untranslated
Blockly.Msg.PROCEDURES_DEFRETURN_HELPURL = "https://en.wikipedia.org/wiki/Procedure_%28computer_science%29"; // untranslated
Blockly.Msg.PROCEDURES_DEFRETURN_RETURN = "retorn";
Blockly.Msg.PROCEDURES_DEFRETURN_TOOLTIP = "Creates a function with an output."; // untranslated
Blockly.Msg.PROCEDURES_DEF_DUPLICATE_WARNING = "Warning: This function has duplicate parameters."; // untranslated
Blockly.Msg.PROCEDURES_HIGHLIGHT_DEF = "Highlight function definition"; // untranslated
Blockly.Msg.PROCEDURES_IFRETURN_HELPURL = "http://c2.com/cgi/wiki?GuardClause"; // untranslated
Blockly.Msg.PROCEDURES_IFRETURN_TOOLTIP = "If a value is true, then return a second value."; // untranslated
Blockly.Msg.PROCEDURES_IFRETURN_WARNING = "Warning: This block may be used only within a function definition."; // untranslated
Blockly.Msg.PROCEDURES_MUTATORARG_TITLE = "nom de l’entrada :";
Blockly.Msg.PROCEDURES_MUTATORARG_TOOLTIP = "Add a parameter to the function."; // untranslated
Blockly.Msg.PROCEDURES_MUTATORCONTAINER_TITLE = "entradas";
Blockly.Msg.PROCEDURES_MUTATORCONTAINER_TOOLTIP = "Add, remove, or reorder inputs to this function."; // untranslated
Blockly.Msg.REDO = "Refar";
Blockly.Msg.REMOVE_COMMENT = "Suprimir un comentari";
Blockly.Msg.RENAME_VARIABLE = "Renomenar la variabla…";
Blockly.Msg.RENAME_VARIABLE_TITLE = "Renomenar totas las variablas '%1' a :";
Blockly.Msg.TEXT_APPEND_APPENDTEXT = "apondre lo tèxte";
Blockly.Msg.TEXT_APPEND_HELPURL = "https://github.com/google/blockly/wiki/Text#text-modification"; // untranslated
Blockly.Msg.TEXT_APPEND_TO = "a";
Blockly.Msg.TEXT_APPEND_TOOLTIP = "Append some text to variable '%1'."; // untranslated
Blockly.Msg.TEXT_CHANGECASE_HELPURL = "https://github.com/google/blockly/wiki/Text#adjusting-text-case"; // untranslated
Blockly.Msg.TEXT_CHANGECASE_OPERATOR_LOWERCASE = "en minusculas";
Blockly.Msg.TEXT_CHANGECASE_OPERATOR_TITLECASE = "to Title Case"; // untranslated
Blockly.Msg.TEXT_CHANGECASE_OPERATOR_UPPERCASE = "en MAJUSCULAS";
Blockly.Msg.TEXT_CHANGECASE_TOOLTIP = "Return a copy of the text in a different case."; // untranslated
Blockly.Msg.TEXT_CHARAT_FIRST = "obténer la primièra letra";
Blockly.Msg.TEXT_CHARAT_FROM_END = "obténer la letra # dempuèi la fin";
Blockly.Msg.TEXT_CHARAT_FROM_START = "obténer la letra #";
Blockly.Msg.TEXT_CHARAT_HELPURL = "https://github.com/google/blockly/wiki/Text#extracting-text"; // untranslated
Blockly.Msg.TEXT_CHARAT_INPUT_INTEXT = "dins lo tèxte";
Blockly.Msg.TEXT_CHARAT_LAST = "obténer la darrièra letra";
Blockly.Msg.TEXT_CHARAT_RANDOM = "obténer una letra a l'azard";
Blockly.Msg.TEXT_CHARAT_TAIL = ""; // untranslated
Blockly.Msg.TEXT_CHARAT_TOOLTIP = "Renvia la letra a la posicion indicada.";
Blockly.Msg.TEXT_CREATE_JOIN_ITEM_TOOLTIP = "Add an item to the text."; // untranslated
Blockly.Msg.TEXT_CREATE_JOIN_TITLE_JOIN = "jónher";
Blockly.Msg.TEXT_CREATE_JOIN_TOOLTIP = "Add, remove, or reorder sections to reconfigure this text block."; // untranslated
Blockly.Msg.TEXT_GET_SUBSTRING_END_FROM_END = "to letter # from end"; // untranslated
Blockly.Msg.TEXT_GET_SUBSTRING_END_FROM_START = "fins a la letra #";
Blockly.Msg.TEXT_GET_SUBSTRING_END_LAST = "to last letter"; // untranslated
Blockly.Msg.TEXT_GET_SUBSTRING_HELPURL = "https://github.com/google/blockly/wiki/Text#extracting-a-region-of-text"; // untranslated
Blockly.Msg.TEXT_GET_SUBSTRING_INPUT_IN_TEXT = "dins lo tèxte";
Blockly.Msg.TEXT_GET_SUBSTRING_START_FIRST = "get substring from first letter"; // untranslated
Blockly.Msg.TEXT_GET_SUBSTRING_START_FROM_END = "get substring from letter # from end"; // untranslated
Blockly.Msg.TEXT_GET_SUBSTRING_START_FROM_START = "get substring from letter #"; // untranslated
Blockly.Msg.TEXT_GET_SUBSTRING_TAIL = ""; // untranslated
Blockly.Msg.TEXT_GET_SUBSTRING_TOOLTIP = "Returns a specified portion of the text."; // untranslated
Blockly.Msg.TEXT_INDEXOF_HELPURL = "https://github.com/google/blockly/wiki/Text#finding-text"; // untranslated
Blockly.Msg.TEXT_INDEXOF_INPUT_INTEXT = "dins lo tèxte";
Blockly.Msg.TEXT_INDEXOF_OPERATOR_FIRST = "find first occurrence of text"; // untranslated
Blockly.Msg.TEXT_INDEXOF_OPERATOR_LAST = "find last occurrence of text"; // untranslated
Blockly.Msg.TEXT_INDEXOF_TAIL = ""; // untranslated
Blockly.Msg.TEXT_INDEXOF_TOOLTIP = "Returns the index of the first/last occurrence of the first text in the second text. Returns %1 if text is not found."; // untranslated
Blockly.Msg.TEXT_ISEMPTY_HELPURL = "https://github.com/google/blockly/wiki/Text#checking-for-empty-text"; // untranslated
Blockly.Msg.TEXT_ISEMPTY_TITLE = "%1 es void";
Blockly.Msg.TEXT_ISEMPTY_TOOLTIP = "Returns true if the provided text is empty."; // untranslated
Blockly.Msg.TEXT_JOIN_HELPURL = "https://github.com/google/blockly/wiki/Text#text-creation"; // untranslated
Blockly.Msg.TEXT_JOIN_TITLE_CREATEWITH = "join strings"; // untranslated
Blockly.Msg.TEXT_JOIN_TOOLTIP = "Create a piece of text by joining together any number of items."; // untranslated
Blockly.Msg.TEXT_LENGTH_HELPURL = "https://github.com/google/blockly/wiki/Text#text-modification"; // untranslated
Blockly.Msg.TEXT_LENGTH_TITLE = "longor de %1";
Blockly.Msg.TEXT_LENGTH_TOOLTIP = "Returns the number of letters (including spaces) in the provided text."; // untranslated
Blockly.Msg.TEXT_PRINT_HELPURL = "https://github.com/google/blockly/wiki/Text#printing-text"; // untranslated
Blockly.Msg.TEXT_PRINT_TITLE = "afichar %1";
Blockly.Msg.TEXT_PRINT_TITLE_PRINT = "print"; // untranslated
Blockly.Msg.TEXT_PRINT_TOOLTIP = ""; // untranslated
Blockly.Msg.TEXT_PROMPT_HELPURL = "https://github.com/google/blockly/wiki/Text#getting-input-from-the-user"; // untranslated
Blockly.Msg.TEXT_PROMPT_TOOLTIP_NUMBER = "Prompt for user for a number."; // untranslated
Blockly.Msg.TEXT_PROMPT_TOOLTIP_TEXT = "Prompt for user for some text."; // untranslated
Blockly.Msg.TEXT_PROMPT_TYPE_NUMBER = "prompt for number with message"; // untranslated
Blockly.Msg.TEXT_PROMPT_TYPE_TEXT = "prompt for text with message"; // untranslated
Blockly.Msg.TEXT_TEXT_HELPURL = "https://en.wikipedia.org/wiki/String_(computer_science)"; // untranslated
Blockly.Msg.TEXT_TEXT_TOOLTIP = "A letter, word, or line of text."; // untranslated
Blockly.Msg.TEXT_TRIM_HELPURL = "https://github.com/google/blockly/wiki/Text#trimming-removing-spaces"; // untranslated
Blockly.Msg.TEXT_TRIM_OPERATOR_BOTH = "strip both sides"; // untranslated
Blockly.Msg.TEXT_TRIM_OPERATOR_LEFT = "strip left side"; // untranslated
Blockly.Msg.TEXT_TRIM_OPERATOR_RIGHT = "strip right side"; // untranslated
Blockly.Msg.TEXT_TRIM_TOOLTIP = "Return a copy of the text with spaces removed from one or both ends."; // untranslated
Blockly.Msg.TODAY = "Uèi";
Blockly.Msg.TYPE_CHECK = "type of"; // untranslated
Blockly.Msg.UNDO = "Anullar";
Blockly.Msg.VARIABLES_DEFAULT_NAME = "element";
Blockly.Msg.VARIABLES_GET_CREATE_SET = "Crear 'fixar %1'";
Blockly.Msg.VARIABLES_GET_HELPURL = "https://github.com/google/blockly/wiki/Variables#get"; // untranslated
Blockly.Msg.VARIABLES_GET_TOOLTIP = "Returns the value of this variable."; // untranslated
Blockly.Msg.VARIABLES_SET = "fixar %1 a %2";
Blockly.Msg.VARIABLES_SET_CREATE_GET = "Create 'get %1'"; // untranslated
Blockly.Msg.VARIABLES_SET_HELPURL = "https://github.com/google/blockly/wiki/Variables#set"; // untranslated
Blockly.Msg.VARIABLES_SET_TAIL = "="; // untranslated
Blockly.Msg.VARIABLES_SET_TITLE = "set"; // untranslated
Blockly.Msg.VARIABLES_SET_TOOLTIP = "Sets this variable to be equal to the input."; // untranslated
Blockly.Msg.VARIABLE_ALREADY_EXISTS = "A variable named '%1' already exists."; // untranslated
Blockly.Msg.MATH_CHANGE_TITLE_ITEM = Blockly.Msg.VARIABLES_DEFAULT_NAME;
Blockly.Msg.PROCEDURES_DEFRETURN_TITLE = Blockly.Msg.PROCEDURES_DEFNORETURN_TITLE;
Blockly.Msg.CONTROLS_IF_IF_TITLE_IF = Blockly.Msg.CONTROLS_IF_MSG_IF;
Blockly.Msg.CONTROLS_WHILEUNTIL_INPUT_DO = Blockly.Msg.CONTROLS_REPEAT_INPUT_DO;
Blockly.Msg.CONTROLS_IF_MSG_THEN = Blockly.Msg.CONTROLS_REPEAT_INPUT_DO;
Blockly.Msg.CONTROLS_IF_ELSE_TITLE_ELSE = Blockly.Msg.CONTROLS_IF_MSG_ELSE;
Blockly.Msg.PROCEDURES_DEFRETURN_PROCEDURE = Blockly.Msg.PROCEDURES_DEFNORETURN_PROCEDURE;
Blockly.Msg.LISTS_GET_SUBLIST_INPUT_IN_LIST = Blockly.Msg.LISTS_INLIST;
Blockly.Msg.LISTS_GET_INDEX_INPUT_IN_LIST = Blockly.Msg.LISTS_INLIST;
Blockly.Msg.VARIABLES_SET_ITEM = Blockly.Msg.VARIABLES_DEFAULT_NAME;
Blockly.Msg.PROCEDURES_DEFRETURN_DO = Blockly.Msg.PROCEDURES_DEFNORETURN_DO;
Blockly.Msg.CONTROLS_IF_ELSEIF_TITLE_ELSEIF = Blockly.Msg.CONTROLS_IF_MSG_ELSEIF;
Blockly.Msg.LISTS_GET_INDEX_HELPURL = Blockly.Msg.LISTS_INDEX_OF_HELPURL;
Blockly.Msg.CONTROLS_FOREACH_INPUT_DO = Blockly.Msg.CONTROLS_REPEAT_INPUT_DO;
Blockly.Msg.LISTS_SET_INDEX_INPUT_IN_LIST = Blockly.Msg.LISTS_INLIST;
Blockly.Msg.CONTROLS_FOR_INPUT_DO = Blockly.Msg.CONTROLS_REPEAT_INPUT_DO;
Blockly.Msg.LISTS_CREATE_WITH_ITEM_TITLE = Blockly.Msg.VARIABLES_DEFAULT_NAME;
Blockly.Msg.TEXT_APPEND_VARIABLE = Blockly.Msg.VARIABLES_DEFAULT_NAME;
Blockly.Msg.TEXT_CREATE_JOIN_ITEM_TITLE_ITEM = Blockly.Msg.VARIABLES_DEFAULT_NAME;
Blockly.Msg.LISTS_INDEX_OF_INPUT_IN_LIST = Blockly.Msg.LISTS_INLIST;
Blockly.Msg.PROCEDURES_DEFRETURN_COMMENT = Blockly.Msg.PROCEDURES_DEFNORETURN_COMMENT;
|
resolve_succeeds_with_broken_minfs.rs
|
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! This module tests that pkg-resolver's resolve keeps working when
//! MinFs is broken.
use {
fidl::endpoints::{Proxy, RequestStream, ServerEnd},
fidl_fuchsia_io::{
DirectoryControlHandle, DirectoryProxy, DirectoryRequest, DirectoryRequestStream,
FileControlHandle, FileEvent, FileMarker, FileProxy, FileRequest, FileRequestStream,
FileWriteResponder, NodeMarker,
},
fidl_fuchsia_pkg_ext::RepositoryConfig,
fidl_fuchsia_pkg_rewrite_ext::Rule,
fuchsia_async as fasync,
fuchsia_pkg_testing::{serve::ServedRepository, Package, PackageBuilder, RepositoryBuilder},
fuchsia_zircon::Status,
futures::future::BoxFuture,
futures::prelude::*,
lib::{
get_repos, get_rules, mock_filesystem, DirOrProxy, EnableDynamicConfig, MountsBuilder,
TestEnv, TestEnvBuilder, EMPTY_REPO_PATH,
},
std::sync::{
atomic::{AtomicBool, AtomicU64},
Arc,
},
};
trait OpenRequestHandler: Sized {
fn handle_open_request(
&self,
flags: u32,
mode: u32,
path: String,
object: ServerEnd<NodeMarker>,
control_handle: DirectoryControlHandle,
parent: Arc<DirectoryStreamHandler<Self>>,
);
}
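// DirectoryStreamHandler pumps a fuchsia.io Directory request stream and hands
// every Open request to the injected OpenRequestHandler. Each fake filesystem
// below decides per request whether to fail or to delegate to a real tempdir,
// which is how these tests simulate MinFs breaking and later recovering.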
struct DirectoryStreamHandler<O: Sized> {
open_handler: Arc<O>,
}
impl<O> DirectoryStreamHandler<O>
where
O: OpenRequestHandler + Send + Sync + 'static,
{
fn new(open_handler: Arc<O>) -> Self {
Self { open_handler }
}
fn handle_stream(
self: Arc<Self>,
mut stream: DirectoryRequestStream,
) -> BoxFuture<'static, ()> {
async move {
while let Some(req) = stream.next().await {
match req.unwrap() {
DirectoryRequest::Clone { flags, object, control_handle: _ } => {
let stream = object.into_stream().unwrap().cast_stream();
mock_filesystem::describe_dir(flags, &stream);
fasync::Task::spawn(Arc::clone(&self).handle_stream(stream)).detach();
}
DirectoryRequest::Open { flags, mode, path, object, control_handle } => {
self.open_handler.handle_open_request(
flags,
mode,
path,
object,
control_handle,
Arc::clone(&self),
)
}
DirectoryRequest::Close { .. } => (),
req => panic!("DirectoryStreamHandler unhandled request {:?}", req),
}
}
}
.boxed()
}
}
struct OpenFailOrTempFs {
should_fail: AtomicBool,
fail_count: AtomicU64,
tempdir: tempfile::TempDir,
}
impl OpenFailOrTempFs {
fn new_failing() -> Arc<Self> {
Arc::new(Self {
should_fail: AtomicBool::new(true),
fail_count: AtomicU64::new(0),
tempdir: tempfile::tempdir().expect("/tmp to exist"),
})
}
fn get_open_fail_count(&self) -> u64 {
self.fail_count.load(std::sync::atomic::Ordering::SeqCst)
}
fn make_open_succeed(&self) {
self.should_fail.store(false, std::sync::atomic::Ordering::SeqCst);
}
fn should_fail(&self) -> bool {
self.should_fail.load(std::sync::atomic::Ordering::SeqCst)
}
}
impl OpenRequestHandler for OpenFailOrTempFs {
fn handle_open_request(
&self,
flags: u32,
mode: u32,
path: String,
object: ServerEnd<NodeMarker>,
_control_handle: DirectoryControlHandle,
parent: Arc<DirectoryStreamHandler<Self>>,
) {
if self.should_fail() {
if path == "."
|
else {
self.fail_count.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
}
} else {
let (tempdir_proxy, server_end) =
fidl::endpoints::create_proxy::<fidl_fuchsia_io::DirectoryMarker>().unwrap();
fdio::service_connect(self.tempdir.path().to_str().unwrap(), server_end.into_channel())
.unwrap();
tempdir_proxy.open(flags, mode, &path, object).unwrap();
}
}
}
/// Implements OpenRequestHandler, proxying opens to a backing temp directory
/// and optionally failing writes to certain files.
struct WriteFailOrTempFs {
files_to_fail_writes: Vec<String>,
should_fail: Arc<AtomicBool>,
fail_count: Arc<AtomicU64>,
tempdir_proxy: DirectoryProxy,
    // We never read this, but we need to keep it alive; otherwise the temp
    // directory is torn down.
_tempdir: tempfile::TempDir,
}
impl WriteFailOrTempFs {
fn new_failing(files_to_fail_writes: Vec<String>) -> Arc<Self> {
let tempdir = tempfile::tempdir().expect("/tmp to exist");
let (tempdir_proxy, server_end) =
fidl::endpoints::create_proxy::<fidl_fuchsia_io::DirectoryMarker>().unwrap();
fdio::open(
tempdir.path().to_str().unwrap(),
fidl_fuchsia_io::OPEN_FLAG_DIRECTORY
| fidl_fuchsia_io::OPEN_RIGHT_READABLE
| fidl_fuchsia_io::OPEN_RIGHT_WRITABLE,
server_end.into_channel(),
)
.expect("open temp directory");
Arc::new(Self {
files_to_fail_writes,
should_fail: Arc::new(AtomicBool::new(true)),
fail_count: Arc::new(AtomicU64::new(0)),
_tempdir: tempdir,
tempdir_proxy,
})
}
fn get_write_fail_count(&self) -> u64 {
self.fail_count.load(std::sync::atomic::Ordering::SeqCst)
}
fn make_write_succeed(&self) {
self.should_fail.store(false, std::sync::atomic::Ordering::SeqCst);
}
fn should_fail(&self) -> bool {
self.should_fail.load(std::sync::atomic::Ordering::SeqCst)
}
}
impl OpenRequestHandler for WriteFailOrTempFs {
fn handle_open_request(
&self,
flags: u32,
mode: u32,
path: String,
object: ServerEnd<NodeMarker>,
_control_handle: DirectoryControlHandle,
parent: Arc<DirectoryStreamHandler<Self>>,
) {
if path == "." && self.should_fail() {
let stream = object.into_stream().unwrap().cast_stream();
mock_filesystem::describe_dir(flags, &stream);
fasync::Task::spawn(parent.handle_stream(stream)).detach();
return;
}
if !self.files_to_fail_writes.contains(&path) {
// We don't want to intercept file operations, so just open the file normally.
self.tempdir_proxy.open(flags, mode, &path, object).unwrap();
return;
}
        // This path matched our configured set of files to intercept, so open
        // a backing file and route the file operations the client thinks it is
        // sending to that file through our FailingWriteFileStreamHandler
        // instead.
let (file_requests, file_control_handle) =
ServerEnd::<FileMarker>::new(object.into_channel())
.into_stream_and_control_handle()
.expect("split file server end");
// Create a proxy to the actual file we'll open to proxy to.
let (backing_node_proxy, backing_node_server_end) =
fidl::endpoints::create_proxy::<NodeMarker>().unwrap();
self.tempdir_proxy
.open(flags, mode, &path, backing_node_server_end)
.expect("open file requested by pkg-resolver");
        // Everything pkg-resolver opens in these tests is a file, not a
        // directory, so cast the NodeProxy to a FileProxy. If that assumption
        // ever changes, this code will have to support both.
let backing_file_proxy = FileProxy::new(backing_node_proxy.into_channel().unwrap());
let send_onopen = flags & fidl_fuchsia_io::OPEN_FLAG_DESCRIBE != 0;
let file_handler = Arc::new(FailingWriteFileStreamHandler::new(
backing_file_proxy,
String::from(path),
Arc::clone(&self.should_fail),
Arc::clone(&self.fail_count),
));
fasync::Task::spawn(file_handler.handle_stream(
file_requests,
file_control_handle,
send_onopen,
))
.detach();
}
}
/// Handles a stream of requests for a particular file, proxying to a backing file for all
/// operations except writes, which it may decide to make fail.
struct FailingWriteFileStreamHandler {
backing_file: FileProxy,
writes_should_fail: Arc<AtomicBool>,
write_fail_count: Arc<AtomicU64>,
path: String,
}
impl FailingWriteFileStreamHandler {
fn new(
backing_file: FileProxy,
path: String,
writes_should_fail: Arc<AtomicBool>,
write_fail_count: Arc<AtomicU64>,
) -> Self {
Self { backing_file, writes_should_fail, write_fail_count, path }
}
fn writes_should_fail(self: &Arc<Self>) -> bool {
self.writes_should_fail.load(std::sync::atomic::Ordering::SeqCst)
}
async fn handle_write(self: &Arc<Self>, data: Vec<u8>, responder: FileWriteResponder) {
if self.writes_should_fail() {
self.write_fail_count.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
responder.send(Status::NO_MEMORY.into_raw(), 0u64).expect("send on write");
return;
}
// Don't fail, actually do the write.
let (status, bytes_written) = self.backing_file.write(&data).await.unwrap();
responder.send(status, bytes_written).unwrap();
}
fn handle_stream(
self: Arc<Self>,
mut stream: FileRequestStream,
control_handle: FileControlHandle,
send_onopen: bool,
) -> BoxFuture<'static, ()> {
async move {
if send_onopen {
// The client end of the file is waiting for an OnOpen event, so send
// one based on the actual OnOpen from the backing file.
let mut event_stream = self.backing_file.take_event_stream();
let event = event_stream.try_next().await.unwrap();
match event.expect("failed to received file event") {
FileEvent::OnOpen_ { s, mut info } => {
// info comes as an Option<Box<NodeInfo>>, but we need to return an
// Option<&mut NodeInfo>. Transform it.
let node_info = info.as_mut().map(|b| &mut **b);
control_handle
.send_on_open_(s, node_info)
.expect("send on open to fake file");
}
FileEvent::OnConnectionInfo { info } => {
control_handle
.send_on_connection_info(info)
.expect("send on open to fake file");
}
}
}
while let Some(req) = stream.next().await {
match req.unwrap() {
FileRequest::Write { data, responder } => {
self.handle_write(data, responder).await
}
FileRequest::GetAttr { responder } => {
let (status, mut attrs) = self.backing_file.get_attr().await.unwrap();
responder.send(status, &mut attrs).unwrap();
}
FileRequest::Read { count, responder } => {
let (status, data) = self.backing_file.read(count).await.unwrap();
responder.send(status, &data).unwrap();
}
FileRequest::Close { responder } => {
let backing_file_close_response = self.backing_file.close().await.unwrap();
responder.send(backing_file_close_response).unwrap();
}
FileRequest::Close2 { responder } => {
let mut backing_file_close_response =
self.backing_file.close2().await.unwrap();
responder.send(&mut backing_file_close_response).unwrap();
}
other => {
panic!("unhandled request type for path {:?}: {:?}", self.path, other);
}
}
}
}
.boxed()
}
}
/// Optionally fails renames of certain files. Otherwise, delegates
/// DirectoryRequests to a backing tempdir.
struct RenameFailOrTempFs {
fail_count: Arc<AtomicU64>,
files_to_fail_renames: Vec<String>,
should_fail: Arc<AtomicBool>,
tempdir: Arc<tempfile::TempDir>,
}
impl RenameFailOrTempFs {
fn new_failing(files_to_fail_renames: Vec<String>) -> Arc<Self> {
Arc::new(Self {
fail_count: Arc::new(AtomicU64::new(0)),
files_to_fail_renames,
should_fail: Arc::new(AtomicBool::new(true)),
tempdir: Arc::new(tempfile::tempdir().expect("/tmp to exist")),
})
}
fn get_rename_fail_count(&self) -> u64 {
self.fail_count.load(std::sync::atomic::Ordering::SeqCst)
}
fn make_rename_succeed(&self) {
self.should_fail.store(false, std::sync::atomic::Ordering::SeqCst);
}
fn should_fail(&self) -> bool {
self.should_fail.load(std::sync::atomic::Ordering::SeqCst)
}
}
impl OpenRequestHandler for RenameFailOrTempFs {
fn handle_open_request(
&self,
flags: u32,
mode: u32,
path: String,
object: ServerEnd<NodeMarker>,
_control_handle: DirectoryControlHandle,
parent: Arc<DirectoryStreamHandler<Self>>,
) {
// Set up proxy to tmpdir and delegate to it on success.
let (tempdir_proxy, server_end) =
fidl::endpoints::create_proxy::<fidl_fuchsia_io::DirectoryMarker>().unwrap();
fdio::service_connect(self.tempdir.path().to_str().unwrap(), server_end.into_channel())
.unwrap();
if !self.should_fail() || path != "." {
tempdir_proxy.open(flags, mode, &path, object).unwrap();
return;
}
// Prepare to handle the directory requests. We must call describe_dir, which sends an
// OnOpen if OPEN_FLAG_DESCRIBE is set. Otherwise, the code will hang when reading from
// the stream.
let mut stream = object.into_stream().unwrap().cast_stream();
mock_filesystem::describe_dir(flags, &stream);
let fail_count = Arc::clone(&self.fail_count);
let files_to_fail_renames = Clone::clone(&self.files_to_fail_renames);
// Handle the directory requests.
fasync::Task::spawn(async move {
while let Some(req) = stream.next().await {
match req.unwrap() {
DirectoryRequest::GetAttr { responder } => {
let (status, mut attrs) = tempdir_proxy.get_attr().await.unwrap();
responder.send(status, &mut attrs).unwrap();
}
DirectoryRequest::Close { responder } => {
let status = tempdir_proxy.close().await.unwrap();
responder.send(status).unwrap();
}
DirectoryRequest::Close2 { responder } => {
let mut result = tempdir_proxy.close2().await.unwrap();
responder.send(&mut result).unwrap();
}
DirectoryRequest::GetToken { responder } => {
let (status, handle) = tempdir_proxy.get_token().await.unwrap();
responder.send(status, handle).unwrap();
}
DirectoryRequest::Rename2 { src, dst, responder, .. } => {
if !files_to_fail_renames.contains(&src) {
panic!("unsupported rename from {} to {}", src, dst);
}
fail_count.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
responder.send(&mut Err(Status::NOT_FOUND.into_raw())).unwrap();
}
DirectoryRequest::Open { flags, mode, path, object, control_handle } => {
parent.open_handler.handle_open_request(
flags,
mode,
path,
object,
control_handle,
                            Arc::clone(&parent),
);
}
other => {
panic!("unhandled request type for path {:?}: {:?}", path, other);
}
}
}
})
.detach();
}
}
async fn create_testenv_serves_repo<H: OpenRequestHandler + Send + Sync + 'static>(
open_handler: Arc<H>,
) -> (TestEnv, RepositoryConfig, Package, ServedRepository) {
// Create testenv with failing isolated-persistent-storage
let directory_handler = Arc::new(DirectoryStreamHandler::new(open_handler));
let (proxy, stream) =
fidl::endpoints::create_proxy_and_stream::<fidl_fuchsia_io::DirectoryMarker>().unwrap();
fasync::Task::spawn(directory_handler.handle_stream(stream)).detach();
let env = TestEnvBuilder::new()
.mounts(
MountsBuilder::new()
.enable_dynamic_config(EnableDynamicConfig { enable_dynamic_configuration: true })
.pkg_resolver_data(DirOrProxy::Proxy(proxy))
.build(),
)
.build()
.await;
// Serve repo with package
let pkg = PackageBuilder::new("just_meta_far").build().await.expect("created pkg");
let repo = Arc::new(
RepositoryBuilder::from_template_dir(EMPTY_REPO_PATH)
.add_package(&pkg)
.build()
.await
.unwrap(),
);
let served_repository = repo.server().start().unwrap();
let repo_url = "fuchsia-pkg://example.com".parse().unwrap();
let config = served_repository.make_repo_config(repo_url);
(env, config, pkg, served_repository)
}
async fn verify_pkg_resolution_succeeds_during_minfs_repo_config_failure<
O,
FailCountFn,
MakeSucceedFn,
>(
open_handler: Arc<O>,
fail_count_fn: FailCountFn,
num_failures_before_first_restart: u64,
num_failures_after_first_restart: u64,
make_succeed_fn: MakeSucceedFn,
) where
O: OpenRequestHandler + Send + Sync + 'static,
FailCountFn: FnOnce() -> u64 + Copy,
MakeSucceedFn: FnOnce(),
{
let (mut env, config, pkg, _served_repo) =
create_testenv_serves_repo(Arc::clone(&open_handler)).await;
// Verify we can resolve the package with a broken MinFs, and that repo configs do not persist
let () = env.proxies.repo_manager.add(config.clone().into()).await.unwrap().unwrap();
let package_dir = env.resolve_package("fuchsia-pkg://example.com/just_meta_far").await.unwrap();
pkg.verify_contents(&package_dir).await.unwrap();
assert_eq!(fail_count_fn(), num_failures_before_first_restart);
env.restart_pkg_resolver().await;
assert_eq!(get_repos(&env.proxies.repo_manager).await, vec![]);
assert_eq!(fail_count_fn(), num_failures_after_first_restart);
    // Now let MinFs recover and verify that repo configs persist across restart.
    // Note we know we are no longer executing the failure path, since the
    // failure count doesn't change.
make_succeed_fn();
let () = env.proxies.repo_manager.add(config.clone().into()).await.unwrap().unwrap();
let package_dir = env.resolve_package("fuchsia-pkg://example.com/just_meta_far").await.unwrap();
pkg.verify_contents(&package_dir).await.unwrap();
assert_eq!(fail_count_fn(), num_failures_after_first_restart);
env.restart_pkg_resolver().await;
assert_eq!(get_repos(&env.proxies.repo_manager).await, vec![config.clone()]);
env.stop().await;
}
async fn verify_pkg_resolution_succeeds_during_minfs_repo_config_and_rewrite_rule_failure<
O,
FailCountFn,
MakeSucceedFn,
>(
open_handler: Arc<O>,
fail_count_fn: FailCountFn,
num_failures_before_first_restart: u64,
num_failures_after_first_restart: u64,
make_succeed_fn: MakeSucceedFn,
) where
O: OpenRequestHandler + Send + Sync + 'static,
FailCountFn: FnOnce() -> u64 + Copy,
MakeSucceedFn: FnOnce(),
{
let (mut env, config, pkg, _served_repo) =
create_testenv_serves_repo(Arc::clone(&open_handler)).await;
// Add repo config and rewrite rules
let () = env.proxies.repo_manager.add(config.clone().into()).await.unwrap().unwrap();
let (edit_transaction, edit_transaction_server) = fidl::endpoints::create_proxy().unwrap();
env.proxies.rewrite_engine.start_edit_transaction(edit_transaction_server).unwrap();
let rule = Rule::new("should_be_rewritten", "example.com", "/", "/").unwrap();
let () = edit_transaction.add(&mut rule.clone().into()).await.unwrap().unwrap();
let () = edit_transaction.commit().await.unwrap().unwrap();
// Verify we can resolve the package with a broken MinFs, and that rewrite rules do not
// persist
let package_dir =
env.resolve_package("fuchsia-pkg://should_be_rewritten/just_meta_far").await.unwrap();
pkg.verify_contents(&package_dir).await.unwrap();
assert_eq!(fail_count_fn(), num_failures_before_first_restart);
env.restart_pkg_resolver().await;
assert_eq!(get_rules(&env.proxies.rewrite_engine).await, vec![]);
assert_eq!(fail_count_fn(), num_failures_after_first_restart);
// Now let MinFs recover and show how rewrite rules are saved on restart
// Note we know we are not executing the failure path anymore since
// the failure count doesn't change.
make_succeed_fn();
let () = env.proxies.repo_manager.add(config.clone().into()).await.unwrap().unwrap();
let (edit_transaction, edit_transaction_server) = fidl::endpoints::create_proxy().unwrap();
env.proxies.rewrite_engine.start_edit_transaction(edit_transaction_server).unwrap();
let () = edit_transaction.add(&mut rule.clone().into()).await.unwrap().unwrap();
let () = edit_transaction.commit().await.unwrap().unwrap();
let package_dir =
env.resolve_package("fuchsia-pkg://should_be_rewritten/just_meta_far").await.unwrap();
pkg.verify_contents(&package_dir).await.unwrap();
assert_eq!(fail_count_fn(), num_failures_after_first_restart);
env.restart_pkg_resolver().await;
assert_eq!(get_rules(&env.proxies.rewrite_engine).await, vec![rule.clone()]);
env.stop().await;
}
// Test that when pkg-resolver can't open the file for dynamic repo configs, the resolver
// still works.
#[fasync::run_singlethreaded(test)]
async fn minfs_fails_create_repo_configs() {
let open_handler = OpenFailOrTempFs::new_failing();
verify_pkg_resolution_succeeds_during_minfs_repo_config_failure(
Arc::clone(&open_handler),
|| open_handler.get_open_fail_count(),
// Before the first pkg-resolver restart, we fail 3 times:
// * when trying to open repositories.json on start
// * when trying to open rewrites.json on start
// * when trying to open repositories.json when adding a dynamic repo config
3,
// We fail an additional 2 times after the restart to account for repositories.json
// and rewrites.json failing to open again on startup.
5,
|| open_handler.make_open_succeed(),
)
.await;
}
// Test that when pkg-resolver can open neither the file for rewrite rules
// NOR the file for dynamic repositories, the resolver still works.
#[fasync::run_singlethreaded(test)]
async fn minfs_fails_create_rewrite_rules() {
let open_handler = OpenFailOrTempFs::new_failing();
verify_pkg_resolution_succeeds_during_minfs_repo_config_and_rewrite_rule_failure(
Arc::clone(&open_handler),
|| open_handler.get_open_fail_count(),
// Before the first pkg-resolver restart, we fail 4 times:
// * when trying to open repositories.json on start
// * when trying to open rewrites.json on start
// * when trying to open repositories.json when adding a dynamic repo config
// * when trying to open rewrites.json when adding a dynamic rewrite rule
4,
// We fail an additional 2 times after the restart to account for repositories.json
// and rewrites.json failing to open again on startup.
6,
|| open_handler.make_open_succeed(),
)
.await;
}
// Test that when pkg-resolver can't write to the file for dynamic repo configs,
// package resolution still works.
#[fasync::run_singlethreaded(test)]
async fn minfs_fails_write_to_repo_configs() {
let open_handler = WriteFailOrTempFs::new_failing(vec![String::from("repositories.json.new")]);
verify_pkg_resolution_succeeds_during_minfs_repo_config_failure(
Arc::clone(&open_handler),
|| open_handler.get_write_fail_count(),
        // The only time the test should hit the write failure path is when we add a repo config
        // while should_fail is true, in which case we fail at writing repositories.json.new.
1,
1,
|| open_handler.make_write_succeed(),
)
.await;
}
// Test that when pkg-resolver can write to neither the file for dynamic repo configs
// NOR the file for rewrite rules, package resolution still works.
#[fasync::run_singlethreaded(test)]
async fn minfs_fails_write_to_repo_configs_and_rewrite_rules() {
let open_handler = WriteFailOrTempFs::new_failing(vec![
String::from("repositories.json.new"),
String::from("rewrites.json.new"),
]);
verify_pkg_resolution_succeeds_during_minfs_repo_config_and_rewrite_rule_failure(
Arc::clone(&open_handler),
|| open_handler.get_write_fail_count(),
        // The only time the test should hit the write failure path is when we add a repo config
        // while should_fail is true, in which case we fail at writing both repositories.json.new
        // and rewrites.json.new.
2,
2,
|| open_handler.make_write_succeed(),
)
.await;
}
// Test that when pkg-resolver can't rename the file for dynamic repo configs, package resolution
// still works. Note this test might stop working if the pkg-resolver starts issuing Rename
// directly to /data instead of going through std::fs::rename. If that's the case, consider
// extending DirectoryStreamHandler to also have a RenameRequestHandler, and possibly use a
// std::sync::Weak to coordinate between the DirectoryStreamHandler and RenameRequestHandler.
#[fasync::run_singlethreaded(test)]
async fn minfs_fails_rename_repo_configs() {
let open_handler = RenameFailOrTempFs::new_failing(vec![String::from("repositories.json.new")]);
verify_pkg_resolution_succeeds_during_minfs_repo_config_failure(
Arc::clone(&open_handler),
|| open_handler.get_rename_fail_count(),
        // The only time the test should hit the rename failure path is when we add a
        // repo config while should_fail is true, in which case we fail at renaming
        // repositories.json.new.
1,
1,
|| open_handler.make_rename_succeed(),
)
.await;
}
// Test that when pkg-resolver can rename neither the file for dynamic repo configs
// NOR the file for rewrite rules, package resolution still works.
#[fasync::run_singlethreaded(test)]
async fn minfs_fails_rename_repo_configs_and_rewrite_rules() {
let open_handler = RenameFailOrTempFs::new_failing(vec![
String::from("repositories.json.new"),
String::from("rewrites.json.new"),
]);
verify_pkg_resolution_succeeds_during_minfs_repo_config_and_rewrite_rule_failure(
Arc::clone(&open_handler),
|| open_handler.get_rename_fail_count(),
        // The only time the test should hit the rename failure path is when we add a
        // repo config while should_fail is true, in which case we fail at renaming both
        // repositories.json.new and rewrites.json.new.
2,
2,
|| open_handler.make_rename_succeed(),
)
.await;
}
|
{
let stream = object.into_stream().unwrap().cast_stream();
mock_filesystem::describe_dir(flags, &stream);
fasync::Task::spawn(parent.handle_stream(stream)).detach();
}
|
create_tf_record.py
|
# -*-coding: utf-8 -*-
"""
@Project: create_tfrecord
@File : create_tfrecord.py
@Author : panjq
@E-mail : [email protected]
@Date : 2018-07-27 17:19:54
@desc : save image data into a single tfrecord file
"""
##########################################################################
import tensorflow as tf
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
import random
from PIL import Image
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
train_path = './train_new/img'
test_path = './test_new/img'
class_names = set(os.listdir(test_path))  # avoid shadowing the builtin `list`
classes = sorted(class_names, key=str.lower)
print(classes)
##########################################################################
# generate an int64 feature
def _int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
# generate a bytes (string) feature
def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
# generate a float-list feature
def float_list_feature(value):
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))
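# Example (a minimal sketch, not part of the original script): how the three
# feature helpers above combine into a single tf.train.Example. The literal
# values below are illustrative assumptions only.
def _example_feature_usage():
    return tf.train.Example(features=tf.train.Features(feature={
        'label': _int64_feature(0),
        'image_raw': _bytes_feature(b'raw-image-bytes'),
        'scores': float_list_feature([0.5, 0.5]),
    }))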
def get_example_nums(tf_records_filenames):
    '''
    count the number of examples (images) in a tfrecord file
    :param tf_records_filenames: path of the tfrecord file
    :return: number of examples
    '''
    nums = 0
    for record in tf.python_io.tf_record_iterator(tf_records_filenames):
        nums += 1
    return nums
def show_image(title, image):
    '''
    display an image
    :param title: image title
    :param image: image data
    :return:
    '''
    # plt.figure("show_image")
    # print(image.dtype)
    plt.imshow(image)
    plt.axis('on')  # show the axes; use 'off' to hide them
    plt.title(title)  # image title
|
def load_labels_file(filename, num=1, shuffle=False):
    '''
    build the image/label lists from a directory tree in which every
    sub-directory is one class (sub-directory name = class name)
    :param filename: root directory of the images
    :param num: number of labels per image (only one label is used here)
    :param shuffle: whether to shuffle the order
    :return: images type->list
    :return: labels type->list
    '''
    images = []
    labels = []
    for index, name in enumerate(classes):
        class_path = filename + '/' + name + '/'
        for img_name in os.listdir(class_path):
            img_path = class_path + img_name
            images.append(img_path)
            labels.append(index)
    if shuffle:
        # reuse the same seed so images and labels are shuffled in step
        randnum = random.randint(0, 100)
        random.seed(randnum)
        random.shuffle(images)
        random.seed(randnum)
        random.shuffle(labels)
    return images, labels
def read_image(filename, resize_height, resize_width, normalization=False):
    '''
    read image data; by default returns uint8 in [0,255]
    :param filename:
    :param resize_height:
    :param resize_width:
    :param normalization: whether to normalize to [0.0, 1.0]
    :return: the image data
    '''
    bgr_image = cv2.imread(filename)
    if len(bgr_image.shape) == 2:  # convert grayscale images to three channels
        print("Warning:gray image", filename)
        bgr_image = cv2.cvtColor(bgr_image, cv2.COLOR_GRAY2BGR)
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)  # convert BGR to RGB
    # show_image(filename,rgb_image)
    if resize_height > 0 and resize_width > 0:
        rgb_image = cv2.resize(rgb_image, (resize_width, resize_height))
    rgb_image = np.asanyarray(rgb_image)
    if normalization:
        # do not write this as rgb_image=rgb_image/255 (integer division)
        rgb_image = rgb_image / 255.0
    # show_image("src resize image",image)
    return rgb_image
def get_batch_images(images, labels, batch_size, labels_nums, one_hot=False, shuffle=False, num_threads=64):
    '''
    :param images: images
    :param labels: labels
    :param batch_size:
    :param labels_nums: number of label classes
    :param one_hot: whether to convert labels to one-hot form
    :param shuffle: whether to shuffle; usually shuffle=True for training and shuffle=False for validation
    :return: a batch of images and labels
    '''
    min_after_dequeue = 200
    capacity = min_after_dequeue + 3 * batch_size  # capacity must be larger than min_after_dequeue
    if shuffle:
        images_batch, labels_batch = tf.train.shuffle_batch([images, labels],
                                                            batch_size=batch_size,
                                                            capacity=capacity,
                                                            min_after_dequeue=min_after_dequeue,
                                                            num_threads=num_threads)
    else:
        images_batch, labels_batch = tf.train.batch([images, labels],
                                                    batch_size=batch_size,
                                                    capacity=capacity,
                                                    num_threads=num_threads)
    if one_hot:
        labels_batch = tf.one_hot(labels_batch, labels_nums, 1, 0)
    return images_batch, labels_batch
def read_records(filename, resize_height, resize_width, type=None):
    '''
    parse a record file: the stored image data is RGB, uint8, [0,255];
    for training it usually needs to be normalized to [0,1]
    :param filename:
    :param resize_height:
    :param resize_width:
    :param type: how to convert the image data
        None: cast uint8-[0,255] to float32-[0,255]
        normalization: normalize to float32-[0,1]
        centralization: normalize to float32-[0,1], then subtract the mean (0.5)
    :return:
    '''
    # create a file queue; no limit on how many times it can be read
    filename_queue = tf.train.string_input_producer([filename])
    # create a reader from file queue
    reader = tf.TFRecordReader()
    # the reader reads one serialized example from the file queue
    _, serialized_example = reader.read(filename_queue)
    # get feature from serialized example
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'height': tf.FixedLenFeature([], tf.int64),
            'width': tf.FixedLenFeature([], tf.int64),
            'depth': tf.FixedLenFeature([], tf.int64),
            'label': tf.FixedLenFeature([], tf.int64)
        }
    )
    tf_image = tf.decode_raw(features['image_raw'], tf.uint8)  # raw image data
    tf_height = features['height']
    tf_width = features['width']
    tf_depth = features['depth']
    tf_label = tf.cast(features['label'], tf.int32)
    # NOTE: the reshape size must match the image shape used when the record
    # was written, otherwise parsing fails
    tf_image = tf.reshape(tf_image, [resize_height, resize_width, 3])
    # resize_images may only be applied after the data has been restored
    # (uint8 in -> float32 out):
    # tf_image = tf.image.resize_images(tf_image, [224, 224])
    # the stored image type is uint8; TensorFlow training data must be tf.float32
    if type is None:
        tf_image = tf.cast(tf_image, tf.float32)
    elif type == 'normalization':  # [1] if normalization is needed:
        # tf.image.convert_image_dtype only normalizes [0,255] for uint8 input
        # tf_image = tf.image.convert_image_dtype(tf_image, tf.float32)
        tf_image = tf.cast(tf_image, tf.float32) * (1. / 255.0)  # normalize
    elif type == 'centralization':
        # normalize and centre, assuming a mean of 0.5:
        tf_image = tf.cast(tf_image, tf.float32) * (1. / 255) - 0.5  # centralize
    # only the image and the label are returned here
    # return tf_image, tf_height, tf_width, tf_depth, tf_label
    return tf_image, tf_label
def create_records(image_dir, output_record_dir, resize_height, resize_width, shuffle, log=5):
    '''
    save the raw image data plus label, height, width and depth into a record file
    NOTE: images are read as uint8 and stored as a BytesList string; convert
    the type as needed when parsing
    :param image_dir: directory of the source images
    :param output_record_dir: output path of the record file
    :param resize_height:
    :param resize_width:
    PS: no resize is performed when resize_height or resize_width is 0
    :param shuffle: whether to shuffle the order
    :param log: interval between log messages
    '''
    # load the file list; only one label per image is used
    images_list, labels_list = load_labels_file(image_dir, 1, shuffle)
    writer = tf.python_io.TFRecordWriter(output_record_dir)
    for i, [image_name, labels] in enumerate(zip(images_list, labels_list)):
        image_path = image_name
        if not os.path.exists(image_path):
            print('Err:no image', image_path)
            continue
        image = read_image(image_path, resize_height, resize_width)
        image_raw = image.tostring()
        if i % log == 0 or i == len(images_list) - 1:
            print('------------processing:%d-th------------' % (i))
            print('current image_path=%s' % (image_path), 'shape:{}'.format(image.shape), 'labels:{}'.format(labels))
        # only one label is stored here; for multiple labels add more
        # "'label': _int64_feature(label)" entries
        label = labels
        example = tf.train.Example(features=tf.train.Features(feature={
            'image_raw': _bytes_feature(image_raw),
            'height': _int64_feature(image.shape[0]),
            'width': _int64_feature(image.shape[1]),
            'depth': _int64_feature(image.shape[2]),
            # the key must be 'label' to match read_records above
            'label': _int64_feature(label)
        }))
        writer.write(example.SerializeToString())
    writer.close()
def disp_records(record_file, resize_height, resize_width, show_nums=4):
    '''
    parse the record file and display show_nums images, mainly to verify
    that the record file was generated correctly
    :param record_file: path of the record file
    :return:
    '''
    # read the record
    tf_image, tf_label = read_records(record_file, resize_height, resize_width, type='normalization')
    # display the first show_nums images
    init_op = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for i in range(show_nums):
            image, label = sess.run([tf_image, tf_label])  # fetch image and label in the session
            # an image parsed straight from the record is a vector and must be
            # reshaped for display:
            # image = image.reshape([height,width,depth])
            # print('shape:{},type:{},labels:{}'.format(image.shape,image.dtype,label))
            show_image("image:%d" % (label), image)
        coord.request_stop()
        coord.join(threads)
def batch_test(record_file, resize_height, resize_width):
    '''
    :param record_file: path of the record file
    :param resize_height:
    :param resize_width:
    :return:
    :PS: image_batch and label_batch are normally used as the network input
    '''
    # read the record
    tf_image, tf_label = read_records(record_file, resize_height, resize_width, type='normalization')
    image_batch, label_batch = get_batch_images(tf_image, tf_label, batch_size=4, labels_nums=5, one_hot=False, shuffle=False)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:  # start a session
        sess.run(init)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        for i in range(4):
            # fetch images and labels in the session
            images, labels = sess.run([image_batch, label_batch])
            # only the first image of each batch is displayed here
            show_image("image", images[0, :, :, :])
            print('shape:{},type:{},labels:{}'.format(images.shape, images.dtype, labels))
        # stop all threads
        coord.request_stop()
        coord.join(threads)
if __name__ == '__main__':
    # parameter settings
    resize_height = 224  # height of the stored images
    resize_width = 224  # width of the stored images
    shuffle = True
    log = 5
    # generate the train.record file
    image_dir = './train_new/img'
    # train_labels = './onsets/train.txt'  # image paths
    train_record_output = 'train.tfrecord'
    create_records(image_dir, train_record_output, resize_height, resize_width, shuffle, log)
    train_nums = get_example_nums(train_record_output)
    print("save train example nums={}".format(train_nums))
    # generate the val.record file
    image_dir = './test_new/img'
    # val_labels = './onsets/val.txt'  # image paths
    val_record_output = 'val.tfrecord'
    create_records(image_dir, val_record_output, resize_height, resize_width, shuffle, log)
    val_nums = get_example_nums(val_record_output)
    print("save val example nums={}".format(val_nums))
    # test the display functions
    # disp_records(train_record_output,resize_height, resize_width)
    # batch_test(train_record_output,resize_height, resize_width)
|
plt.show()
|
benchmark_SingleOrigin.py
|
import timeit
import os.path
import numpy as np
from math import exp, fabs
from sys import float_info
from globals import *
from utils import loadMatrix, resizeMatrix
from models.SingleOrigin import SingleOrigin
"""
Benchmarks for the Single Origin Constrained model (models/SingleOrigin.py)
All code here is lifted from the original model code and changed to be
self-contained (no setup) so that timings of various optimisations are easy.
Code here is designed to be a test of timings, NOT necessarily a test of
return values, although real data has been used wherever possible i.e. instead
of an NxN matrix containing random values, I try to load in a real matrix
instead.
"""
#modelRunsDir = '../model-runs'
#TObsFilename = 'TObs.bin' #1 mode
#CijRoadMinFilename = 'Cij_road_min.bin'
#load and init
Tij=loadMatrix(os.path.join(modelRunsDir,TObs31Filename))
cij=loadMatrix(os.path.join(modelRunsDir,CijRoadMinFilename))
#end load and init
###############################################################################
"""
calculateCBar_slow
Mean trips calculation, straight conversion from original C# code, no python optimisation
@returns float
"""
def benchmark_calculateCBar_slow():
    #main code
    (M, N) = np.shape(Tij)
    CNumerator = 0.0
    CDenominator = 0.0
    for i in range(0, M):  # iterate rows over M, not N, in case the matrix is not square
        for j in range(0, N):
            CNumerator += Tij[i, j] * cij[i, j]
            CDenominator += Tij[i, j]
    CBar = CNumerator / CDenominator
    print("CBar=", CBar)
    return CBar
###############################################################################
"""
calculateCBar_fast
Mean trips calculation, python optimised version of "_slow"
@returns float (NOTE: the return value MUST be identical to the _slow version, to prove they're functionally identical)
"""
def benchmark_calculateCBar_fast():
#load and init
Tij=loadMatrix(os.path.join(modelRunsDir,TObs31Filename))
cij=loadMatrix(os.path.join(modelRunsDir,CijRoadMinFilename))
#end load and init
#main code
CNumerator2 = np.sum(Tij*cij)
CDenominator2 = np.sum(Tij)
CBar2=CNumerator2/CDenominator2
print("CBar2=",CBar2)
return CBar2
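###############################################################################
# Minimal example driver (a sketch, not in the original file): times the two
# CBar implementations above using the timeit module imported at the top.
# The repeat count of 10 is an assumption chosen purely for illustration.
def compare_calculateCBar_timings(number=10):
    slow = timeit.timeit(benchmark_calculateCBar_slow, number=number)
    fast = timeit.timeit(benchmark_calculateCBar_fast, number=number)
    print("slow:", slow, "fast:", fast, "speedup:", slow / fast)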
###############################################################################
"""
This is a benchmark of the simple Python code for SingleOrigin using different matrix sizes.
It is a test for how long a single execution of the main loop takes. Timings are printed
to the console based on 1000 runs of the model code i.e. the timing you see in seconds
must be divided by 1000.
NOTE: this could take a VERY long time to run if you pass in a high number for Nfinish
"""
def
|
(Nstart,Nfinish,Nstep):
print("benchmark_SingleDest running matrix Nstart=",Nstart," Nfinish=",Nfinish, " Nstep=",Nstep)
#load testing matrices
TObs1 = loadMatrix(os.path.join(modelRunsDir,TObs31Filename))
Cij1 = loadMatrix(os.path.join(modelRunsDir,CijRoadMinFilename))
for N in range(Nstart,Nfinish,Nstep):
#print("TPred runModel N=",N)
#set up the model
testModel = SingleOrigin()
(TPred, secs)=testModel.benchmarkRun(1000,resizeMatrix(TObs1,N),resizeMatrix(Cij1,N),1.0)
#NOTE: timing printed to console based on 1000 iterations of the main loop in the above code
#Should not contain any setup timings - only the actual algorithm run time.
print(N,",1000,",secs) #all console logging from here - makes it nice and easy to import into excel
###############################################################################
|
benchmarkSingleOriginMatrixSizes
|
archive_test.go
|
// Copyright 2021 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package archive
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/gin-gonic/gin"
"github.com/jinzhu/gorm"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/stretchr/testify/mock"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/chaos-mesh/chaos-mesh/api/v1alpha1"
config "github.com/chaos-mesh/chaos-mesh/pkg/config/dashboard"
"github.com/chaos-mesh/chaos-mesh/pkg/dashboard/core"
pkgmock "github.com/chaos-mesh/chaos-mesh/pkg/mock"
)
// MockExperimentStore is a mock type for ExperimentStore
type MockExperimentStore struct {
mock.Mock
}
// MockScheduleStore is a mock type for ScheduleStore
type MockScheduleStore struct {
mock.Mock
}
func TestEvent(t *testing.T)
|
func (m *MockExperimentStore) ListMeta(ctx context.Context, kind, namespace, name string, archived bool) ([]*core.ExperimentMeta, error) {
var res []*core.ExperimentMeta
var err error
if kind == "testKind" {
expMeta := &core.ExperimentMeta{
UID: "testUID",
Kind: "testKind",
Name: "testName",
Namespace: "testNamespace",
Action: "testAction",
StartTime: time.Time{},
FinishTime: time.Time{},
Archived: true,
}
res = append(res, expMeta)
} else {
err = fmt.Errorf("test err")
}
return res, err
}
func (m *MockExperimentStore) FindByUID(ctx context.Context, UID string) (*core.Experiment, error) {
var res *core.Experiment
var err error
switch UID {
case "testPodChaos":
chaos := v1alpha1.PodChaos{}
jsonStr, _ := json.Marshal(chaos)
res = &core.Experiment{
ExperimentMeta: core.ExperimentMeta{
UID: UID,
Kind: v1alpha1.KindPodChaos,
Name: "testName",
Namespace: "testNamespace",
Action: "testAction",
StartTime: time.Time{},
FinishTime: time.Time{},
Archived: true,
},
Experiment: string(jsonStr),
}
case "testIOChaos":
chaos := v1alpha1.IOChaos{}
jsonStr, _ := json.Marshal(chaos)
res = &core.Experiment{
ExperimentMeta: core.ExperimentMeta{
UID: UID,
Kind: v1alpha1.KindIOChaos,
Name: "testName",
Namespace: "testNamespace",
Action: "testAction",
StartTime: time.Time{},
FinishTime: time.Time{},
Archived: true,
},
Experiment: string(jsonStr),
}
case "testNetworkChaos":
chaos := v1alpha1.NetworkChaos{}
jsonStr, _ := json.Marshal(chaos)
res = &core.Experiment{
ExperimentMeta: core.ExperimentMeta{
UID: UID,
Kind: v1alpha1.KindNetworkChaos,
Name: "testName",
Namespace: "testNamespace",
Action: "testAction",
StartTime: time.Time{},
FinishTime: time.Time{},
Archived: true,
},
Experiment: string(jsonStr),
}
case "testTimeChaos":
chaos := v1alpha1.TimeChaos{}
jsonStr, _ := json.Marshal(chaos)
res = &core.Experiment{
ExperimentMeta: core.ExperimentMeta{
UID: UID,
Kind: v1alpha1.KindTimeChaos,
Name: "testName",
Namespace: "testNamespace",
Action: "testAction",
StartTime: time.Time{},
FinishTime: time.Time{},
Archived: true,
},
Experiment: string(jsonStr),
}
case "testKernelChaos":
chaos := v1alpha1.KernelChaos{}
jsonStr, _ := json.Marshal(chaos)
res = &core.Experiment{
ExperimentMeta: core.ExperimentMeta{
UID: UID,
Kind: v1alpha1.KindKernelChaos,
Name: "testName",
Namespace: "testNamespace",
Action: "testAction",
StartTime: time.Time{},
FinishTime: time.Time{},
Archived: true,
},
Experiment: string(jsonStr),
}
case "testStressChaos":
chaos := v1alpha1.StressChaos{}
jsonStr, _ := json.Marshal(chaos)
res = &core.Experiment{
ExperimentMeta: core.ExperimentMeta{
UID: UID,
Kind: v1alpha1.KindStressChaos,
Name: "testName",
Namespace: "testNamespace",
Action: "testAction",
StartTime: time.Time{},
FinishTime: time.Time{},
Archived: true,
},
Experiment: string(jsonStr),
}
case "testOtherChaos":
res = &core.Experiment{
ExperimentMeta: core.ExperimentMeta{
UID: UID,
Kind: "OtherChaos",
Name: "testName",
Namespace: "testNamespace",
Action: "testAction",
StartTime: time.Time{},
FinishTime: time.Time{},
Archived: true,
},
Experiment: "",
}
case "testErrRecordNotFound":
err = gorm.ErrRecordNotFound
default:
err = fmt.Errorf("test err")
}
return res, err
}
func (m *MockExperimentStore) FindMetaByUID(ctx context.Context, UID string) (*core.ExperimentMeta, error) {
var res *core.ExperimentMeta
var err error
switch UID {
case "tsetUID":
res = &core.ExperimentMeta{
UID: "testUID",
Kind: "testKind",
Name: "testName",
Namespace: "testNamespace",
Action: "testAction",
StartTime: time.Time{},
FinishTime: time.Time{},
Archived: true,
}
case "testErrRecordNotFound":
err = gorm.ErrRecordNotFound
default:
err = fmt.Errorf("test err")
}
return res, err
}
func (m *MockExperimentStore) Set(context.Context, *core.Experiment) error {
panic("implement me")
}
func (m *MockExperimentStore) Archive(ctx context.Context, namespace, name string) error {
panic("implement me")
}
func (m *MockExperimentStore) Delete(context.Context, *core.Experiment) error {
panic("implement me")
}
func (m *MockExperimentStore) DeleteByFinishTime(context.Context, time.Duration) error {
panic("implement me")
}
func (m *MockExperimentStore) DeleteIncompleteExperiments(context.Context) error {
panic("implement me")
}
func (m *MockExperimentStore) DeleteByUIDs(context.Context, []string) error {
panic("implement me")
}
func (m *MockScheduleStore) ListMeta(ctx context.Context, namespace, name string, archived bool) ([]*core.ScheduleMeta, error) {
var res []*core.ScheduleMeta
var err error
if name == "testScheduleName" {
schMeta := &core.ScheduleMeta{
UID: "testUID",
Kind: "testKind",
Name: "testScheduleName",
Namespace: "testNamespace",
Action: "testAction",
StartTime: time.Time{},
FinishTime: time.Time{},
Archived: true,
}
res = append(res, schMeta)
} else {
err = fmt.Errorf("test err")
}
return res, err
}
func (m *MockScheduleStore) FindByUID(ctx context.Context, UID string) (*core.Schedule, error) {
var res *core.Schedule
var err error
switch UID {
case "testPodChaos":
sch := v1alpha1.Schedule{}
jsonStr, _ := json.Marshal(sch)
res = &core.Schedule{
ScheduleMeta: core.ScheduleMeta{
UID: UID,
Kind: v1alpha1.KindPodChaos,
Name: "testName",
Namespace: "testNamespace",
Action: "testAction",
StartTime: time.Time{},
FinishTime: time.Time{},
Archived: true,
},
Schedule: string(jsonStr),
}
case "testErrRecordNotFound":
err = gorm.ErrRecordNotFound
default:
err = fmt.Errorf("test err")
}
return res, err
}
func (m *MockScheduleStore) FindMetaByUID(context.Context, string) (*core.ScheduleMeta, error) {
panic("implement me")
}
func (m *MockScheduleStore) Set(context.Context, *core.Schedule) error {
panic("implement me")
}
func (m *MockScheduleStore) Archive(ctx context.Context, namespace, name string) error {
panic("implement me")
}
func (m *MockScheduleStore) Delete(context.Context, *core.Schedule) error {
panic("implement me")
}
func (m *MockScheduleStore) DeleteByFinishTime(context.Context, time.Duration) error {
panic("implement me")
}
func (m *MockScheduleStore) DeleteByUIDs(context.Context, []string) error {
panic("implement me")
}
func (m *MockScheduleStore) DeleteIncompleteSchedules(context.Context) error {
panic("implement me")
}
var _ = Describe("event", func() {
var router *gin.Engine
BeforeEach(func() {
pkgmock.With("AuthMiddleware", true)
mockExpStore := new(MockExperimentStore)
mockSchStore := new(MockScheduleStore)
s := Service{
archive: mockExpStore,
archiveSchedule: mockSchStore,
event: nil,
conf: &config.ChaosDashboardConfig{
ClusterScoped: true,
},
}
router = gin.Default()
r := router.Group("/api")
endpoint := r.Group("/archives")
endpoint.GET("", s.list)
endpoint.GET("/:uid", s.get)
endpoint.GET("/schedules", s.listSchedule)
endpoint.GET("/schedules/:uid", s.detailSchedule)
})
AfterEach(func() {
		// Add any cleanup steps that need to be executed after each test
pkgmock.Reset("AuthMiddleware")
})
Context("List", func() {
It("success", func() {
response := []Archive{
{
UID: "testUID",
Kind: "testKind",
Namespace: "testNamespace",
Name: "testName",
Created: time.Time{}.Format(time.RFC3339),
},
}
rr := httptest.NewRecorder()
request, _ := http.NewRequest(http.MethodGet, "/api/archives?kind=testKind", nil)
router.ServeHTTP(rr, request)
Expect(rr.Code).Should(Equal(http.StatusOK))
responseBody, err := json.Marshal(response)
Expect(err).ShouldNot(HaveOccurred())
Expect(rr.Body.Bytes()).Should(Equal(responseBody))
})
It("test err", func() {
rr := httptest.NewRecorder()
request, _ := http.NewRequest(http.MethodGet, "/api/archives", nil)
router.ServeHTTP(rr, request)
Expect(rr.Code).Should(Equal(http.StatusInternalServerError))
})
})
Context("Detail", func() {
It("testPodChaos", func() {
chaos := &v1alpha1.PodChaos{}
response := Detail{
Archive: Archive{
UID: "testPodChaos",
Kind: v1alpha1.KindPodChaos,
Namespace: "testNamespace",
Name: "testName",
Created: time.Time{}.Format(time.RFC3339),
},
KubeObject: core.KubeObjectDesc{
TypeMeta: metav1.TypeMeta{
APIVersion: "",
Kind: "",
},
Meta: core.KubeObjectMeta{
Name: "",
Namespace: "",
Labels: nil,
Annotations: nil,
},
Spec: chaos.Spec,
},
}
rr := httptest.NewRecorder()
request, _ := http.NewRequest(http.MethodGet, "/api/archives/testPodChaos", nil)
router.ServeHTTP(rr, request)
Expect(rr.Code).Should(Equal(http.StatusOK))
responseBody, err := json.Marshal(response)
Expect(err).ShouldNot(HaveOccurred())
Expect(rr.Body.Bytes()).Should(Equal(responseBody))
})
It("testIOChaos", func() {
chaos := &v1alpha1.IOChaos{}
response := Detail{
Archive: Archive{
UID: "testIOChaos",
Kind: v1alpha1.KindIOChaos,
Namespace: "testNamespace",
Name: "testName",
Created: time.Time{}.Format(time.RFC3339),
},
KubeObject: core.KubeObjectDesc{
TypeMeta: metav1.TypeMeta{
APIVersion: "",
Kind: "",
},
Meta: core.KubeObjectMeta{
Name: "",
Namespace: "",
Labels: nil,
Annotations: nil,
},
Spec: chaos.Spec,
},
}
rr := httptest.NewRecorder()
request, _ := http.NewRequest(http.MethodGet, "/api/archives/testIOChaos", nil)
router.ServeHTTP(rr, request)
Expect(rr.Code).Should(Equal(http.StatusOK))
responseBody, err := json.Marshal(response)
Expect(err).ShouldNot(HaveOccurred())
Expect(rr.Body.Bytes()).Should(Equal(responseBody))
})
It("testNetworkChaos", func() {
chaos := &v1alpha1.NetworkChaos{}
response := Detail{
Archive: Archive{
UID: "testNetworkChaos",
Kind: v1alpha1.KindNetworkChaos,
Namespace: "testNamespace",
Name: "testName",
Created: time.Time{}.Format(time.RFC3339),
},
KubeObject: core.KubeObjectDesc{
TypeMeta: metav1.TypeMeta{
APIVersion: "",
Kind: "",
},
Meta: core.KubeObjectMeta{
Name: "",
Namespace: "",
Labels: nil,
Annotations: nil,
},
Spec: chaos.Spec,
},
}
rr := httptest.NewRecorder()
request, _ := http.NewRequest(http.MethodGet, "/api/archives/testNetworkChaos", nil)
router.ServeHTTP(rr, request)
Expect(rr.Code).Should(Equal(http.StatusOK))
responseBody, err := json.Marshal(response)
Expect(err).ShouldNot(HaveOccurred())
Expect(rr.Body.Bytes()).Should(Equal(responseBody))
})
It("testTimeChaos", func() {
chaos := &v1alpha1.TimeChaos{}
response := Detail{
Archive: Archive{
UID: "testTimeChaos",
Kind: v1alpha1.KindTimeChaos,
Namespace: "testNamespace",
Name: "testName",
Created: time.Time{}.Format(time.RFC3339),
},
KubeObject: core.KubeObjectDesc{
TypeMeta: metav1.TypeMeta{
APIVersion: "",
Kind: "",
},
Meta: core.KubeObjectMeta{
Name: "",
Namespace: "",
Labels: nil,
Annotations: nil,
},
Spec: chaos.Spec,
},
}
rr := httptest.NewRecorder()
request, _ := http.NewRequest(http.MethodGet, "/api/archives/testTimeChaos", nil)
router.ServeHTTP(rr, request)
Expect(rr.Code).Should(Equal(http.StatusOK))
responseBody, err := json.Marshal(response)
Expect(err).ShouldNot(HaveOccurred())
Expect(rr.Body.Bytes()).Should(Equal(responseBody))
})
It("testKernelChaos", func() {
chaos := &v1alpha1.KernelChaos{}
response := Detail{
Archive: Archive{
UID: "testKernelChaos",
Kind: v1alpha1.KindKernelChaos,
Namespace: "testNamespace",
Name: "testName",
Created: time.Time{}.Format(time.RFC3339),
},
KubeObject: core.KubeObjectDesc{
TypeMeta: metav1.TypeMeta{
APIVersion: "",
Kind: "",
},
Meta: core.KubeObjectMeta{
Name: "",
Namespace: "",
Labels: nil,
Annotations: nil,
},
Spec: chaos.Spec,
},
}
rr := httptest.NewRecorder()
request, _ := http.NewRequest(http.MethodGet, "/api/archives/testKernelChaos", nil)
router.ServeHTTP(rr, request)
Expect(rr.Code).Should(Equal(http.StatusOK))
responseBody, err := json.Marshal(response)
Expect(err).ShouldNot(HaveOccurred())
Expect(rr.Body.Bytes()).Should(Equal(responseBody))
})
It("testStressChaos", func() {
chaos := &v1alpha1.StressChaos{}
response := Detail{
Archive: Archive{
UID: "testStressChaos",
Kind: v1alpha1.KindStressChaos,
Namespace: "testNamespace",
Name: "testName",
Created: time.Time{}.Format(time.RFC3339),
},
KubeObject: core.KubeObjectDesc{
TypeMeta: metav1.TypeMeta{
APIVersion: "",
Kind: "",
},
Meta: core.KubeObjectMeta{
Name: "",
Namespace: "",
Labels: nil,
Annotations: nil,
},
Spec: chaos.Spec,
},
}
rr := httptest.NewRecorder()
request, _ := http.NewRequest(http.MethodGet, "/api/archives/testStressChaos", nil)
router.ServeHTTP(rr, request)
Expect(rr.Code).Should(Equal(http.StatusOK))
responseBody, err := json.Marshal(response)
Expect(err).ShouldNot(HaveOccurred())
Expect(rr.Body.Bytes()).Should(Equal(responseBody))
})
It("testOtherChaos", func() {
rr := httptest.NewRecorder()
request, _ := http.NewRequest(http.MethodGet, "/api/archives/testOtherChaos", nil)
router.ServeHTTP(rr, request)
Expect(rr.Code).Should(Equal(http.StatusInternalServerError))
})
It("testErrRecordNotFound", func() {
rr := httptest.NewRecorder()
request, _ := http.NewRequest(http.MethodGet, "/api/archives/testErrRecordNotFound", nil)
router.ServeHTTP(rr, request)
Expect(rr.Code).Should(Equal(http.StatusNotFound))
})
It("test err", func() {
rr := httptest.NewRecorder()
request, _ := http.NewRequest(http.MethodGet, "/api/archives/testErr", nil)
router.ServeHTTP(rr, request)
fmt.Println(rr.Code)
Expect(rr.Code).Should(Equal(http.StatusInternalServerError))
})
})
Context("ListSchedule", func() {
It("success", func() {
response := []Archive{
{
UID: "testUID",
Kind: "testKind",
Namespace: "testNamespace",
Name: "testScheduleName",
Created: time.Time{}.Format(time.RFC3339),
},
}
rr := httptest.NewRecorder()
request, _ := http.NewRequest(http.MethodGet, "/api/archives/schedules?name=testScheduleName", nil)
router.ServeHTTP(rr, request)
Expect(rr.Code).Should(Equal(http.StatusOK))
responseBody, err := json.Marshal(response)
Expect(err).ShouldNot(HaveOccurred())
Expect(rr.Body.Bytes()).Should(Equal(responseBody))
})
It("test err", func() {
rr := httptest.NewRecorder()
request, _ := http.NewRequest(http.MethodGet, "/api/archives/schedules", nil)
router.ServeHTTP(rr, request)
Expect(rr.Code).Should(Equal(http.StatusInternalServerError))
})
})
Context("DetailSchedule", func() {
It("testPodChaos", func() {
sch := &v1alpha1.Schedule{}
response := Detail{
Archive: Archive{
UID: "testPodChaos",
Kind: v1alpha1.KindPodChaos,
Namespace: "testNamespace",
Name: "testName",
Created: time.Time{}.Format(time.RFC3339),
},
KubeObject: core.KubeObjectDesc{
TypeMeta: metav1.TypeMeta{
APIVersion: "",
Kind: "",
},
Meta: core.KubeObjectMeta{
Name: "",
Namespace: "",
Labels: nil,
Annotations: nil,
},
Spec: sch.Spec,
},
}
rr := httptest.NewRecorder()
request, _ := http.NewRequest(http.MethodGet, "/api/archives/schedules/testPodChaos", nil)
router.ServeHTTP(rr, request)
Expect(rr.Code).Should(Equal(http.StatusOK))
responseBody, err := json.Marshal(response)
Expect(err).ShouldNot(HaveOccurred())
Expect(rr.Body.Bytes()).Should(Equal(responseBody))
})
It("testErrRecordNotFound", func() {
rr := httptest.NewRecorder()
request, _ := http.NewRequest(http.MethodGet, "/api/archives/schedules/testErrRecordNotFound", nil)
router.ServeHTTP(rr, request)
Expect(rr.Code).Should(Equal(http.StatusInternalServerError))
})
It("test err", func() {
rr := httptest.NewRecorder()
request, _ := http.NewRequest(http.MethodGet, "/api/archives/schedules/testErr", nil)
router.ServeHTTP(rr, request)
Expect(rr.Code).Should(Equal(http.StatusInternalServerError))
})
})
})
|
{
RegisterFailHandler(Fail)
RunSpecs(t, "Archive Suite")
}
|
udphandler.go
|
package network
import (
"context"
"encoding/binary"
"errors"
"fmt"
"github.com/zllangct/RockGO/logger"
"github.com/zllangct/RockGO/timer"
"github.com/zllangct/RockGO/utils/UUID"
"net"
"sync"
"sync/atomic"
"time"
)
var ErrUdpConnClosed = errors.New("this udp conn is closed")
type UdpConn struct {
udpConn *net.UDPConn
remoteAddr *net.UDPAddr
cid uint32
timeout <-chan struct{}
closeCallback func()
m *sync.Map
once *sync.Once
}
func (this *UdpConn) Addr() string {
return this.remoteAddr.String()
}
func (this *UdpConn) Init() {
go func() {
<-this.timeout
this.Close()
this.m.Delete(this.cid)
}()
}
func (this *UdpConn) SetReadDeadline(duration time.Duration) {
this.once.Do(this.Init)
this.timeout = timer.After(duration)
}
func (this *UdpConn) WriteMessage(messageType uint32, data []byte) error {
msg := make([]byte, 12)
msg = append(msg, data...)
binary.BigEndian.PutUint32(msg[:4], uint32(len(msg)))
binary.BigEndian.PutUint32(msg[4:8], this.cid)
binary.BigEndian.PutUint32(msg[8:12], messageType)
	if _, err := this.udpConn.WriteToUDP(msg, this.remoteAddr); err != nil {
		logger.Error(fmt.Sprintf("send pkg to %v failed %v", this.remoteAddr, err))
		return err
	}
	return nil
}
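// parseUdpHeader is a minimal sketch (not part of the original file) showing
// how the 12-byte header written by WriteMessage above is laid out:
// bytes [0:4] total length, [4:8] connection id, [8:12] message type,
// all big-endian.
func parseUdpHeader(msg []byte) (length, cid, messageType uint32, ok bool) {
	if len(msg) < 12 {
		return 0, 0, 0, false
	}
	return binary.BigEndian.Uint32(msg[:4]),
		binary.BigEndian.Uint32(msg[4:8]),
		binary.BigEndian.Uint32(msg[8:12]),
		true
}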
func (this *UdpConn) Close() error {
this.remoteAddr = nil
return nil
}
type udpHandler struct {
conf *ServerConf
ts *Server
conn *net.UDPConn
conns *sync.Map
numInvoke int32
cid uint32
gpool *Pool
}
func (h *udpHandler) Listen() error {
conf := h.conf
	// in pool mode, the initial pool size is 20
if conf.PoolMode && conf.MaxInvoke == 0 {
conf.MaxInvoke = 20
|
l(int(conf.MaxInvoke), conf.QueueCap)
addr, err := net.ResolveUDPAddr("udp", conf.Address)
if err != nil {
return err
}
h.conn, err = net.ListenUDP("udp", addr)
if err != nil {
return err
}
logger.Info(fmt.Sprintf("UDP server listening and serving UDP on: [ %s ]", h.conn.LocalAddr()))
return nil
}
func (h *udpHandler) Handle() error {
wg := sync.WaitGroup{}
buffer := make([]byte, 65535)
for {
wg.Wait()
if h.ts.isClosed {
return nil
}
wg.Add(1)
go func() {
n, udpAddr, err := h.conn.ReadFromUDP(buffer)
if err != nil {
if !isNoDataError(err) {
logger.Error(fmt.Sprintf("Close connection %s: %v", h.conf.Address, err))
return
}
}
data := make([]byte, n)
copy(data, buffer[0:n])
wg.Done()
if h.conf.Handler != nil {
h.conf.Handler(&Session{
conn: &UdpConn{remoteAddr: udpAddr, udpConn: h.conn, m: h.conns},
}, data)
return
}
			var isNew bool // avoid shadowing the builtin `new`
cfg := h.conf
ctx := context.Background()
mid, pkg := h.conf.PackageProtocol.ParseMessage(ctx, data)
if len(mid) != 2 {
logger.Warn("udp data fmt incorrect")
return
}
cid := mid[0]
if cid == 0 {
cid = atomic.AddUint32(&h.cid, 1)
				isNew = true
}
s, _ := h.conns.LoadOrStore(cid, &Session{
ID: UUID.Next(),
properties: make(map[string]interface{}),
conn: &UdpConn{remoteAddr: udpAddr, udpConn: h.conn, m: h.conns},
})
sess := s.(*Session)
sess.conn.(*UdpConn).SetReadDeadline(cfg.ReadTimeout)
wid := int32(-1)
item, ok := sess.GetProperty("workerID")
if ok {
wid = item.(int32)
sess.SetProperty("workerID", int32(-1))
}
			if isNew {
				// notify on new connection and register the disconnect callback
h.ts.conf.OnClientConnected(sess)
sess.conn.(*UdpConn).closeCallback = func() {
h.ts.conf.OnClientDisconnected(sess)
}
}
if h.conf.NetAPI != nil && mid != nil {
// use goroutine pool
if h.conf.PoolMode {
h.gpool.AddJobSerial(h.handler, []interface{}{sess, pkg}, wid, func(workerID int32) {
wid = workerID
})
} else {
go h.handler(sess, pkg)
}
} else {
logger.Error("no message handler")
return
}
}()
}
}
func (h *udpHandler) handler(args ...interface{}) {
ctx := context.Background()
ctx = context.WithValue(ctx, "cid", args[0])
	if h.conf.Handler != nil {
		h.conf.Handler(args[0].(*Session), args[1].([]byte))
} else {
mid, mes := h.conf.PackageProtocol.ParseMessage(ctx, args[1].([]byte))
if h.conf.NetAPI != nil && mid != nil {
h.ts.invoke(ctx, mid[0], mes)
} else {
logger.Error("no message handler")
return
}
}
}
|
}
h.gpool = GetGloblePoo
|
fi.js
|
For licensing, see LICENSE.md or http://ckeditor.com/license
*/
CKEDITOR.plugins.setLang( 'sourcedialog', 'fi', {
toolbar: 'Koodi',
title: 'Koodi'
} );
|
/*
Copyright (c) 2003-2017, CKSource - Frederico Knabben. All rights reserved.
|
|
upload_revision.py
|
#!/usr/bin/env python
# Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script takes a Clang git revision as an argument, it then
creates a feature branch, puts this revision into update.py, uploads
a CL, triggers Clang Upload try bots, and tells what to do next"""
from __future__ import print_function
import argparse
import fnmatch
import itertools
import os
import re
import shutil
import subprocess
import sys
from build import CheckoutLLVM, GetCommitDescription, LLVM_DIR
from update import CHROMIUM_DIR
# Path constants.
THIS_DIR = os.path.dirname(__file__)
UPDATE_PY_PATH = os.path.join(THIS_DIR, "update.py")
CHROMIUM_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', '..', '..'))
# Keep lines in here at <= 72 columns, else they wrap in gerrit.
COMMIT_FOOTER = \
'''
Bug: TODO. Remove the Tricium: line below when filling this in.
Tricium: skip
Cq-Include-Trybots: chromium/try:chromeos-amd64-generic-cfi-thin-lto-rel
Cq-Include-Trybots: chromium/try:dawn-win10-x86-deps-rel
Cq-Include-Trybots: chromium/try:linux-chromeos-dbg
Cq-Include-Trybots: chromium/try:linux_angle_deqp_rel_ng
Cq-Include-Trybots: chromium/try:linux_chromium_cfi_rel_ng
Cq-Include-Trybots: chromium/try:linux_chromium_chromeos_msan_rel_ng
Cq-Include-Trybots: chromium/try:linux_chromium_compile_dbg_32_ng
Cq-Include-Trybots: chromium/try:linux_chromium_msan_rel_ng
Cq-Include-Trybots: chromium/try:mac-arm64-rel,mac_chromium_asan_rel_ng
Cq-Include-Trybots: chromium/try:win-asan,win7-rel
Cq-Include-Trybots: chromium/try:android-official,fuchsia-official
Cq-Include-Trybots: chromium/try:mac-official,linux-official
Cq-Include-Trybots: chromium/try:win-official,win32-official
Cq-Include-Trybots: chrome/try:iphone-device,ipad-device
Cq-Include-Trybots: chrome/try:linux-chromeos-chrome
Cq-Include-Trybots: chrome/try:win-chrome,win64-chrome,mac-chrome
'''
is_win = sys.platform.startswith('win32')
def PatchRevision(clang_git_revision, clang_sub_revision):
with open(UPDATE_PY_PATH, 'rb') as f:
content = f.read()
m = re.search("CLANG_REVISION = '([0-9a-z-]+)'", content)
clang_old_git_revision = m.group(1)
m = re.search("CLANG_SUB_REVISION = ([0-9]+)", content)
clang_old_sub_revision = m.group(1)
content = re.sub("CLANG_REVISION = '[0-9a-z-]+'",
"CLANG_REVISION = '{}'".format(clang_git_revision),
content,
count=1)
content = re.sub("CLANG_SUB_REVISION = [0-9]+",
"CLANG_SUB_REVISION = {}".format(clang_sub_revision),
content, count=1)
with open(UPDATE_PY_PATH, 'wb') as f:
f.write(content)
return "{}-{}".format(clang_old_git_revision, clang_old_sub_revision)
def Git(args):
# Needs shell=True on Windows due to git.bat in depot_tools.
subprocess.check_call(["git"] + args, shell=is_win)
def main():
parser = argparse.ArgumentParser(description='upload new clang revision')
parser.add_argument('clang_git_revision', nargs=1,
help='Clang git revision to build the toolchain for.')
parser.add_argument('clang_sub_revision',
type=int, nargs='?', default=1,
help='Clang sub-revision to build the toolchain for.')
args = parser.parse_args()
clang_raw_git_revision = args.clang_git_revision[0]
# To `git describe`, we need a checkout.
CheckoutLLVM(clang_raw_git_revision, LLVM_DIR)
clang_git_revision = GetCommitDescription(clang_raw_git_revision)
clang_sub_revision = args.clang_sub_revision
os.chdir(CHROMIUM_DIR)
print("Making a patch for Clang {}-{}".format(clang_git_revision,
clang_sub_revision))
|
Git(["checkout", "origin/master", "-b", "clang-{}".format(rev_string)])
old_rev_string = PatchRevision(clang_git_revision, clang_sub_revision)
Git(["add", UPDATE_PY_PATH])
commit_message = 'Ran `{}`.'.format(' '.join(sys.argv)) + COMMIT_FOOTER
Git([
"commit", "-m",
"Roll clang {} : {}\n\n{}".format(old_rev_string, rev_string,
commit_message)
])
Git(["cl", "upload", "-f", "--bypass-hooks"])
Git([
"cl", "try", "-B", "chromium/try", "-b", "linux_upload_clang", "-b",
"mac_upload_clang", "-b", "mac_upload_clang_arm", "-b", "win_upload_clang"
])
print ("Please, wait until the try bots succeeded "
"and then push the binaries to goma.")
if __name__ == '__main__':
sys.exit(main())
|
rev_string = "{}-{}".format(clang_git_revision, clang_sub_revision)
|
expression_test.rs
|
use crate::entity::Expression;
#[test]
fn expression_test() {
let expression = Expression {
lhs: false,
rhs: false,
};
assert_eq!(expression.lhs, false);
assert_eq!(expression.rhs, false);
|
}
#[test]
fn create_expression_test() {
let expression = Expression::new(false, false);
assert_eq!(expression.lhs, false);
assert_eq!(expression.rhs, false);
let expression = expression.lhs(true);
assert_eq!(expression.lhs, true);
let expression = expression.rhs(true);
assert_eq!(expression.rhs, true);
}
|
let expression = expression.lhs(true);
assert_eq!(expression.lhs, true);
let expression = expression.rhs(true);
assert_eq!(expression.rhs, true);
|
service_test.go
|
// Copyright 2022 PayPal Inc.
// This Source Code Form is subject to the terms of the MIT License.
// If a copy of the MIT License was not distributed with this file,
// you can obtain one at https://mit-license.org/.
//go:build !integration
// +build !integration
package service
import (
"fmt"
"os"
"testing"
"time"
"github.com/honeydipper/honeydipper/internal/config"
"github.com/honeydipper/honeydipper/internal/daemon"
"github.com/honeydipper/honeydipper/internal/driver"
"github.com/honeydipper/honeydipper/pkg/dipper"
"github.com/stretchr/testify/assert"
)
func TestServiceLoopCatchError(t *testing.T) {
if dipper.Logger == nil {
f, _ := os.OpenFile(os.DevNull, os.O_APPEND, 0o777)
defer f.Close()
dipper.GetLogger("test service", "DEBUG", f, f)
}
svc := &Service{
name: "testsvc",
driverRuntimes: map[string]*driver.Runtime{
"d1": {
State: driver.DriverAlive,
Handler: driver.NewDriver(map[string]interface{}{
"name": "testdriver1",
"type": "builtin",
"handlerData": map[string]interface{}{
"shortName": "testdriver1",
},
}),
},
},
responders: map[string][]MessageResponder{
"test:error1": {
func(d *driver.Runtime, m *dipper.Message) {
panic(fmt.Errorf("error in responder"))
},
},
},
transformers: map[string][]func(*driver.Runtime, *dipper.Message) *dipper.Message{
"test:error2": {
func(d *driver.Runtime, m *dipper.Message) *dipper.Message {
panic(fmt.Errorf("error in transformer"))
},
},
},
Route: func(m *dipper.Message) []RoutedMessage {
if m.Channel == "test" && m.Subject == "error0" {
panic(fmt.Errorf("error in route"))
}
return nil
},
}
svc.driverRuntimes["d1"].Stream = make(chan dipper.Message, 1)
svc.driverRuntimes["d1"].Output, _ = os.OpenFile(os.DevNull, os.O_APPEND, 0o777)
go func() {
assert.NotPanics(t, svc.serviceLoop, "service loop should recover panic in route")
}()
// injecting error in route
svc.driverRuntimes["d1"].Stream <- dipper.Message{
Channel: "test",
Subject: "error0",
}
time.Sleep(30 * time.Millisecond)
	// quit faster by sending an extra message
svc.driverRuntimes["d1"].Stream <- dipper.Message{
Channel: "test",
Subject: "noerror",
}
daemon.ShutDown()
daemon.ShuttingDown = false
svc.driverRuntimes["d1"].Stream = make(chan dipper.Message, 1)
svc.driverRuntimes["d1"].Output, _ = os.OpenFile(os.DevNull, os.O_APPEND, 0o777)
go func() {
assert.NotPanics(t, svc.serviceLoop, "service loop should recover panic in responder")
}()
// injecting error in responder
svc.driverRuntimes["d1"].Stream <- dipper.Message{
Channel: "test",
Subject: "error1",
}
time.Sleep(30 * time.Millisecond)
	// quit faster by sending an extra message
svc.driverRuntimes["d1"].Stream <- dipper.Message{
Channel: "test",
Subject: "noerror",
}
daemon.ShutDown()
daemon.ShuttingDown = false
svc.driverRuntimes["d1"].Stream = make(chan dipper.Message, 1)
svc.driverRuntimes["d1"].Output, _ = os.OpenFile(os.DevNull, os.O_APPEND, 0o777)
go func() {
assert.NotPanics(t, svc.serviceLoop, "service loop should recover panic in transformer")
}()
// injecting error in transformer
svc.driverRuntimes["d1"].Stream <- dipper.Message{
Channel: "test",
Subject: "error2",
}
time.Sleep(30 * time.Millisecond)
	// quit faster by sending an extra message
svc.driverRuntimes["d1"].Stream <- dipper.Message{
Channel: "test",
Subject: "noerror",
}
daemon.ShutDown()
daemon.ShuttingDown = false
svc.driverRuntimes["d1"].Stream = make(chan dipper.Message, 1)
svc.driverRuntimes["d1"].Output, _ = os.OpenFile(os.DevNull, os.O_APPEND, 0o777)
// injecting error in process
svc.driverRuntimes["d1"].Handler = nil
go func() {
assert.NotPanics(t, svc.serviceLoop, "service loop should recover panic in process itself")
}()
svc.driverRuntimes["d1"].Stream <- dipper.Message{
Channel: "test",
Subject: "error3",
}
	// restore the service object to avoid a crash during shutdown
svc.driverRuntimes["d1"].Handler = driver.NewDriver(map[string]interface{}{
"name": "testdriver1",
"type": "builtin",
"handlerData": map[string]interface{}{
"shortName": "testdriver1",
},
})
time.Sleep(30 * time.Millisecond)
	// quit faster by sending an extra message
svc.driverRuntimes["d1"].Stream <- dipper.Message{
Channel: "test",
Subject: "noerror",
}
daemon.ShutDown()
daemon.ShuttingDown = false
}
func
|
(t *testing.T) {
if dipper.Logger == nil {
f, _ := os.OpenFile(os.DevNull, os.O_APPEND, 0o777)
defer f.Close()
dipper.GetLogger("test service", "DEBUG", f, f)
}
svc := &Service{
name: "testsvc",
driverRuntimes: map[string]*driver.Runtime{
"driver:d1": {
State: driver.DriverAlive,
Handler: driver.NewDriver(map[string]interface{}{
"name": "d1",
"type": "builtin",
"handlerData": map[string]interface{}{
"shortName": "testdriver1",
},
}),
},
"emitter": {
State: driver.DriverAlive,
Handler: driver.NewDriver(map[string]interface{}{
"name": "test-emitter",
"type": "builtin",
"handlerData": map[string]interface{}{
"shortName": "testdriver1",
},
}),
Feature: "emitter",
},
},
Route: func(m *dipper.Message) []RoutedMessage {
return nil
},
}
daemon.Emitters["testsvc"] = svc
daemon.ShuttingDown = false
svc.driverRuntimes["driver:d1"].Stream = make(chan dipper.Message, 1)
svc.driverRuntimes["driver:d1"].Output, _ = os.OpenFile(os.DevNull, os.O_APPEND, 0o777)
svc.driverRuntimes["emitter"].Stream = make(chan dipper.Message, 1)
svc.driverRuntimes["emitter"].Output, _ = os.OpenFile(os.DevNull, os.O_APPEND|os.O_WRONLY, 0o777)
go func() {
assert.NotPanics(t, svc.serviceLoop, "service loop should recover panic if emitter is removed")
}()
go func() {
daemon.Children.Add(1)
defer daemon.Children.Done()
assert.NotPanics(t, func() {
for i := 0; i < 50; i++ {
select {
case svc.driverRuntimes["driver:d1"].Stream <- dipper.Message{
Channel: "test",
Subject: "noerror",
}:
dipper.Logger.Infof("written msg no. %+v", i)
time.Sleep(10 * time.Millisecond)
default:
dipper.Logger.Infof("unable to write, server shutdown")
}
}
}, "sending message to service should not panic when emitter is removed")
}()
newCfg := &config.Config{
Services: []string{"testsvc"},
Staged: &config.DataSet{
Drivers: map[string]interface{}{
"daemon": map[string]interface{}{
"features": map[string]interface{}{
"global": []interface{}{
map[string]interface{}{
"name": "driver:d1",
},
},
},
"drivers": map[string]interface{}{
"d1": map[string]interface{}{
"name": "d1",
"type": "builtin",
},
},
},
},
},
}
svc.config = newCfg
svc.config.ResetStage()
time.Sleep(100 * time.Millisecond)
assert.NotPanics(t, svc.Reload, "service reload should not panic when emitter is removed")
time.Sleep(100 * time.Millisecond)
daemon.ShutDown()
}
func TestServiceEmitterCrashing(t *testing.T) {
if dipper.Logger == nil {
f, _ := os.OpenFile(os.DevNull, os.O_APPEND, 0o777)
defer f.Close()
dipper.GetLogger("test service", "DEBUG", f, f)
}
svc := &Service{
name: "testsvc",
driverRuntimes: map[string]*driver.Runtime{
"driver:d1": {
State: driver.DriverAlive,
Handler: driver.NewDriver(map[string]interface{}{
"name": "d1",
"type": "builtin",
"handlerData": map[string]interface{}{
"shortName": "testdriver1",
},
}),
},
"emitter": {
State: driver.DriverAlive,
Handler: driver.NewDriver(map[string]interface{}{
"name": "test-emitter",
"type": "builtin",
"handlerData": map[string]interface{}{
"shortName": "testdriver1",
},
}),
Feature: "emitter",
},
},
Route: func(m *dipper.Message) []RoutedMessage {
return nil
},
}
daemon.Emitters["testsvc"] = svc
daemon.ShuttingDown = false
svc.driverRuntimes["driver:d1"].Stream = make(chan dipper.Message, 1)
svc.driverRuntimes["driver:d1"].Output, _ = os.OpenFile(os.DevNull, os.O_APPEND, 0o777)
svc.driverRuntimes["emitter"].Stream = make(chan dipper.Message, 1)
svc.driverRuntimes["emitter"].Output, _ = os.OpenFile(os.DevNull, os.O_APPEND|os.O_WRONLY, 0o777)
go func() {
assert.NotPanics(t, svc.serviceLoop, "service loop should recover panic if emitter crashes")
}()
go func() {
daemon.Children.Add(1)
defer daemon.Children.Done()
assert.NotPanics(t, func() {
for i := 0; i < 50; i++ {
select {
case svc.driverRuntimes["driver:d1"].Stream <- dipper.Message{
Channel: "test",
Subject: "noerror",
}:
dipper.Logger.Infof("written msg no. %+v", i)
time.Sleep(10 * time.Millisecond)
default:
dipper.Logger.Infof("unable to write, server shutdown")
}
}
}, "sending message to service should not panic when emitter crashes")
}()
time.Sleep(100 * time.Millisecond)
// mark it as failed to avoid restarting the driver
svc.driverRuntimes["emitter"].State = driver.DriverFailed
// crash emitter
svc.driverRuntimes["emitter"].Output.Close()
close(svc.driverRuntimes["emitter"].Stream)
time.Sleep(100 * time.Millisecond)
daemon.ShutDown()
}
func TestServiceReplaceEmitter(t *testing.T) {
if dipper.Logger == nil {
f, _ := os.OpenFile(os.DevNull, os.O_APPEND, 0o777)
defer f.Close()
dipper.GetLogger("test service", "DEBUG", f, f)
}
svc := &Service{
name: "testsvc",
driverRuntimes: map[string]*driver.Runtime{
"driver:d1": {
State: driver.DriverAlive,
Handler: driver.NewDriver(map[string]interface{}{
"name": "d1",
"type": "builtin",
"handlerData": map[string]interface{}{
"shortName": "testdriver1",
},
}),
},
"emitter": {
State: driver.DriverAlive,
Handler: driver.NewDriver(map[string]interface{}{
"name": "test-emitter",
"type": "builtin",
"handlerData": map[string]interface{}{
"shortName": "testdriver1",
},
}),
Feature: "emitter",
},
},
Route: func(m *dipper.Message) []RoutedMessage {
return nil
},
}
daemon.Emitters["testsvc"] = svc
daemon.ShuttingDown = false
svc.driverRuntimes["driver:d1"].Stream = make(chan dipper.Message, 1)
	svc.driverRuntimes["driver:d1"].Output, _ = os.OpenFile(os.DevNull, os.O_APPEND|os.O_WRONLY, 0o777)
svc.driverRuntimes["emitter"].Stream = make(chan dipper.Message, 1)
svc.driverRuntimes["emitter"].Output, _ = os.OpenFile(os.DevNull, os.O_APPEND|os.O_WRONLY, 0o777)
go func() {
assert.NotPanics(t, svc.serviceLoop, "service loop should recover panic if emitter is changed")
}()
go func() {
daemon.Children.Add(1)
defer daemon.Children.Done()
assert.NotPanics(t, func() {
for i := 0; i < 50; i++ {
select {
case svc.driverRuntimes["driver:d1"].Stream <- dipper.Message{
Channel: "test",
Subject: "noerror",
}:
dipper.Logger.Infof("written msg no. %+v", i)
time.Sleep(10 * time.Millisecond)
default:
dipper.Logger.Infof("unable to write, server shutdown")
}
}
}, "sending message to service should not panic when emitter is changed")
}()
newCfg := &config.Config{
Services: []string{"testsvc"},
Staged: &config.DataSet{
Drivers: map[string]interface{}{
"daemon": map[string]interface{}{
"featureMap": map[string]interface{}{
"emitter": "emitter2",
},
"features": map[string]interface{}{
"global": []interface{}{
map[string]interface{}{
"name": "driver:d1",
},
map[string]interface{}{
"name": "emitter",
},
},
},
"drivers": map[string]interface{}{
"d1": map[string]interface{}{
"name": "d1",
},
"emitter2": map[string]interface{}{
"name": "emitter2",
},
},
},
},
},
}
svc.config = newCfg
svc.config.ResetStage()
time.Sleep(100 * time.Millisecond)
assert.NotPanics(t, svc.Reload, "service reload should not panic when emitter is changed")
time.Sleep(100 * time.Millisecond)
daemon.ShutDown()
}
|
TestServiceRemoveEmitter
|
ticker.ts
|
export interface ExchangeCurrency {
currencyPair: string;
exchangeName: string;
}
export interface CurrencyPair {
delimiter: string;
first_currency: string;
second_currency: string;
}
export class TickerUpdate {
Pair: CurrencyPair;
CurrencyPair: string;
Last: number;
High: number;
Low: number;
Bid: number;
Ask: number;
Volume: number;
PriceATH: number;
Exchange: string;
}
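// A small construction sketch (TickerUpdate declares no constructor, so fields
// are assigned directly; the values below are purely illustrative):
const sample = new TickerUpdate();
sample.CurrencyPair = "BTC-USD";
sample.Pair = { delimiter: "-", first_currency: "BTC", second_currency: "USD" };
sample.Last = 43000;
sample.Bid = 42990;
sample.Ask = 43010;
sample.Exchange = "example-exchange";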
|
tests.rs
|
use super::*;
use crate::{Error, mock::*};
use frame_support::{assert_ok, assert_noop};
#[test]
fn it_works_for_default_value() {
new_test_ext().execute_with(|| {
// Dispatch a signed extrinsic.
assert_ok!(ConnectFour::do_something(Origin::signed(1), 42));
// Read pallet storage and assert an expected result.
assert_eq!(ConnectFour::something(), Some(42));
});
}
#[test]
fn correct_error_for_none_value() {
new_test_ext().execute_with(|| {
// Ensure the expected error is thrown when no value is present.
assert_noop!(
ConnectFour::cause_error(Origin::signed(1)),
Error::<Test>::NoneValue
);
});
}
#[test]
fn test_game_creation() {
new_test_ext().execute_with(|| {
        // Test that a player cannot play against himself
assert_noop!(
ConnectFour::new_game(Origin::signed(1), 1),
Error::<Test>::NoFakePlay
);
        // Test game creation between two different players
assert_ok!(ConnectFour::new_game(Origin::signed(1), 2));
run_to_block(1);
let board_id_1 = ConnectFour::player_board(1);
let board_id_2 = ConnectFour::player_board(2);
assert_eq!(board_id_1, board_id_2);
assert_noop!(
ConnectFour::new_game(Origin::signed(1), 3),
Error::<Test>::PlayerBoardExists
);
assert_noop!(
ConnectFour::new_game(Origin::signed(3), 2),
Error::<Test>::PlayerBoardExists
);
let board = ConnectFour::boards(board_id_1);
assert_eq!(board.last_turn, 0);
});
}
#[test]
fn test_game_play() {
new_test_ext().execute_with(|| {
        let mut current_block: u64 = 100;
        // start from block 100
        run_to_block(current_block);
        // Test game creation between two different players
assert_ok!(ConnectFour::new_game(Origin::signed(PLAYER_1 as u64), PLAYER_2 as u64));
let board_id = ConnectFour::player_board(PLAYER_1 as u64);
let board = ConnectFour::boards(board_id);
assert_eq!(board.last_turn, current_block);
run_next_block();
current_block = current_block + 1;
assert_eq!(System::block_number(), current_block);
if board.next_player == PLAYER_1 {
assert_ok!(ConnectFour::play_turn(Origin::signed(PLAYER_1 as u64), 0));
let board = ConnectFour::boards(board_id);
assert!(board.board_state == BoardState::Running);
assert!(board.next_player == PLAYER_2);
assert_eq!(board.last_turn, current_block);
run_next_block();
current_block = current_block + 1;
}
assert_ok!(ConnectFour::play_turn(Origin::signed(PLAYER_2 as u64), 1));
let board = ConnectFour::boards(board_id);
assert_eq!(board.last_turn, current_block);
assert!(board.board_state == BoardState::Running);
assert!(board.next_player == PLAYER_1);
run_next_block();
current_block = current_block + 1;
assert_ok!(ConnectFour::play_turn(Origin::signed(PLAYER_1 as u64), 2));
let board = ConnectFour::boards(board_id);
assert!(board.board_state == BoardState::Running);
run_next_block();
current_block = current_block + 1;
assert_ok!(ConnectFour::play_turn(Origin::signed(PLAYER_2 as u64), 1));
let board = ConnectFour::boards(board_id);
assert!(board.board_state == BoardState::Running);
run_next_block();
current_block = current_block + 1;
assert_ok!(ConnectFour::play_turn(Origin::signed(PLAYER_1 as u64), 3));
let board = ConnectFour::boards(board_id);
assert!(board.board_state == BoardState::Running);
run_next_block();
current_block = current_block + 1;
assert_ok!(ConnectFour::play_turn(Origin::signed(PLAYER_2 as u64), 1));
let board = ConnectFour::boards(board_id);
assert!(board.board_state == BoardState::Running);
run_next_block();
current_block = current_block + 1;
assert_ok!(ConnectFour::play_turn(Origin::signed(PLAYER_1 as u64), 4));
        let board = ConnectFour::boards(board_id);
        assert!(board.board_state == BoardState::Running);
        run_next_block();
current_block = current_block + 1;
assert_ok!(ConnectFour::play_turn(Origin::signed(PLAYER_2 as u64), 1));
let board = ConnectFour::boards(board_id);
assert!(board.board_state == BoardState::Finished(board.blue));
assert_eq!(board.last_turn, current_block);
});
}
#[test]
fn test_game_events() {
new_test_ext().execute_with(|| {
let blocks_to_pass = 10;
        let mut current_block: u64 = 100;
        // start from block 100
        run_to_block(current_block);
        assert_eq!(None, ConnectFour::something());
        // Test game creation between two different players
assert_ok!(ConnectFour::test_schedule(Origin::signed(PLAYER_1 as u64), blocks_to_pass));
run_next_block();
current_block = current_block + 1;
assert_eq!(None, ConnectFour::something());
run_to_block(current_block + blocks_to_pass);
assert_eq!(77, ConnectFour::something().unwrap());
});
}
#[test]
fn test_force_turn() {
new_test_ext().execute_with(|| {
        let mut current_block: u64 = 100;
        // start from block 100
        run_to_block(current_block);
        // Test game creation between two different players
assert_ok!(ConnectFour::new_game(Origin::signed(PLAYER_1 as u64), PLAYER_2 as u64));
let board_id = ConnectFour::player_board(PLAYER_1 as u64);
let board = ConnectFour::boards(board_id);
assert_eq!(board.last_turn, current_block);
run_next_block();
current_block = current_block + 1;
assert_eq!(System::block_number(), current_block);
if board.next_player == PLAYER_1 {
assert_ok!(ConnectFour::play_turn(Origin::signed(PLAYER_1 as u64), 0));
let board = ConnectFour::boards(board_id);
assert!(board.board_state == BoardState::Running);
assert!(board.next_player == PLAYER_2);
assert_eq!(board.last_turn, current_block);
run_next_block();
current_block = current_block + 1;
}
assert_ok!(ConnectFour::play_turn(Origin::signed(PLAYER_2 as u64), 1));
let board = ConnectFour::boards(board_id);
assert_eq!(board.last_turn, current_block);
assert!(board.board_state == BoardState::Running);
assert!(board.next_player == PLAYER_1);
run_to_block(current_block + 10);
current_block = current_block + 10;
// check if force turn ended the game
let board = ConnectFour::boards(board_id);
assert_eq!(board.last_turn, current_block);
assert!(board.board_state == BoardState::Finished(board.blue));
assert!(Boards::<Test>::contains_key(board_id));
assert!(PlayerBoard::<Test>::contains_key(board.red));
assert!(PlayerBoard::<Test>::contains_key(board.blue));
assert!(BoardSchedules::<Test>::contains_key(board_id));
run_to_block(current_block + 20);
current_block = current_block + 20;
// check if boards are cleaned up
assert!(!Boards::<Test>::contains_key(board_id));
assert!(!PlayerBoard::<Test>::contains_key(board.red));
assert!(!PlayerBoard::<Test>::contains_key(board.blue));
assert!(!BoardSchedules::<Test>::contains_key(board_id));
});
}
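// Sketch of the block-progression helpers assumed above (their real
// definitions live in mock.rs; this is the typical FRAME test pattern,
// not a verified copy):
//
//     pub fn run_to_block(n: u64) {
//         while System::block_number() < n {
//             ConnectFour::on_finalize(System::block_number());
//             System::set_block_number(System::block_number() + 1);
//             ConnectFour::on_initialize(System::block_number());
//         }
//     }
//
//     pub fn run_next_block() {
//         run_to_block(System::block_number() + 1);
//     }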
|
mintkey.go
|
package mintkey
import (
"encoding/hex"
"fmt"
"github.com/tendermint/crypto/bcrypt"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/armor"
cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino"
"github.com/tendermint/tendermint/crypto/xsalsa20symmetric"
tmos "github.com/tendermint/tendermint/libs/os"
"github.com/DFWallet/anatha/crypto/keys/keyerror"
)
const (
blockTypePrivKey = "TENDERMINT PRIVATE KEY"
blockTypeKeyInfo = "TENDERMINT KEY INFO"
blockTypePubKey = "TENDERMINT PUBLIC KEY"
defaultAlgo = "secp256k1"
headerVersion = "version"
headerType = "type"
)
// BcryptSecurityParameter is a var so it can be changed within the lcd test.
// Making the bcrypt security parameter a var shouldn't be a security issue:
// one can't verify an invalid key by maliciously changing the bcrypt
// parameter during a runtime vulnerability. The main security threat this
// exposes is an attacker lowering the parameter at runtime before the user
// creates their key, and then keeping it at that value for every subsequent
// call to the keys command in future startups, or the attacker must get
// access to the filesystem. However, with a similar threat model (changing
// variables at runtime), one can cause the user to sign a different tx than
// what they see, which is a significantly cheaper attack than breaking a
// bcrypt hash. (Recall that the salt still exists to defeat rainbow tables.)
// For further notes on security parameter choice, see README.md
var BcryptSecurityParameter = 12
//-----------------------------------------------------------------
// add armor
// Armor the InfoBytes
func ArmorInfoBytes(bz []byte) string {
header := map[string]string{
headerType: "Info",
headerVersion: "0.0.0",
}
return armor.EncodeArmor(blockTypeKeyInfo, header, bz)
}
// Armor the PubKeyBytes
func ArmorPubKeyBytes(bz []byte, algo string) string {
header := map[string]string{
headerVersion: "0.0.1",
}
if algo != "" {
header[headerType] = algo
}
return armor.EncodeArmor(blockTypePubKey, header, bz)
}
//-----------------------------------------------------------------
// remove armor
// Unarmor the InfoBytes
func UnarmorInfoBytes(armorStr string) ([]byte, error) {
bz, header, err := unarmorBytes(armorStr, blockTypeKeyInfo)
if err != nil {
return nil, err
}
if header[headerVersion] != "0.0.0" {
return nil, fmt.Errorf("unrecognized version: %v", header[headerVersion])
}
return bz, nil
}
// UnarmorPubKeyBytes returns the pubkey byte slice, a string of the algo type, and an error
func UnarmorPubKeyBytes(armorStr string) (bz []byte, algo string, err error) {
bz, header, err := unarmorBytes(armorStr, blockTypePubKey)
if err != nil {
return nil, "", fmt.Errorf("couldn't unarmor bytes: %v", err)
}
switch header[headerVersion] {
case "0.0.0":
return bz, defaultAlgo, err
case "0.0.1":
if header[headerType] == "" {
header[headerType] = defaultAlgo
}
return bz, header[headerType], err
case "":
return nil, "", fmt.Errorf("header's version field is empty")
default:
err = fmt.Errorf("unrecognized version: %v", header[headerVersion])
return nil, "", err
}
}
func unarmorBytes(armorStr, blockType string) (bz []byte, header map[string]string, err error) {
bType, header, bz, err := armor.DecodeArmor(armorStr)
if err != nil {
return
}
if bType != blockType {
err = fmt.Errorf("unrecognized armor type %q, expected: %q", bType, blockType)
return
}
return
}
//-----------------------------------------------------------------
// encrypt/decrypt with armor
// Encrypt and armor the private key.
func EncryptArmorPrivKey(privKey crypto.PrivKey, passphrase string, algo string) string {
saltBytes, encBytes := encryptPrivKey(privKey, passphrase)
header := map[string]string{
"kdf": "bcrypt",
"salt": fmt.Sprintf("%X", saltBytes),
}
if algo != "" {
header[headerType] = algo
}
armorStr := armor.EncodeArmor(blockTypePrivKey, header, encBytes)
return armorStr
}
// encryptPrivKey encrypts the given privKey with the passphrase using a
// randomly generated salt and the xsalsa20 cipher. It returns the salt and
// the encrypted private key.
func encryptPrivKey(privKey crypto.PrivKey, passphrase string) (saltBytes []byte, encBytes []byte) {
saltBytes = crypto.CRandBytes(16)
key, err := bcrypt.GenerateFromPassword(saltBytes, []byte(passphrase), BcryptSecurityParameter)
if err != nil {
tmos.Exit("Error generating bcrypt key from passphrase: " + err.Error())
}
key = crypto.Sha256(key) // get 32 bytes
privKeyBytes := privKey.Bytes()
return saltBytes, xsalsa20symmetric.EncryptSymmetric(privKeyBytes, key)
}
// UnarmorDecryptPrivKey returns the privkey byte slice, a string of the algo type, and an error
func UnarmorDecryptPrivKey(armorStr string, passphrase string) (privKey crypto.PrivKey, algo string, err error) {
blockType, header, encBytes, err := armor.DecodeArmor(armorStr)
if err != nil {
return privKey, "", err
}
if blockType != blockTypePrivKey {
return privKey, "", fmt.Errorf("unrecognized armor type: %v", blockType)
}
if header["kdf"] != "bcrypt" {
		return privKey, "", fmt.Errorf("unrecognized KDF type: %v", header["kdf"])
}
if header["salt"] == "" {
return privKey, "", fmt.Errorf("missing salt bytes")
}
saltBytes, err := hex.DecodeString(header["salt"])
if err != nil {
return privKey, "", fmt.Errorf("error decoding salt: %v", err.Error())
}
privKey, err = decryptPrivKey(saltBytes, encBytes, passphrase)
if header[headerType] == "" {
header[headerType] = defaultAlgo
}
return privKey, header[headerType], err
}
func decryptPrivKey(saltBytes []byte, encBytes []byte, passphrase string) (privKey crypto.PrivKey, err error) {
key, err := bcrypt.GenerateFromPassword(saltBytes, []byte(passphrase), BcryptSecurityParameter)
if err != nil {
tmos.Exit("error generating bcrypt key from passphrase: " + err.Error())
}
key = crypto.Sha256(key) // Get 32 bytes
privKeyBytes, err := xsalsa20symmetric.DecryptSymmetric(encBytes, key)
if err != nil && err.Error() == "Ciphertext decryption failed" {
return privKey, keyerror.NewErrWrongPassword()
} else if err != nil {
return privKey, err
}
privKey, err = cryptoAmino.PrivKeyFromBytes(privKeyBytes)
return privKey, err
}
|
string_functions.py
|
"""
Swaps token1 for token2 in the sentence.
token2 should belong to the sentence for this to work.
"""
def swap(token1, token2, sentence):
    index = token2.idx
    length = len(token2.text)
    if index == 0:
        prepend = ''
    else:
        prepend = sentence[:index]
    append = sentence[(index + length):]
    return prepend + token1.text + append
def remove(token, sentence):
    return remove_range(token.idx, token.idx + len(token.text), sentence)
"""
Removes all characters
from start to end in sentence
"""
def remove_range(start, end, sentence):
append = sentence[end:(len(sentence))]
prepend = ''
if start > 0:
prepend = sentence[:(start)]
return prepend + append
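# A minimal usage sketch, assuming spaCy-style tokens that expose `.idx`
# (character offset into the sentence) and `.text`; the tiny stand-in class
# below is hypothetical and exists only for this example:
if __name__ == '__main__':
    class Tok:
        def __init__(self, text, idx):
            self.text = text
            self.idx = idx

    sentence = "the cat sat"
    cat = Tok("cat", 4)
    print(swap(Tok("dog", 0), cat, sentence))  # "the dog sat"
    print(remove(cat, sentence))               # "the  sat" (note the double space)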
|
root.go
|
// Copyright © 2017 Mesosphere Inc. <http://mesosphere.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"fmt"
"os"
"github.com/dcos/dcos-diagnostics/api"
"github.com/dcos/dcos-diagnostics/config"
"github.com/dcos/dcos-diagnostics/dcos"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var (
version bool
diag bool
cfgFile string
defaultConfig = &config.Config{}
)
// RootCmd represents the base command when called without any subcommands
var RootCmd = &cobra.Command{
Use: "dcos-diagnostics",
Short: "DC/OS diagnostics service",
	Long: `DC/OS diagnostics service provides health information about the cluster.
dcos-diagnostics daemon starts an HTTP server and polls the components' health.
`,
// Uncomment the following line if your bare application
// has an action associated with it:
Run: func(cmd *cobra.Command, args []string) {
if version {
fmt.Printf("Version: %s\n", config.Version)
os.Exit(0)
}
if diag {
os.Exit(runDiag())
}
cmd.Help()
},
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
if err := RootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(-1)
}
}
func init() {
cobra.OnInitialize(initConfig)
RootCmd.PersistentFlags().BoolVar(&version, "version", false, "Print dcos-diagnostics version")
RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.dcos-diagnostics.yaml)")
RootCmd.PersistentFlags().BoolVar(&diag, "diag", false,
"Check DC/OS components health.")
RootCmd.PersistentFlags().BoolVar(&defaultConfig.FlagVerbose, "verbose", defaultConfig.FlagVerbose,
"Use verbose debug output.")
RootCmd.PersistentFlags().StringVar(&defaultConfig.FlagRole, "role", defaultConfig.FlagRole,
"Set node role")
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
viper.SetConfigName("dcos-diagnostics-config") // name of config file (without extension)
viper.AddConfigPath("/opt/mesosphere/etc/")
viper.AutomaticEnv()
if cfgFile != "" { // enable ability to specify config file via flag
viper.SetConfigFile(cfgFile)
}
// If a config file is found, read it in.
if err := viper.ReadInConfig(); err == nil {
if err := viper.Unmarshal(defaultConfig); err != nil {
logrus.WithError(err).Fatalf("Error loading config file")
}
}
}
func runDiag() int {
sdu := &api.SystemdUnits{}
units, err := sdu.GetUnits(&dcos.Tools{})
if err != nil {
logrus.Errorf("Error getting units properties: %s", err)
return 1
}
var fail bool
for _, unit := range units {
if unit.UnitHealth != 0 {
fmt.Printf("[%s]: %s %s\n", unit.UnitID, unit.UnitTitle, unit.UnitOutput)
fail = true
}
}
if fail {
logrus.Error("Found unhealthy systemd units")
return 1
}
return 0
}
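// Invocation sketch (hypothetical command lines; flags as registered in init
// above):
//
//	dcos-diagnostics --version                     # print the version and exit
//	dcos-diagnostics --diag                        # one-shot health check; non-zero exit on failure
//	dcos-diagnostics --config ./diag.yaml --verbose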
|
lib.rs
|
#![allow(clippy::module_inception)]
#![allow(clippy::upper_case_acronyms)]
#![allow(clippy::large_enum_variant)]
#![allow(clippy::wrong_self_convention)]
#![allow(clippy::should_implement_trait)]
#![allow(clippy::blacklisted_name)]
#![allow(clippy::vec_init_then_push)]
#![allow(rustdoc::bare_urls)]
#![warn(missing_docs)]
//! <fullname>Amazon Cognito Federated Identities</fullname>
//! <p>Amazon Cognito Federated Identities is a web service that delivers scoped temporary
//! credentials to mobile devices and other untrusted environments. It uniquely identifies a
//! device and supplies the user with a consistent identity over the lifetime of an
//! application.</p>
//! <p>Using Amazon Cognito Federated Identities, you can enable authentication with one or
//! more third-party identity providers (Facebook, Google, or Login with Amazon) or an Amazon
//! Cognito user pool, and you can also choose to support unauthenticated access from your app.
//! Cognito delivers a unique identifier for each user and acts as an OpenID token provider
//! trusted by AWS Security Token Service (STS) to access temporary, limited-privilege AWS
//! credentials.</p>
//! <p>For a description of the authentication flow from the Amazon Cognito Developer Guide
//! see <a href="https://docs.aws.amazon.com/cognito/latest/developerguide/authentication-flow.html">Authentication Flow</a>.</p>
//! <p>For more information see <a href="https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html">Amazon Cognito Federated Identities</a>.</p>
//!
//! # Crate Organization
//!
//! The entry point for most customers will be [`Client`]. [`Client`] exposes one method for each API offered
//! by the service.
//!
//! Some APIs require complex or nested arguments. These exist in [`model`].
//!
//! Lastly, errors that can be returned by the service are contained within [`error`]. [`Error`] defines a meta
//! error encompassing all possible errors that can be returned by the service.
//!
//! The other modules within this crate are not required for normal usage.
//!
//! # Examples
//! Examples can be found [here](https://github.com/awslabs/aws-sdk-rust/tree/main/examples/cognitoidentity).
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use error_meta::Error;
pub use config::Config;
mod aws_endpoint;
/// Client and fluent builders for calling the service.
#[cfg(feature = "client")]
pub mod client;
/// Configuration for the service.
pub mod config;
/// Errors that can occur when calling the service.
pub mod error;
mod error_meta;
/// Input structures for operations.
pub mod input;
mod json_deser;
mod json_errors;
mod json_ser;
/// Data structures used by operation inputs/outputs.
pub mod model;
mod no_credentials;
/// All operations that this crate can perform.
pub mod operation;
mod operation_deser;
mod operation_ser;
/// Output structures for operations.
pub mod output;
/// Crate version number.
pub static PKG_VERSION: &str = env!("CARGO_PKG_VERSION");
pub use aws_smithy_http::byte_stream::ByteStream;
pub use aws_smithy_http::result::SdkError;
pub use aws_smithy_types::Blob;
pub use aws_smithy_types::DateTime;
static API_METADATA: aws_http::user_agent::ApiMetadata =
aws_http::user_agent::ApiMetadata::new("cognitoidentity", PKG_VERSION);
pub use aws_smithy_http::endpoint::Endpoint;
pub use aws_smithy_types::retry::RetryConfig;
pub use aws_types::app_name::AppName;
pub use aws_types::region::Region;
pub use aws_types::Credentials;
#[cfg(feature = "client")]
pub use client::Client;
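// Minimal usage sketch (assumes the "client" feature, an async runtime, and
// the companion aws-config crate; `list_identity_pools` is one of the
// generated fluent builders and is shown here for illustration only, not
// verified against this exact crate version):
//
//     let shared_config = aws_config::load_from_env().await;
//     let client = Client::new(&shared_config);
//     let pools = client.list_identity_pools().max_results(10).send().await?;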
|
requests.go
|
package recordsets
import (
"github.com/nexclipper/gophercloud"
"github.com/nexclipper/gophercloud/pagination"
)
// ListOptsBuilder allows extensions to add additional parameters to the
// List request.
type ListOptsBuilder interface {
ToRecordSetListQuery() (string, error)
}
// ListOpts allows the filtering and sorting of paginated collections through
// the API. Filtering is achieved by passing in struct field values that map to
// the server attributes you want to see returned. Marker and Limit are used
// for pagination.
// https://developer.openstack.org/api-ref/dns/
type ListOpts struct {
// Integer value for the limit of values to return.
Limit int `q:"limit"`
// UUID of the recordset at which you want to set a marker.
Marker string `q:"marker"`
Data string `q:"data"`
Description string `q:"description"`
Name string `q:"name"`
SortDir string `q:"sort_dir"`
SortKey string `q:"sort_key"`
Status string `q:"status"`
TTL int `q:"ttl"`
Type string `q:"type"`
ZoneID string `q:"zone_id"`
}
// ToRecordSetListQuery formats a ListOpts into a query string.
func (opts ListOpts) ToRecordSetListQuery() (string, error) {
q, err := gophercloud.BuildQueryString(opts)
return q.String(), err
}
// ListByZone implements the recordset list request.
func ListByZone(client *gophercloud.ServiceClient, zoneID string, opts ListOptsBuilder) pagination.Pager {
url := baseURL(client, zoneID)
if opts != nil {
query, err := opts.ToRecordSetListQuery()
if err != nil {
return pagination.Pager{Err: err}
}
url += query
}
return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
return RecordSetPage{pagination.LinkedPageBase{PageResult: r}}
})
}
// Get implements the recordset Get request.
func Get(client *gophercloud.ServiceClient, zoneID string, rrsetID string) (r GetResult) {
resp, err := client.Get(rrsetURL(client, zoneID, rrsetID), &r.Body, nil)
_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)
return
}
// CreateOptsBuilder allows extensions to add additional attributes to the
// Create request.
type CreateOptsBuilder interface {
ToRecordSetCreateMap() (map[string]interface{}, error)
}
// CreateOpts specifies the base attributes that may be used to create a
// RecordSet.
type CreateOpts struct {
// Name is the name of the RecordSet.
Name string `json:"name" required:"true"`
// Description is a description of the RecordSet.
Description string `json:"description,omitempty"`
// Records are the DNS records of the RecordSet.
Records []string `json:"records,omitempty"`
// TTL is the time to live of the RecordSet.
TTL int `json:"ttl,omitempty"`
// Type is the RRTYPE of the RecordSet.
Type string `json:"type,omitempty"`
}
// ToRecordSetCreateMap formats a CreateOpts structure into a request body.
func (opts CreateOpts) ToRecordSetCreateMap() (map[string]interface{}, error) {
b, err := gophercloud.BuildRequestBody(opts, "")
if err != nil {
return nil, err
}
return b, nil
}
// Create creates a recordset in a given zone.
func Create(client *gophercloud.ServiceClient, zoneID string, opts CreateOptsBuilder) (r CreateResult) {
b, err := opts.ToRecordSetCreateMap()
if err != nil {
r.Err = err
return
}
resp, err := client.Post(baseURL(client, zoneID), &b, &r.Body, &gophercloud.RequestOpts{
OkCodes: []int{201, 202},
})
_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)
return
}
// UpdateOptsBuilder allows extensions to add additional attributes to the
// Update request.
type UpdateOptsBuilder interface {
ToRecordSetUpdateMap() (map[string]interface{}, error)
}
// UpdateOpts specifies the base attributes that may be updated on an existing
// RecordSet.
type UpdateOpts struct {
// Description is a description of the RecordSet.
Description *string `json:"description,omitempty"`
// TTL is the time to live of the RecordSet.
TTL *int `json:"ttl,omitempty"`
// Records are the DNS records of the RecordSet.
Records []string `json:"records,omitempty"`
}
// ToRecordSetUpdateMap formats an UpdateOpts structure into a request body.
func (opts UpdateOpts) ToRecordSetUpdateMap() (map[string]interface{}, error) {
b, err := gophercloud.BuildRequestBody(opts, "")
if err != nil {
return nil, err
}
	// If opts.TTL was actually set, send it. A TTL of 0 is treated as a
	// special value meaning "clear the TTL": it is transmitted as JSON null,
	// even though the API then reports the TTL as 0.
	//
	// Otherwise, don't send the TTL field at all.
if opts.TTL != nil {
ttl := *(opts.TTL)
if ttl > 0 {
b["ttl"] = ttl
} else {
b["ttl"] = nil
}
}
return b, nil
}
// Update updates a recordset in a given zone
func Update(client *gophercloud.ServiceClient, zoneID string, rrsetID string, opts UpdateOptsBuilder) (r UpdateResult) {
b, err := opts.ToRecordSetUpdateMap()
if err != nil {
r.Err = err
return
}
resp, err := client.Put(rrsetURL(client, zoneID, rrsetID), &b, &r.Body, &gophercloud.RequestOpts{
OkCodes: []int{200, 202},
})
_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)
return
}
// Delete removes an existing RecordSet.
func Delete(client *gophercloud.ServiceClient, zoneID string, rrsetID string) (r DeleteResult) {
resp, err := client.Delete(rrsetURL(client, zoneID, rrsetID), &gophercloud.RequestOpts{
OkCodes: []int{202},
})
_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)
return
}
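// Usage sketch (assumes an authenticated *gophercloud.ServiceClient for the
// DNS v2 service, e.g. from openstack.NewDNSV2; the zone ID is a placeholder
// and ExtractRecordSets comes from this package's results file):
//
//	pager := ListByZone(client, "zone-uuid", ListOpts{Type: "A", Limit: 20})
//	err := pager.EachPage(func(page pagination.Page) (bool, error) {
//		rrsets, err := ExtractRecordSets(page)
//		if err != nil {
//			return false, err
//		}
//		for _, rs := range rrsets {
//			fmt.Println(rs.Name, rs.Records)
//		}
//		return true, nil
//	})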
|
releaser_hooks.py
|
import os
import subprocess
try:
import polib
except ImportError:
print('Msg to the package releaser: prerelease hooks will not work as you have not installed polib.')
raise
import copy
import codecs
def prereleaser_before(data):
"""
1. Run the unit tests one last time before we make a release.
2. Update the CONTRIBUTORS.txt file.
    Note: requires polib (https://pypi.python.org/pypi/polib) and flake8.
"""
print('Running unit tests.')
subprocess.check_output(["python", "example_project/manage.py", "test", "photologue"])
print('Running flake8 check.')
# See setup.cfg for configuration options.
subprocess.check_output(["flake8"])
print('Checking that we have no outstanding DB migrations.')
output = subprocess.check_output(["python", "example_project/manage.py", "makemigrations", "--dry-run",
"photologue"])
if not output == b"No changes detected in app 'photologue'\n":
raise Exception('There are outstanding migrations for Photologue.')
print('Updating CONTRIBUTORS.txt')
# This command will get the author of every commit.
output = subprocess.check_output(["git", "log", "--format='%aN'"])
# Convert to a list.
contributors_list = [contributor.strip("'") for contributor in output.decode('utf-8').split('\n')]
# Now add info from the translator files. This is incomplete, we can only list
# the 'last contributor' to each translation.
for language in os.listdir('photologue/locale/'):
filename = 'photologue/locale/{}/LC_MESSAGES/django.po'.format(language)
po = polib.pofile(filename)
last_translator = po.metadata['Last-Translator']
contributors_list.append(last_translator[:last_translator.find('<') - 1])
# Now we want to only show each contributor once, and to list them by how many
# contributions they have made - a rough guide to the effort they have put in.
contributors_dict = {}
for author in contributors_list:
author_copy = copy.copy(author)
if author_copy in ('', '(no author)', 'FULL NAME'):
# Skip bad data.
continue
# The creator of this project should always appear first in the list - so
# don't add him to this list, but hard-code his name.
if author_copy in ('Justin Driscoll', 'justin.driscoll'):
continue
# Handle contributors who appear under multiple names.
if author_copy == 'richardbarran':
author_copy = 'Richard Barran'
if author_copy in contributors_dict:
contributors_dict[author_copy] += 1
else:
contributors_dict[author_copy] = 1
with codecs.open('CONTRIBUTORS.txt', 'w', encoding='utf8') as f:
f.write('Photologue is made possible by all the people who have contributed'
' to it. A non-exhaustive list follows:\n\n')
f.write('Justin Driscoll\n')
for i in sorted(contributors_dict, key=contributors_dict.get, reverse=True):
f.write(i + '\n')
# And commit the new contributors file.
if subprocess.check_output(["git", "diff", "CONTRIBUTORS.txt"]):
subprocess.check_output(["git", "commit", "-m", "Updated the list of contributors.", "CONTRIBUTORS.txt"])
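# Wiring sketch: zest.releaser picks up hooks like the one above through
# setup() entry points (group name per the zest.releaser docs; the left-hand
# name is arbitrary):
#
#     entry_points={
#         'zest.releaser.prereleaser.before': [
#             'photologue = releaser_hooks:prereleaser_before',
#         ],
#     },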
| |
text_dataset_format_details.py
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .dataset_format_details import DatasetFormatDetails
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class TextDatasetFormatDetails(DatasetFormatDetails):
"""
    Indicates the dataset consists of txt files.
"""
def __init__(self, **kwargs):
"""
Initializes a new TextDatasetFormatDetails object with values from keyword arguments. The default value of the :py:attr:`~oci.data_labeling_service_dataplane.models.TextDatasetFormatDetails.format_type` attribute
of this class is ``TEXT`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param format_type:
The value to assign to the format_type property of this TextDatasetFormatDetails.
Allowed values for this property are: "DOCUMENT", "IMAGE", "TEXT"
:type format_type: str
"""
self.swagger_types = {
'format_type': 'str'
}
self.attribute_map = {
'format_type': 'formatType'
}
        self._format_type = None
        self._format_type = 'TEXT'
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
|
avx.rs
|
// the whole file taken from pikkr
use x86intrin::{m256i, mm256_setr_epi8};
#[inline]
pub fn mm256i(i: i8) -> m256i {
mm256_setr_epi8(
i,
i,
i,
i,
i,
i,
i,
i,
i,
i,
i,
i,
i,
i,
i,
i,
i,
i,
i,
i,
i,
i,
i,
i,
i,
i,
i,
i,
i,
i,
i,
i,
)
}
/// Create a value of `m256i` from the slice of bytes.
///
/// # Safety
/// This function guarantees the safety only if the length of `s` is greater than or equal to `i + 32`.
#[inline(always)]
pub unsafe fn u8_to_m256i(s: &[u8], i: usize) -> m256i {
mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
*s.as_ptr().offset(i as isize + 7) as i8,
*s.as_ptr().offset(i as isize + 8) as i8,
*s.as_ptr().offset(i as isize + 9) as i8,
*s.as_ptr().offset(i as isize + 10) as i8,
*s.as_ptr().offset(i as isize + 11) as i8,
*s.as_ptr().offset(i as isize + 12) as i8,
*s.as_ptr().offset(i as isize + 13) as i8,
*s.as_ptr().offset(i as isize + 14) as i8,
*s.as_ptr().offset(i as isize + 15) as i8,
*s.as_ptr().offset(i as isize + 16) as i8,
*s.as_ptr().offset(i as isize + 17) as i8,
*s.as_ptr().offset(i as isize + 18) as i8,
*s.as_ptr().offset(i as isize + 19) as i8,
*s.as_ptr().offset(i as isize + 20) as i8,
*s.as_ptr().offset(i as isize + 21) as i8,
*s.as_ptr().offset(i as isize + 22) as i8,
*s.as_ptr().offset(i as isize + 23) as i8,
*s.as_ptr().offset(i as isize + 24) as i8,
*s.as_ptr().offset(i as isize + 25) as i8,
*s.as_ptr().offset(i as isize + 26) as i8,
*s.as_ptr().offset(i as isize + 27) as i8,
*s.as_ptr().offset(i as isize + 28) as i8,
*s.as_ptr().offset(i as isize + 29) as i8,
*s.as_ptr().offset(i as isize + 30) as i8,
*s.as_ptr().offset(i as isize + 31) as i8,
)
}
/// Create a value of `m256i` from the trailing bytes of the slice,
/// zero-filling the lanes past `s.len() - i`.
///
/// # Safety
/// This function guarantees the safety only if `i <= s.len()` and
/// `s.len() - i` is less than 32.
#[inline]
pub unsafe fn u8_to_m256i_rest(s: &[u8], i: usize) -> m256i {
match s.len() - i {
31 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
*s.as_ptr().offset(i as isize + 7) as i8,
*s.as_ptr().offset(i as isize + 8) as i8,
*s.as_ptr().offset(i as isize + 9) as i8,
*s.as_ptr().offset(i as isize + 10) as i8,
*s.as_ptr().offset(i as isize + 11) as i8,
*s.as_ptr().offset(i as isize + 12) as i8,
*s.as_ptr().offset(i as isize + 13) as i8,
*s.as_ptr().offset(i as isize + 14) as i8,
*s.as_ptr().offset(i as isize + 15) as i8,
*s.as_ptr().offset(i as isize + 16) as i8,
*s.as_ptr().offset(i as isize + 17) as i8,
*s.as_ptr().offset(i as isize + 18) as i8,
*s.as_ptr().offset(i as isize + 19) as i8,
*s.as_ptr().offset(i as isize + 20) as i8,
*s.as_ptr().offset(i as isize + 21) as i8,
*s.as_ptr().offset(i as isize + 22) as i8,
*s.as_ptr().offset(i as isize + 23) as i8,
*s.as_ptr().offset(i as isize + 24) as i8,
*s.as_ptr().offset(i as isize + 25) as i8,
*s.as_ptr().offset(i as isize + 26) as i8,
*s.as_ptr().offset(i as isize + 27) as i8,
*s.as_ptr().offset(i as isize + 28) as i8,
*s.as_ptr().offset(i as isize + 29) as i8,
*s.as_ptr().offset(i as isize + 30) as i8,
0,
),
30 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
*s.as_ptr().offset(i as isize + 7) as i8,
*s.as_ptr().offset(i as isize + 8) as i8,
*s.as_ptr().offset(i as isize + 9) as i8,
*s.as_ptr().offset(i as isize + 10) as i8,
*s.as_ptr().offset(i as isize + 11) as i8,
*s.as_ptr().offset(i as isize + 12) as i8,
*s.as_ptr().offset(i as isize + 13) as i8,
*s.as_ptr().offset(i as isize + 14) as i8,
*s.as_ptr().offset(i as isize + 15) as i8,
*s.as_ptr().offset(i as isize + 16) as i8,
*s.as_ptr().offset(i as isize + 17) as i8,
*s.as_ptr().offset(i as isize + 18) as i8,
*s.as_ptr().offset(i as isize + 19) as i8,
*s.as_ptr().offset(i as isize + 20) as i8,
*s.as_ptr().offset(i as isize + 21) as i8,
*s.as_ptr().offset(i as isize + 22) as i8,
*s.as_ptr().offset(i as isize + 23) as i8,
*s.as_ptr().offset(i as isize + 24) as i8,
*s.as_ptr().offset(i as isize + 25) as i8,
*s.as_ptr().offset(i as isize + 26) as i8,
*s.as_ptr().offset(i as isize + 27) as i8,
*s.as_ptr().offset(i as isize + 28) as i8,
*s.as_ptr().offset(i as isize + 29) as i8,
0,
0,
),
29 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
*s.as_ptr().offset(i as isize + 7) as i8,
*s.as_ptr().offset(i as isize + 8) as i8,
*s.as_ptr().offset(i as isize + 9) as i8,
*s.as_ptr().offset(i as isize + 10) as i8,
*s.as_ptr().offset(i as isize + 11) as i8,
*s.as_ptr().offset(i as isize + 12) as i8,
*s.as_ptr().offset(i as isize + 13) as i8,
*s.as_ptr().offset(i as isize + 14) as i8,
*s.as_ptr().offset(i as isize + 15) as i8,
*s.as_ptr().offset(i as isize + 16) as i8,
*s.as_ptr().offset(i as isize + 17) as i8,
*s.as_ptr().offset(i as isize + 18) as i8,
*s.as_ptr().offset(i as isize + 19) as i8,
*s.as_ptr().offset(i as isize + 20) as i8,
*s.as_ptr().offset(i as isize + 21) as i8,
*s.as_ptr().offset(i as isize + 22) as i8,
*s.as_ptr().offset(i as isize + 23) as i8,
*s.as_ptr().offset(i as isize + 24) as i8,
*s.as_ptr().offset(i as isize + 25) as i8,
*s.as_ptr().offset(i as isize + 26) as i8,
*s.as_ptr().offset(i as isize + 27) as i8,
*s.as_ptr().offset(i as isize + 28) as i8,
0,
0,
0,
),
28 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
*s.as_ptr().offset(i as isize + 7) as i8,
*s.as_ptr().offset(i as isize + 8) as i8,
*s.as_ptr().offset(i as isize + 9) as i8,
*s.as_ptr().offset(i as isize + 10) as i8,
*s.as_ptr().offset(i as isize + 11) as i8,
*s.as_ptr().offset(i as isize + 12) as i8,
*s.as_ptr().offset(i as isize + 13) as i8,
*s.as_ptr().offset(i as isize + 14) as i8,
*s.as_ptr().offset(i as isize + 15) as i8,
*s.as_ptr().offset(i as isize + 16) as i8,
*s.as_ptr().offset(i as isize + 17) as i8,
*s.as_ptr().offset(i as isize + 18) as i8,
*s.as_ptr().offset(i as isize + 19) as i8,
*s.as_ptr().offset(i as isize + 20) as i8,
*s.as_ptr().offset(i as isize + 21) as i8,
*s.as_ptr().offset(i as isize + 22) as i8,
*s.as_ptr().offset(i as isize + 23) as i8,
*s.as_ptr().offset(i as isize + 24) as i8,
*s.as_ptr().offset(i as isize + 25) as i8,
*s.as_ptr().offset(i as isize + 26) as i8,
*s.as_ptr().offset(i as isize + 27) as i8,
0,
0,
0,
0,
),
27 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
*s.as_ptr().offset(i as isize + 7) as i8,
*s.as_ptr().offset(i as isize + 8) as i8,
*s.as_ptr().offset(i as isize + 9) as i8,
*s.as_ptr().offset(i as isize + 10) as i8,
*s.as_ptr().offset(i as isize + 11) as i8,
*s.as_ptr().offset(i as isize + 12) as i8,
*s.as_ptr().offset(i as isize + 13) as i8,
*s.as_ptr().offset(i as isize + 14) as i8,
*s.as_ptr().offset(i as isize + 15) as i8,
*s.as_ptr().offset(i as isize + 16) as i8,
*s.as_ptr().offset(i as isize + 17) as i8,
*s.as_ptr().offset(i as isize + 18) as i8,
*s.as_ptr().offset(i as isize + 19) as i8,
*s.as_ptr().offset(i as isize + 20) as i8,
*s.as_ptr().offset(i as isize + 21) as i8,
*s.as_ptr().offset(i as isize + 22) as i8,
*s.as_ptr().offset(i as isize + 23) as i8,
*s.as_ptr().offset(i as isize + 24) as i8,
*s.as_ptr().offset(i as isize + 25) as i8,
*s.as_ptr().offset(i as isize + 26) as i8,
0,
0,
0,
0,
0,
),
26 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
*s.as_ptr().offset(i as isize + 7) as i8,
*s.as_ptr().offset(i as isize + 8) as i8,
*s.as_ptr().offset(i as isize + 9) as i8,
*s.as_ptr().offset(i as isize + 10) as i8,
*s.as_ptr().offset(i as isize + 11) as i8,
*s.as_ptr().offset(i as isize + 12) as i8,
*s.as_ptr().offset(i as isize + 13) as i8,
*s.as_ptr().offset(i as isize + 14) as i8,
*s.as_ptr().offset(i as isize + 15) as i8,
*s.as_ptr().offset(i as isize + 16) as i8,
*s.as_ptr().offset(i as isize + 17) as i8,
*s.as_ptr().offset(i as isize + 18) as i8,
*s.as_ptr().offset(i as isize + 19) as i8,
*s.as_ptr().offset(i as isize + 20) as i8,
*s.as_ptr().offset(i as isize + 21) as i8,
*s.as_ptr().offset(i as isize + 22) as i8,
*s.as_ptr().offset(i as isize + 23) as i8,
*s.as_ptr().offset(i as isize + 24) as i8,
*s.as_ptr().offset(i as isize + 25) as i8,
0,
0,
0,
0,
0,
0,
),
25 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
*s.as_ptr().offset(i as isize + 7) as i8,
*s.as_ptr().offset(i as isize + 8) as i8,
*s.as_ptr().offset(i as isize + 9) as i8,
*s.as_ptr().offset(i as isize + 10) as i8,
*s.as_ptr().offset(i as isize + 11) as i8,
*s.as_ptr().offset(i as isize + 12) as i8,
*s.as_ptr().offset(i as isize + 13) as i8,
*s.as_ptr().offset(i as isize + 14) as i8,
*s.as_ptr().offset(i as isize + 15) as i8,
*s.as_ptr().offset(i as isize + 16) as i8,
*s.as_ptr().offset(i as isize + 17) as i8,
*s.as_ptr().offset(i as isize + 18) as i8,
*s.as_ptr().offset(i as isize + 19) as i8,
*s.as_ptr().offset(i as isize + 20) as i8,
*s.as_ptr().offset(i as isize + 21) as i8,
*s.as_ptr().offset(i as isize + 22) as i8,
*s.as_ptr().offset(i as isize + 23) as i8,
*s.as_ptr().offset(i as isize + 24) as i8,
0,
0,
0,
0,
0,
0,
0,
),
24 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
*s.as_ptr().offset(i as isize + 7) as i8,
*s.as_ptr().offset(i as isize + 8) as i8,
*s.as_ptr().offset(i as isize + 9) as i8,
*s.as_ptr().offset(i as isize + 10) as i8,
*s.as_ptr().offset(i as isize + 11) as i8,
*s.as_ptr().offset(i as isize + 12) as i8,
*s.as_ptr().offset(i as isize + 13) as i8,
*s.as_ptr().offset(i as isize + 14) as i8,
*s.as_ptr().offset(i as isize + 15) as i8,
*s.as_ptr().offset(i as isize + 16) as i8,
*s.as_ptr().offset(i as isize + 17) as i8,
*s.as_ptr().offset(i as isize + 18) as i8,
*s.as_ptr().offset(i as isize + 19) as i8,
*s.as_ptr().offset(i as isize + 20) as i8,
*s.as_ptr().offset(i as isize + 21) as i8,
*s.as_ptr().offset(i as isize + 22) as i8,
*s.as_ptr().offset(i as isize + 23) as i8,
0,
0,
0,
0,
0,
0,
0,
0,
),
23 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
*s.as_ptr().offset(i as isize + 7) as i8,
*s.as_ptr().offset(i as isize + 8) as i8,
*s.as_ptr().offset(i as isize + 9) as i8,
*s.as_ptr().offset(i as isize + 10) as i8,
*s.as_ptr().offset(i as isize + 11) as i8,
*s.as_ptr().offset(i as isize + 12) as i8,
*s.as_ptr().offset(i as isize + 13) as i8,
*s.as_ptr().offset(i as isize + 14) as i8,
*s.as_ptr().offset(i as isize + 15) as i8,
*s.as_ptr().offset(i as isize + 16) as i8,
*s.as_ptr().offset(i as isize + 17) as i8,
*s.as_ptr().offset(i as isize + 18) as i8,
*s.as_ptr().offset(i as isize + 19) as i8,
*s.as_ptr().offset(i as isize + 20) as i8,
*s.as_ptr().offset(i as isize + 21) as i8,
*s.as_ptr().offset(i as isize + 22) as i8,
0,
0,
0,
0,
0,
0,
0,
0,
0,
),
22 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
*s.as_ptr().offset(i as isize + 7) as i8,
*s.as_ptr().offset(i as isize + 8) as i8,
*s.as_ptr().offset(i as isize + 9) as i8,
*s.as_ptr().offset(i as isize + 10) as i8,
*s.as_ptr().offset(i as isize + 11) as i8,
*s.as_ptr().offset(i as isize + 12) as i8,
*s.as_ptr().offset(i as isize + 13) as i8,
*s.as_ptr().offset(i as isize + 14) as i8,
*s.as_ptr().offset(i as isize + 15) as i8,
*s.as_ptr().offset(i as isize + 16) as i8,
*s.as_ptr().offset(i as isize + 17) as i8,
*s.as_ptr().offset(i as isize + 18) as i8,
*s.as_ptr().offset(i as isize + 19) as i8,
*s.as_ptr().offset(i as isize + 20) as i8,
*s.as_ptr().offset(i as isize + 21) as i8,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
),
21 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
            *s.as_ptr().offset(i as isize + 3) as i8,
            *s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
*s.as_ptr().offset(i as isize + 7) as i8,
*s.as_ptr().offset(i as isize + 8) as i8,
*s.as_ptr().offset(i as isize + 9) as i8,
*s.as_ptr().offset(i as isize + 10) as i8,
*s.as_ptr().offset(i as isize + 11) as i8,
*s.as_ptr().offset(i as isize + 12) as i8,
*s.as_ptr().offset(i as isize + 13) as i8,
*s.as_ptr().offset(i as isize + 14) as i8,
*s.as_ptr().offset(i as isize + 15) as i8,
*s.as_ptr().offset(i as isize + 16) as i8,
*s.as_ptr().offset(i as isize + 17) as i8,
*s.as_ptr().offset(i as isize + 18) as i8,
*s.as_ptr().offset(i as isize + 19) as i8,
*s.as_ptr().offset(i as isize + 20) as i8,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
),
20 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
*s.as_ptr().offset(i as isize + 7) as i8,
*s.as_ptr().offset(i as isize + 8) as i8,
*s.as_ptr().offset(i as isize + 9) as i8,
*s.as_ptr().offset(i as isize + 10) as i8,
*s.as_ptr().offset(i as isize + 11) as i8,
*s.as_ptr().offset(i as isize + 12) as i8,
*s.as_ptr().offset(i as isize + 13) as i8,
*s.as_ptr().offset(i as isize + 14) as i8,
*s.as_ptr().offset(i as isize + 15) as i8,
*s.as_ptr().offset(i as isize + 16) as i8,
*s.as_ptr().offset(i as isize + 17) as i8,
*s.as_ptr().offset(i as isize + 18) as i8,
*s.as_ptr().offset(i as isize + 19) as i8,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
),
19 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
*s.as_ptr().offset(i as isize + 7) as i8,
*s.as_ptr().offset(i as isize + 8) as i8,
*s.as_ptr().offset(i as isize + 9) as i8,
*s.as_ptr().offset(i as isize + 10) as i8,
*s.as_ptr().offset(i as isize + 11) as i8,
*s.as_ptr().offset(i as isize + 12) as i8,
*s.as_ptr().offset(i as isize + 13) as i8,
*s.as_ptr().offset(i as isize + 14) as i8,
*s.as_ptr().offset(i as isize + 15) as i8,
*s.as_ptr().offset(i as isize + 16) as i8,
*s.as_ptr().offset(i as isize + 17) as i8,
*s.as_ptr().offset(i as isize + 18) as i8,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
),
18 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
*s.as_ptr().offset(i as isize + 7) as i8,
*s.as_ptr().offset(i as isize + 8) as i8,
*s.as_ptr().offset(i as isize + 9) as i8,
*s.as_ptr().offset(i as isize + 10) as i8,
*s.as_ptr().offset(i as isize + 11) as i8,
*s.as_ptr().offset(i as isize + 12) as i8,
*s.as_ptr().offset(i as isize + 13) as i8,
*s.as_ptr().offset(i as isize + 14) as i8,
*s.as_ptr().offset(i as isize + 15) as i8,
*s.as_ptr().offset(i as isize + 16) as i8,
*s.as_ptr().offset(i as isize + 17) as i8,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
),
17 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
*s.as_ptr().offset(i as isize + 7) as i8,
*s.as_ptr().offset(i as isize + 8) as i8,
*s.as_ptr().offset(i as isize + 9) as i8,
*s.as_ptr().offset(i as isize + 10) as i8,
*s.as_ptr().offset(i as isize + 11) as i8,
*s.as_ptr().offset(i as isize + 12) as i8,
*s.as_ptr().offset(i as isize + 13) as i8,
*s.as_ptr().offset(i as isize + 14) as i8,
*s.as_ptr().offset(i as isize + 15) as i8,
*s.as_ptr().offset(i as isize + 16) as i8,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
),
16 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
*s.as_ptr().offset(i as isize + 7) as i8,
*s.as_ptr().offset(i as isize + 8) as i8,
*s.as_ptr().offset(i as isize + 9) as i8,
*s.as_ptr().offset(i as isize + 10) as i8,
*s.as_ptr().offset(i as isize + 11) as i8,
*s.as_ptr().offset(i as isize + 12) as i8,
*s.as_ptr().offset(i as isize + 13) as i8,
*s.as_ptr().offset(i as isize + 14) as i8,
*s.as_ptr().offset(i as isize + 15) as i8,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
),
15 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
*s.as_ptr().offset(i as isize + 7) as i8,
*s.as_ptr().offset(i as isize + 8) as i8,
*s.as_ptr().offset(i as isize + 9) as i8,
*s.as_ptr().offset(i as isize + 10) as i8,
*s.as_ptr().offset(i as isize + 11) as i8,
*s.as_ptr().offset(i as isize + 12) as i8,
*s.as_ptr().offset(i as isize + 13) as i8,
*s.as_ptr().offset(i as isize + 14) as i8,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
),
14 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
*s.as_ptr().offset(i as isize + 7) as i8,
*s.as_ptr().offset(i as isize + 8) as i8,
*s.as_ptr().offset(i as isize + 9) as i8,
*s.as_ptr().offset(i as isize + 10) as i8,
*s.as_ptr().offset(i as isize + 11) as i8,
*s.as_ptr().offset(i as isize + 12) as i8,
*s.as_ptr().offset(i as isize + 13) as i8,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
),
13 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
*s.as_ptr().offset(i as isize + 7) as i8,
*s.as_ptr().offset(i as isize + 8) as i8,
*s.as_ptr().offset(i as isize + 9) as i8,
*s.as_ptr().offset(i as isize + 10) as i8,
*s.as_ptr().offset(i as isize + 11) as i8,
*s.as_ptr().offset(i as isize + 12) as i8,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
),
12 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
*s.as_ptr().offset(i as isize + 7) as i8,
*s.as_ptr().offset(i as isize + 8) as i8,
*s.as_ptr().offset(i as isize + 9) as i8,
*s.as_ptr().offset(i as isize + 10) as i8,
*s.as_ptr().offset(i as isize + 11) as i8,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
),
11 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
*s.as_ptr().offset(i as isize + 7) as i8,
*s.as_ptr().offset(i as isize + 8) as i8,
*s.as_ptr().offset(i as isize + 9) as i8,
*s.as_ptr().offset(i as isize + 10) as i8,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
),
10 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
*s.as_ptr().offset(i as isize + 7) as i8,
*s.as_ptr().offset(i as isize + 8) as i8,
*s.as_ptr().offset(i as isize + 9) as i8,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
),
9 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
*s.as_ptr().offset(i as isize + 7) as i8,
*s.as_ptr().offset(i as isize + 8) as i8,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0,
),
8 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
*s.as_ptr().offset(i as isize + 7) as i8,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
),
7 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
*s.as_ptr().offset(i as isize + 6) as i8,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
),
6 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
*s.as_ptr().offset(i as isize + 5) as i8,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
),
5 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
),
4 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
*s.as_ptr().offset(i as isize + 3) as i8,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
),
3 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
*s.as_ptr().offset(i as isize + 2) as i8,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
),
2 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
*s.as_ptr().offset(i as isize + 1) as i8,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
),
1 => mm256_setr_epi8(
*s.as_ptr().offset(i as isize + 0) as i8,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
),
_ => mm256_setr_epi8(
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
),
}
}
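// Note: each match arm above emulates a masked tail load: the final `n` bytes
// of `s` (for `n < 32`) fill the low lanes of a 256-bit vector and the
// remaining lanes are zeroed.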
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_mm256i() {
let test_cases = vec![0, 1, 2, 3];
for i in test_cases {
let want = mm256_setr_epi8(
i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i,
i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i,
);
let got = mm256i(i);
assert_eq!(want.as_u8x32().as_array(), got.as_u8x32().as_array());
}
}
}
|
*s.as_ptr().offset(i as isize + 3) as i8,
*s.as_ptr().offset(i as isize + 4) as i8,
|
teleBot.py
|
import time
import telebot
from Responses import TELE_HI_GREET, TELE_CLASS_CODE
import BacaPdf as pdf
import csvHandler as csvH
with open('API_KEY.txt') as API_KEY:
bot = telebot.TeleBot(API_KEY.read().strip())
#Message type check
#ClassCode, TimeInterval, Status, Feedback
messageBool = [False, False, False, False]
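# Example: /classcode sets messageBool to [True, False, False, False], so the
# user's next plain message is accepted by the ClassCode filter and handled
# by ClassCH below.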
def Echooo(themessage):
for ID in csvH.AllID():
bot.send_message(ID, themessage)
def Greet(message):
print(message.text)
if (message.text).lower() in TELE_HI_GREET:
return True
return False
def
|
(message):
if (message.text).lower() in TELE_CLASS_CODE:
return True
return False
def TimeInterval(message):
message = (message.text).lower()
if message.isdigit():
return True
return False
def feedbackCatch(message):
if messageBool[3]:
return True
return False
#Commands
@bot.message_handler(commands=['start'])
def start(message):
bot.reply_to(message,"HEY! Welcome to bot Ukrida")
if csvH.checkID(message.chat.id) == 0:
classCom(message,True)
csvH.newID(message.chat.id,
message.chat.first_name,
message.chat.username,
"1PEEA", 10, 'active')
@bot.message_handler(commands=['classcode'])
def classCom(message, first = False):
global messageBool
messageBool = [True, False, False, False]
if first:
bot.send_message(message.chat.id, "Ketik kode kelasmu,\n(Contoh 1Peea):")
else:
bot.send_message(message.chat.id, "Ketik kode kelasmu, atau /cancel untuk membatalkan\n(Contoh 1Peea):")
@bot.message_handler(commands=['cancel'])
def cancelCom(message):
global messageBool
for x in messageBool:
if x:
messageBool = [False, False, False, False]
bot.send_message(message.chat.id, "OK :)")
return
@bot.message_handler(commands=['feedback'])
def feedbackCom(message):
global messageBool
messageBool = [False, False, False, True]
bot.send_message(message.chat.id, "Feedback, atau laporan error:")
@bot.message_handler(commands=['schedules'])
def schedulesCom(message,classCode=0):
if classCode == 0:
classCode = csvH.checkClass(message.chat.id)
queryClass = pdf.openFile(classCode)
if len(queryClass) > 0:
for kelas in queryClass:
sendTo = "Matkul: "+kelas[0]+"\n"
sendTo += "Waktu: "+kelas[1]+", "+kelas[2]+kelas[3]+"\n"
sendTo += "Dosen: "+kelas[4]+"\n"
if kelas[5] == "PTM":
sendTo += "Room:" + kelas[5]
elif kelas[5] == "Meet":
sendTo += "Room:" +'G'+ kelas[5]
else:  # numeric meeting ID
sendTo += "MeetID: "+kelas[5]+"\n"
sendTo += "Pass: "+kelas[6]
bot.send_message(message.chat.id, sendTo)
bot.send_message(message.chat.id, "Selamat Kuliah!")
else:
bot.send_message(message.chat.id, "Maaf, kode kelas "+classCode.upper()+" belum ada di list.")
@bot.message_handler(commands=['timer', 'help'])
def notyetCom(message):
bot.send_message(message.chat.id, "Under Construction")
#Commands Child
@bot.message_handler(func=Greet)
def GreetCH(message):
bot.send_message(message.chat.id, "Halo "+message.chat.first_name+" :)")
@bot.message_handler(func=feedbackCatch)
def feedbackCH(message):
with open('feedback.txt','a') as f:
f.write(message.text)
#bot.send_message(895523970, str(message.chat.first_name)+":"+message.text)
bot.send_message(message.chat.id, "Pesan terkirim :)")
@bot.message_handler(func=ClassCode)
def ClassCH(message):
if messageBool[0]:
bot.send_message(message.chat.id, "OK, kelasmu tercatat: "+(message.text).upper())
schedulesCom(message,message.text)
csvH.changeClass(csvH.checkID(message.chat.id), (message.text).upper())
messageBool[0] = False
else:
bot.send_message(message.chat.id, "Ketik /classcode untuk mengganti kode kelas, atau /schedules untuk melihat jadwal kelasmu")
if __name__ == "__main__":
Echooo("Hi! Server On 7-12 Maret 2022")
# bot.infinity_polling()
# time.sleep(2)
|
ClassCode
|
getVirtualMachine.go
|
// *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package v20170330
|
import (
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func LookupVirtualMachine(ctx *pulumi.Context, args *LookupVirtualMachineArgs, opts ...pulumi.InvokeOption) (*LookupVirtualMachineResult, error) {
var rv LookupVirtualMachineResult
err := ctx.Invoke("azure-native:compute/v20170330:getVirtualMachine", args, &rv, opts...)
if err != nil {
return nil, err
}
return &rv, nil
}
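// A minimal lookup sketch (resource names are hypothetical; ctx comes from the
// surrounding pulumi.Run callback):
//
//	vm, err := LookupVirtualMachine(ctx, &LookupVirtualMachineArgs{
//		ResourceGroupName: "my-resource-group",
//		VmName:            "my-vm",
//	})
//	if err == nil {
//		_ = vm.VmId
//	}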
type LookupVirtualMachineArgs struct {
Expand *string `pulumi:"expand"`
ResourceGroupName string `pulumi:"resourceGroupName"`
VmName string `pulumi:"vmName"`
}
// Describes a Virtual Machine.
type LookupVirtualMachineResult struct {
AvailabilitySet *SubResourceResponse `pulumi:"availabilitySet"`
DiagnosticsProfile *DiagnosticsProfileResponse `pulumi:"diagnosticsProfile"`
HardwareProfile *HardwareProfileResponse `pulumi:"hardwareProfile"`
Id string `pulumi:"id"`
Identity *VirtualMachineIdentityResponse `pulumi:"identity"`
InstanceView VirtualMachineInstanceViewResponse `pulumi:"instanceView"`
LicenseType *string `pulumi:"licenseType"`
Location string `pulumi:"location"`
Name string `pulumi:"name"`
NetworkProfile *NetworkProfileResponse `pulumi:"networkProfile"`
OsProfile *OSProfileResponse `pulumi:"osProfile"`
Plan *PlanResponse `pulumi:"plan"`
ProvisioningState string `pulumi:"provisioningState"`
Resources []VirtualMachineExtensionResponse `pulumi:"resources"`
StorageProfile *StorageProfileResponse `pulumi:"storageProfile"`
Tags map[string]string `pulumi:"tags"`
Type string `pulumi:"type"`
VmId string `pulumi:"vmId"`
Zones []string `pulumi:"zones"`
}
| |
subheader.js
|
/* */
"format cjs";
/*!
* Angular Material Design
* https://github.com/angular/material
* @license MIT
* v1.0.0-rc2-master-fcd199e
*/
(function( window, angular, undefined ){
"use strict";
/**
* @ngdoc module
* @name material.components.subheader
* @description
* SubHeader module
*
* Subheaders are special list tiles that delineate distinct sections of a
* list or grid list and are typically related to the current filtering or
* sorting criteria. Subheader tiles are either displayed inline with tiles or
* can be associated with content, for example, in an adjacent column.
*
* Upon scrolling, subheaders remain pinned to the top of the screen and remain
* pinned until pushed on or off screen by the next subheader. @see [Material
* Design Specifications](https://www.google.com/design/spec/components/subheaders.html)
*
* > To improve the visual grouping of content, use the system color for your subheaders.
*
*/
angular
.module('material.components.subheader', [
'material.core',
'material.components.sticky'
])
.directive('mdSubheader', MdSubheaderDirective);
/**
* @ngdoc directive
* @name mdSubheader
* @module material.components.subheader
*
* @restrict E
*
* @description
* The `<md-subheader>` directive is a subheader for a section. By default it is sticky.
* You can make it not sticky by applying the `md-no-sticky` class to the subheader.
*
*
* @usage
* <hljs lang="html">
* <md-subheader>Online Friends</md-subheader>
* </hljs>
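*
* A non-sticky variant (per the `md-no-sticky` note above):
* <hljs lang="html">
* <md-subheader class="md-no-sticky">Offline Friends</md-subheader>
* </hljs>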
*/
function
|
($mdSticky, $compile, $mdTheming, $mdUtil) {
return {
restrict: 'E',
replace: true,
transclude: true,
template: (
'<div class="md-subheader">' +
' <div class="md-subheader-inner">' +
' <span class="md-subheader-content"></span>' +
' </div>' +
'</div>'
),
link: function postLink(scope, element, attr, controllers, transclude) {
$mdTheming(element);
var outerHTML = element[0].outerHTML;
function getContent(el) {
return angular.element(el[0].querySelector('.md-subheader-content'));
}
// Transclude the user-given contents of the subheader
// the conventional way.
transclude(scope, function(clone) {
getContent(element).append(clone);
});
// Create another clone, that uses the outer and inner contents
// of the element, that will be 'stickied' as the user scrolls.
if (!element.hasClass('md-no-sticky')) {
transclude(scope, function(clone) {
// If the user adds an ng-if or ng-repeat directly to the md-subheader element, the
// compiled clone below will only be a comment tag (since they replace their elements with
// a comment) which cannot be properly passed to the $mdSticky; so we wrap it in our own
// DIV to ensure we have something $mdSticky can use
var wrapperHtml = '<div class="md-subheader-wrapper">' + outerHTML + '</div>';
var stickyClone = $compile(wrapperHtml)(scope);
// Append the sticky
$mdSticky(scope, element, stickyClone);
// Delay initialization until after any `ng-if`/`ng-repeat`/etc has finished before
// attempting to create the clone
$mdUtil.nextTick(function() {
getContent(stickyClone).append(clone);
});
});
}
}
}
}
MdSubheaderDirective.$inject = ["$mdSticky", "$compile", "$mdTheming", "$mdUtil"];
})(window, window.angular);
|
MdSubheaderDirective
|
matrix.rs
|
//! An efficient representation of a 2D matrix.
use crate::prelude::*;
use std::ops::Index;
use std::ops::IndexMut;
// ============
// == Matrix ==
// ============
/// An efficient 2D matrix implemented on top of [`std::vec::Vec`].
#[derive(Clone,Debug,Default,PartialEq,Eq)]
pub struct Matrix<T> {
/// The number of rows in the matrix.
rows:usize,
/// The number of columns in the matrix.
columns:usize,
/// The backing storage for the matrix.
matrix:Vec<T>,
}
impl<T> Matrix<T> {
/// Get the number of rows in the matrix.
pub fn rows(&self) -> usize {
self.rows
}
/// Get the number of columns in the matrix.
pub fn columns(&self) -> usize {
self.columns
}
/// Obtain the indices for the rows in this matrix.
pub fn row_indices(&self) -> Range<usize> {
0..self.rows()
}
}
impl<T:Default> Matrix<T> {
/// Constructs a matrix with the dimensions given by `rows` and `columns`.
pub fn new(rows:usize, columns:usize) -> Self {
let mut matrix = Vec::with_capacity(rows*columns);
for _ in 0..matrix.capacity() {
matrix.push(default())
}
Self{rows,columns,matrix}
}
/// Adds a new row to the matrix `self`, filled with default values.
pub fn new_row(&mut self) {
for _ in 0..self.columns {
self.matrix.push(default());
}
self.rows += 1;
}
}
// === Trait Impls ===
impl<T> Index<(usize,usize)> for Matrix<T> {
type Output = T;
fn
|
(&self, index:(usize,usize)) -> &T {
&self.matrix[index.0*self.columns+index.1]
}
}
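// Indexing sketch: element (row, column) resolves to backing index
// row * columns + column, since storage is row-major, e.g.:
// let mut m = Matrix::<u8>::new(2, 3);
// m[(1, 2)] = 7; // writes backing index 1 * 3 + 2 == 5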
impl<T> IndexMut<(usize,usize)> for Matrix<T> {
fn index_mut(&mut self, index:(usize,usize)) -> &mut T {
&mut self.matrix[index.0*self.columns+index.1]
}
}
|
index
|
host_file_system_volume.py
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def
|
(vim, *args, **kwargs):
'''Detailed information about a file system. This is a base type for derived
types that have more specific details about specific filesystem types.
Typically a FileSystem is exposed as a datastore. See DatastoreInfo,
HostVmfsVolume, HostNasVolume, HostLocalFileSystemVolume, HostVfatVolume.'''
obj = vim.client.factory.create('{urn:vim25}HostFileSystemVolume')
# do some validation checking...
if (len(args) + len(kwargs)) < 3:
raise IndexError('Expected at least 3 arguments, got: %d' % (len(args) + len(kwargs)))
required = [ 'capacity', 'name', 'type' ]
optional = [ 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
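# A minimal usage sketch (values are hypothetical; `vim` is an already
# connected pyvisdk service instance):
# volume = HostFileSystemVolume(vim, capacity=10 * 1024 ** 3,
#                               name='datastore1', type='VMFS')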
|
HostFileSystemVolume
|
saving.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model saving utilities.
|
"""
from keras.saving import * # noqa: F401,F403
|
Everything has been moved to keras/saving/. This file will be deleted soon.
|
connector.py
|
# Copyright 2020 John Reese
# Licensed under the MIT license
import re
from typing import Any, Pattern, Union
from .engines.base import Connection
from .errors import InvalidURI
from .types import Location
_uri_regex: Pattern = re.compile(r"(?P<engine>\w+)://(?P<location>.+)")
def
|
(location: Union[str, Location], *args: Any, **kwargs: Any) -> Connection:
"""Connect to the specified database."""
if isinstance(location, str):
match = _uri_regex.match(location)
if match:
engine, database = match.groups()
location = Location(engine, database=database)
else:
raise InvalidURI(f"Invalid database connection URI {location}")
connector, engine_kls = Connection.get_connector(location.engine)
return connector(engine_kls(), location, *args, **kwargs)
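# Example (sketch): connect("sqlite:///tmp/test.db") parses the URI into
# Location(engine="sqlite", database="/tmp/test.db") and dispatches to
# whichever connector is registered for the "sqlite" engine (assumed here).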
|
connect
|
query_cluster_disk_specification.go
|
package mse
|
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
// QueryClusterDiskSpecification invokes the mse.QueryClusterDiskSpecification API synchronously
func (client *Client) QueryClusterDiskSpecification(request *QueryClusterDiskSpecificationRequest) (response *QueryClusterDiskSpecificationResponse, err error) {
response = CreateQueryClusterDiskSpecificationResponse()
err = client.DoAction(request, response)
return
}
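// A minimal call sketch (the ClusterType value is hypothetical):
//
//	req := CreateQueryClusterDiskSpecificationRequest()
//	req.ClusterType = "Eureka"
//	resp, err := client.QueryClusterDiskSpecification(req)
//	_, _ = resp, err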
// QueryClusterDiskSpecificationWithChan invokes the mse.QueryClusterDiskSpecification API asynchronously
func (client *Client) QueryClusterDiskSpecificationWithChan(request *QueryClusterDiskSpecificationRequest) (<-chan *QueryClusterDiskSpecificationResponse, <-chan error) {
responseChan := make(chan *QueryClusterDiskSpecificationResponse, 1)
errChan := make(chan error, 1)
err := client.AddAsyncTask(func() {
defer close(responseChan)
defer close(errChan)
response, err := client.QueryClusterDiskSpecification(request)
if err != nil {
errChan <- err
} else {
responseChan <- response
}
})
if err != nil {
errChan <- err
close(responseChan)
close(errChan)
}
return responseChan, errChan
}
// QueryClusterDiskSpecificationWithCallback invokes the mse.QueryClusterDiskSpecification API asynchronously
func (client *Client) QueryClusterDiskSpecificationWithCallback(request *QueryClusterDiskSpecificationRequest, callback func(response *QueryClusterDiskSpecificationResponse, err error)) <-chan int {
result := make(chan int, 1)
err := client.AddAsyncTask(func() {
var response *QueryClusterDiskSpecificationResponse
var err error
defer close(result)
response, err = client.QueryClusterDiskSpecification(request)
callback(response, err)
result <- 1
})
if err != nil {
defer close(result)
callback(nil, err)
result <- 0
}
return result
}
// QueryClusterDiskSpecificationRequest is the request struct for api QueryClusterDiskSpecification
type QueryClusterDiskSpecificationRequest struct {
*requests.RpcRequest
ClusterType string `position:"Query" name:"ClusterType"`
}
// QueryClusterDiskSpecificationResponse is the response struct for api QueryClusterDiskSpecification
type QueryClusterDiskSpecificationResponse struct {
*responses.BaseResponse
HttpStatusCode int `json:"HttpStatusCode" xml:"HttpStatusCode"`
RequestId string `json:"RequestId" xml:"RequestId"`
Success bool `json:"Success" xml:"Success"`
ErrorCode string `json:"ErrorCode" xml:"ErrorCode"`
Code int `json:"Code" xml:"Code"`
Message string `json:"Message" xml:"Message"`
DynamicMessage string `json:"DynamicMessage" xml:"DynamicMessage"`
Data Data `json:"Data" xml:"Data"`
}
// CreateQueryClusterDiskSpecificationRequest creates a request to invoke QueryClusterDiskSpecification API
func CreateQueryClusterDiskSpecificationRequest() (request *QueryClusterDiskSpecificationRequest) {
request = &QueryClusterDiskSpecificationRequest{
RpcRequest: &requests.RpcRequest{},
}
request.InitWithApiInfo("mse", "2019-05-31", "QueryClusterDiskSpecification", "", "")
request.Method = requests.POST
return
}
// CreateQueryClusterDiskSpecificationResponse creates a response to parse from QueryClusterDiskSpecification response
func CreateQueryClusterDiskSpecificationResponse() (response *QueryClusterDiskSpecificationResponse) {
response = &QueryClusterDiskSpecificationResponse{
BaseResponse: &responses.BaseResponse{},
}
return
}
| |
time.rs
|
use crate::types::*;
use chrono::{FixedOffset, NaiveTime, Offset, Timelike};
use neo4jrs_macros::BoltStruct;
#[derive(Debug, PartialEq, Clone, BoltStruct)]
#[signature(0xB2, 0x54)]
pub struct BoltTime {
nanoseconds: BoltInteger,
tz_offset_seconds: BoltInteger,
}
#[derive(Debug, PartialEq, Clone, BoltStruct)]
#[signature(0xB1, 0x74)]
pub struct BoltLocalTime {
nanoseconds: BoltInteger,
}
impl Into<BoltTime> for (NaiveTime, FixedOffset) {
fn into(self) -> BoltTime {
let seconds_from_midnight = self.0.num_seconds_from_midnight() as i64;
let nanoseconds = seconds_from_midnight * 1_000_000_000 + self.0.nanosecond() as i64;
BoltTime {
nanoseconds: nanoseconds.into(),
tz_offset_seconds: self.1.fix().local_minus_utc().into(),
}
}
}
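// Encoding sketch: (07:08:09.000000100, +02:00) stores
// nanoseconds = (7 * 3600 + 8 * 60 + 9) * 1_000_000_000 + 100
//             = 25_689_000_000_100
// and tz_offset_seconds = 7200 (see the round-trip tests below).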
impl Into<(NaiveTime, FixedOffset)> for BoltTime {
fn into(self) -> (NaiveTime, FixedOffset) {
let nanos = self.nanoseconds.value;
let seconds = (nanos / 1_000_000_000) as u32;
let nanoseconds = (nanos % 1_000_000_000) as u32;
(
NaiveTime::from_num_seconds_from_midnight(seconds, nanoseconds),
FixedOffset::east(self.tz_offset_seconds.value as i32),
)
}
}
impl Into<BoltLocalTime> for NaiveTime {
fn into(self) -> BoltLocalTime {
let seconds_from_midnight = self.num_seconds_from_midnight() as i64;
let nanoseconds = seconds_from_midnight * 1_000_000_000 + self.nanosecond() as i64;
BoltLocalTime {
nanoseconds: nanoseconds.into(),
}
}
}
impl Into<NaiveTime> for BoltLocalTime {
fn
|
(self) -> NaiveTime {
let nanos = self.nanoseconds.value;
let seconds = (nanos / 1_000_000_000) as u32;
let nanoseconds = (nanos % 1_000_000_000) as u32;
NaiveTime::from_num_seconds_from_midnight(seconds, nanoseconds)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::version::Version;
use bytes::*;
use std::cell::RefCell;
use std::rc::Rc;
#[test]
fn should_serialize_time() {
let time = NaiveTime::from_hms_nano_opt(7, 8, 9, 100).unwrap();
let offset = FixedOffset::east(2 * 3600);
let time: BoltTime = (time, offset).into();
assert_eq!(
time.into_bytes(Version::V4_1).unwrap(),
Bytes::from_static(&[
0xB2, 0x54, 0xCB, 0x00, 0x00, 0x17, 0x5D, 0x2F, 0xB8, 0x3A, 0x64, 0xC9, 0x1C, 0x20,
])
);
}
#[test]
fn should_deserialize_time() {
let bytes = Rc::new(RefCell::new(Bytes::from_static(&[
0xB2, 0x54, 0xCB, 0x00, 0x00, 0x17, 0x5D, 0x2F, 0xB8, 0x3A, 0x64, 0xC9, 0x1C, 0x20,
])));
let (time, offset) = BoltTime::parse(Version::V4_1, bytes)
.unwrap()
.try_into()
.unwrap();
assert_eq!(time.to_string(), "07:08:09.000000100");
assert_eq!(offset, FixedOffset::east(2 * 3600));
}
#[test]
fn should_serialize_local_time() {
let naive_time = NaiveTime::from_hms_nano_opt(7, 8, 9, 100).unwrap();
let time: BoltLocalTime = naive_time.into();
assert_eq!(
time.into_bytes(Version::V4_1).unwrap(),
Bytes::from_static(&[
0xB1, 0x74, 0xCB, 0x00, 0x00, 0x17, 0x5D, 0x2F, 0xB8, 0x3A, 0x64,
])
);
}
#[test]
fn should_deserialize_local_time() {
let bytes = Rc::new(RefCell::new(Bytes::from_static(&[
0xB1, 0x74, 0xCB, 0x00, 0x00, 0x17, 0x5D, 0x2F, 0xB8, 0x3A, 0x64,
])));
let time: NaiveTime = BoltLocalTime::parse(Version::V4_1, bytes)
.unwrap()
.try_into()
.unwrap();
assert_eq!(time.to_string(), "07:08:09.000000100");
}
}
|
into
|
error.rs
|
use ansi_term::Colour::Red;
use std::fmt::Display;
#[derive(Debug)]
pub enum Error {
Io(::std::io::Error),
Opencv(::opencv::Error),
Msg(String),
}
impl Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Error::Io(io_error) => write!(f, "IO ERR: {io_error}"),
Error::Opencv(opencv_error) => write!(f, "{opencv_error}"),
Error::Msg(error) => write!(f, "ERR: {error}"),
}
}
}
|
impl From<::std::io::Error> for Error {
fn from(s: ::std::io::Error) -> Self {
Error::Io(s)
}
}
impl From<::opencv::Error> for Error {
fn from(s: ::opencv::Error) -> Self {
Error::Opencv(s)
}
}
impl From<&'static str> for Error {
fn from(s: &'static str) -> Self {
Error::Msg(s.to_owned())
}
}
impl From<String> for Error {
fn from(s: String) -> Self {
Error::Msg(s)
}
}
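// The From impls above let callers bubble any of these sources with `?`,
// e.g. (sketch): fn read_frame() -> Result<Vec<u8>> { Ok(std::fs::read("frame.png")?) }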
impl std::error::Error for Error {}
pub type Result<T> = std::result::Result<T, Error>;
pub fn default_error_handler(handle: &mut dyn std::io::Write, error: &Error) {
writeln!(
handle,
"{} {}\n\nFor more information go to docs.rs/asciiframe",
Red.paint("error:"),
error
)
.unwrap();
}
| |
index.js
|
/**
* Copyright 2019 The AMPHTML Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
|
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const path = require('path');
const express = require('express');
// eslint-disable-next-line new-cap
const playground = express.Router();
playground.use(
'/boilerplate',
express.static(path.join(__dirname, '../dist'), {extensions: ['html']})
);
module.exports = playground;
|
*
* http://www.apache.org/licenses/LICENSE-2.0
|
request.js
|
import axios from 'axios'
import { Message } from 'element-ui'
import router from '@/router'
// create an axios instance
const service = axios.create({
baseURL: process.env.VUE_APP_BASE_API, // url = base url + request url
// withCredentials: true, // send cookies when cross-domain requests
timeout: 5000 // request timeout
})
// request interceptor
service.interceptors.request.use(
config => {
/* Attach the user's fuserId to every outgoing request */
const fuserId = window.sessionStorage.getItem('fuserId')
if (config.method === 'post' || config.method === 'put') {
// Creating review permissions and assigning/disabling roles must not carry fuserId
/* Review-permission list data updates also omit fuserId (config.url !== '/tJxQuery/updayeTJxQuery') */
switch (config.url) {
case '/tJxQuery/updayeTJxQuery':
|
case '/tJxQuery/insertTJxQuery':
break
case '/tJxAdminrole/distributionRole':
break
case '/tSecUser/updateUserDisable':
break
default:
config.data['fuserId'] = fuserId
}
}
return config
},
error => {
// do something with request error
return Promise.reject(error)
}
)
// response interceptor
service.interceptors.response.use(
response => {
const res = response.data
if (res.code === 0 || res.code === 1 || res.code === 2) {
return Promise.resolve(res)
} else if (res.code === 400) {
router.replace({
name: 'login'
})
} else {
return Message({
message: res.message || '网络异常,请重新尝试',
type: 'warning',
duration: 5 * 1000
})
}
},
error => {
if (error.response.status) {
switch (error.response.status) {
case 500:
router.replace({
name: 'login'
})
Message({
message: '网络连接错误,请重新尝试!',
type: 'error',
duration: 5 * 1000
})
break
case 404:
Message({
message: '网络请求不存在!',
type: 'error',
duration: 5 * 1000
})
break
case 502:
Message({
message: '当前服务器不稳定,请稍后尝试!',
type: 'error',
duration: 5 * 1000
})
break
default:
Message({
message: error.response.data.message,
type: 'error',
duration: 5 * 1000
})
}
}
return Promise.reject(error.response)
}
)
export default service
|
break
|
pages-Order-Orderpages-PaySuccessful.27349172.js
|
(window["webpackJsonp"]=window["webpackJsonp"]||[]).push([["pages-Order-Orderpages-PaySuccessful"],{"187c":function(n,a,t){"use strict";var r;t.r(a);var e,s=function(){var n=this,a=n.$createElement,r=n._self._c||a;return r("v-uni-view",{staticClass:"PaySunc"},[r("v-uni-view",{staticClass:"PaySunc_img"},[r("v-uni-image",{staticClass:"PaySunc_img",attrs:{src:t("e900")}}),r("v-uni-text",{staticClass:"PaySunc_text"},[n._v("支付成功")])],1),r("v-uni-view",{staticClass:"flish_btn"},[n._v("完成")])],1)},i=[],c=(t("c703"),t("f0c5")),u={},o=Object(c["a"])(u,s,i,!1,null,"51ae2085",null,!1,r,e);a["default"]=o.exports},"8fa1":function(n,a,t){var r=t("b6ce");"string"===typeof r&&(r=[[n.i,r,""]]),r.locals&&(n.exports=r.locals);var e=t("4f06").default;e("7cc90daa",r,!0,{sourceMap:!1,shadowMode:!1})},b6ce:function(n,a,t){var r=t("24fb");a=r(!1),a.push([n.i,'@charset "UTF-8";\r\n/**\r\n * 这里是uni-app内置的常用样式变量\r\n *\r\n * uni-app 官方扩展插件及插件市场(https://ext.dcloud.net.cn)上很多三方插件均使用了这些样式变量\r\n * 如果你是插件开发者,建议你使用scss预处理,并在插件代码中直接使用这些变量(无需 import 这个文件),方便用户通过搭积木的方式开发整体风格一致的App\r\n *\r\n */\r\n/**\r\n * 如果你是App开发者(插件使用者),你可以通过修改这些变量来定制自己的插件主题,实现自定义主题功能\r\n *\r\n * 如果你的项目同样使用了scss预处理,你也可以直接在你的 scss 代码中使用如下变量,同时无需 import 这个文件\r\n */\r\n/* 颜色变量 */\r\n/* 行为相关颜色 */\r\n/* 文字基本颜色 */\r\n/* 背景颜色 */\r\n/* 边框颜色 */\r\n/* 尺寸变量 */\r\n/* 文字尺寸 */\r\n/* 图片尺寸 */\r\n/* Border Radius */\r\n/* 水平间距 */\r\n/* 垂直间距 */\r\n/* 透明度 */\r\n/* 文章场景相关 */.PaySunc[data-v-51ae2085]{text-align:center;color:#666}.PaySunc .PaySunc_img[data-v-51ae2085]{width:%?500?%;height:%?500?%;margin:auto}.PaySunc .PaySunc_text[data-v-51ae2085]{position:relative;top:%?-60?%}.PaySunc .flish_btn[data-v-51ae2085]{width:%?180?%;height:%?56?%;line-height:%?56?%;-webkit-border-radius:%?12?%;border-radius:%?12?%;border:1px solid #333;margin:%?160?% auto 0}',""]),n.exports=a},c703:function(n,a,t){"use strict";var r=t("8fa1"),e=t.n(r);e.a},e900:function(n,a,t){n.exports=t.p+"static/img/zhifu_zhifuchenggong.0cb97b97.png"}}]);
|
||
timeoutmode.js
|
// Copyright 2019 The Closure Library Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @fileoverview Provides an interface that defines how users can extend the
* `goog.labs.mock` mocking framework with a TimeoutMode. This is used
* with waitAndVerify to specify a max timeout.
*
* In addition it exports a factory method that allows users to easily obtain
* a TimeoutMode instance.
*/
goog.provide('goog.labs.mock.timeout');
goog.provide('goog.labs.mock.timeout.TimeoutMode');
/**
* Used to specify max timeout on waitAndVerify
* @const
*/
goog.labs.mock.timeout.TimeoutMode = class TimeoutMode {
/**
* @param {number} duration
*/
constructor(duration) {
/**
* @type {number} duration
|
this.duration = duration;
}
};
/**
* @param {number} duration
* @return {!goog.labs.mock.timeout.TimeoutMode}
*/
goog.labs.mock.timeout.timeout = function(duration) {
return new goog.labs.mock.timeout.TimeoutMode(duration);
};
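// Usage sketch (hedged: the exact waitAndVerify call shape is assumed from
// the fileoverview above, not verified here):
//   goog.labs.mock.waitAndVerify(mockObj, goog.labs.mock.timeout.timeout(150)).someMethod();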
|
* @public
*/
|
endpoints.ts
|
import { PartitionHash, RegionHash, getRegionInfo } from "@aws-sdk/config-resolver";
import { RegionInfoProvider } from "@aws-sdk/types";
const regionHash: RegionHash = {
fips: {
|
};
const partitionHash: PartitionHash = {
aws: {
regions: [
"af-south-1",
"ap-east-1",
"ap-northeast-1",
"ap-northeast-2",
"ap-northeast-3",
"ap-south-1",
"ap-southeast-1",
"ap-southeast-2",
"ca-central-1",
"eu-central-1",
"eu-north-1",
"eu-south-1",
"eu-west-1",
"eu-west-2",
"eu-west-3",
"fips",
"me-south-1",
"sa-east-1",
"us-east-1",
"us-east-2",
"us-west-1",
"us-west-2",
],
hostname: "elasticache.{region}.amazonaws.com",
},
"aws-cn": {
regions: ["cn-north-1", "cn-northwest-1"],
hostname: "elasticache.{region}.amazonaws.com.cn",
},
"aws-iso": {
regions: ["us-iso-east-1"],
hostname: "elasticache.{region}.c2s.ic.gov",
},
"aws-iso-b": {
regions: ["us-isob-east-1"],
hostname: "elasticache.{region}.sc2s.sgov.gov",
},
"aws-us-gov": {
regions: ["fips", "us-gov-east-1", "us-gov-west-1"],
hostname: "elasticache.{region}.amazonaws.com",
},
};
export const defaultRegionInfoProvider: RegionInfoProvider = async (region: string, options?: any) =>
getRegionInfo(region, {
...options,
signingService: "elasticache",
regionHash,
partitionHash,
});
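// Resolution sketch (hostname derived from the partition hash above):
// const info = await defaultRegionInfoProvider("eu-west-1");
// // info.hostname === "elasticache.eu-west-1.amazonaws.com"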
|
hostname: "elasticache.us-gov-west-1.amazonaws.com",
signingRegion: "us-gov-west-1",
},
|
runtest.rs
|
// ignore-tidy-filelength
use crate::common::CompareMode;
use crate::common::{expected_output_path, UI_EXTENSIONS, UI_FIXED, UI_STDERR, UI_STDOUT};
use crate::common::{output_base_dir, output_base_name, output_testname_unique};
use crate::common::{Codegen, CodegenUnits, Rustdoc};
use crate::common::{DebugInfoCdb, DebugInfoGdbLldb, DebugInfoGdb, DebugInfoLldb};
use crate::common::{CompileFail, Pretty, RunFail, RunPass, RunPassValgrind};
use crate::common::{Config, TestPaths};
use crate::common::{Incremental, MirOpt, RunMake, Ui, JsDocTest, Assembly, YkTir};
use diff;
use crate::errors::{self, Error, ErrorKind};
use crate::header::TestProps;
use crate::json;
use regex::{Captures, Regex};
use rustfix::{apply_suggestions, get_suggestions_from_json, Filter};
use crate::util::{logv, PathBufExt};
use std::collections::hash_map::DefaultHasher;
use std::collections::{HashMap, HashSet, VecDeque};
use std::env;
use std::ffi::{OsStr, OsString};
use std::fmt;
use std::fs::{self, create_dir_all, File, OpenOptions};
use std::hash::{Hash, Hasher};
use std::io::prelude::*;
use std::io::{self, BufReader};
use std::path::{Path, PathBuf};
use std::process::{Child, Command, ExitStatus, Output, Stdio};
use std::str;
use lazy_static::lazy_static;
use log::*;
use crate::extract_gdb_version;
use crate::is_android_gdb_target;
#[cfg(windows)]
fn disable_error_reporting<F: FnOnce() -> R, R>(f: F) -> R {
use std::sync::Mutex;
const SEM_NOGPFAULTERRORBOX: u32 = 0x0002;
extern "system" {
fn SetErrorMode(mode: u32) -> u32;
}
lazy_static! {
static ref LOCK: Mutex<()> = Mutex::new(());
}
// Error mode is a global variable, so lock it so only one thread will change it
let _lock = LOCK.lock().unwrap();
// Tell Windows to not show any UI on errors (such as terminating abnormally).
// This is important for running tests, since some of them use abnormal
// termination by design. This mode is inherited by all child processes.
unsafe {
let old_mode = SetErrorMode(SEM_NOGPFAULTERRORBOX); // read inherited flags
SetErrorMode(old_mode | SEM_NOGPFAULTERRORBOX);
let r = f();
SetErrorMode(old_mode);
r
}
}
#[cfg(not(windows))]
fn disable_error_reporting<F: FnOnce() -> R, R>(f: F) -> R {
f()
}
/// The name of the environment variable that holds dynamic library locations.
pub fn dylib_env_var() -> &'static str {
if cfg!(windows) {
"PATH"
} else if cfg!(target_os = "macos") {
"DYLD_LIBRARY_PATH"
} else if cfg!(target_os = "haiku") {
"LIBRARY_PATH"
} else {
"LD_LIBRARY_PATH"
}
}
/// The platform-specific library name
pub fn get_lib_name(lib: &str, dylib: bool) -> String {
// In some cases (e.g. MUSL), we build a static
// library, rather than a dynamic library.
// In this case, the only path we can pass
// with '--extern-meta' is the '.rlib' file
if !dylib {
return format!("lib{}.rlib", lib);
}
if cfg!(windows) {
format!("{}.dll", lib)
} else if cfg!(target_os = "macos") {
format!("lib{}.dylib", lib)
} else {
format!("lib{}.so", lib)
}
}
#[derive(Debug, PartialEq)]
pub enum DiffLine {
Context(String),
Expected(String),
Resulting(String),
}
#[derive(Debug, PartialEq)]
pub struct Mismatch {
pub line_number: u32,
pub lines: Vec<DiffLine>,
}
impl Mismatch {
fn new(line_number: u32) -> Mismatch {
Mismatch {
line_number,
lines: Vec::new(),
}
}
}
// Produces a diff between the expected output and actual output.
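// Example (sketch): make_diff("a\nb\nc", "a\nx\nc", 1) returns a single
// `Mismatch` at line 1 whose lines are [Context("a"), Expected("b"),
// Resulting("x"), Context("c")].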
pub fn make_diff(expected: &str, actual: &str, context_size: usize) -> Vec<Mismatch> {
let mut line_number = 1;
let mut context_queue: VecDeque<&str> = VecDeque::with_capacity(context_size);
let mut lines_since_mismatch = context_size + 1;
let mut results = Vec::new();
let mut mismatch = Mismatch::new(0);
for result in diff::lines(expected, actual) {
match result {
diff::Result::Left(str) => {
if lines_since_mismatch >= context_size && lines_since_mismatch > 0 {
results.push(mismatch);
mismatch = Mismatch::new(line_number - context_queue.len() as u32);
}
while let Some(line) = context_queue.pop_front() {
mismatch.lines.push(DiffLine::Context(line.to_owned()));
}
mismatch.lines.push(DiffLine::Expected(str.to_owned()));
line_number += 1;
lines_since_mismatch = 0;
}
diff::Result::Right(str) => {
if lines_since_mismatch >= context_size && lines_since_mismatch > 0 {
results.push(mismatch);
mismatch = Mismatch::new(line_number - context_queue.len() as u32);
}
while let Some(line) = context_queue.pop_front() {
mismatch.lines.push(DiffLine::Context(line.to_owned()));
}
mismatch.lines.push(DiffLine::Resulting(str.to_owned()));
lines_since_mismatch = 0;
}
diff::Result::Both(str, _) => {
if context_queue.len() >= context_size {
let _ = context_queue.pop_front();
}
if lines_since_mismatch < context_size {
mismatch.lines.push(DiffLine::Context(str.to_owned()));
} else if context_size > 0 {
context_queue.push_back(str);
}
line_number += 1;
lines_since_mismatch += 1;
}
}
}
results.push(mismatch);
results.remove(0);
results
}
pub fn run(config: Config, testpaths: &TestPaths, revision: Option<&str>) {
match &*config.target {
"arm-linux-androideabi"
| "armv7-linux-androideabi"
| "thumbv7neon-linux-androideabi"
| "aarch64-linux-android" => {
if !config.adb_device_status {
panic!("android device not available");
}
}
_ => {
// android has its own gdb handling
if config.mode == DebugInfoGdb && config.gdb.is_none() {
panic!("gdb not available but debuginfo gdb debuginfo test requested");
}
}
}
if config.verbose {
// We're going to be dumping a lot of info. Start on a new line.
print!("\n\n");
}
debug!("running {:?}", testpaths.file.display());
let props = TestProps::from_file(&testpaths.file, revision, &config);
let cx = TestCx {
config: &config,
props: &props,
testpaths,
revision: revision,
};
create_dir_all(&cx.output_base_dir()).unwrap();
if config.mode == Incremental {
// Incremental tests are special because they cannot be run in
// parallel.
assert!(
!props.revisions.is_empty(),
"Incremental tests require revisions."
);
cx.init_incremental_test();
for revision in &props.revisions {
let revision_props = TestProps::from_file(&testpaths.file, Some(revision), &config);
let rev_cx = TestCx {
config: &config,
props: &revision_props,
testpaths,
revision: Some(revision),
};
rev_cx.run_revision();
}
} else {
cx.run_revision();
}
cx.create_stamp();
}
pub fn compute_stamp_hash(config: &Config) -> String {
let mut hash = DefaultHasher::new();
config.stage_id.hash(&mut hash);
if config.mode == DebugInfoCdb {
config.cdb.hash(&mut hash);
}
if config.mode == DebugInfoGdb || config.mode == DebugInfoGdbLldb {
match config.gdb {
None => env::var_os("PATH").hash(&mut hash),
Some(ref s) if s.is_empty() => env::var_os("PATH").hash(&mut hash),
Some(ref s) => s.hash(&mut hash),
};
}
if config.mode == DebugInfoLldb || config.mode == DebugInfoGdbLldb {
env::var_os("PATH").hash(&mut hash);
env::var_os("PYTHONPATH").hash(&mut hash);
}
format!("{:x}", hash.finish())
}
struct TestCx<'test> {
config: &'test Config,
props: &'test TestProps,
testpaths: &'test TestPaths,
revision: Option<&'test str>,
}
struct DebuggerCommands {
commands: Vec<String>,
check_lines: Vec<String>,
breakpoint_lines: Vec<usize>,
}
enum ReadFrom {
Path,
Stdin(String),
}
impl<'test> TestCx<'test> {
/// Code executed for each revision in turn (or, if there are no
/// revisions, exactly once, with revision == None).
fn run_revision(&self) {
match self.config.mode {
CompileFail => self.run_cfail_test(),
RunFail => self.run_rfail_test(),
RunPassValgrind => self.run_valgrind_test(),
Pretty => self.run_pretty_test(),
DebugInfoGdbLldb => {
self.run_debuginfo_gdb_test();
self.run_debuginfo_lldb_test();
},
DebugInfoCdb => self.run_debuginfo_cdb_test(),
DebugInfoGdb => self.run_debuginfo_gdb_test(),
DebugInfoLldb => self.run_debuginfo_lldb_test(),
Codegen => self.run_codegen_test(),
Rustdoc => self.run_rustdoc_test(),
CodegenUnits => self.run_codegen_units_test(),
Incremental => self.run_incremental_test(),
RunMake => self.run_rmake_test(),
RunPass | Ui => self.run_ui_test(),
MirOpt => self.run_mir_opt_test(),
YkTir => self.run_yk_tir_test(),
Assembly => self.run_assembly_test(),
JsDocTest => self.run_js_doc_test(),
}
}
fn should_run_successfully(&self) -> bool {
let run_pass = match self.config.mode {
RunPass => true,
Ui => self.props.run_pass,
_ => unimplemented!(),
};
return run_pass && !self.props.skip_codegen;
}
fn should_compile_successfully(&self) -> bool {
match self.config.mode {
CompileFail => self.props.compile_pass,
RunPass => true,
JsDocTest => true,
Ui => self.props.compile_pass,
Incremental => {
let revision = self.revision
.expect("incremental tests require a list of revisions");
if revision.starts_with("rpass") || revision.starts_with("rfail") {
true
} else if revision.starts_with("cfail") {
// FIXME: would be nice if incremental revs could start with "cpass"
self.props.compile_pass
} else {
panic!("revision name must begin with rpass, rfail, or cfail");
}
}
mode => panic!("unimplemented for mode {:?}", mode),
}
}
fn check_if_test_should_compile(&self, proc_res: &ProcRes) {
if self.should_compile_successfully() {
if !proc_res.status.success() {
self.fatal_proc_rec("test compilation failed although it shouldn't!", proc_res);
}
} else {
if proc_res.status.success() {
self.fatal_proc_rec(
&format!("{} test compiled successfully!", self.config.mode)[..],
proc_res,
);
}
self.check_correct_failure_status(proc_res);
}
}
fn run_cfail_test(&self) {
let proc_res = self.compile_test();
self.check_if_test_should_compile(&proc_res);
self.check_no_compiler_crash(&proc_res);
let output_to_check = self.get_output(&proc_res);
let expected_errors = errors::load_errors(&self.testpaths.file, self.revision);
if !expected_errors.is_empty() {
if !self.props.error_patterns.is_empty() {
self.fatal("both error pattern and expected errors specified");
}
self.check_expected_errors(expected_errors, &proc_res);
} else {
self.check_error_patterns(&output_to_check, &proc_res);
}
self.check_forbid_output(&output_to_check, &proc_res);
}
fn run_rfail_test(&self) {
let proc_res = self.compile_test();
if !proc_res.status.success() {
self.fatal_proc_rec("compilation failed!", &proc_res);
}
let proc_res = self.exec_compiled_test();
// The value our Makefile configures valgrind to return on failure
const VALGRIND_ERR: i32 = 100;
if proc_res.status.code() == Some(VALGRIND_ERR) {
self.fatal_proc_rec("run-fail test isn't valgrind-clean!", &proc_res);
}
let output_to_check = self.get_output(&proc_res);
self.check_correct_failure_status(&proc_res);
self.check_error_patterns(&output_to_check, &proc_res);
}
fn get_output(&self, proc_res: &ProcRes) -> String {
if self.props.check_stdout {
format!("{}{}", proc_res.stdout, proc_res.stderr)
} else {
proc_res.stderr.clone()
}
}
fn check_correct_failure_status(&self, proc_res: &ProcRes) {
let expected_status = Some(self.props.failure_status);
let received_status = proc_res.status.code();
if expected_status != received_status {
self.fatal_proc_rec(
&format!(
"Error: expected failure status ({:?}) but received status {:?}.",
expected_status, received_status
),
proc_res,
);
}
}
fn run_rpass_test(&self) {
let proc_res = self.compile_test();
if !proc_res.status.success() {
self.fatal_proc_rec("compilation failed!", &proc_res);
}
// FIXME(#41968): Move this check to tidy?
let expected_errors = errors::load_errors(&self.testpaths.file, self.revision);
assert!(
expected_errors.is_empty(),
"run-pass tests with expected warnings should be moved to ui/"
);
if !self.props.skip_codegen {
let proc_res = self.exec_compiled_test();
if !proc_res.status.success() {
self.fatal_proc_rec("test run failed!", &proc_res);
}
}
}
fn run_valgrind_test(&self) {
assert!(self.revision.is_none(), "revisions not relevant here");
if self.config.valgrind_path.is_none() {
assert!(!self.config.force_valgrind);
return self.run_rpass_test();
}
let mut proc_res = self.compile_test();
if !proc_res.status.success() {
self.fatal_proc_rec("compilation failed!", &proc_res);
}
let mut new_config = self.config.clone();
new_config.runtool = new_config.valgrind_path.clone();
let new_cx = TestCx {
config: &new_config,
..*self
};
proc_res = new_cx.exec_compiled_test();
if !proc_res.status.success() {
self.fatal_proc_rec("test run failed!", &proc_res);
}
}
fn run_pretty_test(&self) {
if self.props.pp_exact.is_some() {
logv(self.config, "testing for exact pretty-printing".to_owned());
} else {
logv(
self.config,
"testing for converging pretty-printing".to_owned(),
);
}
let rounds = match self.props.pp_exact {
Some(_) => 1,
None => 2,
};
let src = fs::read_to_string(&self.testpaths.file).unwrap();
let mut srcs = vec![src];
let mut round = 0;
while round < rounds {
logv(
self.config,
format!(
"pretty-printing round {} revision {:?}",
round, self.revision
),
);
let read_from = if round == 0 {
ReadFrom::Path
} else {
ReadFrom::Stdin(srcs[round].to_owned())
};
let proc_res = self.print_source(read_from,
&self.props.pretty_mode);
if !proc_res.status.success() {
self.fatal_proc_rec(
&format!(
"pretty-printing failed in round {} revision {:?}",
round, self.revision
),
&proc_res,
);
}
let ProcRes { stdout, .. } = proc_res;
srcs.push(stdout);
round += 1;
}
let mut expected = match self.props.pp_exact {
Some(ref file) => {
let filepath = self.testpaths.file.parent().unwrap().join(file);
fs::read_to_string(&filepath).unwrap()
}
None => srcs[srcs.len() - 2].clone(),
};
let mut actual = srcs[srcs.len() - 1].clone();
if self.props.pp_exact.is_some() {
// Now we have to care about line endings
let cr = "\r".to_owned();
actual = actual.replace(&cr, "").to_owned();
expected = expected.replace(&cr, "").to_owned();
}
self.compare_source(&expected, &actual);
// If we're only making sure that the output matches then just stop here
if self.props.pretty_compare_only {
return;
}
// Finally, let's make sure it actually appears to remain valid code
let proc_res = self.typecheck_source(actual);
if !proc_res.status.success() {
self.fatal_proc_rec("pretty-printed source does not typecheck", &proc_res);
}
if !self.props.pretty_expanded {
return;
}
// additionally, run `--pretty expanded` and try to build it.
let proc_res = self.print_source(ReadFrom::Path, "expanded");
if !proc_res.status.success() {
self.fatal_proc_rec("pretty-printing (expanded) failed", &proc_res);
}
let ProcRes {
stdout: expanded_src,
..
} = proc_res;
let proc_res = self.typecheck_source(expanded_src);
if !proc_res.status.success() {
self.fatal_proc_rec(
"pretty-printed source (expanded) does not typecheck",
&proc_res,
);
}
}
fn print_source(&self, read_from: ReadFrom, pretty_type: &str) -> ProcRes {
let aux_dir = self.aux_output_dir_name();
let input: &str = match read_from {
ReadFrom::Stdin(_) => "-",
ReadFrom::Path => self.testpaths.file.to_str().unwrap(),
};
let mut rustc = Command::new(&self.config.rustc_path);
rustc
.arg(input)
.args(&["-Z", &format!("unpretty={}", pretty_type)])
.args(&["--target", &self.config.target])
.arg("-L")
.arg(&aux_dir)
.args(&self.props.compile_flags)
.envs(self.props.exec_env.clone());
self.maybe_add_external_args(&mut rustc,
self.split_maybe_args(&self.config.target_rustcflags));
let src = match read_from {
ReadFrom::Stdin(src) => Some(src),
ReadFrom::Path => None
};
self.compose_and_run(
rustc,
self.config.compile_lib_path.to_str().unwrap(),
Some(aux_dir.to_str().unwrap()),
src,
)
}
fn compare_source(&self, expected: &str, actual: &str) {
if expected != actual {
self.fatal(&format!(
"pretty-printed source does not match expected source\n\
expected:\n\
------------------------------------------\n\
{}\n\
------------------------------------------\n\
actual:\n\
------------------------------------------\n\
{}\n\
------------------------------------------\n\
\n",
expected, actual)
);
}
}
fn set_revision_flags(&self, cmd: &mut Command) {
if let Some(revision) = self.revision {
// Normalize revisions to be lowercase and replace `-`s with `_`s.
// Otherwise the `--cfg` flag is not valid.
let normalized_revision = revision.to_lowercase().replace("-", "_");
cmd.args(&["--cfg", &normalized_revision]);
}
}
fn typecheck_source(&self, src: String) -> ProcRes {
let mut rustc = Command::new(&self.config.rustc_path);
let out_dir = self.output_base_name().with_extension("pretty-out");
let _ = fs::remove_dir_all(&out_dir);
create_dir_all(&out_dir).unwrap();
let target = if self.props.force_host {
&*self.config.host
} else {
&*self.config.target
};
let aux_dir = self.aux_output_dir_name();
rustc
.arg("-")
.arg("-Zno-codegen")
.arg("--out-dir")
.arg(&out_dir)
.arg(&format!("--target={}", target))
.arg("-L")
.arg(&self.config.build_base)
.arg("-L")
.arg(aux_dir);
self.set_revision_flags(&mut rustc);
self.maybe_add_external_args(&mut rustc,
self.split_maybe_args(&self.config.target_rustcflags));
rustc.args(&self.props.compile_flags);
self.compose_and_run_compiler(rustc, Some(src))
}
fn run_debuginfo_cdb_test(&self) {
assert!(self.revision.is_none(), "revisions not relevant here");
let config = Config {
target_rustcflags: self.cleanup_debug_info_options(&self.config.target_rustcflags),
host_rustcflags: self.cleanup_debug_info_options(&self.config.host_rustcflags),
mode: DebugInfoCdb,
..self.config.clone()
};
let test_cx = TestCx {
config: &config,
..*self
};
test_cx.run_debuginfo_cdb_test_no_opt();
}
fn run_debuginfo_cdb_test_no_opt(&self) {
// compile test file (it should have 'compile-flags:-g' in the header)
let compile_result = self.compile_test();
if !compile_result.status.success() {
self.fatal_proc_rec("compilation failed!", &compile_result);
}
let exe_file = self.make_exe_name();
let prefixes = {
static PREFIXES: &'static [&'static str] = &["cdb", "cdbg"];
// No "native rust support" variation for CDB yet.
PREFIXES
};
// Parse debugger commands etc from test files
let DebuggerCommands {
commands,
check_lines,
breakpoint_lines,
..
} = self.parse_debugger_commands(prefixes);
// https://docs.microsoft.com/en-us/windows-hardware/drivers/debugger/debugger-commands
let mut script_str = String::with_capacity(2048);
script_str.push_str("version\n"); // List CDB (and more) version info in test output
script_str.push_str(".nvlist\n"); // List loaded `*.natvis` files, bulk of custom MSVC debug
// Set breakpoints on every line that contains the string "#break"
let source_file_name = self.testpaths.file.file_name().unwrap().to_string_lossy();
for line in &breakpoint_lines {
script_str.push_str(&format!(
"bp `{}:{}`\n",
source_file_name, line
));
}
// Append the other `cdb-command:`s
for line in &commands {
script_str.push_str(line);
script_str.push_str("\n");
}
script_str.push_str("\nqq\n"); // Quit the debugger (including remote debugger, if any)
// Write the script into a file
debug!("script_str = {}", script_str);
self.dump_output_file(&script_str, "debugger.script");
let debugger_script = self.make_out_name("debugger.script");
let cdb_path = &self.config.cdb.as_ref().unwrap();
let mut cdb = Command::new(cdb_path);
cdb
.arg("-lines") // Enable source line debugging.
.arg("-cf").arg(&debugger_script)
.arg(&exe_file);
let debugger_run_result = self.compose_and_run(
cdb,
self.config.run_lib_path.to_str().unwrap(),
None, // aux_path
None // input
);
if !debugger_run_result.status.success() {
self.fatal_proc_rec("Error while running CDB", &debugger_run_result);
}
self.check_debugger_output(&debugger_run_result, &check_lines);
}
fn run_debuginfo_gdb_test(&self) {
assert!(self.revision.is_none(), "revisions not relevant here");
let config = Config {
target_rustcflags: self.cleanup_debug_info_options(&self.config.target_rustcflags),
host_rustcflags: self.cleanup_debug_info_options(&self.config.host_rustcflags),
mode: DebugInfoGdb,
..self.config.clone()
};
let test_cx = TestCx {
config: &config,
..*self
};
test_cx.run_debuginfo_gdb_test_no_opt();
}
fn run_debuginfo_gdb_test_no_opt(&self) {
let prefixes = if self.config.gdb_native_rust {
// GDB with Rust
static PREFIXES: &'static [&'static str] = &["gdb", "gdbr"];
println!("NOTE: compiletest thinks it is using GDB with native rust support");
PREFIXES
} else {
// Generic GDB
static PREFIXES: &'static [&'static str] = &["gdb", "gdbg"];
println!("NOTE: compiletest thinks it is using GDB without native rust support");
PREFIXES
};
let DebuggerCommands {
commands,
check_lines,
breakpoint_lines,
} = self.parse_debugger_commands(prefixes);
let mut cmds = commands.join("\n");
// compile test file (it should have 'compile-flags:-g' in the header)
let compiler_run_result = self.compile_test();
if !compiler_run_result.status.success() {
self.fatal_proc_rec("compilation failed!", &compiler_run_result);
}
let exe_file = self.make_exe_name();
let debugger_run_result;
if is_android_gdb_target(&self.config.target) {
cmds = cmds.replace("run", "continue");
let tool_path = match self.config.android_cross_path.to_str() {
Some(x) => x.to_owned(),
None => self.fatal("cannot find android cross path"),
};
// write debugger script
let mut script_str = String::with_capacity(2048);
script_str.push_str(&format!("set charset {}\n", Self::charset()));
script_str.push_str(&format!("set sysroot {}\n", tool_path));
script_str.push_str(&format!("file {}\n", exe_file.to_str().unwrap()));
script_str.push_str("target remote :5039\n");
script_str.push_str(&format!(
"set solib-search-path \
./{}/stage2/lib/rustlib/{}/lib/\n",
self.config.host, self.config.target
));
for line in &breakpoint_lines {
script_str.push_str(
&format!(
"break {:?}:{}\n",
self.testpaths.file.file_name().unwrap().to_string_lossy(),
*line
)[..],
);
}
script_str.push_str(&cmds);
script_str.push_str("\nquit\n");
debug!("script_str = {}", script_str);
self.dump_output_file(&script_str, "debugger.script");
let adb_path = &self.config.adb_path;
Command::new(adb_path)
.arg("push")
.arg(&exe_file)
.arg(&self.config.adb_test_dir)
.status()
.expect(&format!("failed to exec `{:?}`", adb_path));
Command::new(adb_path)
.args(&["forward", "tcp:5039", "tcp:5039"])
.status()
.expect(&format!("failed to exec `{:?}`", adb_path));
let adb_arg = format!(
"export LD_LIBRARY_PATH={}; \
gdbserver{} :5039 {}/{}",
self.config.adb_test_dir.clone(),
if self.config.target.contains("aarch64") {
"64"
} else {
""
},
self.config.adb_test_dir.clone(),
exe_file.file_name().unwrap().to_str().unwrap()
);
debug!("adb arg: {}", adb_arg);
let mut adb = Command::new(adb_path)
.args(&["shell", &adb_arg])
.stdout(Stdio::piped())
.stderr(Stdio::inherit())
.spawn()
.expect(&format!("failed to exec `{:?}`", adb_path));
// Wait for the gdbserver to print out "Listening on port ..."
// at which point we know that it's started and then we can
// execute the debugger below.
let mut stdout = BufReader::new(adb.stdout.take().unwrap());
let mut line = String::new();
loop {
line.truncate(0);
stdout.read_line(&mut line).unwrap();
if line.starts_with("Listening on port 5039") {
break;
}
}
drop(stdout);
let mut debugger_script = OsString::from("-command=");
debugger_script.push(self.make_out_name("debugger.script"));
let debugger_opts: &[&OsStr] = &[
"-quiet".as_ref(),
"-batch".as_ref(),
"-nx".as_ref(),
&debugger_script,
];
let gdb_path = self.config.gdb.as_ref().unwrap();
let Output {
status,
stdout,
stderr,
} = Command::new(&gdb_path)
.args(debugger_opts)
.output()
.expect(&format!("failed to exec `{:?}`", gdb_path));
let cmdline = {
let mut gdb = Command::new(&format!("{}-gdb", self.config.target));
gdb.args(debugger_opts);
let cmdline = self.make_cmdline(&gdb, "");
logv(self.config, format!("executing {}", cmdline));
cmdline
};
debugger_run_result = ProcRes {
status,
stdout: String::from_utf8(stdout).unwrap(),
stderr: String::from_utf8(stderr).unwrap(),
cmdline,
};
if adb.kill().is_err() {
println!("Adb process is already finished.");
}
} else {
let rust_src_root = self
.config
.find_rust_src_root()
.expect("Could not find Rust source root");
let rust_pp_module_rel_path = Path::new("./src/etc");
let rust_pp_module_abs_path = rust_src_root
.join(rust_pp_module_rel_path)
.to_str()
.unwrap()
.to_owned();
// write debugger script
let mut script_str = String::with_capacity(2048);
script_str.push_str(&format!("set charset {}\n", Self::charset()));
script_str.push_str("show version\n");
match self.config.gdb_version {
Some(version) => {
println!(
"NOTE: compiletest thinks it is using GDB version {}",
version
);
if version > extract_gdb_version("7.4").unwrap() {
// Add the directory containing the pretty printers to
// GDB's script auto loading safe path
script_str.push_str(&format!(
"add-auto-load-safe-path {}\n",
rust_pp_module_abs_path.replace(r"\", r"\\")
));
}
}
_ => {
println!(
"NOTE: compiletest does not know which version of \
GDB it is using"
);
}
}
// The following line actually doesn't have to do anything with
// pretty printing, it just tells GDB to print values on one line:
script_str.push_str("set print pretty off\n");
// Add the pretty printer directory to GDB's source-file search path
script_str.push_str(&format!("directory {}\n", rust_pp_module_abs_path));
// Load the target executable
script_str.push_str(&format!(
"file {}\n",
exe_file.to_str().unwrap().replace(r"\", r"\\")
));
// Force GDB to print values in the Rust format.
if self.config.gdb_native_rust {
script_str.push_str("set language rust\n");
}
// Add line breakpoints
for line in &breakpoint_lines {
script_str.push_str(&format!(
"break '{}':{}\n",
self.testpaths.file.file_name().unwrap().to_string_lossy(),
*line
));
}
script_str.push_str(&cmds);
script_str.push_str("\nquit\n");
debug!("script_str = {}", script_str);
self.dump_output_file(&script_str, "debugger.script");
let mut debugger_script = OsString::from("-command=");
debugger_script.push(self.make_out_name("debugger.script"));
let debugger_opts: &[&OsStr] = &[
"-quiet".as_ref(),
"-batch".as_ref(),
"-nx".as_ref(),
&debugger_script,
];
let mut gdb = Command::new(self.config.gdb.as_ref().unwrap());
gdb.args(debugger_opts)
.env("PYTHONPATH", rust_pp_module_abs_path);
debugger_run_result = self.compose_and_run(
gdb,
self.config.run_lib_path.to_str().unwrap(),
None,
None,
);
}
if !debugger_run_result.status.success() {
self.fatal_proc_rec("gdb failed to execute", &debugger_run_result);
}
self.check_debugger_output(&debugger_run_result, &check_lines);
}
fn run_debuginfo_lldb_test(&self) {
assert!(self.revision.is_none(), "revisions not relevant here");
if self.config.lldb_python_dir.is_none() {
self.fatal("Can't run LLDB test because LLDB's python path is not set.");
}
let config = Config {
target_rustcflags: self.cleanup_debug_info_options(&self.config.target_rustcflags),
host_rustcflags: self.cleanup_debug_info_options(&self.config.host_rustcflags),
mode: DebugInfoLldb,
..self.config.clone()
};
let test_cx = TestCx {
config: &config,
..*self
};
test_cx.run_debuginfo_lldb_test_no_opt();
}
fn run_debuginfo_lldb_test_no_opt(&self) {
// compile test file (it should have 'compile-flags:-g' in the header)
let compile_result = self.compile_test();
if !compile_result.status.success() {
self.fatal_proc_rec("compilation failed!", &compile_result);
}
let exe_file = self.make_exe_name();
match self.config.lldb_version {
Some(ref version) => {
println!(
"NOTE: compiletest thinks it is using LLDB version {}",
version
);
}
_ => {
println!(
"NOTE: compiletest does not know which version of \
LLDB it is using"
);
}
}
let prefixes = if self.config.lldb_native_rust {
static PREFIXES: &'static [&'static str] = &["lldb", "lldbr"];
println!("NOTE: compiletest thinks it is using LLDB with native rust support");
PREFIXES
} else {
static PREFIXES: &'static [&'static str] = &["lldb", "lldbg"];
println!("NOTE: compiletest thinks it is using LLDB without native rust support");
PREFIXES
};
// Parse debugger commands etc from test files
let DebuggerCommands {
commands,
check_lines,
breakpoint_lines,
..
} = self.parse_debugger_commands(prefixes);
// Write debugger script:
// We don't want to hang when calling `quit` while the process is still running
let mut script_str = String::from("settings set auto-confirm true\n");
// Make LLDB emit its version, so we have it documented in the test output
script_str.push_str("version\n");
// Switch LLDB into "Rust mode"
let rust_src_root = self
.config
.find_rust_src_root()
.expect("Could not find Rust source root");
let rust_pp_module_rel_path = Path::new("./src/etc/lldb_rust_formatters.py");
let rust_pp_module_abs_path = rust_src_root
.join(rust_pp_module_rel_path)
.to_str()
.unwrap()
.to_owned();
script_str
.push_str(&format!("command script import {}\n", &rust_pp_module_abs_path[..])[..]);
script_str.push_str("type summary add --no-value ");
script_str.push_str("--python-function lldb_rust_formatters.print_val ");
script_str.push_str("-x \".*\" --category Rust\n");
script_str.push_str("type category enable Rust\n");
// Set breakpoints on every line that contains the string "#break"
let source_file_name = self.testpaths.file.file_name().unwrap().to_string_lossy();
for line in &breakpoint_lines {
script_str.push_str(&format!(
"breakpoint set --file '{}' --line {}\n",
source_file_name, line
));
}
// Append the other commands
for line in &commands {
script_str.push_str(line);
script_str.push_str("\n");
}
// Finally, quit the debugger
script_str.push_str("\nquit\n");
// Write the script into a file
debug!("script_str = {}", script_str);
self.dump_output_file(&script_str, "debugger.script");
let debugger_script = self.make_out_name("debugger.script");
// Let LLDB execute the script via lldb_batchmode.py
let debugger_run_result = self.run_lldb(&exe_file, &debugger_script, &rust_src_root);
if !debugger_run_result.status.success() {
self.fatal_proc_rec("Error while running LLDB", &debugger_run_result);
}
self.check_debugger_output(&debugger_run_result, &check_lines);
}
fn run_lldb(
&self,
test_executable: &Path,
debugger_script: &Path,
rust_src_root: &Path,
) -> ProcRes {
// Prepare the lldb_batchmode which executes the debugger script
let lldb_script_path = rust_src_root.join("src/etc/lldb_batchmode.py");
self.cmd2procres(
Command::new(&self.config.lldb_python)
.arg(&lldb_script_path)
.arg(test_executable)
.arg(debugger_script)
.env("PYTHONPATH", self.config.lldb_python_dir.as_ref().unwrap()),
)
}
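/// Runs `cmd`, capturing its stdout and stderr, and converts the result
/// into a `ProcRes`. Fails the test outright if the process cannot be
/// spawned.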
fn cmd2procres(&self, cmd: &mut Command) -> ProcRes {
let (status, out, err) = match cmd.output() {
Ok(Output {
status,
stdout,
stderr,
}) => (
status,
String::from_utf8(stdout).unwrap(),
String::from_utf8(stderr).unwrap(),
),
Err(e) => self.fatal(&format!(
"Failed to setup Python process for \
LLDB script: {}",
e
)),
};
self.dump_output(&out, &err);
ProcRes {
status,
stdout: out,
stderr: err,
cmdline: format!("{:?}", cmd),
}
}
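/// Scans the test file for debugger directives. For each prefix (e.g.
/// `gdb`), lines of the form `<prefix>-command: ...` and
/// `<prefix>-check: ...` are collected, and every line containing `#break`
/// records a breakpoint at that (1-based) line number.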
fn parse_debugger_commands(&self, debugger_prefixes: &[&str]) -> DebuggerCommands {
let directives = debugger_prefixes
.iter()
.map(|prefix| (format!("{}-command", prefix), format!("{}-check", prefix)))
.collect::<Vec<_>>();
let mut breakpoint_lines = vec![];
let mut commands = vec![];
let mut check_lines = vec![];
let mut counter = 1;
let reader = BufReader::new(File::open(&self.testpaths.file).unwrap());
for line in reader.lines() {
match line {
Ok(line) => {
let line = if line.starts_with("//") {
line[2..].trim_start()
} else {
line.as_str()
};
if line.contains("#break") {
breakpoint_lines.push(counter);
}
for &(ref command_directive, ref check_directive) in &directives {
self.config
.parse_name_value_directive(&line, command_directive)
.map(|cmd| commands.push(cmd));
self.config
.parse_name_value_directive(&line, check_directive)
.map(|cmd| check_lines.push(cmd));
}
}
Err(e) => self.fatal(&format!("Error while parsing debugger commands: {}", e)),
}
counter += 1;
}
DebuggerCommands {
commands,
check_lines,
breakpoint_lines,
}
}
fn cleanup_debug_info_options(&self, options: &Option<String>) -> Option<String> {
if options.is_none() {
return None;
}
// Remove options that are either unwanted (-O) or may lead to duplicates due to RUSTFLAGS.
let options_to_remove = ["-O".to_owned(), "-g".to_owned(), "--debuginfo".to_owned()];
let new_options = self
.split_maybe_args(options)
.into_iter()
.filter(|x| !options_to_remove.contains(x))
.collect::<Vec<String>>();
Some(new_options.join(" "))
}
fn maybe_add_external_args(&self, cmd: &mut Command, args: Vec<String>) {
// Filter out the arguments that should not be added by runtest here.
//
// Notable use-cases are: do not add our optimisation flag if
// `compile-flags: -Copt-level=x` and similar for debug-info level as well.
const OPT_FLAGS: &[&str] = &["-O", "-Copt-level=", /*-C<space>*/"opt-level="];
const DEBUG_FLAGS: &[&str] = &["-g", "-Cdebuginfo=", /*-C<space>*/"debuginfo="];
// FIXME: ideally we would "just" check the `cmd` itself, but it does not allow inspecting
// its arguments. They need to be collected separately. For now I cannot be bothered to
// implement this the "right" way.
let have_opt_flag = self.props.compile_flags.iter().any(|arg| {
OPT_FLAGS.iter().any(|f| arg.starts_with(f))
});
let have_debug_flag = self.props.compile_flags.iter().any(|arg| {
DEBUG_FLAGS.iter().any(|f| arg.starts_with(f))
});
for arg in args {
if OPT_FLAGS.iter().any(|f| arg.starts_with(f)) && have_opt_flag {
continue;
}
if DEBUG_FLAGS.iter().any(|f| arg.starts_with(f)) && have_debug_flag {
continue;
}
cmd.arg(arg);
}
}
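/// Verifies that each expected check line appears, in order, in the
/// debugger's stdout. Individual lines may use `[...]` as a wildcard; see
/// `check_single_line` below.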
fn check_debugger_output(&self, debugger_run_result: &ProcRes, check_lines: &[String]) {
let num_check_lines = check_lines.len();
let mut check_line_index = 0;
for line in debugger_run_result.stdout.lines() {
if check_line_index >= num_check_lines {
break;
}
if check_single_line(line, &(check_lines[check_line_index])[..]) {
check_line_index += 1;
}
}
if check_line_index != num_check_lines && num_check_lines > 0 {
self.fatal_proc_rec(
&format!(
"line not found in debugger output: {}",
check_lines[check_line_index]
),
debugger_run_result,
);
}
fn check_single_line(line: &str, check_line: &str) -> bool {
// Allow check lines to leave parts unspecified (e.g., uninitialized
// bits in the wrong case of an enum) with the notation "[...]".
let line = line.trim();
let check_line = check_line.trim();
let can_start_anywhere = check_line.starts_with("[...]");
let can_end_anywhere = check_line.ends_with("[...]");
let check_fragments: Vec<&str> = check_line
.split("[...]")
.filter(|frag| !frag.is_empty())
.collect();
if check_fragments.is_empty() {
return true;
}
let (mut rest, first_fragment) = if can_start_anywhere {
match line.find(check_fragments[0]) {
Some(pos) => (&line[pos + check_fragments[0].len()..], 1),
None => return false,
}
} else {
(line, 0)
};
for current_fragment in &check_fragments[first_fragment..] {
match rest.find(current_fragment) {
Some(pos) => {
rest = &rest[pos + current_fragment.len()..];
}
None => return false,
}
}
if !can_end_anywhere && !rest.is_empty() {
return false;
}
true
}
}
fn check_error_patterns(&self, output_to_check: &str, proc_res: &ProcRes) {
debug!("check_error_patterns");
if self.props.error_patterns.is_empty() {
if self.props.compile_pass {
return;
} else {
self.fatal(&format!(
"no error pattern specified in {:?}",
self.testpaths.file.display()
));
}
}
let mut missing_patterns: Vec<String> = Vec::new();
for pattern in &self.props.error_patterns {
if output_to_check.contains(pattern.trim()) {
debug!("found error pattern {}", pattern);
} else {
missing_patterns.push(pattern.to_string());
}
}
if missing_patterns.is_empty() {
return;
}
if missing_patterns.len() == 1 {
self.fatal_proc_rec(
&format!("error pattern '{}' not found!", missing_patterns[0]),
proc_res,
);
} else {
for pattern in missing_patterns {
self.error(&format!("error pattern '{}' not found!", pattern));
}
self.fatal_proc_rec("multiple error patterns not found", proc_res);
}
}
fn check_no_compiler_crash(&self, proc_res: &ProcRes) {
match proc_res.status.code() {
Some(101) => self.fatal_proc_rec("compiler encountered internal error", proc_res),
None => self.fatal_proc_rec("compiler terminated by signal", proc_res),
_ => (),
}
}
fn check_forbid_output(&self, output_to_check: &str, proc_res: &ProcRes) {
for pat in &self.props.forbid_output {
if output_to_check.contains(pat) {
self.fatal_proc_rec("forbidden pattern found in compiler output", proc_res);
}
}
}
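/// Compares the errors the test expects (its `//~` annotations) against the
/// diagnostics the compiler actually emitted, as parsed from its JSON
/// output, reporting both unexpected and missing messages.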
fn check_expected_errors(&self, expected_errors: Vec<errors::Error>, proc_res: &ProcRes) {
debug!("check_expected_errors: expected_errors={:?} proc_res.status={:?}",
expected_errors, proc_res.status);
if proc_res.status.success()
&& expected_errors
.iter()
.any(|x| x.kind == Some(ErrorKind::Error))
{
self.fatal_proc_rec("process did not return an error status", proc_res);
}
// On Windows, `os_file_name` keeps all '\' path separators to match the paths
// reported in the JSON output from the compiler.
let os_file_name = self.testpaths.file.display().to_string();
// For `file_name`, translate all '\' path separators to '/' so messages
// are platform-independent.
let file_name = format!("{}", self.testpaths.file.display()).replace(r"\", "/");
// If the testcase being checked contains at least one expected "help"
// message, then we'll ensure that all "help" messages are expected.
// Otherwise, all "help" messages reported by the compiler will be ignored.
// This logic also applies to "note" messages.
let expect_help = expected_errors
.iter()
.any(|ee| ee.kind == Some(ErrorKind::Help));
let expect_note = expected_errors
.iter()
.any(|ee| ee.kind == Some(ErrorKind::Note));
// Parse the JSON output from the compiler and extract out the messages.
let actual_errors = json::parse_output(&os_file_name, &proc_res.stderr, proc_res);
let mut unexpected = Vec::new();
let mut found = vec![false; expected_errors.len()];
for actual_error in &actual_errors {
let opt_index = expected_errors.iter().enumerate().position(
|(index, expected_error)| {
!found[index] && actual_error.line_num == expected_error.line_num
&& (expected_error.kind.is_none()
|| actual_error.kind == expected_error.kind)
&& actual_error.msg.contains(&expected_error.msg)
},
);
match opt_index {
Some(index) => {
// found a match, everybody is happy
assert!(!found[index]);
found[index] = true;
}
None => {
if self.is_unexpected_compiler_message(actual_error, expect_help, expect_note) {
self.error(&format!(
"{}:{}: unexpected {}: '{}'",
file_name,
actual_error.line_num,
actual_error
.kind
.as_ref()
.map_or(String::from("message"), |k| k.to_string()),
actual_error.msg
));
unexpected.push(actual_error);
}
}
}
}
let mut not_found = Vec::new();
// anything not yet found is a problem
for (index, expected_error) in expected_errors.iter().enumerate() {
if !found[index] {
self.error(&format!(
"{}:{}: expected {} not found: {}",
file_name,
expected_error.line_num,
expected_error
.kind
.as_ref()
.map_or("message".into(), |k| k.to_string()),
expected_error.msg
));
not_found.push(expected_error);
}
}
if !unexpected.is_empty() || !not_found.is_empty() {
self.error(&format!(
"{} unexpected errors found, {} expected errors not found",
unexpected.len(),
not_found.len()
));
println!("status: {}\ncommand: {}", proc_res.status, proc_res.cmdline);
if !unexpected.is_empty() {
println!("unexpected errors (from JSON output): {:#?}\n", unexpected);
}
if !not_found.is_empty() {
println!("not found errors (from test file): {:#?}\n", not_found);
}
panic!();
}
}
/// Returns `true` if we should report an error about `actual_error`,
/// which did not match any of the expected errors. We always require
/// errors/warnings to be explicitly listed, but only require
/// helps/notes if there are explicit helps/notes given.
fn is_unexpected_compiler_message(
&self,
actual_error: &Error,
expect_help: bool,
expect_note: bool,
) -> bool {
match actual_error.kind {
Some(ErrorKind::Help) => expect_help,
Some(ErrorKind::Note) => expect_note,
Some(ErrorKind::Error) | Some(ErrorKind::Warning) => true,
Some(ErrorKind::Suggestion) | None => false,
}
}
fn compile_test(&self) -> ProcRes {
// Only use `make_exe_name` when the test ends up being executed.
let will_execute = match self.config.mode {
RunPass | Ui => self.should_run_successfully(),
Incremental => self.revision.unwrap().starts_with("r"),
RunFail | RunPassValgrind | MirOpt | YkTir |
DebugInfoCdb | DebugInfoGdbLldb | DebugInfoGdb | DebugInfoLldb => true,
_ => false,
};
let output_file = if will_execute {
TargetLocation::ThisFile(self.make_exe_name())
} else {
TargetLocation::ThisDirectory(self.output_base_dir())
};
let mut rustc = self.make_compile_args(&self.testpaths.file, output_file);
rustc.arg("-L").arg(&self.aux_output_dir_name());
match self.config.mode {
CompileFail | Ui => {
// compile-fail and ui tests tend to have tons of unused code, as
// they're just testing various pieces of the compiler, but we don't
// want to actually assert warnings about all this code. Instead
// let's just ignore unused code warnings by default and tests
// can turn them back on if needed.
if !self.config.src_base.ends_with("rustdoc-ui") {
rustc.args(&["-A", "unused"]);
}
}
_ => {}
}
self.compose_and_run_compiler(rustc, None)
}
fn document(&self, out_dir: &Path) -> ProcRes {
if self.props.build_aux_docs {
for rel_ab in &self.props.aux_builds {
let aux_testpaths = self.compute_aux_test_paths(rel_ab);
let aux_props =
self.props
.from_aux_file(&aux_testpaths.file, self.revision, self.config);
let aux_cx = TestCx {
config: self.config,
props: &aux_props,
testpaths: &aux_testpaths,
revision: self.revision,
};
// Create the directory for the stdout/stderr files.
create_dir_all(aux_cx.output_base_dir()).unwrap();
let auxres = aux_cx.document(out_dir);
if !auxres.status.success() {
return auxres;
}
}
}
let aux_dir = self.aux_output_dir_name();
let rustdoc_path = self
.config
.rustdoc_path
.as_ref()
.expect("--rustdoc-path passed");
let mut rustdoc = Command::new(rustdoc_path);
rustdoc
.arg("-L")
.arg(self.config.run_lib_path.to_str().unwrap())
.arg("-L")
.arg(aux_dir)
.arg("-o")
.arg(out_dir)
.arg(&self.testpaths.file)
.args(&self.props.compile_flags);
if let Some(ref linker) = self.config.linker {
rustdoc
.arg("--linker")
.arg(linker)
.arg("-Z")
.arg("unstable-options");
}
self.compose_and_run_compiler(rustdoc, None)
}
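/// Runs the compiled test executable, either directly or (for remote
/// targets) through `remote-test-client`, deleting the binary afterwards on
/// success to save disk space.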
fn exec_compiled_test(&self) -> ProcRes {
let env = &self.props.exec_env;
let proc_res = match &*self.config.target {
// This is pretty similar to below, we're transforming:
//
// program arg1 arg2
//
// into
//
// remote-test-client run program:support-lib.so arg1 arg2
//
// The test-client program will upload `program` to the emulator
// along with all other support libraries listed (in this case
// `support-lib.so`). It will then execute the program on the
// emulator with the arguments specified (in the environment we give
// the process) and then report back the same result.
_ if self.config.remote_test_client.is_some() => {
let aux_dir = self.aux_output_dir_name();
let ProcArgs { mut prog, args } = self.make_run_args();
if let Ok(entries) = aux_dir.read_dir() {
for entry in entries {
let entry = entry.unwrap();
if !entry.path().is_file() {
continue;
}
prog.push_str(":");
prog.push_str(entry.path().to_str().unwrap());
}
}
let mut test_client =
Command::new(self.config.remote_test_client.as_ref().unwrap());
test_client
.args(&["run", &prog])
.args(args)
.envs(env.clone());
self.compose_and_run(
test_client,
self.config.run_lib_path.to_str().unwrap(),
Some(aux_dir.to_str().unwrap()),
None,
)
}
_ => {
let aux_dir = self.aux_output_dir_name();
let ProcArgs { prog, args } = self.make_run_args();
let mut program = Command::new(&prog);
program
.args(args)
.current_dir(&self.output_base_dir())
.envs(env.clone());
self.compose_and_run(
program,
self.config.run_lib_path.to_str().unwrap(),
Some(aux_dir.to_str().unwrap()),
None,
)
}
};
if proc_res.status.success() {
// delete the executable after running it to save space.
// it is ok if the deletion failed.
let _ = fs::remove_file(self.make_exe_name());
}
proc_res
}
/// For each `aux-build: foo/bar` annotation, we check to find the
/// file in an `auxiliary` directory relative to the test itself.
fn compute_aux_test_paths(&self, rel_ab: &str) -> TestPaths {
let test_ab = self
.testpaths
.file
.parent()
.expect("test file path has no parent")
.join("auxiliary")
.join(rel_ab);
if !test_ab.exists() {
self.fatal(&format!(
"aux-build `{}` source not found",
test_ab.display()
))
}
TestPaths {
file: test_ab,
relative_dir: self
.testpaths
.relative_dir
.join(self.output_testname_unique())
.join("auxiliary")
.join(rel_ab)
.parent()
.expect("aux-build path has no parent")
.to_path_buf(),
}
}
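/// Compiles every `aux-build` dependency into the auxiliary output
/// directory, wires up the corresponding `--extern-private` flags, and then
/// runs the main `rustc` invocation with the test's environment applied.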
fn compose_and_run_compiler(&self, mut rustc: Command, input: Option<String>) -> ProcRes {
let aux_dir = self.aux_output_dir_name();
if !self.props.aux_builds.is_empty() {
let _ = fs::remove_dir_all(&aux_dir);
create_dir_all(&aux_dir).unwrap();
}
// Use a Vec instead of a HashMap to preserve original order
let mut extern_priv = self.props.extern_private.clone();
let mut add_extern_priv = |priv_dep: &str, dylib: bool| {
let lib_name = get_lib_name(priv_dep, dylib);
rustc
.arg("--extern-private")
.arg(format!("{}={}", priv_dep, aux_dir.join(lib_name).to_str().unwrap()));
};
for rel_ab in &self.props.aux_builds {
let aux_testpaths = self.compute_aux_test_paths(rel_ab);
let aux_props =
self.props
.from_aux_file(&aux_testpaths.file, self.revision, self.config);
let aux_output = TargetLocation::ThisDirectory(self.aux_output_dir_name());
let aux_cx = TestCx {
config: self.config,
props: &aux_props,
testpaths: &aux_testpaths,
revision: self.revision,
};
// Create the directory for the stdout/stderr files.
create_dir_all(aux_cx.output_base_dir()).unwrap();
let mut aux_rustc = aux_cx.make_compile_args(&aux_testpaths.file, aux_output);
let (dylib, crate_type) = if aux_props.no_prefer_dynamic {
(true, None)
} else if self.config.target.contains("cloudabi")
|| self.config.target.contains("emscripten")
|| (self.config.target.contains("musl")
&& !aux_props.force_host
&& !self.config.host.contains("musl"))
|| self.config.target.contains("wasm32")
|| self.config.target.contains("nvptx")
{
// We primarily compile all auxiliary libraries as dynamic libraries
// to avoid code size bloat and large binaries as much as possible
// for the test suite (otherwise including libstd statically in all
// executables takes up quite a bit of space).
//
// For targets like MUSL or Emscripten, however, there is no support for
// dynamic libraries so we just go back to building a normal library. Note,
// however, that for MUSL if the library is built with `force_host` then
// it's ok to be a dylib as the host should always support dylibs.
(false, Some("lib"))
} else {
(true, Some("dylib"))
};
let trimmed = rel_ab.trim_end_matches(".rs").to_string();
// Normally, every 'extern-private' has a corresponding 'aux-build'
// entry. If so, we remove it from our list of private crates,
// and add an '--extern-private' flag to rustc.
if extern_priv.remove_item(&trimmed).is_some() {
add_extern_priv(&trimmed, dylib);
}
if let Some(crate_type) = crate_type {
aux_rustc.args(&["--crate-type", crate_type]);
}
aux_rustc.arg("-L").arg(&aux_dir);
let auxres = aux_cx.compose_and_run(
aux_rustc,
aux_cx.config.compile_lib_path.to_str().unwrap(),
Some(aux_dir.to_str().unwrap()),
None,
);
if !auxres.status.success() {
self.fatal_proc_rec(
&format!(
"auxiliary build of {:?} failed to compile: ",
aux_testpaths.file.display()
),
&auxres,
);
}
}
// Add any '--extern-private' entries without a matching
// 'aux-build'
for private_lib in extern_priv {
add_extern_priv(&private_lib, true);
}
self.props.unset_rustc_env.clone()
.iter()
.fold(&mut rustc, |rustc, v| rustc.env_remove(v));
rustc.envs(self.props.rustc_env.clone());
self.compose_and_run(
rustc,
self.config.compile_lib_path.to_str().unwrap(),
Some(aux_dir.to_str().unwrap()),
input,
)
}
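/// Spawns `command` with the dylib search path extended by `lib_path` (and
/// `aux_path`, if given), optionally feeds `input` to its stdin, and
/// captures the outcome as a `ProcRes`.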
fn compose_and_run(
&self,
mut command: Command,
lib_path: &str,
aux_path: Option<&str>,
input: Option<String>,
) -> ProcRes {
let cmdline = {
let cmdline = self.make_cmdline(&command, lib_path);
logv(self.config, format!("executing {}", cmdline));
cmdline
};
command
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.stdin(Stdio::piped());
// Need to be sure to put both the lib_path and the aux path in the dylib
// search path for the child.
let mut path = env::split_paths(&env::var_os(dylib_env_var()).unwrap_or(OsString::new()))
.collect::<Vec<_>>();
if let Some(p) = aux_path {
path.insert(0, PathBuf::from(p))
}
path.insert(0, PathBuf::from(lib_path));
// Add the new dylib search path var
let newpath = env::join_paths(&path).unwrap();
command.env(dylib_env_var(), newpath);
let mut child = disable_error_reporting(|| command.spawn())
.expect(&format!("failed to exec `{:?}`", &command));
if let Some(input) = input {
child
.stdin
.as_mut()
.unwrap()
.write_all(input.as_bytes())
.unwrap();
}
let Output {
status,
stdout,
stderr,
} = read2_abbreviated(child).expect("failed to read output");
let result = ProcRes {
status,
stdout: String::from_utf8_lossy(&stdout).into_owned(),
stderr: String::from_utf8_lossy(&stderr).into_owned(),
cmdline,
};
self.dump_output(&result.stdout, &result.stderr);
result
}
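/// Builds the base `rustc` (or `rustdoc`, for the rustdoc-ui/rustdoc-js
/// suites) invocation for this test: target selection, mode-specific flags
/// such as JSON error output or MIR dumping, the output location, and the
/// test's own `compile-flags`.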
fn make_compile_args(&self, input_file: &Path, output_file: TargetLocation) -> Command {
let is_rustdoc = self.config.src_base.ends_with("rustdoc-ui") ||
self.config.src_base.ends_with("rustdoc-js");
let mut rustc = if !is_rustdoc {
Command::new(&self.config.rustc_path)
} else {
Command::new(
&self
.config
.rustdoc_path
.clone()
.expect("no rustdoc built yet"),
)
};
// FIXME Why is -L here?
rustc.arg(input_file); //.arg("-L").arg(&self.config.build_base);
// Use a single thread for efficiency and a deterministic error message order
rustc.arg("-Zthreads=1");
// Optionally prevent default --target if specified in test compile-flags.
let custom_target = self
.props
.compile_flags
.iter()
.any(|x| x.starts_with("--target"));
if !custom_target {
let target = if self.props.force_host {
&*self.config.host
} else {
&*self.config.target
};
rustc.arg(&format!("--target={}", target));
}
self.set_revision_flags(&mut rustc);
if !is_rustdoc {
if let Some(ref incremental_dir) = self.props.incremental_dir {
rustc.args(&["-C", &format!("incremental={}", incremental_dir.display())]);
rustc.args(&["-Z", "incremental-verify-ich"]);
rustc.args(&["-Z", "incremental-queries"]);
}
if self.config.mode == CodegenUnits {
rustc.args(&["-Z", "human_readable_cgu_names"]);
}
}
match self.config.mode {
CompileFail | Incremental => {
// If we are extracting and matching errors in the new
// fashion, then we want JSON mode. Old-school error
// patterns still match the raw compiler output.
if self.props.error_patterns.is_empty() {
rustc.args(&["--error-format", "json"]);
}
if !self.props.disable_ui_testing_normalization {
rustc.arg("-Zui-testing");
}
}
RunPass | Ui => {
if !self
.props
.compile_flags
.iter()
.any(|s| s.starts_with("--error-format"))
{
rustc.args(&["--error-format", "json"]);
}
if !self.props.disable_ui_testing_normalization {
rustc.arg("-Zui-testing");
}
}
MirOpt => {
rustc.args(&[
"-Zdump-mir=all",
"-Zmir-opt-level=3",
"-Zdump-mir-exclude-pass-number",
]);
let mir_dump_dir = self.get_mir_dump_dir();
let _ = fs::remove_dir_all(&mir_dump_dir);
create_dir_all(mir_dump_dir.as_path()).unwrap();
let mut dir_opt = "-Zdump-mir-dir=".to_string();
dir_opt.push_str(mir_dump_dir.to_str().unwrap());
debug!("dir_opt: {:?}", dir_opt);
rustc.arg(dir_opt);
}
YkTir => {
rustc.args(&[ "--emit", "yk-tir"]);
let mir_dump_dir = self.get_mir_dump_dir();
let _ = fs::remove_dir_all(&mir_dump_dir);
create_dir_all(mir_dump_dir.as_path()).unwrap();
},
RunFail | RunPassValgrind | Pretty | DebugInfoCdb | DebugInfoGdbLldb | DebugInfoGdb
| DebugInfoLldb | Codegen | Rustdoc | RunMake | CodegenUnits | JsDocTest | Assembly => {
// do not use JSON output
}
}
if self.props.skip_codegen {
assert!(
!self
.props
.compile_flags
.iter()
.any(|s| s.starts_with("--emit"))
);
rustc.args(&["--emit", "metadata"]);
}
if !is_rustdoc {
if self.config.target == "wasm32-unknown-unknown" {
// rustc.arg("-g"); // get any backtrace at all on errors
} else if !self.props.no_prefer_dynamic {
rustc.args(&["-C", "prefer-dynamic"]);
}
}
match output_file {
TargetLocation::ThisFile(path) => {
rustc.arg("-o").arg(path);
}
TargetLocation::ThisDirectory(path) => {
if is_rustdoc {
// `rustdoc` uses `-o` for the output directory.
rustc.arg("-o").arg(path);
} else {
rustc.arg("--out-dir").arg(path);
}
}
}
match self.config.compare_mode {
Some(CompareMode::Nll) => {
rustc.args(&["-Zborrowck=mir"]);
}
Some(CompareMode::Polonius) => {
rustc.args(&["-Zpolonius", "-Zborrowck=mir"]);
}
None => {}
}
if self.props.force_host {
self.maybe_add_external_args(&mut rustc,
self.split_maybe_args(&self.config.host_rustcflags));
} else {
self.maybe_add_external_args(&mut rustc,
self.split_maybe_args(&self.config.target_rustcflags));
if !is_rustdoc {
if let Some(ref linker) = self.config.linker {
rustc.arg(format!("-Clinker={}", linker));
}
}
}
// Use dynamic musl for tests because static doesn't allow creating dylibs
if self.config.host.contains("musl") {
rustc.arg("-Ctarget-feature=-crt-static");
}
rustc.args(&self.props.compile_flags);
rustc
}
fn make_exe_name(&self) -> PathBuf {
// Using a single letter here to keep the path length down for
// Windows. Some test names get very long. rustc creates `rcgu`
// files with the module name appended, which can more than
// double the length.
let mut f = self.output_base_dir().join("a");
// FIXME: This is using the host architecture exe suffix, not target!
if self.config.target.contains("emscripten") {
f = f.with_extra_extension("js");
} else if self.config.target.contains("wasm32") {
f = f.with_extra_extension("wasm");
} else if !env::consts::EXE_SUFFIX.is_empty() {
f = f.with_extra_extension(env::consts::EXE_SUFFIX);
}
f
}
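/// Assembles the command line used to run the compiled test, prepending any
/// runtool (e.g. valgrind) and, for emscripten/wasm32 targets, a NodeJS
/// invocation (plus the wasm shim).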
fn make_run_args(&self) -> ProcArgs {
// If we've got another tool to run under (valgrind),
// then split apart its command
let mut args = self.split_maybe_args(&self.config.runtool);
// If this is emscripten, then run tests under nodejs
if self.config.target.contains("emscripten") {
if let Some(ref p) = self.config.nodejs {
args.push(p.clone());
} else {
self.fatal("no NodeJS binary found (--nodejs)");
}
// If this is otherwise wasm, then run tests under nodejs with our
// shim
} else if self.config.target.contains("wasm32") {
if let Some(ref p) = self.config.nodejs {
args.push(p.clone());
} else {
self.fatal("no NodeJS binary found (--nodejs)");
}
let src = self.config.src_base
.parent().unwrap() // chop off `run-pass`
.parent().unwrap() // chop off `test`
.parent().unwrap(); // chop off `src`
args.push(src.join("src/etc/wasm32-shim.js").display().to_string());
}
let exe_file = self.make_exe_name();
// FIXME (#9639): This needs to handle non-utf8 paths
args.push(exe_file.to_str().unwrap().to_owned());
// Add the arguments in the run_flags directive
args.extend(self.split_maybe_args(&self.props.run_flags));
let prog = args.remove(0);
ProcArgs { prog, args }
}
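/// Splits a space-separated option string into individual arguments,
/// dropping fragments that are empty or all whitespace.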
fn split_maybe_args(&self, argstr: &Option<String>) -> Vec<String> {
match *argstr {
Some(ref s) => s
.split(' ')
.filter_map(|s| {
if s.chars().all(|c| c.is_whitespace()) {
None
} else {
Some(s.to_owned())
}
})
.collect(),
None => Vec::new(),
}
}
fn make_cmdline(&self, command: &Command, libpath: &str) -> String {
use crate::util;
// Linux and mac don't require adjusting the library search path
if cfg!(unix) {
format!("{:?}", command)
} else {
// Build the LD_LIBRARY_PATH variable as it would be seen on the command line
// for diagnostic purposes
fn lib_path_cmd_prefix(path: &str) -> String {
format!(
"{}=\"{}\"",
util::lib_path_env_var(),
util::make_new_path(path)
)
}
format!("{} {:?}", lib_path_cmd_prefix(libpath), command)
}
}
fn dump_output(&self, out: &str, err: &str) {
let revision = if let Some(r) = self.revision {
format!("{}.", r)
} else {
String::new()
};
self.dump_output_file(out, &format!("{}out", revision));
self.dump_output_file(err, &format!("{}err", revision));
self.maybe_dump_to_stdout(out, err);
}
fn dump_output_file(&self, out: &str, extension: &str) {
let outfile = self.make_out_name(extension);
fs::write(&outfile, out).unwrap();
}
/// Creates a filename for output with the given extension.
/// E.g., `/.../testname.revision.mode/testname.extension`.
fn make_out_name(&self, extension: &str) -> PathBuf {
self.output_base_name().with_extension(extension)
}
/// Gets the directory where auxiliary files are written.
/// E.g., `/.../testname.revision.mode/auxiliary/`.
fn aux_output_dir_name(&self) -> PathBuf {
self.output_base_dir()
.join("auxiliary")
.with_extra_extension(self.config.mode.disambiguator())
}
/// Generates a unique name for the test, such as `testname.revision.mode`.
fn output_testname_unique(&self) -> PathBuf {
output_testname_unique(self.config, self.testpaths, self.safe_revision())
}
/// The revision, ignored for incremental compilation since it wants all revisions in
/// the same directory.
fn safe_revision(&self) -> Option<&str> {
if self.config.mode == Incremental {
None
} else {
self.revision
}
}
/// Gets the absolute path to the directory where all output for the given
/// test/revision should reside.
/// E.g., `/path/to/build/host-triple/test/ui/relative/testname.revision.mode/`.
fn output_base_dir(&self) -> PathBuf {
output_base_dir(self.config, self.testpaths, self.safe_revision())
}
/// Gets the absolute path to the base filename used as output for the given
/// test/revision.
/// E.g., `/.../relative/testname.revision.mode/testname`.
fn output_base_name(&self) -> PathBuf {
output_base_name(self.config, self.testpaths, self.safe_revision())
}
fn maybe_dump_to_stdout(&self, out: &str, err: &str) {
if self.config.verbose {
println!("------{}------------------------------", "stdout");
println!("{}", out);
println!("------{}------------------------------", "stderr");
println!("{}", err);
println!("------------------------------------------");
}
}
fn error(&self, err: &str) {
match self.revision {
Some(rev) => println!("\nerror in revision `{}`: {}", rev, err),
None => println!("\nerror: {}", err),
}
}
fn fatal(&self, err: &str) -> ! {
self.error(err);
error!("fatal error, panic: {:?}", err);
panic!("fatal error");
}
fn fatal_proc_rec(&self, err: &str, proc_res: &ProcRes) -> ! {
self.error(err);
proc_res.fatal(None);
}
// codegen tests (using FileCheck)
fn compile_test_and_save_ir(&self) -> ProcRes {
let aux_dir = self.aux_output_dir_name();
let output_file = TargetLocation::ThisDirectory(self.output_base_dir());
let mut rustc = self.make_compile_args(&self.testpaths.file, output_file);
rustc.arg("-L").arg(aux_dir).arg("--emit=llvm-ir");
self.compose_and_run_compiler(rustc, None)
}
fn compile_test_and_save_assembly(&self) -> (ProcRes, PathBuf) {
// This works with both `--emit asm` (as default output name for the assembly)
// and `ptx-linker` because the latter can write output at the requested location.
let output_path = self.output_base_name().with_extension("s");
let output_file = TargetLocation::ThisFile(output_path.clone());
let mut rustc = self.make_compile_args(&self.testpaths.file, output_file);
rustc.arg("-L").arg(self.aux_output_dir_name());
match self.props.assembly_output.as_ref().map(AsRef::as_ref) {
Some("emit-asm") => {
rustc.arg("--emit=asm");
}
Some("ptx-linker") => {
// No extra flags needed.
}
Some(_) => self.fatal("unknown 'assembly-output' header"),
None => self.fatal("missing 'assembly-output' header"),
}
(self.compose_and_run_compiler(rustc, None), output_path)
}
fn verify_with_filecheck(&self, output: &Path) -> ProcRes {
let mut filecheck = Command::new(self.config.llvm_filecheck.as_ref().unwrap());
filecheck
.arg("--input-file")
.arg(output)
.arg(&self.testpaths.file);
// It would be more appropriate to make most of the arguments configurable through
// a comment-attribute similar to `compile-flags`. For example, --check-prefixes is a very
// useful flag.
//
// For now, though…
if let Some(rev) = self.revision {
let prefixes = format!("CHECK,{}", rev);
filecheck.args(&["--check-prefixes", &prefixes]);
}
self.compose_and_run(filecheck, "", None, None)
}
fn run_codegen_test(&self) {
if self.config.llvm_filecheck.is_none() {
self.fatal("missing --llvm-filecheck");
}
let proc_res = self.compile_test_and_save_ir();
if !proc_res.status.success() {
self.fatal_proc_rec("compilation failed!", &proc_res);
}
let output_path = self.output_base_name().with_extension("ll");
let proc_res = self.verify_with_filecheck(&output_path);
if !proc_res.status.success() {
self.fatal_proc_rec("verification with 'FileCheck' failed", &proc_res);
}
}
fn run_assembly_test(&self) {
if self.config.llvm_filecheck.is_none() {
self.fatal("missing --llvm-filecheck");
}
let (proc_res, output_path) = self.compile_test_and_save_assembly();
if !proc_res.status.success() {
self.fatal_proc_rec("compilation failed!", &proc_res);
}
let proc_res = self.verify_with_filecheck(&output_path);
if !proc_res.status.success() {
self.fatal_proc_rec("verification with 'FileCheck' failed", &proc_res);
}
}
fn charset() -> &'static str {
// FreeBSD 10.1 defaults to GDB 6.1.1 which doesn't support "auto" charset
if cfg!(target_os = "freebsd") {
"ISO-8859-1"
} else {
"UTF-8"
}
}
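/// Documents the test file with rustdoc and validates the generated HTML,
/// either by matching doc-test line numbers or by running `htmldocck.py`.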
fn run_rustdoc_test(&self) {
assert!(self.revision.is_none(), "revisions not relevant here");
let out_dir = self.output_base_dir();
let _ = fs::remove_dir_all(&out_dir);
create_dir_all(&out_dir).unwrap();
let proc_res = self.document(&out_dir);
if !proc_res.status.success() {
self.fatal_proc_rec("rustdoc failed!", &proc_res);
}
if self.props.check_test_line_numbers_match {
self.check_rustdoc_test_option(proc_res);
} else {
let root = self.config.find_rust_src_root().unwrap();
let res = self.cmd2procres(
Command::new(&self.config.docck_python)
.arg(root.join("src/etc/htmldocck.py"))
.arg(out_dir)
.arg(&self.testpaths.file),
);
if !res.status.success() {
self.fatal_proc_rec("htmldocck failed!", &res);
}
}
}
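/// Collects the (1-based) line numbers at which doc-test code blocks open
/// (``` fences, including those behind `///` comments). `mod foo;`
/// declarations are pushed into `other_files` so they can be checked too.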
fn get_lines<P: AsRef<Path>>(
&self,
path: &P,
mut other_files: Option<&mut Vec<String>>,
) -> Vec<usize> {
let content = fs::read_to_string(&path).unwrap();
let mut ignore = false;
content
.lines()
.enumerate()
.filter_map(|(line_nb, line)| {
if (line.trim_start().starts_with("pub mod ")
|| line.trim_start().starts_with("mod "))
&& line.ends_with(';')
{
if let Some(ref mut other_files) = other_files {
other_files.push(line.rsplit("mod ").next().unwrap().replace(";", ""));
}
None
} else {
let sline = line.split("///").last().unwrap_or("");
let line = sline.trim_start();
if line.starts_with("```") {
if ignore {
ignore = false;
None
} else {
ignore = true;
Some(line_nb + 1)
}
} else {
None
}
}
})
.collect()
}
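/// Cross-checks the doc tests rustdoc actually ran (parsed from its stdout)
/// against the code blocks found in the source files, failing if any block
/// was not exercised.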
fn check_rustdoc_test_option(&self, res: ProcRes) {
let mut other_files = Vec::new();
let mut files: HashMap<String, Vec<usize>> = HashMap::new();
let cwd = env::current_dir().unwrap();
files.insert(
self.testpaths
.file
.strip_prefix(&cwd)
.unwrap_or(&self.testpaths.file)
.to_str()
.unwrap()
.replace('\\', "/"),
self.get_lines(&self.testpaths.file, Some(&mut other_files)),
);
for other_file in other_files {
let mut path = self.testpaths.file.clone();
path.set_file_name(&format!("{}.rs", other_file));
files.insert(
path.strip_prefix(&cwd)
.unwrap_or(&path)
.to_str()
.unwrap()
.replace('\\', "/"),
self.get_lines(&path, None),
);
}
let mut tested = 0;
for _ in res
.stdout
.split('\n')
.filter(|s| s.starts_with("test "))
.inspect(|s| {
let tmp: Vec<&str> = s.split(" - ").collect();
if tmp.len() == 2 {
let path = tmp[0].rsplit("test ").next().unwrap();
if let Some(ref mut v) = files.get_mut(&path.replace('\\', "/")) {
tested += 1;
let mut iter = tmp[1].split("(line ");
iter.next();
let line = iter
.next()
.unwrap_or(")")
.split(')')
.next()
.unwrap_or("0")
.parse()
.unwrap_or(0);
if let Ok(pos) = v.binary_search(&line) {
v.remove(pos);
} else {
self.fatal_proc_rec(
&format!("Not found doc test: \"{}\" in \"{}\":{:?}", s, path, v),
&res,
);
}
}
}
}) {}
if tested == 0 {
self.fatal_proc_rec(&format!("No test has been found... {:?}", files), &res);
} else {
for (entry, v) in &files {
if !v.is_empty() {
self.fatal_proc_rec(
&format!(
"Not found test at line{} \"{}\":{:?}",
if v.len() > 1 { "s" } else { "" },
entry,
v
),
&res,
);
}
}
}
}
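/// Runs a codegen-units test: compiles the test and compares the
/// `MONO_ITEM` lines the compiler prints against the items (and their
/// codegen-unit assignments) listed in the test file.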
fn run_codegen_units_test(&self) {
assert!(self.revision.is_none(), "revisions not relevant here");
let proc_res = self.compile_test();
if !proc_res.status.success() {
self.fatal_proc_rec("compilation failed!", &proc_res);
}
self.check_no_compiler_crash(&proc_res);
const PREFIX: &'static str = "MONO_ITEM ";
const CGU_MARKER: &'static str = "@@";
let actual: Vec<MonoItem> = proc_res
.stdout
.lines()
.filter(|line| line.starts_with(PREFIX))
.map(|line| str_to_mono_item(line, true))
.collect();
let expected: Vec<MonoItem> = errors::load_errors(&self.testpaths.file, None)
.iter()
.map(|e| str_to_mono_item(&e.msg[..], false))
.collect();
let mut missing = Vec::new();
let mut wrong_cgus = Vec::new();
for expected_item in &expected {
let actual_item_with_same_name = actual.iter().find(|ti| ti.name == expected_item.name);
if let Some(actual_item) = actual_item_with_same_name {
if !expected_item.codegen_units.is_empty() &&
// Also check for codegen units
expected_item.codegen_units != actual_item.codegen_units
{
wrong_cgus.push((expected_item.clone(), actual_item.clone()));
}
} else {
missing.push(expected_item.string.clone());
}
}
let unexpected: Vec<_> = actual
.iter()
.filter(|acgu| !expected.iter().any(|ecgu| acgu.name == ecgu.name))
.map(|acgu| acgu.string.clone())
.collect();
if !missing.is_empty() {
missing.sort();
println!("\nThese items should have been contained but were not:\n");
for item in &missing {
println!("{}", item);
}
println!("\n");
}
if !unexpected.is_empty() {
let sorted = {
let mut sorted = unexpected.clone();
sorted.sort();
sorted
};
println!("\nThese items were contained but should not have been:\n");
for item in sorted {
println!("{}", item);
}
println!("\n");
}
if !wrong_cgus.is_empty() {
wrong_cgus.sort_by_key(|pair| pair.0.name.clone());
println!("\nThe following items were assigned to wrong codegen units:\n");
for &(ref expected_item, ref actual_item) in &wrong_cgus {
println!("{}", expected_item.name);
println!(
" expected: {}",
codegen_units_to_str(&expected_item.codegen_units)
);
println!(
" actual: {}",
codegen_units_to_str(&actual_item.codegen_units)
);
println!("");
}
}
if !(missing.is_empty() && unexpected.is_empty() && wrong_cgus.is_empty()) {
panic!();
}
#[derive(Clone, Eq, PartialEq)]
struct MonoItem {
name: String,
codegen_units: HashSet<String>,
string: String,
}
// [MONO_ITEM] name [@@ (cgu)+]
fn str_to_mono_item(s: &str, cgu_has_crate_disambiguator: bool) -> MonoItem {
let s = if s.starts_with(PREFIX) {
(&s[PREFIX.len()..]).trim()
} else {
s.trim()
};
let full_string = format!("{}{}", PREFIX, s);
let parts: Vec<&str> = s
.split(CGU_MARKER)
.map(str::trim)
.filter(|s| !s.is_empty())
.collect();
let name = parts[0].trim();
let cgus = if parts.len() > 1 {
let cgus_str = parts[1];
cgus_str
.split(' ')
.map(str::trim)
.filter(|s| !s.is_empty())
.map(|s| {
if cgu_has_crate_disambiguator {
remove_crate_disambiguator_from_cgu(s)
} else {
s.to_string()
}
})
.collect()
} else {
HashSet::new()
};
MonoItem {
name: name.to_owned(),
codegen_units: cgus,
string: full_string,
}
}
fn codegen_units_to_str(cgus: &HashSet<String>) -> String {
let mut cgus: Vec<_> = cgus.iter().collect();
cgus.sort();
let mut string = String::new();
for cgu in cgus {
string.push_str(&cgu[..]);
string.push_str(" ");
}
string
}
// Given a cgu-name-prefix of the form <crate-name>.<crate-disambiguator> or
// the form <crate-name1>.<crate-disambiguator1>-in-<crate-name2>.<crate-disambiguator2>,
// remove all crate-disambiguators.
fn remove_crate_disambiguator_from_cgu(cgu: &str) -> String {
lazy_static! {
static ref RE: Regex = Regex::new(
r"^[^\.]+(?P<d1>\.[[:alnum:]]+)(-in-[^\.]+(?P<d2>\.[[:alnum:]]+))?"
).unwrap();
}
let captures = RE.captures(cgu).unwrap_or_else(|| {
panic!("invalid cgu name encountered: {}", cgu)
});
let mut new_name = cgu.to_owned();
if let Some(d2) = captures.name("d2") {
new_name.replace_range(d2.start() .. d2.end(), "");
}
let d1 = captures.name("d1").unwrap();
new_name.replace_range(d1.start() .. d1.end(), "");
new_name
}
}
fn init_incremental_test(&self) {
// (See `run_incremental_test` for an overview of how incremental tests work.)
// Before any of the revisions have executed, create the
// incremental workproduct directory. Delete any old
// incremental work products that may be there from prior
// runs.
let incremental_dir = self.incremental_dir();
if incremental_dir.exists() {
// Canonicalizing the path will convert it to the //?/ format
// on Windows, which enables paths longer than 260 characters.
let canonicalized = incremental_dir.canonicalize().unwrap();
fs::remove_dir_all(canonicalized).unwrap();
}
fs::create_dir_all(&incremental_dir).unwrap();
if self.config.verbose {
print!(
"init_incremental_test: incremental_dir={}",
incremental_dir.display()
);
}
}
fn run_incremental_test(&self) {
// Basic plan for a test incremental/foo/bar.rs:
// - load list of revisions rpass1, cfail2, rpass3
// - each should begin with `rpass`, `cfail`, or `rfail`
// - if `rpass`, expect compile and execution to succeed
// - if `cfail`, expect compilation to fail
// - if `rfail`, expect execution to fail
// - create a directory build/foo/bar.incremental
// - compile foo/bar.rs with -Z incremental=.../foo/bar.incremental and -C rpass1
// - because name of revision starts with "rpass", expect success
// - compile foo/bar.rs with -Z incremental=.../foo/bar.incremental and -C cfail2
// - because name of revision starts with "cfail", expect an error
// - load expected errors as usual, but filter for those that end in `[rfail2]`
// - compile foo/bar.rs with -Z incremental=.../foo/bar.incremental and -C rpass3
// - because name of revision starts with "rpass", expect success
// - execute build/foo/bar.exe and save output
//
// FIXME -- use non-incremental mode as an oracle? That doesn't apply
// to #[rustc_dirty] and clean tests I guess
let revision = self
.revision
.expect("incremental tests require a list of revisions");
// Incremental workproduct directory should have already been created.
let incremental_dir = self.incremental_dir();
assert!(
incremental_dir.exists(),
"init_incremental_test failed to create incremental dir"
);
// Add an extra flag pointing at the incremental directory.
let mut revision_props = self.props.clone();
revision_props.incremental_dir = Some(incremental_dir);
let revision_cx = TestCx {
config: self.config,
props: &revision_props,
testpaths: self.testpaths,
revision: self.revision,
};
if self.config.verbose {
print!(
"revision={:?} revision_props={:#?}",
revision, revision_props
);
}
if revision.starts_with("rpass") {
revision_cx.run_rpass_test();
} else if revision.starts_with("rfail") {
revision_cx.run_rfail_test();
} else if revision.starts_with("cfail") {
revision_cx.run_cfail_test();
} else {
revision_cx.fatal("revision name must begin with rpass, rfail, or cfail");
}
}
/// Directory where incremental work products are stored.
fn incremental_dir(&self) -> PathBuf {
self.output_base_name().with_extension("inc")
}
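/// Runs a run-make test by invoking `make` (or `gmake` on the BSDs) in the
/// test's directory, with the build environment described through
/// environment variables such as `RUSTC`, `TMPDIR`, and `TARGET`.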
fn run_rmake_test(&self) {
let cwd = env::current_dir().unwrap();
let src_root = self
.config
.src_base
.parent()
.unwrap()
.parent()
.unwrap()
.parent()
.unwrap();
let src_root = cwd.join(&src_root);
let tmpdir = cwd.join(self.output_base_name());
if tmpdir.exists() {
self.aggressive_rm_rf(&tmpdir).unwrap();
}
create_dir_all(&tmpdir).unwrap();
let host = &self.config.host;
let make = if host.contains("dragonfly")
|| host.contains("freebsd")
|| host.contains("netbsd")
|| host.contains("openbsd")
{
"gmake"
} else {
"make"
};
let mut cmd = Command::new(make);
cmd.current_dir(&self.testpaths.file)
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.env("TARGET", &self.config.target)
.env("PYTHON", &self.config.docck_python)
.env("S", src_root)
.env("RUST_BUILD_STAGE", &self.config.stage_id)
.env("RUSTC", cwd.join(&self.config.rustc_path))
.env("TMPDIR", &tmpdir)
.env("LD_LIB_PATH_ENVVAR", dylib_env_var())
.env("HOST_RPATH_DIR", cwd.join(&self.config.compile_lib_path))
.env("TARGET_RPATH_DIR", cwd.join(&self.config.run_lib_path))
.env("LLVM_COMPONENTS", &self.config.llvm_components)
.env("LLVM_CXXFLAGS", &self.config.llvm_cxxflags)
// We for sure don't want these tests to run in parallel, so make
// sure they don't have access to these vars if we run via `make`
// at the top level
.env_remove("MAKEFLAGS")
.env_remove("MFLAGS")
.env_remove("CARGO_MAKEFLAGS");
if let Some(ref rustdoc) = self.config.rustdoc_path {
cmd.env("RUSTDOC", cwd.join(rustdoc));
}
if let Some(ref node) = self.config.nodejs {
cmd.env("NODE", node);
}
if let Some(ref linker) = self.config.linker {
cmd.env("RUSTC_LINKER", linker);
}
if let Some(ref clang) = self.config.run_clang_based_tests_with {
cmd.env("CLANG", clang);
}
if let Some(ref filecheck) = self.config.llvm_filecheck {
cmd.env("LLVM_FILECHECK", filecheck);
}
if let Some(ref llvm_bin_dir) = self.config.llvm_bin_dir {
cmd.env("LLVM_BIN_DIR", llvm_bin_dir);
}
// We don't want RUSTFLAGS set from the outside to interfere with
// compiler flags set in the test cases:
cmd.env_remove("RUSTFLAGS");
// Use dynamic musl for tests because static doesn't allow creating dylibs
if self.config.host.contains("musl") {
cmd.env("RUSTFLAGS", "-Ctarget-feature=-crt-static")
.env("IS_MUSL_HOST", "1");
}
if self.config.target.contains("msvc") && self.config.cc != "" {
// We need to pass a path to `lib.exe`, so assume that `cc` is `cl.exe`
// and that `lib.exe` lives next to it.
let lib = Path::new(&self.config.cc).parent().unwrap().join("lib.exe");
// MSYS doesn't like passing flags of the form `/foo` as it thinks it's
// a path and instead passes `C:\msys64\foo`, so convert all
// `/`-arguments to MSVC here to `-` arguments.
let cflags = self
.config
.cflags
.split(' ')
.map(|s| s.replace("/", "-"))
.collect::<Vec<_>>()
.join(" ");
cmd.env("IS_MSVC", "1")
.env("IS_WINDOWS", "1")
.env("MSVC_LIB", format!("'{}' -nologo", lib.display()))
.env("CC", format!("'{}' {}", self.config.cc, cflags))
.env("CXX", format!("'{}'", &self.config.cxx));
} else {
cmd.env("CC", format!("{} {}", self.config.cc, self.config.cflags))
.env("CXX", format!("{} {}", self.config.cxx, self.config.cflags))
.env("AR", &self.config.ar);
if self.config.target.contains("windows") {
cmd.env("IS_WINDOWS", "1");
}
}
let output = cmd
.spawn()
.and_then(read2_abbreviated)
.expect("failed to spawn `make`");
if !output.status.success() {
let res = ProcRes {
status: output.status,
stdout: String::from_utf8_lossy(&output.stdout).into_owned(),
stderr: String::from_utf8_lossy(&output.stderr).into_owned(),
cmdline: format!("{:?}", cmd),
};
self.fatal_proc_rec("make failed", &res);
}
}
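/// Recursively deletes a directory tree, clearing the read-only bit on
/// Windows where a plain `remove_file` would be denied.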
fn aggressive_rm_rf(&self, path: &Path) -> io::Result<()> {
for e in path.read_dir()? {
let entry = e?;
let path = entry.path();
if entry.file_type()?.is_dir() {
self.aggressive_rm_rf(&path)?;
} else {
// Remove readonly files as well on windows (by default we can't)
fs::remove_file(&path).or_else(|e| {
if cfg!(windows) && e.kind() == io::ErrorKind::PermissionDenied {
let mut meta = entry.metadata()?.permissions();
meta.set_readonly(false);
fs::set_permissions(&path, meta)?;
fs::remove_file(&path)
} else {
Err(e)
}
})?;
}
}
fs::remove_dir(path)
}
fn run_js_doc_test(&self) {
if let Some(nodejs) = &self.config.nodejs {
let out_dir = self.output_base_dir();
self.document(&out_dir);
let root = self.config.find_rust_src_root().unwrap();
let res = self.cmd2procres(
Command::new(&nodejs)
.arg(root.join("src/tools/rustdoc-js/tester.js"))
.arg(out_dir.parent().expect("no parent"))
.arg(&self.testpaths.file.file_stem().expect("couldn't get file stem")),
);
if !res.status.success() {
self.fatal_proc_rec("rustdoc-js test failed!", &res);
}
} else {
self.fatal("no nodeJS");
}
}
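/// Runs a UI test: compiles the test, compares normalized stdout/stderr
/// against the expected `.stdout`/`.stderr` files, optionally applies and
/// re-compiles rustfix suggestions, and executes the binary when the test
/// is expected to run successfully.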
fn run_ui_test(&self) {
// If the user specified an error format in the ui test,
// print the output to the stderr file; otherwise extract
// the rendered error messages from JSON and print them.
let explicit = self
.props
.compile_flags
.iter()
.any(|s| s.contains("--error-format"));
let proc_res = self.compile_test();
self.check_if_test_should_compile(&proc_res);
let expected_stderr = self.load_expected_output(UI_STDERR);
let expected_stdout = self.load_expected_output(UI_STDOUT);
let expected_fixed = self.load_expected_output(UI_FIXED);
let normalized_stdout =
self.normalize_output(&proc_res.stdout, &self.props.normalize_stdout);
let stderr = if explicit {
proc_res.stderr.clone()
} else {
json::extract_rendered(&proc_res.stderr)
};
let normalized_stderr = self.normalize_output(&stderr, &self.props.normalize_stderr);
let mut errors = 0;
if !self.props.dont_check_compiler_stdout {
errors += self.compare_output("stdout", &normalized_stdout, &expected_stdout);
}
if !self.props.dont_check_compiler_stderr {
errors += self.compare_output("stderr", &normalized_stderr, &expected_stderr);
}
let modes_to_prune = vec![CompareMode::Nll];
self.prune_duplicate_outputs(&modes_to_prune);
if self.config.compare_mode.is_some() {
// don't test rustfix with nll right now
} else if self.config.rustfix_coverage {
// Find out which tests have `MachineApplicable` suggestions but are missing
// `run-rustfix` or `run-rustfix-only-machine-applicable` headers.
//
// This will return an empty `Vec` in case the executed test file has a
// `compile-flags: --error-format=xxxx` header with a value other than `json`.
let suggestions = get_suggestions_from_json(
&proc_res.stderr,
&HashSet::new(),
Filter::MachineApplicableOnly
).unwrap_or_default();
if !suggestions.is_empty()
&& !self.props.run_rustfix
&& !self.props.rustfix_only_machine_applicable {
let mut coverage_file_path = self.config.build_base.clone();
coverage_file_path.push("rustfix_missing_coverage.txt");
debug!("coverage_file_path: {}", coverage_file_path.display());
let mut file = OpenOptions::new()
.create(true)
.append(true)
.open(coverage_file_path.as_path())
.expect("could not create or open file");
if let Err(_) = writeln!(file, "{}", self.testpaths.file.display()) {
panic!("couldn't write to {}", coverage_file_path.display());
}
}
} else if self.props.run_rustfix {
// Apply suggestions from rustc to the code itself
let unfixed_code = self
.load_expected_output_from_path(&self.testpaths.file)
.unwrap();
let suggestions = get_suggestions_from_json(
&proc_res.stderr,
&HashSet::new(),
if self.props.rustfix_only_machine_applicable {
Filter::MachineApplicableOnly
} else {
Filter::Everything
},
).unwrap();
let fixed_code = apply_suggestions(&unfixed_code, &suggestions).expect(&format!(
"failed to apply suggestions for {:?} with rustfix",
self.testpaths.file
));
errors += self.compare_output("fixed", &fixed_code, &expected_fixed);
} else if !expected_fixed.is_empty() {
panic!(
"the `// run-rustfix` directive wasn't found but a `*.fixed` \
file was found"
);
}
if errors > 0 {
println!("To update references, rerun the tests and pass the `--bless` flag");
let relative_path_to_file = self
.testpaths
.relative_dir
.join(self.testpaths.file.file_name().unwrap());
println!(
"To only update this specific test, also pass `--test-args {}`",
relative_path_to_file.display(),
);
self.fatal_proc_rec(
&format!("{} errors occurred comparing output.", errors),
&proc_res,
);
}
let expected_errors = errors::load_errors(&self.testpaths.file, self.revision);
if self.should_run_successfully() {
let proc_res = self.exec_compiled_test();
if !proc_res.status.success() {
self.fatal_proc_rec("test run failed!", &proc_res);
}
}
debug!("run_ui_test: explicit={:?} config.compare_mode={:?} expected_errors={:?} \
proc_res.status={:?} props.error_patterns={:?}",
explicit, self.config.compare_mode, expected_errors, proc_res.status,
self.props.error_patterns);
if !explicit && self.config.compare_mode.is_none() {
if !proc_res.status.success() {
if !self.props.error_patterns.is_empty() {
// "// error-pattern" comments
self.check_error_patterns(&proc_res.stderr, &proc_res);
} else {
// "//~ERROR comments"
self.check_expected_errors(expected_errors, &proc_res);
}
}
}
if self.props.run_rustfix && self.config.compare_mode.is_none() {
// And finally, compile the fixed code and make sure it both
// succeeds and has no diagnostics.
let mut rustc = self.make_compile_args(
&self.testpaths.file.with_extension(UI_FIXED),
TargetLocation::ThisFile(self.make_exe_name()),
);
rustc.arg("-L").arg(&self.aux_output_dir_name());
let res = self.compose_and_run_compiler(rustc, None);
if !res.status.success() {
self.fatal_proc_rec("failed to compile fixed code", &res);
}
if !res.stderr.is_empty() && !self.props.rustfix_only_machine_applicable {
self.fatal_proc_rec("fixed code is still producing diagnostics", &res);
}
}
}
fn run_mir_opt_test(&self) {
let proc_res = self.compile_test();
if !proc_res.status.success() {
self.fatal_proc_rec("compilation failed!", &proc_res);
}
let proc_res = self.exec_compiled_test();
if !proc_res.status.success() {
self.fatal_proc_rec("test run failed!", &proc_res);
}
self.check_mir_dump();
}
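/// Checks the expected MIR snippets that follow a `// END RUST SOURCE`
/// marker in the test file. Each snippet is delimited by `// START <name>`
/// and `// END <name>` lines, with `// ...` standing for elided output, and
/// is compared against the dump file of the same name.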
fn check_mir_dump(&self) {
let test_file_contents = fs::read_to_string(&self.testpaths.file).unwrap();
if let Some(idx) = test_file_contents.find("// END RUST SOURCE") {
let (_, tests_text) = test_file_contents.split_at(idx + "// END_RUST SOURCE".len());
let tests_text_str = String::from(tests_text);
let mut curr_test: Option<&str> = None;
let mut curr_test_contents = vec![ExpectedLine::Elision];
for l in tests_text_str.lines() {
debug!("line: {:?}", l);
if l.starts_with("// START ") {
let (_, t) = l.split_at("// START ".len());
curr_test = Some(t);
} else if l.starts_with("// END") {
let (_, t) = l.split_at("// END ".len());
if Some(t) != curr_test {
panic!("mismatched START END test name");
}
self.compare_mir_test_output(curr_test.unwrap(), &curr_test_contents);
curr_test = None;
curr_test_contents.clear();
curr_test_contents.push(ExpectedLine::Elision);
} else if l.is_empty() {
// ignore
} else if l.starts_with("//") && l.split_at("//".len()).1.trim() == "..." {
curr_test_contents.push(ExpectedLine::Elision)
} else if l.starts_with("// ") {
let (_, test_content) = l.split_at("// ".len());
curr_test_contents.push(ExpectedLine::Text(test_content));
}
}
}
}
fn check_mir_test_timestamp(&self, test_name: &str, output_file: &Path) {
let t = |file| fs::metadata(file).unwrap().modified().unwrap();
let source_file = &self.testpaths.file;
let output_time = t(output_file);
let source_time = t(source_file);
if source_time > output_time {
debug!(
"source file time: {:?} output file time: {:?}",
source_time, output_time
);
panic!(
"test source file `{}` is newer than potentially stale output file `{}`.",
source_file.display(),
test_name
);
}
}
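/// Compares one expected MIR snippet against the dumped output file of the
/// same name, honoring `ExpectedLine::Elision` gaps between runs of
/// consecutive expected lines.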
fn compare_mir_test_output(&self, test_name: &str, expected_content: &[ExpectedLine<&str>]) {
let mut output_file = PathBuf::new();
output_file.push(self.get_mir_dump_dir());
output_file.push(test_name);
debug!("comparing the contests of: {:?}", output_file);
debug!("with: {:?}", expected_content);
if !output_file.exists() {
panic!(
"Output file `{}` from test does not exist",
output_file.into_os_string().to_string_lossy()
);
}
self.check_mir_test_timestamp(test_name, &output_file);
let dumped_string = fs::read_to_string(&output_file).unwrap();
let mut dumped_lines = dumped_string
.lines()
.map(|l| nocomment_mir_line(l))
.filter(|l| !l.is_empty());
let mut expected_lines = expected_content
.iter()
.filter(|&l| {
if let &ExpectedLine::Text(l) = l {
!l.is_empty()
} else {
true
}
})
.peekable();
let compare = |expected_line, dumped_line| {
let e_norm = normalize_mir_line(expected_line);
let d_norm = normalize_mir_line(dumped_line);
debug!("found: {:?}", d_norm);
debug!("expected: {:?}", e_norm);
e_norm == d_norm
};
let error = |expected_line, extra_msg| {
let normalize_all = dumped_string
.lines()
.map(nocomment_mir_line)
.filter(|l| !l.is_empty())
.collect::<Vec<_>>()
.join("\n");
let f = |l: &ExpectedLine<_>| match l {
&ExpectedLine::Elision => "... (elided)".into(),
&ExpectedLine::Text(t) => t,
};
let expected_content = expected_content
.iter()
.map(|l| f(l))
.collect::<Vec<_>>()
.join("\n");
panic!(
"Did not find expected line, error: {}\n\
Expected Line: {:?}\n\
Test Name: {}\n\
Expected:\n{}\n\
Actual:\n{}",
extra_msg, expected_line, test_name, expected_content, normalize_all
);
};
// We expect each non-empty line to appear consecutively; non-consecutive lines
// must be separated by at least one Elision.
let mut start_block_line = None;
while let Some(dumped_line) = dumped_lines.next() {
match expected_lines.next() {
Some(&ExpectedLine::Text(expected_line)) => {
let normalized_expected_line = normalize_mir_line(expected_line);
if normalized_expected_line.contains(":{") {
start_block_line = Some(expected_line);
}
if !compare(expected_line, dumped_line) {
error!("{:?}", start_block_line);
error(
expected_line,
format!(
"Mismatch in lines\n\
Current block: {}\n\
Actual Line: {:?}",
start_block_line.unwrap_or("None"),
dumped_line
),
);
}
}
Some(&ExpectedLine::Elision) => {
// skip any number of elisions in a row.
while let Some(&&ExpectedLine::Elision) = expected_lines.peek() {
expected_lines.next();
}
if let Some(&ExpectedLine::Text(expected_line)) = expected_lines.next() {
let mut found = compare(expected_line, dumped_line);
if found {
continue;
}
while let Some(dumped_line) = dumped_lines.next() {
found = compare(expected_line, dumped_line);
if found {
break;
}
}
if !found {
error(expected_line, "ran out of mir dump to match against".into());
}
}
}
None => {}
}
}
}
fn get_mir_dump_dir(&self) -> PathBuf {
let mut mir_dump_dir = PathBuf::from(self.config.build_base.as_path());
debug!("input_file: {:?}", self.testpaths.file);
mir_dump_dir.push(&self.testpaths.relative_dir);
mir_dump_dir.push(self.testpaths.file.file_stem().unwrap());
mir_dump_dir
}
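// Illustrative example (hypothetical paths) for `get_mir_dump_dir` above: with
// `build_base = build/test/mir-opt`, `relative_dir = copy-prop`, and test file `foo.rs`,
// the dump directory resolves to `build/test/mir-opt/copy-prop/foo`.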
fn normalize_output(&self, output: &str, custom_rules: &[(String, String)]) -> String {
let cflags = self.props.compile_flags.join(" ");
let json = cflags.contains("--error-format json")
|| cflags.contains("--error-format pretty-json")
|| cflags.contains("--error-format=json")
|| cflags.contains("--error-format=pretty-json");
let mut normalized = output.to_string();
let mut normalize_path = |from: &Path, to: &str| {
let mut from = from.display().to_string();
if json {
from = from.replace("\\", "\\\\");
}
normalized = normalized.replace(&from, to);
};
let parent_dir = self.testpaths.file.parent().unwrap();
normalize_path(parent_dir, "$DIR");
// Paths into the libstd/libcore
let src_dir = self.config.src_base.parent().unwrap().parent().unwrap();
normalize_path(src_dir, "$SRC_DIR");
// Paths into the build directory
let test_build_dir = &self.config.build_base;
let parent_build_dir = test_build_dir.parent().unwrap().parent().unwrap().parent().unwrap();
// eg. /home/user/rust/build/x86_64-unknown-linux-gnu/test/ui
normalize_path(test_build_dir, "$TEST_BUILD_DIR");
// eg. /home/user/rust/build
normalize_path(parent_build_dir, "$BUILD_DIR");
// Paths into lib directory.
normalize_path(&parent_build_dir.parent().unwrap().join("lib"), "$LIB_DIR");
if json {
// Escaped newlines in JSON strings should be readable
// in the stderr files. There's no point in being strictly correct,
// since only humans process the stderr files.
// Thus we just turn escaped newlines back into newlines.
normalized = normalized.replace("\\n", "\n");
}
// If there are `$SRC_DIR` normalizations with line and column numbers, then replace them
// with placeholders, as we do not want tests to need updating whenever compiler source
// code changes.
// eg. $SRC_DIR/libcore/mem.rs:323:14 becomes $SRC_DIR/libcore/mem.rs:LL:COL
normalized = Regex::new("SRC_DIR(.+):\\d+:\\d+").unwrap()
.replace_all(&normalized, "SRC_DIR$1:LL:COL").into_owned();
normalized = Self::normalize_platform_differences(&normalized);
normalized = normalized.replace("\t", "\\t"); // makes tabs visible
// Remove test annotations like `//~ ERROR text` from the output,
// since they duplicate actual errors and make the output hard to read.
normalized = Regex::new("\\s*//(\\[.*\\])?~.*").unwrap()
.replace_all(&normalized, "").into_owned();
for rule in custom_rules {
let re = Regex::new(&rule.0).expect("bad regex in custom normalization rule");
normalized = re.replace_all(&normalized, &rule.1[..]).into_owned();
}
normalized
}
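// Worked example (hypothetical paths) of `normalize_output` above: the parent directory
// of the test file, e.g. `/home/user/rust/src/test/ui`, is rewritten to `$DIR`, build
// paths become `$TEST_BUILD_DIR`/`$BUILD_DIR`, and a span like
// `$SRC_DIR/libcore/mem.rs:323:14` is further reduced to `$SRC_DIR/libcore/mem.rs:LL:COL`
// so that tests survive unrelated compiler-source changes.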
/// Normalize output differences across platforms. Generally changes Windows output to be more
/// Unix-like.
///
/// Replaces backslashes in paths with forward slashes, and replaces CRLF line endings
/// with LF.
fn normalize_platform_differences(output: &str) -> String {
lazy_static! {
/// Used to find Windows paths.
///
/// It's not possible to detect paths in the error messages generally, but this is a
/// decent enough heuristic.
static ref PATH_BACKSLASH_RE: Regex = Regex::new(r#"(?x)
(?:
# Match paths that don't include spaces.
(?:\\[\pL\pN\.\-_']+)+\.\pL+
|
# If the path starts with a well-known root, then allow spaces.
\$(?:DIR|SRC_DIR|TEST_BUILD_DIR|BUILD_DIR|LIB_DIR)(?:\\[\pL\pN\.\-_' ]+)+
)"#
).unwrap();
}
let output = output.replace(r"\\", r"\");
PATH_BACKSLASH_RE.replace_all(&output, |caps: &Captures<'_>| {
// Rewrite backslashes in the matched Windows-style path to forward slashes.
caps[0].replace(r"\", "/")
}).replace("\r\n", "\n")
}
fn expected_output_path(&self, kind: &str) -> PathBuf {
let mut path = expected_output_path(
&self.testpaths,
self.revision,
&self.config.compare_mode,
kind,
);
if !path.exists() {
if let Some(CompareMode::Polonius) = self.config.compare_mode {
path = expected_output_path(
&self.testpaths,
self.revision,
&Some(CompareMode::Nll),
kind,
);
}
}
if !path.exists() {
path = expected_output_path(&self.testpaths, self.revision, &None, kind);
}
path
}
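// Resolution order in `expected_output_path` above (file naming is approximate): the
// mode-specific expectation (roughly `<test>.<mode>.<kind>`) is preferred; under
// `CompareMode::Polonius` a missing file falls back to the `nll` variant, and if that is
// also missing, the plain `<test>.<kind>` file is used.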
fn load_expected_output(&self, kind: &str) -> String {
let path = self.expected_output_path(kind);
if path.exists() {
match self.load_expected_output_from_path(&path) {
Ok(x) => x,
Err(x) => self.fatal(&x),
}
} else {
String::new()
}
}
fn load_expected_output_from_path(&self, path: &Path) -> Result<String, String> {
fs::read_to_string(path).map_err(|err| {
format!("failed to load expected output from `{}`: {}", path.display(), err)
})
}
fn delete_file(&self, file: &PathBuf) {
if let Err(e) = fs::remove_file(file) {
self.fatal(&format!(
"failed to delete `{}`: {}",
file.display(),
e,
));
}
}
fn compare_output(&self, kind: &str, actual: &str, expected: &str) -> usize {
if actual == expected {
return 0;
}
if !self.config.bless {
if expected.is_empty() {
println!("normalized {}:\n{}\n", kind, actual);
} else {
println!("diff of {}:\n", kind);
let diff_results = make_diff(expected, actual, 3);
for result in diff_results {
let mut line_number = result.line_number;
for line in result.lines {
match line {
DiffLine::Expected(e) => {
println!("-\t{}", e);
line_number += 1;
}
DiffLine::Context(c) => {
println!("{}\t{}", line_number, c);
line_number += 1;
}
DiffLine::Resulting(r) => {
println!("+\t{}", r);
}
}
}
println!("");
}
}
}
let mode = self.config.compare_mode.as_ref().map_or("", |m| m.to_str());
let output_file = self
.output_base_name()
.with_extra_extension(self.revision.unwrap_or(""))
.with_extra_extension(mode)
.with_extra_extension(kind);
let mut files = vec![output_file];
if self.config.bless {
files.push(expected_output_path(
self.testpaths,
self.revision,
&self.config.compare_mode,
kind,
));
}
for output_file in &files {
if actual.is_empty() {
self.delete_file(output_file);
} else if let Err(err) = fs::write(&output_file, &actual) {
self.fatal(&format!(
"failed to write {} to `{}`: {}",
kind,
output_file.display(),
err,
));
}
}
println!("\nThe actual {0} differed from the expected {0}.", kind);
for output_file in files {
println!("Actual {} saved to {}", kind, output_file.display());
}
if self.config.bless {
0
} else {
1
}
}
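// Note on `compare_output`'s return value: it is the number of mismatches reported to
// the caller: `0` when the outputs match (or when blessing rewrote the expected files),
// `1` otherwise.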
fn prune_duplicate_output(&self, mode: CompareMode, kind: &str, canon_content: &str) {
let examined_path = expected_output_path(
&self.testpaths,
self.revision,
&Some(mode),
kind,
);
let examined_content = self
.load_expected_output_from_path(&examined_path)
.unwrap_or_else(|_| String::new());
if examined_path.exists() && canon_content == &examined_content {
self.delete_file(&examined_path);
}
}
fn prune_duplicate_outputs(&self, modes: &[CompareMode]) {
if self.config.bless {
for kind in UI_EXTENSIONS {
let canon_comparison_path = expected_output_path(
&self.testpaths,
self.revision,
&None,
kind,
);
if let Ok(canon) = self.load_expected_output_from_path(&canon_comparison_path) {
for mode in modes {
self.prune_duplicate_output(mode.clone(), kind, &canon);
}
}
}
}
}
fn create_stamp(&self) {
let stamp = crate::stamp(&self.config, self.testpaths, self.revision);
fs::write(&stamp, compute_stamp_hash(&self.config)).unwrap();
}
fn run_yk_tir_test(&self) {
let proc_res = self.compile_test();
if !proc_res.status.success() {
self.fatal_proc_rec("compilation failed!", &proc_res);
}
self.check_yk_tir_dump();
}
fn check_yk_tir_dump(&self) {
let test_file_contents = fs::read_to_string(&self.testpaths.file).unwrap();
if let Some(idx) = test_file_contents.find("// END RUST SOURCE") {
let (_, test_text) = test_file_contents.split_at(idx + "// END RUST SOURCE".len());
let mut test_lines = vec![ExpectedLine::Elision];
for l in test_text.lines() {
if l.is_empty() {
// ignore
} else if l.starts_with("//") && l.split_at("//".len()).1.trim() == "..." {
test_lines.push(ExpectedLine::Elision)
} else if l.starts_with("// ") {
let (_, test_content) = l.split_at("// ".len());
test_lines.push(ExpectedLine::Text(test_content));
}
}
// From here on out, we are re-using parts of the `MirOpt` test suite's matcher. The
// "exe_name" here is actually a textual MIR dump because we invoked rustc with:
// `--emit yk-mir`.
self.compare_mir_test_output(self.make_exe_name().to_str().unwrap(), &test_lines);
} else {
panic!("no `// END RUST SOURCE` marker in test file!");
}
}
}
struct ProcArgs {
prog: String,
args: Vec<String>,
}
pub struct ProcRes {
status: ExitStatus,
stdout: String,
stderr: String,
cmdline: String,
}
impl ProcRes {
pub fn fatal(&self, err: Option<&str>) -> ! {
if let Some(e) = err {
println!("\nerror: {}", e);
}
print!(
"\
status: {}\n\
command: {}\n\
stdout:\n\
------------------------------------------\n\
{}\n\
------------------------------------------\n\
stderr:\n\
------------------------------------------\n\
{}\n\
------------------------------------------\n\
\n",
self.status, self.cmdline,
json::extract_rendered(&self.stdout),
json::extract_rendered(&self.stderr),
);
// Use resume_unwind instead of panic!() to prevent a panic message + backtrace from
// compiletest, which is unnecessary noise.
std::panic::resume_unwind(Box::new(()));
}
}
enum TargetLocation {
ThisFile(PathBuf),
ThisDirectory(PathBuf),
}
#[derive(Clone, PartialEq, Eq)]
enum ExpectedLine<T: AsRef<str>> {
Elision,
Text(T),
}
impl<T> fmt::Debug for ExpectedLine<T>
where
T: AsRef<str> + fmt::Debug,
{
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
if let &ExpectedLine::Text(ref t) = self {
write!(formatter, "{:?}", t)
} else {
write!(formatter, "\"...\" (Elision)")
}
}
}
fn normalize_mir_line(line: &str) -> String {
nocomment_mir_line(line).replace(char::is_whitespace, "")
}
fn nocomment_mir_line(line: &str) -> &str {
if let Some(idx) = line.find("//") {
let (l, _) = line.split_at(idx);
l.trim_end()
} else {
line
}
}
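// Worked example for the two helpers above:
// `normalize_mir_line("    let _1 = const 5; // user comment")` first drops the comment
// and trailing whitespace (`nocomment_mir_line` yields "    let _1 = const 5;") and then
// strips all remaining whitespace, producing "let_1=const5;".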
fn read2_abbreviated(mut child: Child) -> io::Result<Output> {
use crate::read2::read2;
use std::mem::replace;
const HEAD_LEN: usize = 160 * 1024;
const TAIL_LEN: usize = 256 * 1024;
enum ProcOutput {
Full(Vec<u8>),
Abbreviated {
head: Vec<u8>,
skipped: usize,
tail: Box<[u8]>,
},
}
impl ProcOutput {
fn extend(&mut self, data: &[u8]) {
let new_self = match *self {
ProcOutput::Full(ref mut bytes) => {
bytes.extend_from_slice(data);
let new_len = bytes.len();
if new_len <= HEAD_LEN + TAIL_LEN {
return;
}
let tail = bytes.split_off(new_len - TAIL_LEN).into_boxed_slice();
let head = replace(bytes, Vec::new());
let skipped = new_len - HEAD_LEN - TAIL_LEN;
ProcOutput::Abbreviated {
head,
skipped,
tail,
}
}
ProcOutput::Abbreviated {
ref mut skipped,
ref mut tail,
..
} => {
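// Count every incoming byte as skipped, then keep only the most recent TAIL_LEN
// bytes in `tail`: overwrite its oldest bytes and rotate the new data to the back
// (or, if this chunk alone exceeds TAIL_LEN, keep just the chunk's last TAIL_LEN bytes).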
*skipped += data.len();
if data.len() <= TAIL_LEN {
tail[..data.len()].copy_from_slice(data);
tail.rotate_left(data.len());
} else {
tail.copy_from_slice(&data[(data.len() - TAIL_LEN)..]);
}
return;
}
};
*self = new_self;
}
fn into_bytes(self) -> Vec<u8> {
match self {
ProcOutput::Full(bytes) => bytes,
ProcOutput::Abbreviated {
mut head,
skipped,
tail,
} => {
write!(&mut head, "\n\n<<<<<< SKIPPED {} BYTES >>>>>>\n\n", skipped).unwrap();
head.extend_from_slice(&tail);
head
}
}
}
}
let mut stdout = ProcOutput::Full(Vec::new());
let mut stderr = ProcOutput::Full(Vec::new());
drop(child.stdin.take());
read2(
child.stdout.take().unwrap(),
child.stderr.take().unwrap(),
&mut |is_stdout, data, _| {
if is_stdout { &mut stdout } else { &mut stderr }.extend(data);
data.clear();
},
)?;
let status = child.wait()?;
Ok(Output {
status,
stdout: stdout.into_bytes(),
stderr: stderr.into_bytes(),
})
}
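// Summary of the abbreviation behavior above: each of stdout and stderr is kept verbatim
// up to HEAD_LEN + TAIL_LEN bytes (160 KiB + 256 KiB); a longer stream keeps its first
// 160 KiB and last 256 KiB, joined by a `<<<<<< SKIPPED n BYTES >>>>>>` marker.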
#[cfg(test)]
mod tests {
use super::TestCx;
#[test]
fn normalize_platform_differences() {
assert_eq!(
TestCx::normalize_platform_differences(r"$DIR\foo.rs"),
"$DIR/foo.rs"
);
assert_eq!(
TestCx::normalize_platform_differences(r"$BUILD_DIR\..\parser.rs"),
"$BUILD_DIR/../parser.rs"
);
assert_eq!(
TestCx::normalize_platform_differences(r"$DIR\bar.rs hello\nworld"),
r"$DIR/bar.rs hello\nworld"
);
assert_eq!(
TestCx::normalize_platform_differences(r"either bar\baz.rs or bar\baz\mod.rs"),
r"either bar/baz.rs or bar/baz/mod.rs",
);
assert_eq!(
TestCx::normalize_platform_differences(r"`.\some\path.rs`"),
r"`./some/path.rs`",
);
assert_eq!(
TestCx::normalize_platform_differences(r"`some\path.rs`"),
r"`some/path.rs`",
);
assert_eq!(
TestCx::normalize_platform_differences(r"$DIR\path-with-dashes.rs"),
r"$DIR/path-with-dashes.rs"
);
assert_eq!(
TestCx::normalize_platform_differences(r"$DIR\path_with_underscores.rs"),
r"$DIR/path_with_underscores.rs",
);
assert_eq!(
TestCx::normalize_platform_differences(r"$DIR\foo.rs:12:11"), "$DIR/foo.rs:12:11",
);
assert_eq!(
TestCx::normalize_platform_differences(r"$DIR\path with spaces 'n' quotes"),
"$DIR/path with spaces 'n' quotes",
);
assert_eq!(
TestCx::normalize_platform_differences(r"$DIR\file_with\no_extension"),
"$DIR/file_with/no_extension",
);
assert_eq!(TestCx::normalize_platform_differences(r"\n"), r"\n");
assert_eq!(TestCx::normalize_platform_differences(r"{ \n"), r"{ \n");
assert_eq!(TestCx::normalize_platform_differences(r"`\]`"), r"`\]`");
assert_eq!(TestCx::normalize_platform_differences(r#""\{""#), r#""\{""#);
assert_eq!(
TestCx::normalize_platform_differences(r#"write!(&mut v, "Hello\n")"#),
r#"write!(&mut v, "Hello\n")"#
);
assert_eq!(
TestCx::normalize_platform_differences(r#"println!("test\ntest")"#),
r#"println!("test\ntest")"#,
);
}
}
users-test.js
import { setupTest } from 'ember-qunit';
import { module, test } from 'qunit';
import fetch from 'fetch';
import setupMirage from '../helpers/setup-mirage';
module('Mirage | Users', function (hooks) {
setupTest(hooks);
setupMirage(hooks);
module('GET /api/v1/users/:id', function () {
test('returns 404 for unknown users', async function (assert) {
let response = await fetch('/api/v1/users/foo');
assert.equal(response.status, 404);
let responsePayload = await response.json();
assert.deepEqual(responsePayload, { errors: [{ detail: 'Not Found' }] });
});
test('returns a user object for known users', async function (assert) {
let user = this.server.create('user', { name: 'John Doe' });
let response = await fetch(`/api/v1/users/${user.login}`);
assert.equal(response.status, 200);
let responsePayload = await response.json();
assert.deepEqual(responsePayload, {
user: {
id: 1,
avatar: 'https://avatars1.githubusercontent.com/u/14631425?v=4',
login: 'john-doe',
name: 'John Doe',
url: 'https://github.com/john-doe',
},
});
});
});
module('PUT /api/v1/users/:id', function () {
test('updates the user with a new email address', async function (assert) {
let user = this.server.create('user', { email: '[email protected]' });
this.server.create('mirage-session', { user });
let body = JSON.stringify({ user: { email: '[email protected]' } });
let response = await fetch(`/api/v1/users/${user.id}`, { method: 'PUT', body });
assert.equal(response.status, 200);
let responsePayload = await response.json();
assert.deepEqual(responsePayload, { ok: true });
user.reload();
assert.strictEqual(user.email, '[email protected]');
assert.strictEqual(user.emailVerified, false);
assert.strictEqual(user.emailVerificationToken, 'secret123');
});
test('returns 403 when not logged in', async function (assert) {
let user = this.server.create('user', { email: '[email protected]' });
let body = JSON.stringify({ user: { email: '[email protected]' } });
let response = await fetch(`/api/v1/users/${user.id}`, { method: 'PUT', body });
assert.equal(response.status, 403);
let responsePayload = await response.json();
assert.deepEqual(responsePayload, { errors: [{ detail: 'must be logged in to perform that action' }] });
user.reload();
assert.strictEqual(user.email, '[email protected]');
});
test('returns 400 when requesting the wrong user id', async function (assert) {
let user = this.server.create('user', { email: '[email protected]' });
this.server.create('mirage-session', { user });
let body = JSON.stringify({ user: { email: '[email protected]' } });
let response = await fetch(`/api/v1/users/wrong-id`, { method: 'PUT', body });
assert.equal(response.status, 400);
let responsePayload = await response.json();
assert.deepEqual(responsePayload, { errors: [{ detail: 'current user does not match requested user' }] });
user.reload();
assert.strictEqual(user.email, '[email protected]');
});
test('returns 400 when sending an invalid payload', async function (assert) {
let user = this.server.create('user', { email: '[email protected]' });
this.server.create('mirage-session', { user });
let body = JSON.stringify({});
let response = await fetch(`/api/v1/users/${user.id}`, { method: 'PUT', body });
assert.equal(response.status, 400);
let responsePayload = await response.json();
assert.deepEqual(responsePayload, { errors: [{ detail: 'invalid json request' }] });
user.reload();
assert.strictEqual(user.email, '[email protected]');
});
test('returns 400 when sending an empty email address', async function (assert) {
let user = this.server.create('user', { email: '[email protected]' });
this.server.create('mirage-session', { user });
let body = JSON.stringify({ user: { email: '' } });
let response = await fetch(`/api/v1/users/${user.id}`, { method: 'PUT', body });
assert.equal(response.status, 400);
let responsePayload = await response.json();
assert.deepEqual(responsePayload, { errors: [{ detail: 'empty email rejected' }] });
user.reload();
assert.strictEqual(user.email, '[email protected]');
});
});
module('PUT /api/v1/users/:id/resend', function () {
test('returns `ok`', async function (assert) {
let user = this.server.create('user');
this.server.create('mirage-session', { user });
let response = await fetch(`/api/v1/users/${user.id}/resend`, { method: 'PUT' });
assert.equal(response.status, 200);
let responsePayload = await response.json();
assert.deepEqual(responsePayload, { ok: true });
});
test('returns 403 when not logged in', async function (assert) {
let user = this.server.create('user');
let response = await fetch(`/api/v1/users/${user.id}/resend`, { method: 'PUT' });
assert.equal(response.status, 403);
let responsePayload = await response.json();
assert.deepEqual(responsePayload, { errors: [{ detail: 'must be logged in to perform that action' }] });
});
test('returns 400 when requesting the wrong user id', async function (assert) {
let user = this.server.create('user');
this.server.create('mirage-session', { user });
let response = await fetch(`/api/v1/users/wrong-id/resend`, { method: 'PUT' });
assert.equal(response.status, 400);
let responsePayload = await response.json();
assert.deepEqual(responsePayload, { errors: [{ detail: 'current user does not match requested user' }] });
});
});
});