hunk
dict | file
stringlengths 0
11.8M
| file_path
stringlengths 2
234
| label
int64 0
1
| commit_url
stringlengths 74
103
| dependency_score
sequencelengths 5
5
|
---|---|---|---|---|---|
{
"id": 9,
"code_window": [
"golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=\n",
"golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=\n",
"golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=\n",
"golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=\n",
"golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=\n",
"golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=\n",
"golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=\n",
"golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE=\n",
"golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=\n",
"golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\n",
"golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\n",
"golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "go.sum",
"type": "replace",
"edit_start_line_idx": 1221
} | cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI=
cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8=
cloud.google.com/go/kms v1.15.5 h1:pj1sRfut2eRbD9pFRjNnPNg/CzJPuQAzUujMIM1vVeM=
cloud.google.com/go/kms v1.15.5/go.mod h1:cU2H5jnp6G2TDpUGZyqTCoy1n16fbubHZjmVXSMtwDI=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/pubsub v1.33.0 h1:6SPCPvWav64tj0sVX/+npCBKhUi/UjJehy9op/V3p2g=
cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.36.0 h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8=
cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 h1:9kDVnTz3vbfweTqAUmk/a/pH5pWFCHtvRpHYC0G/dcA=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4Qufg58y+qElGOt5qv0z1mURkRY=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw72xHJc34BNNykqSOeEJDAWkhf0u12/Jk=
github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw=
github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mod h1:HPYO+50pSWkPoj9Q/eq0aRGByCL6ScRlUmiEX5Zgm+w=
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ=
github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM=
github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
github.com/Jeffail/gabs/v2 v2.5.1 h1:ANfZYjpMlfTTKebycu4X1AgkVWumFVDYQl7JwOr4mDk=
github.com/Jeffail/gabs/v2 v2.5.1/go.mod h1:xCn81vdHKxFUuWWAaD5jCTQDNPBMh5pPs9IJ+NcziBI=
github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8=
github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk=
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
github.com/Shopify/sarama v1.29.0 h1:ARid8o8oieau9XrHI55f/L3EoRAhm9px6sonbD7yuUE=
github.com/Shopify/sarama v1.29.0/go.mod h1:2QpgD79wpdAESqNQMxNc0KYMkycd4slxGdV3TWSVqrU=
github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
github.com/YangKeao/ldap/v3 v3.4.5-0.20230421065457-369a3bab1117 h1:+OqGGFc2YHFd82aSHmjlILVt1t4JWJjrNIfV8cVEPow=
github.com/YangKeao/ldap/v3 v3.4.5-0.20230421065457-369a3bab1117/go.mod h1:bMGIq3AGbytbaMwf8wdv5Phdxz0FWHTIYMSzyrYgnQs=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc=
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA=
github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/aliyun/alibaba-cloud-sdk-go v1.61.1581 h1:Q/yk4z/cHUVZfgTqtD09qeYBxHwshQAjVRX73qs8UH0=
github.com/aliyun/alibaba-cloud-sdk-go v1.61.1581/go.mod h1:RcDobYh8k5VP6TNybz9m++gL3ijVI5wueVr0EM10VsU=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/apache/skywalking-eyes v0.4.0 h1:O13kdRU6FCEZevfD01mdhTgCZLLfPZIQ0GXZrLl7FpQ=
github.com/apache/skywalking-eyes v0.4.0/go.mod h1:WblDbBgOLsLN0FJEBa9xj6PhuUA/J6spKYVTG4/F8Ls=
github.com/apache/thrift v0.0.0-20181112125854-24918abba929/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apache/thrift v0.13.1-0.20201008052519-daf620915714 h1:Jz3KVLYY5+JO7rDiX0sAuRGtuv2vG01r17Y9nLMWNUw=
github.com/apache/thrift v0.13.1-0.20201008052519-daf620915714/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s=
github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI=
github.com/aws/aws-sdk-go v1.30.19/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/aws/aws-sdk-go v1.44.204/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go v1.44.256/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go v1.45.25 h1:c4fLlh5sLdK2DCRTY1z0hyuJZU4ygxX8m1FswL6/nF4=
github.com/aws/aws-sdk-go v1.45.25/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
github.com/bazelbuild/buildtools v0.0.0-20230926111657-7d855c59baeb h1:4k69c5E7Sa7jmNtv9itBHYA4Z5pfurInuRrtgohxZeA=
github.com/bazelbuild/buildtools v0.0.0-20230926111657-7d855c59baeb/go.mod h1:689QdV3hBP7Vo9dJMmzhoYIyo/9iMhEmHkJcnaPRCbo=
github.com/bazelbuild/rules_go v0.42.1-0.20231101215950-df20c987afcb h1:CPn7VHaV3czTgk4LdEO+Od5DyYb6HXLL5CUIPignRLE=
github.com/bazelbuild/rules_go v0.42.1-0.20231101215950-df20c987afcb/go.mod h1:TFLfii8e49kTgn329knh1lsJFKdxyp/hKlWObY66xwY=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/blacktear23/go-proxyprotocol v1.0.6 h1:eTt6UMpEnq59NjON49b3Cay8Dm0sCs1nDliwgkyEsRM=
github.com/blacktear23/go-proxyprotocol v1.0.6/go.mod h1:FSCbgnRZrQXazBLL5snfBbrcFSMtcmUDhSRb9OfFA1o=
github.com/bmatcuk/doublestar/v2 v2.0.4 h1:6I6oUiT/sU27eE2OFcWqBhL1SwjyvQuOssxT4a1yidI=
github.com/bmatcuk/doublestar/v2 v2.0.4/go.mod h1:QMmcs3H2AUQICWhfzLXz+IYln8lRQmTZRptLie8RgRw=
github.com/butuzov/mirror v1.1.0 h1:ZqX54gBVMXu78QLoiqdwpl2mgmoOJTk7s4p4o+0avZI=
github.com/butuzov/mirror v1.1.0/go.mod h1:8Q0BdQU6rC6WILDiBM60DBfvV78OLJmMmixe7GF45AE=
github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5 h1:BjkPE3785EwPhhyuFkbINB+2a1xATwk8SNDWnJiD41g=
github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5/go.mod h1:jtAfVaU/2cu1+wdSRPWE2c1N2qeAA3K4RH9pYgqwets=
github.com/carlmjohnson/flagext v0.21.0 h1:/c4uK3ie786Z7caXLcIMvePNSSiH3bQVGDvmGLMme60=
github.com/carlmjohnson/flagext v0.21.0/go.mod h1:Eenv0epIUAr4NuedNmkzI8WmBmjIxZC239XcKxYS2ac=
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4=
github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ=
github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc=
github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww=
github.com/cheggaaa/pb/v3 v3.0.8 h1:bC8oemdChbke2FHIIGy9mn4DPJ2caZYQnfbRqwmdCoA=
github.com/cheggaaa/pb/v3 v3.0.8/go.mod h1:UICbiLec/XO6Hw6k+BHEtHeQFzzBH4i2/qk/ow1EJTA=
github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4YrOU54=
github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudfoundry/gosigar v1.3.6 h1:gIc08FbB3QPb+nAQhINIK/qhf5REKkY0FTGgRGXkcVc=
github.com/cloudfoundry/gosigar v1.3.6/go.mod h1:lNWstu5g5gw59O09Y+wsMNFzBSnU8a0u+Sfx4dq360E=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v1.0.0/go.mod h1:5Ib8Meh+jk1RlHIXej6Pzevx/NLlNvQB9pmSBZErGA4=
github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA=
github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
github.com/cockroachdb/errors v1.6.1/go.mod h1:tm6FTP5G81vwJ5lC0SizQo374JNCOPrHyXGitRJoDqM=
github.com/cockroachdb/errors v1.8.1 h1:A5+txlVZfOqFBDa4mGz2bUWSp0aHElvHX2bKkdbQu+Y=
github.com/cockroachdb/errors v1.8.1/go.mod h1:qGwQn6JmZ+oMjuLwjWzUNqblqk0xl4CVV3SQbGwK7Ac=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
github.com/cockroachdb/pebble v0.0.0-20220415182917-06c9d3be25b3 h1:snjwkhKc/ZtYIC/hg6UoT5PrhXcZmCsaB+z0bonMDcU=
github.com/cockroachdb/pebble v0.0.0-20220415182917-06c9d3be25b3/go.mod h1:buxOO9GBtOcq1DiXDpIPYrmxY020K2A8lOrwno5FetU=
github.com/cockroachdb/redact v1.0.8 h1:8QG/764wK+vmEYoOlfobpe12EQcS81ukx/a4hdVMxNw=
github.com/cockroachdb/redact v1.0.8/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 h1:IKgmqgMQlVJIZj19CdocBeSfSaiCbEBZGKODaixqtHM=
github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/colinmarc/hdfs/v2 v2.1.1/go.mod h1:M3x+k8UKKmxtFu++uAZ0OtDU8jR3jnaZIAc6yK4Ue0c=
github.com/coocood/bbloom v0.0.0-20190830030839-58deb6228d64 h1:W1SHiII3e0jVwvaQFglwu3kS9NLxOeTpvik7MbKCyuQ=
github.com/coocood/bbloom v0.0.0-20190830030839-58deb6228d64/go.mod h1:F86k/6c7aDUdwSUevnLpHS/3Q9hzYCE99jGk2xsHnt0=
github.com/coocood/freecache v1.2.1 h1:/v1CqMq45NFH9mp/Pt142reundeBM0dVUD3osQBeu/U=
github.com/coocood/freecache v1.2.1/go.mod h1:RBUWa/Cy+OHdfTGFEhEuE1pMCMX51Ncizj7rthiQ3vk=
github.com/coocood/rtutil v0.0.0-20190304133409-c84515f646f2 h1:NnLfQ77q0G4k2Of2c1ceQ0ec6MkLQyDp+IGdVM0D8XM=
github.com/coocood/rtutil v0.0.0-20190304133409-c84515f646f2/go.mod h1:7qG7YFnOALvsx6tKTNmQot8d7cGFXM9TidzvRFLWYwM=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso=
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
github.com/daixiang0/gci v0.11.2 h1:Oji+oPsp3bQ6bNNgX30NBAVT18P4uBH4sRZnlOlTj7Y=
github.com/daixiang0/gci v0.11.2/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI=
github.com/danjacques/gofslock v0.0.0-20191023191349-0a45f885bc37 h1:X6mKGhCFOxrKeeHAjv/3UvT6e5RRxW6wRdlqlV6/H4w=
github.com/danjacques/gofslock v0.0.0-20191023191349-0a45f885bc37/go.mod h1:DC3JtzuG7kxMvJ6dZmf2ymjNyoXwgtklr7FN+Um2B0U=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8=
github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA=
github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dolthub/maphash v0.1.0 h1:bsQ7JsF4FkkWyrP3oCnFJgrCUAFbFf3kOl4L/QxPDyQ=
github.com/dolthub/maphash v0.1.0/go.mod h1:gkg4Ch4CdCDu5h6PMriVLawB7koZ+5ijb9puGMV50a4=
github.com/dolthub/swiss v0.2.1 h1:gs2osYs5SJkAaH5/ggVJqXQxRXtWshF6uE0lgR/Y3Gw=
github.com/dolthub/swiss v0.2.1/go.mod h1:8AhKZZ1HK7g18j7v7k6c5cYIGEZJcPn0ARsai8cUrh0=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q=
github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
github.com/fatanugraha/noloopclosure v0.1.1 h1:AhepjAikNpk50qTZoipHZqeZtnyKT/C2Tk5dGn7nC+A=
github.com/fatanugraha/noloopclosure v0.1.1/go.mod h1:Mi9CiG5QvEgvPLtZLsTzjYwjIDnWAbo10r0BG7JpJII=
github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4=
github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g=
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/form3tech-oss/jwt-go v3.2.6-0.20210809144907-32ab6a8243d7+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/fsouza/fake-gcs-server v1.44.0 h1:Lw/mrvs45AfCUPVpry6qFkZnZPqe9thpLQHW+ZwHRLs=
github.com/fsouza/fake-gcs-server v1.44.0/go.mod h1:M02aKoTv9Tnlf+gmWnTok1PWVCUHDntVbHxpd0krTfo=
github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E=
github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM=
github.com/go-asn1-ber/asn1-ber v1.5.4 h1:vXT6d/FNDiELJnLb6hGNa309LMsrCoYFvpwHDF0+Y1A=
github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v0.0.0-20180717141946-636bf0302bc9/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM=
github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A=
github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs=
github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE=
github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v0.0.0-20180814211427-aa810b61a9c7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e h1:ULcKCDV1LOZPFxGZaA6TlQbiM3J2GCPnkx/bGF6sX/g=
github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e/go.mod h1:Pm5KhLPA8gSnQwrQ6ukebRcapGb/BG9iUkdaiCcGHJM=
github.com/golangci/golangci-lint v1.55.2 h1:yllEIsSJ7MtlDBwDJ9IMBkyEUz2fYE0b5B8IUgO1oP8=
github.com/golangci/golangci-lint v1.55.2/go.mod h1:H60CZ0fuqoTwlTvnbyjhpZPWp7KmsjwV2yupIMiMXbM=
github.com/golangci/gosec v0.0.0-20180901114220-8afd9cbb6cfb h1:Bi7BYmZVg4C+mKGi8LeohcP2GGUl2XJD4xCkJoZSaYc=
github.com/golangci/gosec v0.0.0-20180901114220-8afd9cbb6cfb/go.mod h1:ON/c2UR0VAAv6ZEAFKhjCLplESSmRFfZcDLASbI1GWo=
github.com/golangci/misspell v0.4.1 h1:+y73iSicVy2PqyX7kmUefHusENlrP9YwuHZHPLGQj/g=
github.com/golangci/misspell v0.4.1/go.mod h1:9mAN1quEo3DlpbaIKKyEvRxK1pwqR9s/Sea1bJCtlNI=
github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21 h1:leSNB7iYzLYSSx3J/s5sVf4Drkc68W2wm4Ixh/mr0us=
github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI=
github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-github/v33 v33.0.0/go.mod h1:GMdDnVZY/2TsWgp/lkYnpSAh6TrzhANBBwm6k6TTEXg=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/licensecheck v0.3.1 h1:QoxgoDkaeC4nFrtGN1jV7IPmDCHFNIVh54e5hSt6sPs=
github.com/google/licensecheck v0.3.1/go.mod h1:ORkR35t/JjW+emNKtfJDII0zlciG9JgbT7SmsohlHmY=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=
github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/google/pprof v0.0.0-20240117000934-35fc243c5815 h1:WzfWbQz/Ze8v6l++GGbGNFZnUShVpP/0xffCPLL+ax8=
github.com/google/pprof v0.0.0-20240117000934-35fc243c5815/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg=
github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4=
github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
github.com/google/skylark v0.0.0-20181101142754-a5f7082aabed h1:rZdD1GeRTHD1aG+VIvhQEYXurx6Wfg4QIT5YVl2tSC8=
github.com/google/skylark v0.0.0-20181101142754-a5f7082aabed/go.mod h1:CKSX6SxHW1vp20ZNaeGe3TFFBIwCG6vaYrpAiOzX+NA=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601 h1:mrEEilTAUmaAORhssPPkxj84TsHrPMLBGW2Z4SoTxm8=
github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0=
github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4=
github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk=
github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc=
github.com/gostaticanalysis/comment v1.4.2 h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q=
github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM=
github.com/gostaticanalysis/forcetypeassert v0.1.0 h1:6eUflI3DiGusXGK6X7cCcIgVCpZ2CiZ1Q7jl6ZxNV70=
github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak=
github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M=
github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY=
github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
github.com/hashicorp/go-uuid v0.0.0-20180228145832-27454136f036/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huandu/xstrings v1.3.1 h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs=
github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE=
github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0=
github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/influxdata/tdigest v0.0.1 h1:XpFptwYmnEKUqmkcDjrzffswZ3nvNeevbUSLPP/ZzIY=
github.com/influxdata/tdigest v0.0.1/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y=
github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI=
github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0=
github.com/iris-contrib/i18n v0.0.0-20171121225848-987a633949d0/go.mod h1:pMCz62A0xJL6I+umB2YTlFRwWXaDFA0jy+5HzGiJjqI=
github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw=
github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
github.com/jcmturner/gofork v0.0.0-20180107083740-2aebee971930/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o=
github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJzodkA=
github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc=
github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/jedib0t/go-pretty/v6 v6.2.2 h1:o3McN0rQ4X+IU+HduppSp9TwRdGLRW2rhJXy9CJaCRw=
github.com/jedib0t/go-pretty/v6 v6.2.2/go.mod h1:+nE9fyyHGil+PuISTCrp7avEdo6bqoMwqZnuiK2r2a0=
github.com/jellydator/ttlcache/v3 v3.0.1 h1:cHgCSMS7TdQcoprXnWUptJZzyFsqs18Lt8VVhRuZYVU=
github.com/jellydator/ttlcache/v3 v3.0.1/go.mod h1:WwTaEmcXQ3MTjOm4bsZoDFiCu/hMvNWLO1w67RXz6h4=
github.com/jfcg/opt v0.3.1 h1:6zgKvv3fR5OlX2nxUYJC4wtosY30N4vypILgXmRNr34=
github.com/jfcg/opt v0.3.1/go.mod h1:3ZUYQhiqKM6vVjMRYV1fVZ9a91EQ47b5kg7KsnfRClk=
github.com/jfcg/rng v1.0.4 h1:wCAgNN4UaNAL7pMHNkXjHzPuNkNmvVa0vzk5ntYl9gY=
github.com/jfcg/rng v1.0.4/go.mod h1:Il7SBjGd15fCUKgoKrz1ULfeBemBqS3HbUqRIcNGLvE=
github.com/jfcg/sixb v1.3.8 h1:BKPp/mIFCkKnnqhbgasI4wO/BYas6NHNcUCowUfTzSI=
github.com/jfcg/sixb v1.3.8/go.mod h1:UWrAr1q9s7pSPPqZNccmQM4N75p8GvuBYdFuq+09Qns=
github.com/jfcg/sorty/v2 v2.1.0 h1:EjrVSL3cDRxBt/ehiYCIv10F7YHYbTzEmdv7WbkkN1k=
github.com/jfcg/sorty/v2 v2.1.0/go.mod h1:JpcSKlmtGOOAGyTdWN2ErjvxeMSJVYBsylAKepIxmNg=
github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs=
github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/johannesboyne/gofakes3 v0.0.0-20230506070712-04da935ef877 h1:O7syWuYGzre3s73s+NkgB8e0ZvsIVhT/zxNU7V1gHK8=
github.com/johannesboyne/gofakes3 v0.0.0-20230506070712-04da935ef877/go.mod h1:AxgWC4DDX54O2WDoQO1Ceabtn6IbktjU/7bigor+66g=
github.com/joho/sqltocsv v0.0.0-20210428211105-a6d6801d59df h1:Zrb0IbuLOGHL7nrO2WrcuNWgDTlzFv3zY69QMx4ggQE=
github.com/joho/sqltocsv v0.0.0-20210428211105-a6d6801d59df/go.mod h1:mAVCUAYtW9NG31eB30umMSLKcDt6mCUWSjoSn5qBh0k=
github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q=
github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA=
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk=
github.com/kataras/iris/v12 v12.0.1/go.mod h1:udK4vLQKkdDqMGJJVd/msuMtN6hpYJhg/lSzuxjhO+U=
github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6im82hfqw=
github.com/kataras/pio v0.0.0-20190103105442-ea782b38602d/go.mod h1:NV88laa9UiiDuX9AhMbDPkGYSPugBOV6yTZB1l2K9Z0=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/errcheck v1.6.3 h1:dEKh+GLHcWm2oN34nMvDzn1sqI0i0WxPvrgiJA5JuM8=
github.com/kisielk/errcheck v1.6.3/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.10.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.17.1 h1:NE3C767s2ak2bweCZo3+rdP4U/HoyVXLv/X9f2gPS5g=
github.com/klauspost/compress v1.17.1/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s=
github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/ks3sdklib/aws-sdk-go v1.2.9 h1:Eg0fM56r4Gjp9PiK1Bg9agJUxCAWCk236qq9DItfLcw=
github.com/ks3sdklib/aws-sdk-go v1.2.9/go.mod h1:xBNbOrxSnd36AQpZ8o99mGGu+blblUd9rI0MKGmeufo=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ=
github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA=
github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g=
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
github.com/lestrrat-go/blackmagic v1.0.2 h1:Cg2gVSc9h7sz9NOByczrbUvLopQmXrfFx//N+AkAr5k=
github.com/lestrrat-go/blackmagic v1.0.2/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU=
github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE=
github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=
github.com/lestrrat-go/httprc v1.0.4 h1:bAZymwoZQb+Oq8MEbyipag7iSq6YIga8Wj6GOiJGdI8=
github.com/lestrrat-go/httprc v1.0.4/go.mod h1:mwwz3JMTPBjHUkkDv/IGJ39aALInZLrhBp0X7KGUZlo=
github.com/lestrrat-go/iter v1.0.2 h1:gMXo1q4c2pHmC3dn8LzRhJfP1ceCbgSiT9lUydIzltI=
github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4=
github.com/lestrrat-go/jwx/v2 v2.0.17 h1:+WavkdKVWO90ECnIzUetOnjY+kcqqw4WXEUmil7sMCE=
github.com/lestrrat-go/jwx/v2 v2.0.17/go.mod h1:G8randPHLGAqhcNCqtt6/V/7E6fvJRl3Sf9z777eTQ0=
github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU=
github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
github.com/lufia/plan9stats v0.0.0-20230326075908-cb1d2100619a h1:N9zuLhTvBSRt0gWSiJswwQ2HqDmtX/ZCDJURnKUt1Ik=
github.com/lufia/plan9stats v0.0.0-20230326075908-cb1d2100619a/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg=
github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ=
github.com/mgechev/revive v1.3.4 h1:k/tO3XTaWY4DEHal9tWBkkUMJYO/dLDVyMmAQxmIMDc=
github.com/mgechev/revive v1.3.4/go.mod h1:W+pZCMu9qj8Uhfs1iJMQsEFLRozUfvwFwqVvRbSNLVw=
github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE=
github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM=
github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA=
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8=
github.com/ncw/directio v1.0.4/go.mod h1:CKGdcN7StAaqjT7Qack3lAXeX4pjnyc46YeqZH1yWVY=
github.com/ncw/directio v1.0.5 h1:JSUBhdjEvVaJvOoyPAbcW0fnd0tvRXD76wEfZ1KcQz4=
github.com/ncw/directio v1.0.5/go.mod h1:rX/pKEYkOXBGOggmcyJeJGloCkleSvphPx2eV3t6ROk=
github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7 h1:7KAv7KMGTTqSmYZtNdcNTgsos+vFzULLwyElndwn+5c=
github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7/go.mod h1:iWMfgwqYW+e8n5lC/jjNEhwcjbRDpl5NT7n2h+4UNcI=
github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef h1:K0Fn+DoFqNqktdZtdV3bPQ/0cuYh2H4rkg0tytX/07k=
github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef/go.mod h1:7WjlapSfwQyo6LNmIvEWzsW1hbBQfpUO4JWnuQRmva8=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk=
github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q=
github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
github.com/opentracing/basictracer-go v1.0.0 h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7lZWlQw5UXuoo=
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k=
github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
github.com/otiai10/mint v1.3.1 h1:BCmzIS3n71sGfHB5NMNDB3lHYPz8fWSkCAErHed//qc=
github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
github.com/pborman/getopt v0.0.0-20180729010549-6fdd0a2c7117/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
github.com/petermattis/goid v0.0.0-20231207134359-e60b3f734c67 h1:jik8PHtAIsPlCRJjJzl4udgEf7hawInF9texMeO2jrU=
github.com/petermattis/goid v0.0.0-20231207134359-e60b3f734c67/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc=
github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pingcap/badger v1.5.1-0.20230103063557-828f39b09b6d h1:AEcvKyVM8CUII3bYzgz8haFXtGiqcrtXW1csu/5UELY=
github.com/pingcap/badger v1.5.1-0.20230103063557-828f39b09b6d/go.mod h1:p8QnkZnmyV8L/M/jzYb8rT7kv3bz9m7bn1Ju94wDifs=
github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pingcap/errors v0.11.5-0.20231212100244-799fae176cfb h1:yqyP+k0mgRPpXJQDOCrtaG2YZym0ZDD+vt5JzlBUkrw=
github.com/pingcap/errors v0.11.5-0.20231212100244-799fae176cfb/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg=
github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c h1:CgbKAHto5CQgWM9fSBIvaxsJHuGP0uM74HXtv3MyyGQ=
github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew=
github.com/pingcap/fn v1.0.0 h1:CyA6AxcOZkQh52wIqYlAmaVmF6EvrcqFywP463pjA8g=
github.com/pingcap/fn v1.0.0/go.mod h1:u9WZ1ZiOD1RpNhcI42RucFh/lBuzTu6rw88a+oF2Z24=
github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN8dIUmo4Be2+pMRb6f55i+UIYrluu2E=
github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw=
github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w=
github.com/pingcap/kvproto v0.0.0-20240109063850-932639606bcf h1:n3FMveYjc2VuETjo6YhmsgkDx0P/yLJTvk96BJdCq6Y=
github.com/pingcap/kvproto v0.0.0-20240109063850-932639606bcf/go.mod h1:rXxWk2UnwfUhLXha1jxRWPADw9eMZGWEWCg92Tgmb/8=
github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7/go.mod h1:8AanEdAHATuRurdGxZXBz0At+9avep+ub7U1AGYLIMM=
github.com/pingcap/log v1.1.0/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4=
github.com/pingcap/log v1.1.1-0.20230317032135-a0d097d16e22 h1:2SOzvGvE8beiC1Y4g9Onkvu6UmuBBOeWRGQEjJaT/JY=
github.com/pingcap/log v1.1.1-0.20230317032135-a0d097d16e22/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4=
github.com/pingcap/sysutil v1.0.1-0.20230407040306-fb007c5aff21 h1:QV6jqlfOkh8hqvEAgwBZa+4bSgO0EeKC7s5c6Luam2I=
github.com/pingcap/sysutil v1.0.1-0.20230407040306-fb007c5aff21/go.mod h1:QYnjfA95ZaMefyl1NO8oPtKeb8pYUdnDVhQgf+qdpjM=
github.com/pingcap/tipb v0.0.0-20230919054518-dfd7d194838f h1:NCiI4Wyu4GkViLGTu6cYcxt79LZ1SenBBQX1OwEV6Jg=
github.com/pingcap/tipb v0.0.0-20230919054518-dfd7d194838f/go.mod h1:A7mrd7WHBl1o63LE2bIBGEJMTNWXqhgmYiOvMLxozfs=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE=
github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b h1:0LFwY6Q3gMACTjAbMZBjXAqTOzOwFaj2Ld6cjeQ7Rig=
github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y=
github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/prometheus v0.48.1 h1:CTszphSNTXkuCG6O0IfpKdHcJkvvnAAE1GbELKS+NFk=
github.com/prometheus/prometheus v0.48.1/go.mod h1:SRw624aMAxTfryAcP8rOjg4S/sHHaetx2lyJJ2nM83g=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0=
github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM=
github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500 h1:WnNuhiq+FOY3jNj6JXFT+eLN3CQ/oPIsDPRanvwsmbI=
github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500/go.mod h1:+njLrG5wSeoG4Ds61rFgEzKvenR2UHbjMoDHsczxly0=
github.com/shirou/gopsutil/v3 v3.21.12/go.mod h1:BToYZVTlSVlfazpDDYFnsVZLaoRG+g8ufT6fPQLdJzA=
github.com/shirou/gopsutil/v3 v3.23.10 h1:/N42opWlYzegYaVkWejXWJpbzKv2JDy3mrgGzKsh9hM=
github.com/shirou/gopsutil/v3 v3.23.10/go.mod h1:JIE26kpucQi+innVlAUnIEOSBhBUkirr5b44yr55+WE=
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c h1:aqg5Vm5dwtvL+YgDpBcK1ITf3o96N/K7/wsRXQnUTEs=
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c/go.mod h1:owqhoLW1qZoYLZzLnBw+QkPP9WZnjlSWihhxAJC1+/M=
github.com/shurcooL/httpgzip v0.0.0-20190720172056-320755c1c1b0 h1:mj/nMDAwTBiaCqMEs4cYCqF7pO6Np7vhy1D1wcQGz+E=
github.com/shurcooL/httpgzip v0.0.0-20190720172056-320755c1c1b0/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd h1:ug7PpSOB5RBPK1Kg6qskGBoP3Vnj/aNYFTznWvlkGo0=
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0 h1:IJ3DuWHPTJrsqtIqjfdmPTELdTFGefvrOa2eTeRBleQ=
github.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:V952P4GGl1v/MMynLwxVdWEbSZJx+n0oOO3ljnez+WU=
github.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67 h1:8ZnTA26bBOoPkAbbitKPgNlpw0Bwt7ZlpYgZWHWJR/w=
github.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67/go.mod h1:tNZjgbYncKL5HxvDULAr/mWDmFz4B7H8yrXEDlnoIiw=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spkg/bom v1.0.0 h1:S939THe0ukL5WcTGiGqkgtaW5JW+O6ITaIlpJXTYY64=
github.com/spkg/bom v1.0.0/go.mod h1:lAz2VbTuYNcvs7iaFF8WW0ufXrHShJ7ck1fYFFbVXJs=
github.com/stathat/consistent v1.0.0 h1:ZFJ1QTRn8npNBKW065raSZ8xfOqhpb8vLOkfp4CcL/U=
github.com/stathat/consistent v1.0.0/go.mod h1:uajTPbgSygZBJ+V+0mY7meZ8i0XAcZs7AQ6V121XSxw=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/tdakkota/asciicheck v0.2.0 h1:o8jvnUANo0qXtnslk2d3nMKTFNlOnJjRrNcj0j9qkHM=
github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg=
github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA=
github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0=
github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag=
github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY=
github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 h1:mbAskLJ0oJfDRtkanvQPiooDH8HvJ2FBh+iKT/OmiQQ=
github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfKggNGDuadAa0LElHrByyrz4JPZ9fFx6Gs7nx7ZZU=
github.com/tiancaiamao/gp v0.0.0-20221230034425-4025bc8a4d4a h1:J/YdBZ46WKpXsxsW93SG+q0F8KI+yFrcIDT4c/RNoc4=
github.com/tiancaiamao/gp v0.0.0-20221230034425-4025bc8a4d4a/go.mod h1:h4xBhSNtOeEosLJ4P7JyKXX7Cabg7AVkWCK5gV2vOrM=
github.com/tikv/client-go/v2 v2.0.8-0.20240123055405-3480b5ed7ce1 h1:y0of+EkoUDL1N1k6JItuA0ZMMrDU71vKhQ2p+pA0Fw4=
github.com/tikv/client-go/v2 v2.0.8-0.20240123055405-3480b5ed7ce1/go.mod h1:byff6zglNXgereADRRJmKQnurwy1Z9hthX2I5ObKMNE=
github.com/tikv/pd/client v0.0.0-20240109100024-dd8df25316e9 h1:LnNWRdtxryzxl31GmxOJEFKUmwiG8nph9f5Wqdv8olY=
github.com/tikv/pd/client v0.0.0-20240109100024-dd8df25316e9/go.mod h1:ZilHJZR8wgqENRi26gtnPoKIXAB1EqytFweUhzxetx0=
github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 h1:quvGphlmUVU+nhpFa4gg4yJyTRJ13reZMDHrKwYw53M=
github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ=
github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs=
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/twmb/murmur3 v1.1.6 h1:mqrRot1BRxm+Yct+vavLMou2/iJt0tNVTTC0QoIjaZg=
github.com/twmb/murmur3 v1.1.6/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
github.com/uber/jaeger-client-go v2.22.1+incompatible h1:NHcubEkVbahf9t3p75TOCR83gdUHXjRJvjoBh1yACsM=
github.com/uber/jaeger-client-go v2.22.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg=
github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
github.com/vbauerster/mpb/v7 v7.5.3 h1:BkGfmb6nMrrBQDFECR/Q7RkKCw7ylMetCb4079CGs4w=
github.com/vbauerster/mpb/v7 v7.5.3/go.mod h1:i+h4QY6lmLvBNK2ah1fSreiw3ajskRlBp9AhY/PnuOE=
github.com/wangjohn/quickselect v0.0.0-20161129230411-ed8402a42d5f h1:9DDCDwOyEy/gId+IEMrFHLuQ5R/WV0KNxWLler8X2OY=
github.com/wangjohn/quickselect v0.0.0-20161129230411-ed8402a42d5f/go.mod h1:8sdOQnirw1PrcnTJYkmW1iOHtUmblMmGdUOHyWYycLI=
github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xitongsys/parquet-go v1.5.1/go.mod h1:xUxwM8ELydxh4edHGegYq1pA8NnMKDx0K/GyB0o2bww=
github.com/xitongsys/parquet-go v1.5.5-0.20201110004701-b09c49d6d457 h1:tBbuFCtyJNKT+BFAv6qjvTFpVdy97IYNaBwGUXifIUs=
github.com/xitongsys/parquet-go v1.5.5-0.20201110004701-b09c49d6d457/go.mod h1:pheqtXeHQFzxJk45lRQ0UIGIivKnLXvialZSFWs81A8=
github.com/xitongsys/parquet-go-source v0.0.0-20190524061010-2b72cbee77d5/go.mod h1:xxCx7Wpym/3QCo6JhujJX51dzSXrwmb0oH6FQb39SEA=
github.com/xitongsys/parquet-go-source v0.0.0-20200817004010-026bad9b25d0 h1:a742S4V5A15F93smuVxA60LQWsrCnN8bKeWDBARU1/k=
github.com/xitongsys/parquet-go-source v0.0.0-20200817004010-026bad9b25d0/go.mod h1:HYhIKsdns7xz80OgkbgJYrtQY7FjHWHKH6cvN7+czGE=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI=
github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k=
go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI=
go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0=
go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U=
go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4=
go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA=
go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao=
go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc=
go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM=
go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs=
go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA=
go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc=
go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg=
go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo=
go.etcd.io/etcd/tests/v3 v3.5.10 h1:F1pbXwKxwZ58aBT2+CSL/r8WUCAVhob0y1y8OVJ204s=
go.etcd.io/etcd/tests/v3 v3.5.10/go.mod h1:vVMWDv9OhopxfJCd+CMI4pih0zUDqlkJj6JcBNlUVXI=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo=
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I=
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
go.starlark.net v0.0.0-20210223155950-e043a3d3c984/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220518034528-6f7dac969898/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833 h1:jWGQJV4niP+CCmFW9ekjA9Zx8vYORzOUH2/Nl5WPuLQ=
golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210427231257-85d9c07bbe3a/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220517181318-183a9ca12b87/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180816055513-1c9583448a9c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210909193231-528a39cd75f3/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220517195934-5e4e11fc645e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220909162455-aba9fc2a8ff2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190829051458-42f498d34c4d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20201125231158-b5590deeca9b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU=
golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.8.2 h1:CCXrcPKiGGotvnN6jfUsKk4rRqm7q09/YbKb5xCEvtM=
gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.156.0 h1:yloYcGbBtVYjLKQe4enCunxvwn3s2w/XPrrhVf6MsvQ=
google.golang.org/api v0.156.0/go.mod h1:bUSmn4KFO0Q+69zo9CNIDp4Psi6BqM0np0CbzKRSiSY=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181004005441-af9cb2a35e7f/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20240108191215-35c7eff3a6b1 h1:/IWabOtPziuXTEtI1KYCpM6Ss7vaAkeMxk+uXV/xvZs=
google.golang.org/genproto v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:+Rvu7ElI+aLzyDQhpHMFMMltsD6m7nqpuWDd2CwJw3k=
google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1 h1:OPXtXn7fNMaXwO3JvOmF1QyTc00jsSFFz1vXXBOdCDo=
google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:B5xPO//w8qmBDjGReYLpR6UJPnkldGkCSMoH/2vxJeg=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 h1:gphdwh0npgs8elJ4T6J+DQJHPVF7RsuJHCfwztUb4J4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA=
google.golang.org/grpc v0.0.0-20180607172857-7a6a684ca69e/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU=
google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
google.golang.org/grpc/examples v0.0.0-20231221225426-4f03f3ff32c9 h1:ATnmU8nL2NfIyTSiBvJVDIDIr3qBmeW+c7z7XU21eWs=
google.golang.org/grpc/examples v0.0.0-20231221225426-4f03f3ff32c9/go.mod h1:j5uROIAAgi3YmtiETMt1LW0d/lHqQ7wwrIY4uGRXLQ4=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
gopkg.in/jcmturner/gokrb5.v7 v7.3.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20220512140231-539c8e751b99/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.4.6 h1:oFEHCKeID7to/3autwsWfnuv69j3NsfcXbvJKuIcep8=
honnef.co/go/tools v0.4.6/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0=
k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw=
k8s.io/api v0.28.2/go.mod h1:RVnJBsjU8tcMq7C3iaRSGMeaKt2TWEUXcpIt/90fjEg=
k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ=
k8s.io/apimachinery v0.28.2/go.mod h1:RdzF87y/ngqk9H4z3EL2Rppv5jj95vGS/HaFXrLDApU=
k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc=
k8s.io/utils v0.0.0-20230711102312-30195339c3c7/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk=
sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
stathat.com/c/consistent v1.0.0 h1:ezyc51EGcRPJUxfHGSgJjWzJdj3NiMU9pNfLNGiXV0c=
stathat.com/c/consistent v1.0.0/go.mod h1:QkzMWzcbB+yQBL2AttO6sgsQS/JSTapcDISJalmCDS0=
| go.sum | 1 | https://github.com/pingcap/tidb/commit/eca5f209faabe9a64fccfb3e1588b7a60ef4d88a | [
0.04918360337615013,
0.049183595925569534,
0.049183525145053864,
0.04918360337615013,
2.7966983395799616e-8
] |
{
"id": 9,
"code_window": [
"golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=\n",
"golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=\n",
"golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=\n",
"golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=\n",
"golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=\n",
"golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=\n",
"golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=\n",
"golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE=\n",
"golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=\n",
"golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\n",
"golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\n",
"golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "go.sum",
"type": "replace",
"edit_start_line_idx": 1221
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package distsql
import (
"testing"
"github.com/pingcap/tidb/pkg/domain/resourcegroup"
"github.com/pingcap/tidb/pkg/kv"
"github.com/pingcap/tidb/pkg/sessionctx/stmtctx"
"github.com/pingcap/tidb/pkg/sessionctx/variable"
"github.com/pingcap/tidb/pkg/tablecodec"
"github.com/pingcap/tidb/pkg/types"
"github.com/pingcap/tidb/pkg/util/codec"
"github.com/pingcap/tidb/pkg/util/collate"
"github.com/pingcap/tidb/pkg/util/memory"
"github.com/pingcap/tidb/pkg/util/paging"
"github.com/pingcap/tidb/pkg/util/ranger"
"github.com/pingcap/tipb/go-tipb"
"github.com/stretchr/testify/require"
)
// handleRange describes an inclusive range [start, end] of integer row
// handles. The tests below use it to state the expected boundaries of the
// key ranges produced from a sorted handle list.
type handleRange struct {
	start int64 // first handle covered by the range (inclusive)
	end   int64 // last handle covered by the range (inclusive)
}
// TestTableHandlesToKVRanges checks that a sorted list of integer handles is
// collapsed into the minimal set of key ranges, together with a per-range
// hint giving how many handles each range covers.
func TestTableHandlesToKVRanges(t *testing.T) {
	handles := []kv.Handle{
		kv.IntHandle(0),
		kv.IntHandle(2),
		kv.IntHandle(3),
		kv.IntHandle(4),
		kv.IntHandle(5),
		kv.IntHandle(10),
		kv.IntHandle(11),
		kv.IntHandle(100),
		kv.IntHandle(9223372036854775806),
		kv.IntHandle(9223372036854775807),
	}

	// Runs of consecutive handles collapse into one [start, end] range.
	hrs := []*handleRange{
		{start: 0, end: 0},
		{start: 2, end: 5},
		{start: 10, end: 11},
		{start: 100, end: 100},
		{start: 9223372036854775806, end: 9223372036854775807},
	}
	expect := getExpectedRanges(1, hrs)

	actual, hints := TableHandlesToKVRanges(1, handles)

	require.Equal(t, len(expect), len(actual))
	// Each hint is the number of handles folded into the matching range.
	require.Equal(t, hints, []int{1, 4, 2, 1, 2})
	for i, got := range actual {
		require.Equal(t, expect[i].StartKey, got.StartKey)
		require.Equal(t, expect[i].EndKey, got.EndKey)
	}
}
// TestTableRangesToKVRanges verifies that logical ranger.Range values over a
// table's integer handles are encoded into the expected record-key ranges for
// table ID 13 (byte 0xd in the keys below). Exclusive bounds in the input are
// normalized away so the output ranges are half-open [StartKey, EndKey).
func TestTableRangesToKVRanges(t *testing.T) {
	ranges := []*ranger.Range{
		{
			LowVal: []types.Datum{types.NewIntDatum(1)},
			HighVal: []types.Datum{types.NewIntDatum(2)},
			Collators: collate.GetBinaryCollatorSlice(1),
		},
		{
			LowVal: []types.Datum{types.NewIntDatum(2)},
			HighVal: []types.Datum{types.NewIntDatum(4)},
			LowExclude: true,
			HighExclude: true,
			Collators: collate.GetBinaryCollatorSlice(1),
		},
		{
			LowVal: []types.Datum{types.NewIntDatum(4)},
			HighVal: []types.Datum{types.NewIntDatum(19)},
			HighExclude: true,
			Collators: collate.GetBinaryCollatorSlice(1),
		},
		{
			LowVal: []types.Datum{types.NewIntDatum(19)},
			HighVal: []types.Datum{types.NewIntDatum(32)},
			LowExclude: true,
			Collators: collate.GetBinaryCollatorSlice(1),
		},
		{
			LowVal: []types.Datum{types.NewIntDatum(34)},
			HighVal: []types.Datum{types.NewIntDatum(34)},
			LowExclude: true,
			Collators: collate.GetBinaryCollatorSlice(1),
		},
	}
	actual := TableRangesToKVRanges(13, ranges)
	// Expected keys follow the t{tableID}_r{handle} record-key layout.
	expect := []kv.KeyRange{
		{
			StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
			EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
		},
		{
			StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
			EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
		},
		{
			StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
			EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x13},
		},
		{
			StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14},
			EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21},
		},
		{
			// A fully-excluded point range (34, 34] becomes an empty range.
			StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
			EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
		},
	}
	for i := 0; i < len(expect); i++ {
		require.Equal(t, expect[i], actual[i])
	}
}
// TestIndexRangesToKVRanges verifies that logical ranger.Range values over an
// index column are encoded into index-key ranges for table ID 12 (0xc) and
// index ID 15 (0xf), following the t{tableID}_i{indexID}{encodedValue} layout.
func TestIndexRangesToKVRanges(t *testing.T) {
	ranges := []*ranger.Range{
		{
			LowVal: []types.Datum{types.NewIntDatum(1)},
			HighVal: []types.Datum{types.NewIntDatum(2)},
			Collators: collate.GetBinaryCollatorSlice(1),
		},
		{
			LowVal: []types.Datum{types.NewIntDatum(2)},
			HighVal: []types.Datum{types.NewIntDatum(4)},
			LowExclude: true,
			HighExclude: true,
			Collators: collate.GetBinaryCollatorSlice(1),
		},
		{
			LowVal: []types.Datum{types.NewIntDatum(4)},
			HighVal: []types.Datum{types.NewIntDatum(19)},
			HighExclude: true,
			Collators: collate.GetBinaryCollatorSlice(1),
		},
		{
			LowVal: []types.Datum{types.NewIntDatum(19)},
			HighVal: []types.Datum{types.NewIntDatum(32)},
			LowExclude: true,
			Collators: collate.GetBinaryCollatorSlice(1),
		},
		{
			LowVal: []types.Datum{types.NewIntDatum(34)},
			HighVal: []types.Datum{types.NewIntDatum(34)},
			LowExclude: true,
			Collators: collate.GetBinaryCollatorSlice(1),
		},
	}
	expect := []kv.KeyRange{
		{
			StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
			EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
		},
		{
			StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
			EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
		},
		{
			StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
			EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x13},
		},
		{
			StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14},
			EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21},
		},
		{
			// A fully-excluded point range (34, 34] becomes an empty range.
			StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
			EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
		},
	}
	actual, err := IndexRangesToKVRanges(stmtctx.NewStmtCtx(), 12, 15, ranges)
	require.NoError(t, err)
	// Compare only the first partition: this test uses a non-partitioned table.
	for i := range actual.FirstPartitionRange() {
		require.Equal(t, expect[i], actual.FirstPartitionRange()[i])
	}
}
// TestRequestBuilder1 builds a coprocessor kv.Request from handle (record)
// ranges via RequestBuilder and compares the whole request — key ranges,
// concurrency, replica-read settings, paging sizes — against a golden value.
func TestRequestBuilder1(t *testing.T) {
	ranges := []*ranger.Range{
		{
			LowVal: []types.Datum{types.NewIntDatum(1)},
			HighVal: []types.Datum{types.NewIntDatum(2)},
			Collators: collate.GetBinaryCollatorSlice(1),
		},
		{
			LowVal: []types.Datum{types.NewIntDatum(2)},
			HighVal: []types.Datum{types.NewIntDatum(4)},
			LowExclude: true,
			HighExclude: true,
			Collators: collate.GetBinaryCollatorSlice(1),
		},
		{
			LowVal: []types.Datum{types.NewIntDatum(4)},
			HighVal: []types.Datum{types.NewIntDatum(19)},
			HighExclude: true,
			Collators: collate.GetBinaryCollatorSlice(1),
		},
		{
			LowVal: []types.Datum{types.NewIntDatum(19)},
			HighVal: []types.Datum{types.NewIntDatum(32)},
			LowExclude: true,
			Collators: collate.GetBinaryCollatorSlice(1),
		},
		{
			LowVal: []types.Datum{types.NewIntDatum(34)},
			HighVal: []types.Datum{types.NewIntDatum(34)},
			LowExclude: true,
			Collators: collate.GetBinaryCollatorSlice(1),
		},
	}
	// Build a request over table ID 12 record keys with default session vars.
	actual, err := (&RequestBuilder{}).SetHandleRanges(nil, 12, false, ranges).
		SetDAGRequest(&tipb.DAGRequest{}).
		SetDesc(false).
		SetKeepOrder(false).
		SetFromSessionVars(variable.NewSessionVars(nil)).
		Build()
	require.NoError(t, err)
	expect := &kv.Request{
		Tp: 103,
		StartTs: 0x0,
		Data: []uint8{0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0},
		KeyRanges: kv.NewNonParitionedKeyRanges([]kv.KeyRange{
			{
				StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
				EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
			},
			{
				StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
				EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
			},
			{
				StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
				EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x13},
			},
			{
				StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14},
				EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21},
			},
			{
				StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
				EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
			},
		}),
		Cacheable: true,
		KeepOrder: false,
		Desc: false,
		Concurrency: variable.DefDistSQLScanConcurrency,
		IsolationLevel: 0,
		Priority: 0,
		NotFillCache: false,
		ReplicaRead: kv.ReplicaReadLeader,
		ReadReplicaScope: kv.GlobalReplicaScope,
		ResourceGroupName: resourcegroup.DefaultResourceGroupName,
	}
	// Paging bounds are filled from package defaults rather than the literal.
	expect.Paging.MinPagingSize = paging.MinPagingSize
	expect.Paging.MaxPagingSize = paging.MaxPagingSize
	// Func-valued field: clear it so require.Equal can compare the rest.
	actual.ResourceGroupTagger = nil
	require.Equal(t, expect, actual)
}
// TestRequestBuilder2 mirrors TestRequestBuilder1 but builds the request from
// index ranges (table ID 12, index ID 15), so the expected keys use the
// index-key prefix (0x69, "_i") instead of the record-key prefix (0x72, "_r").
func TestRequestBuilder2(t *testing.T) {
	ranges := []*ranger.Range{
		{
			LowVal: []types.Datum{types.NewIntDatum(1)},
			HighVal: []types.Datum{types.NewIntDatum(2)},
			Collators: collate.GetBinaryCollatorSlice(1),
		},
		{
			LowVal: []types.Datum{types.NewIntDatum(2)},
			HighVal: []types.Datum{types.NewIntDatum(4)},
			LowExclude: true,
			HighExclude: true,
			Collators: collate.GetBinaryCollatorSlice(1),
		},
		{
			LowVal: []types.Datum{types.NewIntDatum(4)},
			HighVal: []types.Datum{types.NewIntDatum(19)},
			HighExclude: true,
			Collators: collate.GetBinaryCollatorSlice(1),
		},
		{
			LowVal: []types.Datum{types.NewIntDatum(19)},
			HighVal: []types.Datum{types.NewIntDatum(32)},
			LowExclude: true,
			Collators: collate.GetBinaryCollatorSlice(1),
		},
		{
			LowVal: []types.Datum{types.NewIntDatum(34)},
			HighVal: []types.Datum{types.NewIntDatum(34)},
			LowExclude: true,
			Collators: collate.GetBinaryCollatorSlice(1),
		},
	}
	actual, err := (&RequestBuilder{}).SetIndexRanges(stmtctx.NewStmtCtx(), 12, 15, ranges).
		SetDAGRequest(&tipb.DAGRequest{}).
		SetDesc(false).
		SetKeepOrder(false).
		SetFromSessionVars(variable.NewSessionVars(nil)).
		Build()
	require.NoError(t, err)
	expect := &kv.Request{
		Tp: 103,
		StartTs: 0x0,
		Data: []uint8{0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0},
		KeyRanges: kv.NewNonParitionedKeyRanges([]kv.KeyRange{
			{
				StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
				EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
			},
			{
				StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
				EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
			},
			{
				StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
				EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x13},
			},
			{
				StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14},
				EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21},
			},
			{
				StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
				EndKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x23},
			},
		}),
		Cacheable: true,
		KeepOrder: false,
		Desc: false,
		Concurrency: variable.DefDistSQLScanConcurrency,
		IsolationLevel: 0,
		Priority: 0,
		NotFillCache: false,
		ReplicaRead: kv.ReplicaReadLeader,
		ReadReplicaScope: kv.GlobalReplicaScope,
		ResourceGroupName: resourcegroup.DefaultResourceGroupName,
	}
	// Paging bounds are filled from package defaults rather than the literal.
	expect.Paging.MinPagingSize = paging.MinPagingSize
	expect.Paging.MaxPagingSize = paging.MaxPagingSize
	// Func-valued field: clear it so require.Equal can compare the rest.
	actual.ResourceGroupTagger = nil
	require.Equal(t, expect, actual)
}
// TestRequestBuilder3 verifies that SetTableHandles merges consecutive integer
// handles into row-key ranges and records a per-range row-count hint
// ([]int{1, 4, 2, 1} below matches the four merged runs of handles).
func TestRequestBuilder3(t *testing.T) {
	handles := []kv.Handle{kv.IntHandle(0), kv.IntHandle(2), kv.IntHandle(3), kv.IntHandle(4),
		kv.IntHandle(5), kv.IntHandle(10), kv.IntHandle(11), kv.IntHandle(100)}
	actual, err := (&RequestBuilder{}).SetTableHandles(15, handles).
		SetDAGRequest(&tipb.DAGRequest{}).
		SetDesc(false).
		SetKeepOrder(false).
		SetFromSessionVars(variable.NewSessionVars(nil)).
		Build()
	require.NoError(t, err)
	expect := &kv.Request{
		Tp:      103,
		StartTs: 0x0,
		Data:    []uint8{0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0},
		KeyRanges: kv.NewNonParitionedKeyRangesWithHint([]kv.KeyRange{
			{
				StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
				EndKey:   kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
			},
			{
				StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
				EndKey:   kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6},
			},
			{
				StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa},
				EndKey:   kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc},
			},
			{
				StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x64},
				EndKey:   kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x65},
			},
		}, []int{1, 4, 2, 1}),
		Cacheable:         true,
		KeepOrder:         false,
		Desc:              false,
		Concurrency:       variable.DefDistSQLScanConcurrency,
		IsolationLevel:    0,
		Priority:          0,
		NotFillCache:      false,
		ReplicaRead:       kv.ReplicaReadLeader,
		ReadReplicaScope:  kv.GlobalReplicaScope,
		ResourceGroupName: resourcegroup.DefaultResourceGroupName,
	}
	expect.Paging.MinPagingSize = paging.MinPagingSize
	expect.Paging.MaxPagingSize = paging.MaxPagingSize
	// The tagger is a function value and cannot be compared with Equal.
	actual.ResourceGroupTagger = nil
	require.Equal(t, expect, actual)
}
// TestRequestBuilder4 verifies that SetKeyRanges passes caller-provided key
// ranges through unchanged into a coprocessor (DAG) request.
func TestRequestBuilder4(t *testing.T) {
	keyRanges := []kv.KeyRange{
		{
			StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
			EndKey:   kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
		},
		{
			StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
			EndKey:   kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6},
		},
		{
			StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa},
			EndKey:   kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc},
		},
		{
			StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x64},
			EndKey:   kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x65},
		},
	}
	actual, err := (&RequestBuilder{}).SetKeyRanges(keyRanges).
		SetDAGRequest(&tipb.DAGRequest{}).
		SetDesc(false).
		SetKeepOrder(false).
		SetFromSessionVars(variable.NewSessionVars(nil)).
		Build()
	require.NoError(t, err)
	expect := &kv.Request{
		Tp:                103,
		StartTs:           0x0,
		Data:              []uint8{0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0},
		KeyRanges:         kv.NewNonParitionedKeyRanges(keyRanges),
		Cacheable:         true,
		KeepOrder:         false,
		Desc:              false,
		Concurrency:       variable.DefDistSQLScanConcurrency,
		IsolationLevel:    0,
		Priority:          0,
		NotFillCache:      false,
		ReplicaRead:       kv.ReplicaReadLeader,
		ReadReplicaScope:  kv.GlobalReplicaScope,
		ResourceGroupName: resourcegroup.DefaultResourceGroupName,
	}
	expect.Paging.MinPagingSize = paging.MinPagingSize
	expect.Paging.MaxPagingSize = paging.MaxPagingSize
	// The tagger is a function value and cannot be compared with Equal.
	actual.ResourceGroupTagger = nil
	require.Equal(t, expect, actual)
}
// TestRequestBuilder5 verifies that SetAnalyzeRequest builds an analyze
// request (Tp 104) with low priority, no cache fill, and the requested
// isolation level and concurrency.
func TestRequestBuilder5(t *testing.T) {
	keyRanges := []kv.KeyRange{
		{
			StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
			EndKey:   kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
		},
		{
			StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
			EndKey:   kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6},
		},
		{
			StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa},
			EndKey:   kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc},
		},
		{
			StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x64},
			EndKey:   kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x65},
		},
	}
	actual, err := (&RequestBuilder{}).SetKeyRanges(keyRanges).
		SetAnalyzeRequest(&tipb.AnalyzeReq{}, kv.RC).
		SetKeepOrder(true).
		SetConcurrency(15).
		Build()
	require.NoError(t, err)
	expect := &kv.Request{
		Tp:               104,
		StartTs:          0x0,
		Data:             []uint8{0x8, 0x0, 0x18, 0x0, 0x20, 0x0},
		KeyRanges:        kv.NewNonParitionedKeyRanges(keyRanges),
		KeepOrder:        true,
		Desc:             false,
		Concurrency:      15,
		IsolationLevel:   kv.RC,
		Priority:         1,
		NotFillCache:     true,
		ReadReplicaScope: kv.GlobalReplicaScope,
	}
	require.Equal(t, expect, actual)
}
// TestRequestBuilder6 verifies that SetChecksumRequest builds a checksum
// request (Tp 105) that bypasses the block cache.
func TestRequestBuilder6(t *testing.T) {
	keyRanges := []kv.KeyRange{
		{
			StartKey: kv.Key{0x00, 0x01},
			EndKey:   kv.Key{0x02, 0x03},
		},
	}
	concurrency := 10
	actual, err := (&RequestBuilder{}).SetKeyRanges(keyRanges).
		SetChecksumRequest(&tipb.ChecksumRequest{}).
		SetConcurrency(concurrency).
		Build()
	require.NoError(t, err)
	expect := &kv.Request{
		Tp:               105,
		StartTs:          0x0,
		Data:             []uint8{0x10, 0x0, 0x18, 0x0},
		KeyRanges:        kv.NewNonParitionedKeyRanges(keyRanges),
		KeepOrder:        false,
		Desc:             false,
		Concurrency:      concurrency,
		IsolationLevel:   0,
		Priority:         0,
		NotFillCache:     true,
		ReadReplicaScope: kv.GlobalReplicaScope,
	}
	require.Equal(t, expect, actual)
}
// TestRequestBuilder7 checks that the replica-read type configured in the
// session variables is propagated verbatim into the built kv.Request.
func TestRequestBuilder7(t *testing.T) {
	for _, replicaRead := range []struct {
		replicaReadType kv.ReplicaReadType
		src             string
	}{
		{kv.ReplicaReadLeader, "Leader"},
		{kv.ReplicaReadFollower, "Follower"},
		{kv.ReplicaReadMixed, "Mixed"},
	} {
		// copy iterator variable into a new variable, see issue #27779
		replicaRead := replicaRead
		t.Run(replicaRead.src, func(t *testing.T) {
			vars := variable.NewSessionVars(nil)
			vars.SetReplicaRead(replicaRead.replicaReadType)
			concurrency := 10
			actual, err := (&RequestBuilder{}).
				SetFromSessionVars(vars).
				SetConcurrency(concurrency).
				Build()
			require.NoError(t, err)
			expect := &kv.Request{
				Tp:                0,
				StartTs:           0x0,
				KeepOrder:         false,
				KeyRanges:         kv.NewNonParitionedKeyRanges(nil),
				Desc:              false,
				Concurrency:       concurrency,
				IsolationLevel:    0,
				Priority:          0,
				NotFillCache:      false,
				ReplicaRead:       replicaRead.replicaReadType,
				ReadReplicaScope:  kv.GlobalReplicaScope,
				ResourceGroupName: resourcegroup.DefaultResourceGroupName,
			}
			expect.Paging.MinPagingSize = paging.MinPagingSize
			expect.Paging.MaxPagingSize = paging.MaxPagingSize
			// The tagger is a function value and cannot be compared with Equal.
			actual.ResourceGroupTagger = nil
			require.Equal(t, expect, actual)
		})
	}
}
// TestRequestBuilder8 verifies that a resource group name set in the
// statement context overrides the default group name in the built request.
func TestRequestBuilder8(t *testing.T) {
	sv := variable.NewSessionVars(nil)
	sv.StmtCtx.ResourceGroupName = "test"
	actual, err := (&RequestBuilder{}).
		SetFromSessionVars(sv).
		Build()
	require.NoError(t, err)
	expect := &kv.Request{
		Tp:                0,
		StartTs:           0x0,
		Data:              []uint8(nil),
		KeyRanges:         kv.NewNonParitionedKeyRanges(nil),
		Concurrency:       variable.DefDistSQLScanConcurrency,
		IsolationLevel:    0,
		Priority:          0,
		MemTracker:        (*memory.Tracker)(nil),
		SchemaVar:         0,
		ReadReplicaScope:  kv.GlobalReplicaScope,
		ResourceGroupName: "test",
	}
	expect.Paging.MinPagingSize = paging.MinPagingSize
	expect.Paging.MaxPagingSize = paging.MaxPagingSize
	// The tagger is a function value and cannot be compared with Equal.
	actual.ResourceGroupTagger = nil
	require.Equal(t, expect, actual)
}
// TestRequestBuilderTiKVClientReadTimeout verifies that the session-level
// tikv_client_read_timeout value is copied into the built request.
func TestRequestBuilderTiKVClientReadTimeout(t *testing.T) {
	sv := variable.NewSessionVars(nil)
	sv.TiKVClientReadTimeout = 100
	actual, err := (&RequestBuilder{}).
		SetFromSessionVars(sv).
		Build()
	require.NoError(t, err)
	expect := &kv.Request{
		Tp:                    0,
		StartTs:               0x0,
		Data:                  []uint8(nil),
		KeyRanges:             kv.NewNonParitionedKeyRanges(nil),
		Concurrency:           variable.DefDistSQLScanConcurrency,
		IsolationLevel:        0,
		Priority:              0,
		MemTracker:            (*memory.Tracker)(nil),
		SchemaVar:             0,
		ReadReplicaScope:      kv.GlobalReplicaScope,
		TiKVClientReadTimeout: 100,
		ResourceGroupName:     resourcegroup.DefaultResourceGroupName,
	}
	expect.Paging.MinPagingSize = paging.MinPagingSize
	expect.Paging.MaxPagingSize = paging.MaxPagingSize
	// The tagger is a function value and cannot be compared with Equal.
	actual.ResourceGroupTagger = nil
	require.Equal(t, expect, actual)
}
// TestTableRangesToKVRangesWithFbs checks converting the table range [1, 4]
// into a row-key range; the end key is PrefixNext of the encoded high value.
func TestTableRangesToKVRangesWithFbs(t *testing.T) {
	ranges := []*ranger.Range{
		{
			LowVal:    []types.Datum{types.NewIntDatum(1)},
			HighVal:   []types.Datum{types.NewIntDatum(4)},
			Collators: collate.GetBinaryCollatorSlice(1),
		},
	}
	actual := TableRangesToKVRanges(0, ranges)
	expect := []kv.KeyRange{
		{
			StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
			EndKey:   kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5},
		},
	}
	// Compare lengths first: the element-wise loop below would pass
	// vacuously if the conversion returned fewer ranges than expected.
	require.Equal(t, len(expect), len(actual))
	for i := 0; i < len(actual); i++ {
		require.Equal(t, expect[i], actual[i])
	}
}
// TestIndexRangesToKVRangesWithFbs checks converting the index range [1, 4]
// into an index-key range for index ID 0 of table ID 0.
func TestIndexRangesToKVRangesWithFbs(t *testing.T) {
	ranges := []*ranger.Range{
		{
			LowVal:    []types.Datum{types.NewIntDatum(1)},
			HighVal:   []types.Datum{types.NewIntDatum(4)},
			Collators: collate.GetBinaryCollatorSlice(1),
		},
	}
	actual, err := IndexRangesToKVRanges(stmtctx.NewStmtCtx(), 0, 0, ranges)
	require.NoError(t, err)
	expect := []kv.KeyRange{
		{
			StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
			EndKey:   kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5},
		},
	}
	// Fetch the partition ranges once, and compare lengths first: the
	// original element-wise loop would pass vacuously on an empty result.
	got := actual.FirstPartitionRange()
	require.Equal(t, len(expect), len(got))
	for i := 0; i < len(got); i++ {
		require.Equal(t, expect[i], got[i])
	}
}
// TestScanLimitConcurrency checks that a small pushed-down limit caps the
// scan concurrency at 1, while a large limit keeps the session default.
func TestScanLimitConcurrency(t *testing.T) {
	vars := variable.NewSessionVars(nil)
	for _, tt := range []struct {
		tp          tipb.ExecType
		limit       uint64
		concurrency int
		src         string
	}{
		{tipb.ExecType_TypeTableScan, 1, 1, "TblScan_Def"},
		{tipb.ExecType_TypeIndexScan, 1, 1, "IdxScan_Def"},
		{tipb.ExecType_TypeTableScan, 1000000, vars.Concurrency.DistSQLScanConcurrency(), "TblScan_SessionVars"},
		{tipb.ExecType_TypeIndexScan, 1000000, vars.Concurrency.DistSQLScanConcurrency(), "IdxScan_SessionVars"},
	} {
		// copy iterator variable into a new variable, see issue #27779
		tt := tt
		t.Run(tt.src, func(t *testing.T) {
			firstExec := &tipb.Executor{Tp: tt.tp}
			switch tt.tp {
			case tipb.ExecType_TypeTableScan:
				firstExec.TblScan = &tipb.TableScan{}
			case tipb.ExecType_TypeIndexScan:
				firstExec.IdxScan = &tipb.IndexScan{}
			}
			limitExec := &tipb.Executor{Tp: tipb.ExecType_TypeLimit, Limit: &tipb.Limit{Limit: tt.limit}}
			dag := &tipb.DAGRequest{Executors: []*tipb.Executor{firstExec, limitExec}}
			actual, err := (&RequestBuilder{}).
				SetDAGRequest(dag).
				SetFromSessionVars(vars).
				Build()
			require.NoError(t, err)
			require.Equal(t, tt.concurrency, actual.Concurrency)
			// require.Equal takes (t, expected, actual); the expected value
			// goes first so failure messages label the two sides correctly.
			require.Equal(t, tt.limit, actual.LimitSize)
		})
	}
}
// getExpectedRanges builds the expected row-key range of table tid for each
// inclusive handle range in hrs. The end key is PrefixNext of the encoded
// high handle so the range covers hr.end itself.
func getExpectedRanges(tid int64, hrs []*handleRange) []kv.KeyRange {
	ranges := make([]kv.KeyRange, 0, len(hrs))
	for _, hr := range hrs {
		startKey := tablecodec.EncodeRowKey(tid, codec.EncodeInt(nil, hr.start))
		endKey := tablecodec.EncodeRowKey(tid, kv.Key(codec.EncodeInt(nil, hr.end)).PrefixNext())
		ranges = append(ranges, kv.KeyRange{StartKey: startKey, EndKey: endKey})
	}
	return ranges
}
| pkg/distsql/request_builder_test.go | 0 | https://github.com/pingcap/tidb/commit/eca5f209faabe9a64fccfb3e1588b7a60ef4d88a | [
0.04918360337615013,
0.049183592200279236,
0.04918348789215088,
0.04918360337615013,
3.809060444837087e-8
] |
{
"id": 9,
"code_window": [
"golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=\n",
"golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=\n",
"golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=\n",
"golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=\n",
"golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=\n",
"golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=\n",
"golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=\n",
"golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE=\n",
"golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=\n",
"golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\n",
"golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\n",
"golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "go.sum",
"type": "replace",
"edit_start_line_idx": 1221
} | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package querywatch
import (
"testing"
"github.com/pingcap/tidb/pkg/config"
"github.com/pingcap/tidb/pkg/meta/autoid"
"github.com/pingcap/tidb/pkg/testkit/testsetup"
"github.com/tikv/client-go/v2/tikv"
"go.uber.org/goleak"
)
// TestMain bootstraps this package's tests: it tunes global config for a test
// environment, enables tikv failpoints, and verifies that no goroutines leak
// after the run (ignoring known long-lived background workers).
func TestMain(m *testing.M) {
	testsetup.SetupForCommonTest()
	// A larger autoid step reduces allocator round-trips during tests.
	autoid.SetStep(5000)
	config.UpdateGlobal(func(conf *config.Config) {
		conf.Instance.SlowThreshold = 30000 // 30s
		conf.TiKVClient.AsyncCommit.SafeWindow = 0
		conf.TiKVClient.AsyncCommit.AllowedClockDrift = 0
		conf.Experimental.AllowsExpressionIndex = true
	})
	tikv.EnableFailpoints()
	// Known benign background goroutines excluded from leak detection.
	opts := []goleak.Option{
		goleak.IgnoreTopFunction("github.com/golang/glog.(*fileSink).flushDaemon"),
		goleak.IgnoreTopFunction("github.com/bazelbuild/rules_go/go/tools/bzltestutil.RegisterTimeoutHandler.func1"),
		goleak.IgnoreTopFunction("github.com/lestrrat-go/httprc.runFetchWorker"),
		goleak.IgnoreTopFunction("go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop"),
		goleak.IgnoreTopFunction("gopkg.in/natefinch/lumberjack%2ev2.(*Logger).millRun"),
		goleak.IgnoreTopFunction("github.com/tikv/client-go/v2/txnkv/transaction.keepAlive"),
		goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"),
		goleak.IgnoreTopFunction("github.com/pingcap/tidb/pkg/ttl/ttlworker.(*ttlScanWorker).loop"),
		goleak.IgnoreTopFunction("github.com/pingcap/tidb/pkg/ttl/client.(*mockClient).WatchCommand.func1"),
		goleak.IgnoreTopFunction("github.com/pingcap/tidb/pkg/ttl/ttlworker.(*JobManager).jobLoop"),
	}
	goleak.VerifyTestMain(m, opts...)
}
| pkg/executor/internal/querywatch/main_test.go | 0 | https://github.com/pingcap/tidb/commit/eca5f209faabe9a64fccfb3e1588b7a60ef4d88a | [
0.049183476716279984,
0.049183476716279984,
0.049183476716279984,
0.049183476716279984,
0
] |
{
"id": 9,
"code_window": [
"golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=\n",
"golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=\n",
"golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=\n",
"golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=\n",
"golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=\n",
"golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=\n",
"golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=\n",
"golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE=\n",
"golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=\n",
"golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\n",
"golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\n",
"golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "go.sum",
"type": "replace",
"edit_start_line_idx": 1221
} | // Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package testutil
import (
"context"
"fmt"
"sync"
"testing"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/pkg/disttask/framework/proto"
"github.com/pingcap/tidb/pkg/testkit"
"github.com/stretchr/testify/require"
"github.com/tikv/client-go/v2/util"
"go.uber.org/mock/gomock"
)
// TestContext defines shared variables for disttask tests.
type TestContext struct {
	sync.RWMutex
	// subtasksHasRun maps "taskID/step" (see getTaskStepKey) to the set of
	// subtask IDs that have run. Guarded by the embedded RWMutex.
	subtasksHasRun map[string]map[int64]struct{}
	// CallTime counts invocations, used by plan error handling tests.
	CallTime int
}
// InitTestContext inits test context for disttask tests.
//
// It returns a background context tagged as internal "dispatcher" traffic, a
// gomock controller, a fresh shared TestContext, and an execution context
// with nodeNum mocked nodes. The CPU count is mocked to 8 via failpoint and
// restored on test cleanup.
func InitTestContext(t *testing.T, nodeNum int) (context.Context, *gomock.Controller, *TestContext, *testkit.DistExecutionContext) {
	ctrl := gomock.NewController(t)
	// Do NOT `defer ctrl.Finish()` here: that would verify and finish the
	// controller as soon as this helper returns, before the caller has had a
	// chance to use it. gomock.NewController(t) already registers Finish via
	// t.Cleanup, which runs at the end of the test.
	ctx := context.Background()
	ctx = util.WithInternalSourceType(ctx, "dispatcher")
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/util/cpu/mockNumCpu", "return(8)"))
	t.Cleanup(func() {
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/util/cpu/mockNumCpu"))
	})
	executionContext := testkit.NewDistExecutionContext(t, nodeNum)
	testCtx := &TestContext{
		subtasksHasRun: make(map[string]map[int64]struct{}),
	}
	return ctx, ctrl, testCtx, executionContext
}
// CollectSubtask records that the given subtask has run, keyed by its
// task ID and step.
func (c *TestContext) CollectSubtask(subtask *proto.Subtask) {
	c.Lock()
	defer c.Unlock()
	key := getTaskStepKey(subtask.TaskID, subtask.Step)
	if _, ok := c.subtasksHasRun[key]; !ok {
		c.subtasksHasRun[key] = make(map[int64]struct{})
	}
	c.subtasksHasRun[key][subtask.ID] = struct{}{}
}
// CollectedSubtaskCnt returns how many distinct subtasks have been collected
// for the given task and step.
func (c *TestContext) CollectedSubtaskCnt(taskID int64, step proto.Step) int {
	c.RLock()
	defer c.RUnlock()
	return len(c.subtasksHasRun[getTaskStepKey(taskID, step)])
}
// getTaskStepKey returns the map key identifying one (task, step) pair,
// formatted as "<taskID>/<step>".
func getTaskStepKey(id int64, step proto.Step) string {
	return fmt.Sprintf("%d/%d", id, step)
}
| pkg/disttask/framework/testutil/context.go | 0 | https://github.com/pingcap/tidb/commit/eca5f209faabe9a64fccfb3e1588b7a60ef4d88a | [
0.04918348789215088,
0.04918348789215088,
0.04918348789215088,
0.04918348789215088,
0
] |
{
"id": 1,
"code_window": [
"\t}\n",
"\n",
"\t// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html\n",
"\t// Ignore delete object errors while replying to client, since we are\n",
"\t// suppposed to reply only 204. Additionally log the error for\n",
"\t// investigation.\n",
"\tdeleteObject(ctx, objectAPI, api.CacheAPI(), bucket, object, r)\n",
"\twriteSuccessNoContent(w)\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tif err := deleteObject(ctx, objectAPI, api.CacheAPI(), bucket, object, r); err != nil {\n",
"\t\tswitch toAPIErrorCode(err) {\n",
"\t\tcase ErrNoSuchBucket:\n",
"\t\t\t// When bucket doesn't exist specially handle it.\n",
"\t\t\twriteErrorResponse(w, ErrNoSuchBucket, r.URL)\n",
"\t\t\treturn\n",
"\t\t}\n",
"\t\tlogger.LogIf(ctx, err)\n",
"\t\t// Ignore delete object errors while replying to client, since we are suppposed to reply only 204.\n",
"\t}\n"
],
"file_path": "cmd/object-handlers.go",
"type": "replace",
"edit_start_line_idx": 1012
} | /*
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"context"
"encoding/hex"
"io"
"net/http"
"path"
"strconv"
"strings"
"sync"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/mimedb"
)
// list all errors which can be ignored in object operations; these are
// treated as non-fatal when reducing per-disk errors to a quorum verdict.
var objectOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied)
// putObjectDir hints the bottom layer to create a new directory entry for
// the object, succeeding once writeQuorum many disks have created it.
func (xl xlObjects) putObjectDir(ctx context.Context, bucket, object string, writeQuorum int) error {
	disks := xl.getDisks()
	dirErrs := make([]error, len(disks))
	var wg sync.WaitGroup
	// Create the directory on every online disk in parallel.
	for index, disk := range disks {
		if disk == nil {
			continue
		}
		wg.Add(1)
		go func(index int, disk StorageAPI) {
			defer wg.Done()
			// An already-existing volume is not an error here.
			err := disk.MakeVol(pathJoin(bucket, object))
			if err != nil && err != errVolumeExists {
				dirErrs[index] = err
			}
		}(index, disk)
	}
	wg.Wait()
	return reduceWriteQuorumErrs(ctx, dirErrs, objectOpIgnoredErrs, writeQuorum)
}
// prepareFile hints the bottom layer to optimize the creation of a new
// object by pre-allocating its erasure shard on each online disk. Disks
// that fail the preparation are set to nil in onlineDisks so that later
// stages skip them.
func (xl xlObjects) prepareFile(ctx context.Context, bucket, object string, size int64, onlineDisks []StorageAPI, blockSize int64, dataBlocks, writeQuorum int) error {
	// Size of the per-disk erasure shard for this object.
	shardSize := getErasureShardFileSize(blockSize, size, dataBlocks)
	pErrs := make([]error, len(onlineDisks))
	for index := range onlineDisks {
		disk := onlineDisks[index]
		if disk == nil {
			continue
		}
		if err := disk.PrepareFile(bucket, object, shardSize); err != nil {
			// Save the error for quorum reduction and drop the disk.
			pErrs[index] = err
			onlineDisks[index] = nil
		}
	}
	return reduceWriteQuorumErrs(ctx, pErrs, objectOpIgnoredErrs, writeQuorum)
}
/// Object Operations
// CopyObject - copy object source object to destination object.
// if source object and destination object are same we only
// update metadata.
//
// For the metadata-only path the xl.json files are rewritten in place; for
// a real copy the source object is streamed through an io.Pipe into
// putObject so no full-object buffer is held in memory.
func (xl xlObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, e error) {
	cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))
	// Read metadata associated with the object from all disks.
	storageDisks := xl.getDisks()
	metaArr, errs := readAllXLMetadata(ctx, storageDisks, srcBucket, srcObject)
	// get Quorum for this object
	readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, xl, metaArr, errs)
	if err != nil {
		return oi, toObjectErr(err, srcBucket, srcObject)
	}
	if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
		return oi, toObjectErr(reducedErr, srcBucket, srcObject)
	}
	// List all online disks.
	_, modTime := listOnlineDisks(storageDisks, metaArr, errs)
	// Pick latest valid metadata.
	xlMeta, err := pickValidXLMeta(ctx, metaArr, modTime, readQuorum)
	if err != nil {
		return oi, toObjectErr(err, srcBucket, srcObject)
	}
	// Length of the file to read.
	length := xlMeta.Stat.Size
	// Check if this request is only metadata update.
	if cpSrcDstSame {
		// Update `xl.json` content on each disks.
		for index := range metaArr {
			metaArr[index].Meta = srcInfo.UserDefined
			metaArr[index].Meta["etag"] = srcInfo.ETag
		}
		var onlineDisks []StorageAPI
		tempObj := mustGetUUID()
		// Write unique `xl.json` for each disk.
		if onlineDisks, err = writeUniqueXLMetadata(ctx, storageDisks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil {
			return oi, toObjectErr(err, srcBucket, srcObject)
		}
		// Rename atomically `xl.json` from tmp location to destination for each disk.
		if _, err = renameXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempObj, srcBucket, srcObject, writeQuorum); err != nil {
			return oi, toObjectErr(err, srcBucket, srcObject)
		}
		return xlMeta.ToObjectInfo(srcBucket, srcObject), nil
	}
	// Initialize pipe. The goroutine below writes the source object into the
	// pipe; putObject consumes it through hashReader.
	pipeReader, pipeWriter := io.Pipe()
	go func() {
		var startOffset int64 // Read the whole file.
		if gerr := xl.getObject(ctx, srcBucket, srcObject, startOffset, length, pipeWriter, srcInfo.ETag, srcOpts); gerr != nil {
			pipeWriter.CloseWithError(toObjectErr(gerr, srcBucket, srcObject))
			return
		}
		pipeWriter.Close() // Close writer explicitly signaling we wrote all data.
	}()
	hashReader, err := hash.NewReader(pipeReader, length, "", "", length)
	if err != nil {
		logger.LogIf(ctx, err)
		return oi, toObjectErr(err, dstBucket, dstObject)
	}
	objInfo, err := xl.putObject(ctx, dstBucket, dstObject, hashReader, srcInfo.UserDefined, dstOpts)
	if err != nil {
		return oi, toObjectErr(err, dstBucket, dstObject)
	}
	// Explicitly close the reader.
	pipeReader.Close()
	return objInfo, nil
}
// GetObjectNInfo - returns object info and an object
// Read(Closer). When err != nil, the returned reader is always nil.
//
// A namespace lock of the requested lockType is acquired up front; the
// matching unlock function is threaded through nsUnlocker so that it fires
// either on the error paths below or when the returned reader is closed.
func (xl xlObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
	var nsUnlocker = func() {}
	// Acquire lock
	if lockType != noLock {
		lock := xl.nsMutex.NewNSLock(bucket, object)
		switch lockType {
		case writeLock:
			if err = lock.GetLock(globalObjectTimeout); err != nil {
				return nil, err
			}
			nsUnlocker = lock.Unlock
		case readLock:
			if err = lock.GetRLock(globalObjectTimeout); err != nil {
				return nil, err
			}
			nsUnlocker = lock.RUnlock
		}
	}
	if err = checkGetObjArgs(ctx, bucket, object); err != nil {
		nsUnlocker()
		return nil, err
	}
	// Handler directory request by returning a reader that
	// returns no bytes.
	if hasSuffix(object, slashSeparator) {
		if !xl.isObjectDir(bucket, object) {
			nsUnlocker()
			return nil, toObjectErr(errFileNotFound, bucket, object)
		}
		var objInfo ObjectInfo
		if objInfo, err = xl.getObjectInfoDir(ctx, bucket, object); err != nil {
			nsUnlocker()
			return nil, toObjectErr(err, bucket, object)
		}
		return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, nsUnlocker), nil
	}
	var objInfo ObjectInfo
	objInfo, err = xl.getObjectInfo(ctx, bucket, object)
	if err != nil {
		nsUnlocker()
		return nil, toObjectErr(err, bucket, object)
	}
	fn, off, length, nErr := NewGetObjectReader(rs, objInfo, nsUnlocker)
	if nErr != nil {
		return nil, nErr
	}
	// Stream the object through a pipe; the reader side is handed to the
	// caller wrapped in a GetObjectReader.
	pr, pw := io.Pipe()
	go func() {
		err := xl.getObject(ctx, bucket, object, off, length, pw, "", opts)
		pw.CloseWithError(err)
	}()
	// Cleanup function to cause the go routine above to exit, in
	// case of incomplete read.
	pipeCloser := func() { pr.Close() }
	return fn(pr, h, pipeCloser)
}
// GetObject - reads an object erasured coded across multiple
// disks. Supports additional parameters like offset and length
// which are synonymous with HTTP Range requests.
//
// startOffset indicates the starting read location of the object.
// length indicates the total length of the object.
func (xl xlObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error {
	// Hold a read lock on the object for the duration of the read, then
	// delegate the actual erasure-coded read to getObject.
	objectLock := xl.nsMutex.NewNSLock(bucket, object)
	if lockErr := objectLock.GetRLock(globalObjectTimeout); lockErr != nil {
		return lockErr
	}
	defer objectLock.RUnlock()
	return xl.getObject(ctx, bucket, object, startOffset, length, writer, etag, opts)
}
// getObject is the lock-free core of GetObject: it validates the arguments,
// establishes a read quorum over the per-disk xl.json metadata, then
// erasure-decodes the requested [startOffset, startOffset+length) byte range
// part by part into writer. Callers are expected to hold the object lock.
func (xl xlObjects) getObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error {
	if err := checkGetObjArgs(ctx, bucket, object); err != nil {
		return err
	}
	// Start offset cannot be negative.
	if startOffset < 0 {
		logger.LogIf(ctx, errUnexpected)
		return errUnexpected
	}
	// Writer cannot be nil.
	if writer == nil {
		logger.LogIf(ctx, errUnexpected)
		return errUnexpected
	}
	// If its a directory request, we return an empty body.
	if hasSuffix(object, slashSeparator) {
		_, err := writer.Write([]byte(""))
		logger.LogIf(ctx, err)
		return toObjectErr(err, bucket, object)
	}
	// Read metadata associated with the object from all disks.
	metaArr, errs := readAllXLMetadata(ctx, xl.getDisks(), bucket, object)
	// get Quorum for this object
	readQuorum, _, err := objectQuorumFromMeta(ctx, xl, metaArr, errs)
	if err != nil {
		return toObjectErr(err, bucket, object)
	}
	if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
		return toObjectErr(reducedErr, bucket, object)
	}
	// List all online disks.
	onlineDisks, modTime := listOnlineDisks(xl.getDisks(), metaArr, errs)
	// Pick latest valid metadata.
	xlMeta, err := pickValidXLMeta(ctx, metaArr, modTime, readQuorum)
	if err != nil {
		return err
	}
	// Reorder online disks based on erasure distribution order.
	onlineDisks = shuffleDisks(onlineDisks, xlMeta.Erasure.Distribution)
	// Reorder parts metadata based on erasure distribution order.
	metaArr = shufflePartsMetadata(metaArr, xlMeta.Erasure.Distribution)
	// For negative length read everything.
	if length < 0 {
		length = xlMeta.Stat.Size - startOffset
	}
	// Reply back invalid range if the input offset and length fall out of range.
	if startOffset > xlMeta.Stat.Size || startOffset+length > xlMeta.Stat.Size {
		logger.LogIf(ctx, InvalidRange{startOffset, length, xlMeta.Stat.Size})
		return InvalidRange{startOffset, length, xlMeta.Stat.Size}
	}
	// Get start part index and offset.
	partIndex, partOffset, err := xlMeta.ObjectToPartOffset(ctx, startOffset)
	if err != nil {
		return InvalidRange{startOffset, length, xlMeta.Stat.Size}
	}
	// Calculate endOffset according to length
	endOffset := startOffset
	if length > 0 {
		endOffset += length - 1
	}
	// Get last part index to read given length.
	lastPartIndex, _, err := xlMeta.ObjectToPartOffset(ctx, endOffset)
	if err != nil {
		return InvalidRange{startOffset, length, xlMeta.Stat.Size}
	}
	var totalBytesRead int64
	erasure, err := NewErasure(ctx, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize)
	if err != nil {
		return toObjectErr(err, bucket, object)
	}
	// Decode one part at a time until the requested byte count is served.
	for ; partIndex <= lastPartIndex; partIndex++ {
		if length == totalBytesRead {
			break
		}
		// Save the current part name and size.
		partName := xlMeta.Parts[partIndex].Name
		partSize := xlMeta.Parts[partIndex].Size
		partLength := partSize - partOffset
		// partLength should be adjusted so that we don't write more data than what was requested.
		if partLength > (length - totalBytesRead) {
			partLength = length - totalBytesRead
		}
		// Get the checksums of the current part.
		bitrotReaders := make([]*bitrotReader, len(onlineDisks))
		for index, disk := range onlineDisks {
			if disk == OfflineDisk {
				continue
			}
			checksumInfo := metaArr[index].Erasure.GetChecksumInfo(partName)
			endOffset := getErasureShardFileEndOffset(partOffset, partLength, partSize, xlMeta.Erasure.BlockSize, xlMeta.Erasure.DataBlocks)
			bitrotReaders[index] = newBitrotReader(disk, bucket, pathJoin(object, partName), checksumInfo.Algorithm, endOffset, checksumInfo.Hash)
		}
		err := erasure.Decode(ctx, writer, bitrotReaders, partOffset, partLength, partSize)
		if err != nil {
			return toObjectErr(err, bucket, object)
		}
		// Readers that Decode nil-ed out have failed; mark those disks offline
		// so subsequent parts skip them.
		for i, r := range bitrotReaders {
			if r == nil {
				onlineDisks[i] = OfflineDisk
			}
		}
		// Track total bytes read from disk and written to the client.
		totalBytesRead += partLength
		// partOffset will be valid only for the first part, hence reset it to 0 for
		// the remaining parts.
		partOffset = 0
	} // End of read all parts loop.
	// Return success.
	return nil
}
// getObjectInfoDir - This getObjectInfo is specific to object directory lookup.
// It stats the directory (via StatVol) on every disk in parallel and returns a
// synthesized directory ObjectInfo together with the reduced read-quorum error.
func (xl xlObjects) getObjectInfoDir(ctx context.Context, bucket, object string) (oi ObjectInfo, err error) {
	var wg = &sync.WaitGroup{}
	// One error slot per disk; a nil entry means the directory was found there.
	errs := make([]error, len(xl.getDisks()))
	// Stat the object directory on all online disks in parallel.
	for index, disk := range xl.getDisks() {
		if disk == nil {
			// NOTE(review): offline disks leave errs[index] == nil, so they
			// count as successes towards read quorum below — confirm intended.
			continue
		}
		wg.Add(1)
		go func(index int, disk StorageAPI) {
			defer wg.Done()
			if _, err := disk.StatVol(pathJoin(bucket, object)); err != nil {
				// Since we are re-purposing StatVol, an object which
				// is a directory if it doesn't exist should be
				// returned as errFileNotFound instead, convert
				// the error right here accordingly.
				if err == errVolumeNotFound {
					err = errFileNotFound
				} else if err == errVolumeAccessDenied {
					err = errFileAccessDenied
				}
				// Save error to reduce it later
				errs[index] = err
			}
		}(index, disk)
	}
	wg.Wait()
	// Read quorum for a directory object is N/2 disks.
	readQuorum := len(xl.getDisks()) / 2
	return dirObjectInfo(bucket, object, 0, map[string]string{}), reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum)
}
// GetObjectInfo - reads object metadata and replies back ObjectInfo.
func (xl xlObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (oi ObjectInfo, e error) {
	// Take a shared lock so concurrent writers cannot mutate the object
	// while its metadata is being read.
	lock := xl.nsMutex.NewNSLock(bucket, object)
	if lerr := lock.GetRLock(globalObjectTimeout); lerr != nil {
		return oi, lerr
	}
	defer lock.RUnlock()

	if verr := checkGetObjArgs(ctx, bucket, object); verr != nil {
		return oi, verr
	}

	// A trailing slash means the caller is asking about an object directory.
	if hasSuffix(object, slashSeparator) {
		if !xl.isObjectDir(bucket, object) {
			return oi, toObjectErr(errFileNotFound, bucket, object)
		}
		oi, e = xl.getObjectInfoDir(ctx, bucket, object)
		if e != nil {
			return oi, toObjectErr(e, bucket, object)
		}
		return oi, nil
	}

	// Regular object: read its metadata from the disks.
	info, ierr := xl.getObjectInfo(ctx, bucket, object)
	if ierr != nil {
		return ObjectInfo{}, toObjectErr(ierr, bucket, object)
	}
	return info, nil
}
// isObjectCorrupted reports whether the object's data must be considered
// unreliable, i.e. `xl.json` is missing on so many disks that fewer than
// DataBlocks copies remain. It also returns the first valid metadata found,
// if any, so callers can use it as the representative copy.
func (xl xlObjects) isObjectCorrupted(metaArr []xlMetaV1, errs []error) (validMeta xlMetaV1, ok bool) {
	// Count how many disks reported a missing `xl.json`.
	missing := 0
	for _, readErr := range errs {
		if readErr == errFileNotFound {
			missing++
		}
	}

	// Pick the first readable metadata as the representative copy.
	for _, meta := range metaArr {
		if meta.IsValid() {
			validMeta = meta
			break
		}
	}

	// Corrupted when the disks still carrying `xl.json` number fewer
	// than the object's data blocks.
	return validMeta, len(xl.getDisks())-missing < validMeta.Erasure.DataBlocks
}
// xlCorruptedSuffix is appended to the name of an object detected as
// corrupted so that it stays visible (and deletable) by users.
const xlCorruptedSuffix = ".CORRUPTED"
// Renames the corrupted object and makes it visible.
//
// Best-effort repair: the existing parts are renamed under the
// xlCorruptedSuffix name, and the disks that were missing `xl.json`
// get an empty placeholder part plus a freshly computed `xl.json` so
// the renamed object reaches write quorum and becomes listable.
// Errors from the per-disk writes are deliberately ignored.
func renameCorruptedObject(ctx context.Context, bucket, object string, validMeta xlMetaV1, disks []StorageAPI, errs []error) {
	writeQuorum := validMeta.Erasure.DataBlocks + 1
	// Move all existing objects into corrupted suffix.
	rename(ctx, disks, bucket, object, bucket, object+xlCorruptedSuffix, true, writeQuorum, []error{errFileNotFound})
	tempObj := mustGetUUID()
	// Get all the disks which do not have the file.
	var cdisks = make([]StorageAPI, len(disks))
	for i, merr := range errs {
		if merr == errFileNotFound {
			cdisks[i] = disks[i]
		}
	}
	for _, disk := range cdisks {
		if disk == nil {
			continue
		}
		// Write empty part file on missing disks.
		disk.AppendFile(minioMetaTmpBucket, pathJoin(tempObj, "part.1"), []byte{})
		// Write algorithm hash for empty part file.
		// NOTE(review): assumes validMeta carries at least one checksum and
		// one part entry — confirm IsValid() guarantees this upstream.
		alg := validMeta.Erasure.Checksums[0].Algorithm.New()
		alg.Write([]byte{})
		// Update the checksums and part info.
		validMeta.Erasure.Checksums[0] = ChecksumInfo{
			Name: validMeta.Erasure.Checksums[0].Name,
			Algorithm: validMeta.Erasure.Checksums[0].Algorithm,
			Hash: alg.Sum(nil),
		}
		validMeta.Parts[0] = objectPartInfo{
			Number: 1,
			Name: "part.1",
		}
		// Write the `xl.json` with the newly calculated metadata.
		writeXLMetadata(ctx, disk, minioMetaTmpBucket, tempObj, validMeta)
	}
	// Finally rename all the parts into their respective locations.
	rename(ctx, cdisks, minioMetaTmpBucket, tempObj, bucket, object+xlCorruptedSuffix, true, writeQuorum, []error{errFileNotFound})
}
// getObjectInfo - wrapper for reading object metadata and constructs ObjectInfo.
func (xl xlObjects) getObjectInfo(ctx context.Context, bucket, object string) (objInfo ObjectInfo, err error) {
	disks := xl.getDisks()

	// Fetch `xl.json` from every disk.
	metaArr, errs := readAllXLMetadata(ctx, disks, bucket, object)

	var readQuorum int
	if strings.HasSuffix(object, xlCorruptedSuffix) {
		// Already marked corrupted: relax the read quorum to N/2 disks
		// so the object stays visible to users and can be deleted.
		readQuorum = len(xl.getDisks()) / 2
	} else {
		if validMeta, corrupted := xl.isObjectCorrupted(metaArr, errs); corrupted {
			renameCorruptedObject(ctx, bucket, object, validMeta, disks, errs)
			// The corrupted object was renamed away; report not found.
			return objInfo, errFileNotFound
		}
		// Healthy object: derive the read quorum from its own metadata.
		if readQuorum, _, err = objectQuorumFromMeta(ctx, xl, metaArr, errs); err != nil {
			return objInfo, err
		}
	}

	// Determine the latest modification time agreed upon by the disks.
	modTime, _ := commonTime(listObjectModtimes(metaArr, errs))

	// Pick latest valid metadata and translate it into ObjectInfo.
	meta, err := pickValidXLMeta(ctx, metaArr, modTime, readQuorum)
	if err != nil {
		return objInfo, err
	}
	return meta.ToObjectInfo(bucket, object), nil
}
// undoRename reverts a rename on every disk where the original RenameFile
// call succeeded (errs[index] == nil), moving entries back from dst to src.
func undoRename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, isDir bool, errs []error) {
	// Objects are stored as directories of parts; retain a trailing
	// slash so the whole directory is moved back.
	if isDir {
		srcEntry = retainSlash(srcEntry)
		dstEntry = retainSlash(dstEntry)
	}

	var wg sync.WaitGroup
	for index, disk := range disks {
		if disk == nil {
			continue
		}
		wg.Add(1)
		// Undo rename object in parallel.
		go func(index int, disk StorageAPI) {
			defer wg.Done()
			// Only undo on disks where the forward rename had succeeded.
			if errs[index] == nil {
				_ = disk.RenameFile(dstBucket, dstEntry, srcBucket, srcEntry)
			}
		}(index, disk)
	}
	wg.Wait()
}
// rename - common function that renamePart and renameObject use to rename
// the respective underlying storage layer representations.
//
// Renames src to dst on every disk in parallel, ignoring any error listed
// in ignoredErr. If fewer than writeQuorum disks succeed, all partial
// renames are rolled back via undoRename. Returns the per-disk success
// view (evalDisks) and the reduced quorum error.
func rename(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, isDir bool, writeQuorum int, ignoredErr []error) ([]StorageAPI, error) {
	// Initialize sync waitgroup.
	var wg = &sync.WaitGroup{}
	// Initialize list of errors.
	var errs = make([]error, len(disks))
	// Objects are directories of parts; keep trailing slashes so the
	// whole directory is renamed.
	if isDir {
		dstEntry = retainSlash(dstEntry)
		srcEntry = retainSlash(srcEntry)
	}
	// Rename file on all underlying storage disks.
	for index, disk := range disks {
		if disk == nil {
			// Offline disks count as failures towards quorum.
			errs[index] = errDiskNotFound
			continue
		}
		wg.Add(1)
		go func(index int, disk StorageAPI) {
			defer wg.Done()
			if err := disk.RenameFile(srcBucket, srcEntry, dstBucket, dstEntry); err != nil {
				// Errors listed in ignoredErr are treated as success.
				if !IsErrIgnored(err, ignoredErr...) {
					errs[index] = err
				}
			}
		}(index, disk)
	}
	// Wait for all renames to finish.
	wg.Wait()
	// We can safely allow RenameFile errors up to len(xl.getDisks()) - writeQuorum
	// otherwise return failure. Cleanup successful renames.
	err := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
	if err == errXLWriteQuorum {
		// Undo all the partial rename operations.
		undoRename(disks, srcBucket, srcEntry, dstBucket, dstEntry, isDir, errs)
	}
	return evalDisks(disks, errs), err
}
// PutObject - creates an object upon reading from the input stream
// until EOF, erasure codes the data across all disk and additionally
// writes `xl.json` which carries the necessary metadata for future
// object operations.
func (xl xlObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	// Reject malformed bucket/object names or sizes up front.
	if err = checkPutObjectArgs(ctx, bucket, object, xl, data.Size()); err != nil {
		return objInfo, err
	}

	// Hold an exclusive namespace lock for the duration of the write.
	lock := xl.nsMutex.NewNSLock(bucket, object)
	if lerr := lock.GetLock(globalObjectTimeout); lerr != nil {
		return objInfo, lerr
	}
	defer lock.Unlock()

	return xl.putObject(ctx, bucket, object, data, metadata, opts)
}
// putObject wrapper for xl PutObject
//
// Erasure-codes the stream into numbered parts under a temporary UUID in
// the minio meta tmp bucket, writes per-disk `xl.json`, then atomically
// renames the temporary object into its final bucket/object location.
// On any failure the deferred deleteObject removes the temporary object.
func (xl xlObjects) putObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	uniqueID := mustGetUUID()
	tempObj := uniqueID
	// No metadata is set, allocate a new one.
	if metadata == nil {
		metadata = make(map[string]string)
	}
	// Get parity and data drive count based on storage class metadata
	dataDrives, parityDrives := getRedundancyCount(metadata[amzStorageClass], len(xl.getDisks()))
	// we now know the number of blocks this object needs for data and parity.
	// writeQuorum is dataBlocks + 1
	writeQuorum := dataDrives + 1
	// Delete temporary object in the event of failure.
	// If PutObject succeeded there would be no temporary
	// object to delete.
	defer xl.deleteObject(ctx, minioMetaTmpBucket, tempObj, writeQuorum, false)
	// This is a special case with size as '0' and object ends with
	// a slash separator, we treat it like a valid operation and
	// return success.
	if isObjectDir(object, data.Size()) {
		// Check if an object is present as one of the parent dir.
		// -- FIXME. (needs a new kind of lock).
		// -- FIXME (this also causes performance issue when disks are down).
		if xl.parentDirIsObject(ctx, bucket, path.Dir(object)) {
			return ObjectInfo{}, toObjectErr(errFileAccessDenied, bucket, object)
		}
		if err = xl.putObjectDir(ctx, minioMetaTmpBucket, tempObj, writeQuorum); err != nil {
			return ObjectInfo{}, toObjectErr(err, bucket, object)
		}
		// Rename the successfully written temporary object to final location. Ignore errFileAccessDenied
		// error because it means that the target object dir exists and we want to be close to S3 specification.
		if _, err = rename(ctx, xl.getDisks(), minioMetaTmpBucket, tempObj, bucket, object, true, writeQuorum, []error{errFileAccessDenied}); err != nil {
			return ObjectInfo{}, toObjectErr(err, bucket, object)
		}
		return dirObjectInfo(bucket, object, data.Size(), metadata), nil
	}
	// Validate put object input args.
	if err = checkPutObjectArgs(ctx, bucket, object, xl, data.Size()); err != nil {
		return ObjectInfo{}, err
	}
	// Validate input data size and it can never be less than zero.
	// (-1 denotes an unknown/streaming size and is allowed.)
	if data.Size() < -1 {
		logger.LogIf(ctx, errInvalidArgument)
		return ObjectInfo{}, toObjectErr(errInvalidArgument)
	}
	// Check if an object is present as one of the parent dir.
	// -- FIXME. (needs a new kind of lock).
	// -- FIXME (this also causes performance issue when disks are down).
	if xl.parentDirIsObject(ctx, bucket, path.Dir(object)) {
		return ObjectInfo{}, toObjectErr(errFileAccessDenied, bucket, object)
	}
	// Limit the reader to its provided size if specified.
	var reader io.Reader = data
	// Initialize parts metadata
	partsMetadata := make([]xlMetaV1, len(xl.getDisks()))
	xlMeta := newXLMetaV1(object, dataDrives, parityDrives)
	// Initialize xl meta.
	for index := range partsMetadata {
		partsMetadata[index] = xlMeta
	}
	// Order disks according to erasure distribution
	onlineDisks := shuffleDisks(xl.getDisks(), partsMetadata[0].Erasure.Distribution)
	// Total size of the written object
	var sizeWritten int64
	erasure, err := NewErasure(ctx, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize)
	if err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}
	// Fetch buffer for I/O, returns from the pool if not allocates a new one and returns.
	var buffer []byte
	switch size := data.Size(); {
	case size == 0:
		buffer = make([]byte, 1) // Allocate atleast a byte to reach EOF
	case size == -1 || size >= blockSizeV1:
		// Unknown size or at least one full block: use the pooled buffer.
		buffer = xl.bp.Get()
		defer xl.bp.Put(buffer)
	case size < blockSizeV1:
		// No need to allocate fully blockSizeV1 buffer if the incoming data is smaller.
		buffer = make([]byte, size, 2*size)
	}
	if len(buffer) > int(xlMeta.Erasure.BlockSize) {
		buffer = buffer[:xlMeta.Erasure.BlockSize]
	}
	// Read data and split into parts - similar to multipart mechanism
	for partIdx := 1; ; partIdx++ {
		// Compute part name
		partName := "part." + strconv.Itoa(partIdx)
		// Compute the path of current part
		tempErasureObj := pathJoin(uniqueID, partName)
		// Calculate the size of the current part.
		var curPartSize int64
		curPartSize, err = calculatePartSizeFromIdx(ctx, data.Size(), globalPutPartSize, partIdx)
		if err != nil {
			return ObjectInfo{}, toObjectErr(err, bucket, object)
		}
		// Hint the filesystem to pre-allocate one continuous large block.
		// This is only an optimization.
		// (Skipped when curPartSize is negative, i.e. size is unknown.)
		var curPartReader io.Reader
		if curPartSize >= 0 {
			pErr := xl.prepareFile(ctx, minioMetaTmpBucket, tempErasureObj, curPartSize, onlineDisks, xlMeta.Erasure.BlockSize, xlMeta.Erasure.DataBlocks, writeQuorum)
			if pErr != nil {
				return ObjectInfo{}, toObjectErr(pErr, bucket, object)
			}
		}
		// Cap the reader at the part boundary unless this is the final part.
		if curPartSize < data.Size() {
			curPartReader = io.LimitReader(reader, curPartSize)
		} else {
			curPartReader = reader
		}
		writers := make([]*bitrotWriter, len(onlineDisks))
		for i, disk := range onlineDisks {
			if disk == nil {
				continue
			}
			writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tempErasureObj, DefaultBitrotAlgorithm)
		}
		// Encode requires at least dataBlocks+1 successful writers (write quorum).
		n, erasureErr := erasure.Encode(ctx, curPartReader, writers, buffer, erasure.dataBlocks+1)
		if erasureErr != nil {
			return ObjectInfo{}, toObjectErr(erasureErr, minioMetaTmpBucket, tempErasureObj)
		}
		// Should return IncompleteBody{} error when reader has fewer bytes
		// than specified in request header.
		if n < curPartSize && data.Size() > 0 {
			logger.LogIf(ctx, IncompleteBody{})
			return ObjectInfo{}, IncompleteBody{}
		}
		if n == 0 && data.Size() == -1 {
			// The last part of a compressed object will always be empty
			// Since the compressed size is unpredictable.
			// Hence removing the last (empty) part from all `xl.disks`.
			dErr := xl.deleteObject(ctx, minioMetaTmpBucket, tempErasureObj, writeQuorum, true)
			if dErr != nil {
				return ObjectInfo{}, toObjectErr(dErr, minioMetaTmpBucket, tempErasureObj)
			}
			break
		}
		// Update the total written size
		sizeWritten += n
		// Record part and checksum info per disk; nil writers mark the
		// corresponding disk offline for the remaining parts.
		for i, w := range writers {
			if w == nil {
				onlineDisks[i] = nil
				continue
			}
			partsMetadata[i].AddObjectPart(partIdx, partName, "", n, data.ActualSize())
			partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{partName, DefaultBitrotAlgorithm, w.Sum()})
		}
		// We wrote everything, break out.
		if sizeWritten == data.Size() {
			break
		}
	}
	// Save additional erasureMetadata.
	modTime := UTCNow()
	metadata["etag"] = hex.EncodeToString(data.MD5Current())
	// Guess content-type from the extension if possible.
	if metadata["content-type"] == "" {
		metadata["content-type"] = mimedb.TypeByExtension(path.Ext(object))
	}
	if xl.isObject(bucket, object) {
		// Deny if WORM is enabled
		if globalWORMEnabled {
			return ObjectInfo{}, ObjectAlreadyExists{Bucket: bucket, Object: object}
		}
		// Rename if an object already exists to temporary location.
		newUniqueID := mustGetUUID()
		// Delete successfully renamed object.
		defer xl.deleteObject(ctx, minioMetaTmpBucket, newUniqueID, writeQuorum, false)
		// NOTE: Do not use online disks slice here: the reason is that existing object should be purged
		// regardless of `xl.json` status and rolled back in case of errors. Also allow renaming the
		// existing object if it is not present in quorum disks so users can overwrite stale objects.
		_, err = rename(ctx, xl.getDisks(), bucket, object, minioMetaTmpBucket, newUniqueID, true, writeQuorum, []error{errFileNotFound})
		if err != nil {
			return ObjectInfo{}, toObjectErr(err, bucket, object)
		}
	}
	// Fill all the necessary metadata.
	// Update `xl.json` content on each disks.
	for index := range partsMetadata {
		partsMetadata[index].Meta = metadata
		partsMetadata[index].Stat.Size = sizeWritten
		partsMetadata[index].Stat.ModTime = modTime
	}
	// Write unique `xl.json` for each disk.
	if onlineDisks, err = writeUniqueXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, writeQuorum); err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}
	// Rename the successfully written temporary object to final location.
	if _, err = rename(ctx, onlineDisks, minioMetaTmpBucket, tempObj, bucket, object, true, writeQuorum, nil); err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}
	// Object info is the same in all disks, so we can pick the first meta
	// of the first disk
	xlMeta = partsMetadata[0]
	objInfo = ObjectInfo{
		IsDir: false,
		Bucket: bucket,
		Name: object,
		Size: xlMeta.Stat.Size,
		ModTime: xlMeta.Stat.ModTime,
		ETag: xlMeta.Meta["etag"],
		ContentType: xlMeta.Meta["content-type"],
		ContentEncoding: xlMeta.Meta["content-encoding"],
		UserDefined: xlMeta.Meta,
	}
	// Success, return object info.
	return objInfo, nil
}
// deleteObject - wrapper for delete object, deletes an object from
// all the disks in parallel, including `xl.json` associated with the
// object.
//
// The object is first renamed into the minio meta tmp bucket (requiring
// write quorum), then the temporary copy is removed from each disk in
// parallel. Objects already inside the tmp bucket are deleted in place.
func (xl xlObjects) deleteObject(ctx context.Context, bucket, object string, writeQuorum int, isDir bool) error {
	var disks []StorageAPI
	var err error
	tmpObj := mustGetUUID()
	if bucket == minioMetaTmpBucket {
		// Already a temporary object: delete in place, no rename needed.
		tmpObj = object
		disks = xl.getDisks()
	} else {
		// Rename the current object while requiring write quorum, but also consider
		// that a non found object in a given disk as a success since it already
		// confirms that the object doesn't have a part in that disk (already removed)
		if isDir {
			// For directories also ignore errFileAccessDenied
			// (non-empty directories cannot be renamed/removed).
			disks, err = rename(ctx, xl.getDisks(), bucket, object, minioMetaTmpBucket, tmpObj, true, writeQuorum,
				[]error{errFileNotFound, errFileAccessDenied})
		} else {
			disks, err = rename(ctx, xl.getDisks(), bucket, object, minioMetaTmpBucket, tmpObj, true, writeQuorum,
				[]error{errFileNotFound})
		}
		if err != nil {
			return toObjectErr(err, bucket, object)
		}
	}
	// Initialize sync waitgroup.
	var wg = &sync.WaitGroup{}
	// Initialize list of errors.
	var dErrs = make([]error, len(disks))
	for index, disk := range disks {
		if disk == nil {
			dErrs[index] = errDiskNotFound
			continue
		}
		wg.Add(1)
		go func(index int, disk StorageAPI, isDir bool) {
			defer wg.Done()
			var e error
			if isDir {
				// DeleteFile() simply tries to remove a directory
				// and will succeed only if that directory is empty.
				e = disk.DeleteFile(minioMetaTmpBucket, tmpObj)
			} else {
				e = cleanupDir(ctx, disk, minioMetaTmpBucket, tmpObj)
			}
			if e != nil && e != errVolumeNotFound {
				dErrs[index] = e
			}
		}(index, disk, isDir)
	}
	// Wait for all routines to finish.
	wg.Wait()
	// return errors if any during deletion
	return reduceWriteQuorumErrs(ctx, dErrs, objectOpIgnoredErrs, writeQuorum)
}
// DeleteObject - deletes an object, this call doesn't necessary reply
// any error as it is not necessary for the handler to reply back a
// response to the client request.
func (xl xlObjects) DeleteObject(ctx context.Context, bucket, object string) (err error) {
	// Serialize with other operations on the same object.
	lock := xl.nsMutex.NewNSLock(bucket, object)
	if lerr := lock.GetLock(globalOperationTimeout); lerr != nil {
		return lerr
	}
	defer lock.Unlock()

	if err = checkDelObjArgs(ctx, bucket, object); err != nil {
		return err
	}

	if !xl.isObject(bucket, object) && !xl.isObjectDir(bucket, object) {
		return ObjectNotFound{bucket, object}
	}

	isObjectDir := hasSuffix(object, slashSeparator)
	var writeQuorum int
	if isObjectDir {
		// Directory objects carry no metadata; use the default N/2+1 quorum.
		writeQuorum = len(xl.getDisks())/2 + 1
	} else {
		// Derive the write quorum from the object's own `xl.json`.
		partsMetadata, errs := readAllXLMetadata(ctx, xl.getDisks(), bucket, object)
		if _, writeQuorum, err = objectQuorumFromMeta(ctx, xl, partsMetadata, errs); err != nil {
			return toObjectErr(err, bucket, object)
		}
	}

	// Remove the object (or object directory) from all disks.
	if err = xl.deleteObject(ctx, bucket, object, writeQuorum, isObjectDir); err != nil {
		return toObjectErr(err, bucket, object)
	}
	return nil
}
// ListObjectsV2 lists all blobs in bucket filtered by prefix
func (xl xlObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
	// The continuation token takes precedence over startAfter as marker.
	marker := continuationToken
	if marker == "" {
		marker = startAfter
	}

	loi, lerr := xl.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
	if lerr != nil {
		return result, lerr
	}

	// Translate the V1 listing into a V2 response.
	result = ListObjectsV2Info{
		IsTruncated:           loi.IsTruncated,
		ContinuationToken:     continuationToken,
		NextContinuationToken: loi.NextMarker,
		Objects:               loi.Objects,
		Prefixes:              loi.Prefixes,
	}
	return result, nil
}
| cmd/xl-v1-object.go | 1 | https://github.com/minio/minio/commit/36990aeafd7b8d1846e5f66292eacde60c36f0fb | [
0.02204224467277527,
0.0010086955735459924,
0.00016311966464854777,
0.00028386490885168314,
0.0025950458366423845
] |
{
"id": 1,
"code_window": [
"\t}\n",
"\n",
"\t// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html\n",
"\t// Ignore delete object errors while replying to client, since we are\n",
"\t// suppposed to reply only 204. Additionally log the error for\n",
"\t// investigation.\n",
"\tdeleteObject(ctx, objectAPI, api.CacheAPI(), bucket, object, r)\n",
"\twriteSuccessNoContent(w)\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tif err := deleteObject(ctx, objectAPI, api.CacheAPI(), bucket, object, r); err != nil {\n",
"\t\tswitch toAPIErrorCode(err) {\n",
"\t\tcase ErrNoSuchBucket:\n",
"\t\t\t// When bucket doesn't exist specially handle it.\n",
"\t\t\twriteErrorResponse(w, ErrNoSuchBucket, r.URL)\n",
"\t\t\treturn\n",
"\t\t}\n",
"\t\tlogger.LogIf(ctx, err)\n",
"\t\t// Ignore delete object errors while replying to client, since we are suppposed to reply only 204.\n",
"\t}\n"
],
"file_path": "cmd/object-handlers.go",
"type": "replace",
"edit_start_line_idx": 1012
} | // Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// This program generates fixedhuff.go
// Invoke as
//
// go run gen.go -output fixedhuff.go
package main
import (
"bytes"
"flag"
"fmt"
"go/format"
"io/ioutil"
"log"
)
var filename = flag.String("output", "fixedhuff.go", "output file name")
const maxCodeLen = 16
// Note: the definition of the huffmanDecoder struct is copied from
// inflate.go, as it is private to the implementation.
// chunk & 15 is number of bits
// chunk >> 4 is value, including table link
const (
huffmanChunkBits = 9
huffmanNumChunks = 1 << huffmanChunkBits
huffmanCountMask = 15
huffmanValueShift = 4
)
type huffmanDecoder struct {
min int // the minimum code length
chunks [huffmanNumChunks]uint32 // chunks as described above
links [][]uint32 // overflow links
linkMask uint32 // mask the width of the link table
}
// Initialize Huffman decoding tables from array of code lengths.
// Following this function, h is guaranteed to be initialized into a complete
// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
// degenerate case where the tree has only a single symbol with length 1. Empty
// trees are permitted.
func (h *huffmanDecoder) init(bits []int) bool {
// Sanity enables additional runtime tests during Huffman
// table construction. It's intended to be used during
// development to supplement the currently ad-hoc unit tests.
const sanity = false
if h.min != 0 {
*h = huffmanDecoder{}
}
// Count number of codes of each length,
// compute min and max length.
var count [maxCodeLen]int
var min, max int
for _, n := range bits {
if n == 0 {
continue
}
if min == 0 || n < min {
min = n
}
if n > max {
max = n
}
count[n]++
}
// Empty tree. The decompressor.huffSym function will fail later if the tree
// is used. Technically, an empty tree is only valid for the HDIST tree and
// not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
// is guaranteed to fail since it will attempt to use the tree to decode the
// codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
// guaranteed to fail later since the compressed data section must be
// composed of at least one symbol (the end-of-block marker).
if max == 0 {
return true
}
code := 0
var nextcode [maxCodeLen]int
for i := min; i <= max; i++ {
code <<= 1
nextcode[i] = code
code += count[i]
}
// Check that the coding is complete (i.e., that we've
// assigned all 2-to-the-max possible bit sequences).
// Exception: To be compatible with zlib, we also need to
// accept degenerate single-code codings. See also
// TestDegenerateHuffmanCoding.
if code != 1<<uint(max) && !(code == 1 && max == 1) {
return false
}
h.min = min
if max > huffmanChunkBits {
numLinks := 1 << (uint(max) - huffmanChunkBits)
h.linkMask = uint32(numLinks - 1)
// create link tables
link := nextcode[huffmanChunkBits+1] >> 1
h.links = make([][]uint32, huffmanNumChunks-link)
for j := uint(link); j < huffmanNumChunks; j++ {
reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8
reverse >>= uint(16 - huffmanChunkBits)
off := j - uint(link)
if sanity && h.chunks[reverse] != 0 {
panic("impossible: overwriting existing chunk")
}
h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
h.links[off] = make([]uint32, numLinks)
}
}
for i, n := range bits {
if n == 0 {
continue
}
code := nextcode[n]
nextcode[n]++
chunk := uint32(i<<huffmanValueShift | n)
reverse := int(reverseByte[code>>8]) | int(reverseByte[code&0xff])<<8
reverse >>= uint(16 - n)
if n <= huffmanChunkBits {
for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
// We should never need to overwrite
// an existing chunk. Also, 0 is
// never a valid chunk, because the
// lower 4 "count" bits should be
// between 1 and 15.
if sanity && h.chunks[off] != 0 {
panic("impossible: overwriting existing chunk")
}
h.chunks[off] = chunk
}
} else {
j := reverse & (huffmanNumChunks - 1)
if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
// Longer codes should have been
// associated with a link table above.
panic("impossible: not an indirect chunk")
}
value := h.chunks[j] >> huffmanValueShift
linktab := h.links[value]
reverse >>= huffmanChunkBits
for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
if sanity && linktab[off] != 0 {
panic("impossible: overwriting existing chunk")
}
linktab[off] = chunk
}
}
}
if sanity {
// Above we've sanity checked that we never overwrote
// an existing entry. Here we additionally check that
// we filled the tables completely.
for i, chunk := range h.chunks {
if chunk == 0 {
// As an exception, in the degenerate
// single-code case, we allow odd
// chunks to be missing.
if code == 1 && i%2 == 1 {
continue
}
panic("impossible: missing chunk")
}
}
for _, linktab := range h.links {
for _, chunk := range linktab {
if chunk == 0 {
panic("impossible: missing chunk")
}
}
}
}
return true
}
func main() {
flag.Parse()
var h huffmanDecoder
var bits [288]int
initReverseByte()
for i := 0; i < 144; i++ {
bits[i] = 8
}
for i := 144; i < 256; i++ {
bits[i] = 9
}
for i := 256; i < 280; i++ {
bits[i] = 7
}
for i := 280; i < 288; i++ {
bits[i] = 8
}
h.init(bits[:])
if h.links != nil {
log.Fatal("Unexpected links table in fixed Huffman decoder")
}
var buf bytes.Buffer
fmt.Fprintf(&buf, `// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.`+"\n\n")
fmt.Fprintln(&buf, "package flate")
fmt.Fprintln(&buf)
fmt.Fprintln(&buf, "// autogenerated by go run gen.go -output fixedhuff.go, DO NOT EDIT")
fmt.Fprintln(&buf)
fmt.Fprintln(&buf, "var fixedHuffmanDecoder = huffmanDecoder{")
fmt.Fprintf(&buf, "\t%d,\n", h.min)
fmt.Fprintln(&buf, "\t[huffmanNumChunks]uint32{")
for i := 0; i < huffmanNumChunks; i++ {
if i&7 == 0 {
fmt.Fprintf(&buf, "\t\t")
} else {
fmt.Fprintf(&buf, " ")
}
fmt.Fprintf(&buf, "0x%04x,", h.chunks[i])
if i&7 == 7 {
fmt.Fprintln(&buf)
}
}
fmt.Fprintln(&buf, "\t},")
fmt.Fprintln(&buf, "\tnil, 0,")
fmt.Fprintln(&buf, "}")
data, err := format.Source(buf.Bytes())
if err != nil {
log.Fatal(err)
}
err = ioutil.WriteFile(*filename, data, 0644)
if err != nil {
log.Fatal(err)
}
}
var reverseByte [256]byte
func initReverseByte() {
for x := 0; x < 256; x++ {
var result byte
for i := uint(0); i < 8; i++ {
result |= byte(((x >> i) & 1) << (7 - i))
}
reverseByte[x] = result
}
}
| vendor/github.com/klauspost/compress/flate/gen.go | 0 | https://github.com/minio/minio/commit/36990aeafd7b8d1846e5f66292eacde60c36f0fb | [
0.00017914731870405376,
0.00017302761261817068,
0.0001664242590777576,
0.0001729672512738034,
0.0000032388579711550847
] |
{
"id": 1,
"code_window": [
"\t}\n",
"\n",
"\t// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html\n",
"\t// Ignore delete object errors while replying to client, since we are\n",
"\t// suppposed to reply only 204. Additionally log the error for\n",
"\t// investigation.\n",
"\tdeleteObject(ctx, objectAPI, api.CacheAPI(), bucket, object, r)\n",
"\twriteSuccessNoContent(w)\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tif err := deleteObject(ctx, objectAPI, api.CacheAPI(), bucket, object, r); err != nil {\n",
"\t\tswitch toAPIErrorCode(err) {\n",
"\t\tcase ErrNoSuchBucket:\n",
"\t\t\t// When bucket doesn't exist specially handle it.\n",
"\t\t\twriteErrorResponse(w, ErrNoSuchBucket, r.URL)\n",
"\t\t\treturn\n",
"\t\t}\n",
"\t\tlogger.LogIf(ctx, err)\n",
"\t\t// Ignore delete object errors while replying to client, since we are suppposed to reply only 204.\n",
"\t}\n"
],
"file_path": "cmd/object-handlers.go",
"type": "replace",
"edit_start_line_idx": 1012
} | CoreOS Project
Copyright 2014 CoreOS, Inc
This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).
| vendor/github.com/coreos/pkg/NOTICE | 0 | https://github.com/minio/minio/commit/36990aeafd7b8d1846e5f66292eacde60c36f0fb | [
0.00017468369333073497,
0.00017468369333073497,
0.00017468369333073497,
0.00017468369333073497,
0
] |
{
"id": 1,
"code_window": [
"\t}\n",
"\n",
"\t// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html\n",
"\t// Ignore delete object errors while replying to client, since we are\n",
"\t// suppposed to reply only 204. Additionally log the error for\n",
"\t// investigation.\n",
"\tdeleteObject(ctx, objectAPI, api.CacheAPI(), bucket, object, r)\n",
"\twriteSuccessNoContent(w)\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tif err := deleteObject(ctx, objectAPI, api.CacheAPI(), bucket, object, r); err != nil {\n",
"\t\tswitch toAPIErrorCode(err) {\n",
"\t\tcase ErrNoSuchBucket:\n",
"\t\t\t// When bucket doesn't exist specially handle it.\n",
"\t\t\twriteErrorResponse(w, ErrNoSuchBucket, r.URL)\n",
"\t\t\treturn\n",
"\t\t}\n",
"\t\tlogger.LogIf(ctx, err)\n",
"\t\t// Ignore delete object errors while replying to client, since we are suppposed to reply only 204.\n",
"\t}\n"
],
"file_path": "cmd/object-handlers.go",
"type": "replace",
"edit_start_line_idx": 1012
} | // Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"fmt"
"strings"
)
// SimpleQueryStringQuery is a query that uses the SimpleQueryParser
// to parse its context. Unlike the regular query_string query,
// the simple_query_string query will never throw an exception,
// and discards invalid parts of the query.
//
// For more details, see
// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-simple-query-string-query.html
type SimpleQueryStringQuery struct {
	queryText              string              // query text to parse; serialized as "query"
	analyzer               string              // analyzer to use for the query
	operator               string              // default operator; serialized lower-cased as "default_operator"
	fields                 []string            // fields to run the query against, in insertion order
	fieldBoosts            map[string]*float64 // optional per-field boost; nil entry means no boost for that field
	minimumShouldMatch     string              // serialized as "minimum_should_match"
	flags                  string              // enabled simple_query_string flags
	boost                  *float64            // query-level boost; nil when unset
	lowercaseExpandedTerms *bool               // tri-state option; nil means "not set"
	lenient                *bool               // tri-state option; nil means "not set"
	analyzeWildcard        *bool               // tri-state option; nil means "not set"
	locale                 string              // serialized as "locale"
	queryName              string              // serialized as "_name" for matched-queries reporting
}
// NewSimpleQueryStringQuery creates and initializes a new SimpleQueryStringQuery
// for the given query text.
func NewSimpleQueryStringQuery(text string) *SimpleQueryStringQuery {
	q := &SimpleQueryStringQuery{queryText: text}
	q.fields = make([]string, 0)
	q.fieldBoosts = make(map[string]*float64)
	return q
}
// Field adds a field to run the query against.
func (q *SimpleQueryStringQuery) Field(field string) *SimpleQueryStringQuery {
	q.fields = append(q.fields, field)
	return q
}

// FieldWithBoost adds a field to run the query against with a specific boost.
func (q *SimpleQueryStringQuery) FieldWithBoost(field string, boost float64) *SimpleQueryStringQuery {
	q.fields = append(q.fields, field)
	q.fieldBoosts[field] = &boost
	return q
}

// Boost sets the boost for this query.
func (q *SimpleQueryStringQuery) Boost(boost float64) *SimpleQueryStringQuery {
	q.boost = &boost
	return q
}

// QueryName sets the query name for the filter that can be used when
// searching for matched_filters per hit.
func (q *SimpleQueryStringQuery) QueryName(queryName string) *SimpleQueryStringQuery {
	q.queryName = queryName
	return q
}

// Analyzer specifies the analyzer to use for the query.
func (q *SimpleQueryStringQuery) Analyzer(analyzer string) *SimpleQueryStringQuery {
	q.analyzer = analyzer
	return q
}

// DefaultOperator specifies the default operator for the query.
func (q *SimpleQueryStringQuery) DefaultOperator(defaultOperator string) *SimpleQueryStringQuery {
	q.operator = defaultOperator
	return q
}

// Flags sets the flags for the query.
func (q *SimpleQueryStringQuery) Flags(flags string) *SimpleQueryStringQuery {
	q.flags = flags
	return q
}

// LowercaseExpandedTerms indicates whether terms of wildcard, prefix, fuzzy
// and range queries are automatically lower-cased or not. Default is true.
func (q *SimpleQueryStringQuery) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *SimpleQueryStringQuery {
	q.lowercaseExpandedTerms = &lowercaseExpandedTerms
	return q
}

// Locale sets the locale option serialized with the query.
func (q *SimpleQueryStringQuery) Locale(locale string) *SimpleQueryStringQuery {
	q.locale = locale
	return q
}

// Lenient indicates whether the query string parser should be lenient
// when parsing field values. It defaults to the index setting and if not
// set, defaults to false.
func (q *SimpleQueryStringQuery) Lenient(lenient bool) *SimpleQueryStringQuery {
	q.lenient = &lenient
	return q
}

// AnalyzeWildcard indicates whether to enable analysis on wildcard and prefix queries.
func (q *SimpleQueryStringQuery) AnalyzeWildcard(analyzeWildcard bool) *SimpleQueryStringQuery {
	q.analyzeWildcard = &analyzeWildcard
	return q
}

// MinimumShouldMatch sets the minimum_should_match option serialized with
// the query.
func (q *SimpleQueryStringQuery) MinimumShouldMatch(minimumShouldMatch string) *SimpleQueryStringQuery {
	q.minimumShouldMatch = minimumShouldMatch
	return q
}
// Source returns the JSON-serializable body of the query, shaped as
// {"simple_query_string": {...}}. Only explicitly-set options are
// included; a per-field boost renders the field as "name^boost".
//
// Example output:
//
//	{
//	  "simple_query_string" : {
//	    "query" : "\"fried eggs\" +(eggplant | potato) -frittata",
//	    "analyzer" : "snowball",
//	    "fields" : ["body^5","_all"],
//	    "default_operator" : "and"
//	  }
//	}
func (q *SimpleQueryStringQuery) Source() (interface{}, error) {
	query := make(map[string]interface{})
	query["query"] = q.queryText

	if len(q.fields) > 0 {
		var fields []string
		for _, field := range q.fields {
			boost, hasBoost := q.fieldBoosts[field]
			if hasBoost && boost != nil {
				fields = append(fields, fmt.Sprintf("%s^%f", field, *boost))
				continue
			}
			fields = append(fields, field)
		}
		query["fields"] = fields
	}
	if q.flags != "" {
		query["flags"] = q.flags
	}
	if q.analyzer != "" {
		query["analyzer"] = q.analyzer
	}
	if q.operator != "" {
		query["default_operator"] = strings.ToLower(q.operator)
	}
	if q.lowercaseExpandedTerms != nil {
		query["lowercase_expanded_terms"] = *q.lowercaseExpandedTerms
	}
	if q.lenient != nil {
		query["lenient"] = *q.lenient
	}
	if q.analyzeWildcard != nil {
		query["analyze_wildcard"] = *q.analyzeWildcard
	}
	if q.locale != "" {
		query["locale"] = q.locale
	}
	if q.queryName != "" {
		query["_name"] = q.queryName
	}
	if q.minimumShouldMatch != "" {
		query["minimum_should_match"] = q.minimumShouldMatch
	}
	if q.boost != nil {
		query["boost"] = *q.boost
	}

	return map[string]interface{}{"simple_query_string": query}, nil
}
| vendor/gopkg.in/olivere/elastic.v5/search_queries_simple_query_string.go | 0 | https://github.com/minio/minio/commit/36990aeafd7b8d1846e5f66292eacde60c36f0fb | [
0.0001980527158593759,
0.00017165718600153923,
0.00016217595839407295,
0.00017010628653224558,
0.000007751675184408668
] |
{
"id": 2,
"code_window": [
"\tif err = checkDelObjArgs(ctx, bucket, object); err != nil {\n",
"\t\treturn err\n",
"\t}\n",
"\n",
"\tif !xl.isObject(bucket, object) && !xl.isObjectDir(bucket, object) {\n",
"\t\treturn ObjectNotFound{bucket, object}\n",
"\t}\n",
"\n",
"\tvar writeQuorum int\n",
"\tvar isObjectDir = hasSuffix(object, slashSeparator)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/xl-v1-object.go",
"type": "replace",
"edit_start_line_idx": 986
} | /*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"encoding/hex"
"errors"
"hash/crc32"
"path"
"sync"
"time"
"github.com/minio/minio/cmd/logger"
"github.com/tidwall/gjson"
)
// Returns number of errors that occurred the most (incl. nil) and the
// corresponding error value. NB When there is more than one error value that
// occurs maximum number of times, the error value returned depends on how
// golang's map orders keys. This doesn't affect correctness as long as quorum
// value is greater than or equal to simple majority, since none of the equally
// maximal values would occur quorum or more number of times.
func reduceErrs(errs []error, ignoredErrs []error) (maxCount int, maxErr error) {
	counts := make(map[error]int)
	for _, err := range errs {
		if IsErrIgnored(err, ignoredErrs...) {
			continue
		}
		counts[err]++
	}
	for err, n := range counts {
		// Prefer `nil` over other error values seen the same number of times.
		if n > maxCount || (n == maxCount && err == nil) {
			maxCount, maxErr = n, err
		}
	}
	return maxCount, maxErr
}
// reduceQuorumErrs behaves like reduceErrs but only for returning
// values of maximally occurring errors validated against a generic
// quorum number that can be read or write quorum depending on usage.
// If no single error (including nil) reaches the quorum count,
// quorumErr is returned instead.
func reduceQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, quorum int, quorumErr error) error {
	maxCount, maxErr := reduceErrs(errs, ignoredErrs)
	if maxCount >= quorum {
		return maxErr
	}
	return quorumErr
}

// reduceReadQuorumErrs behaves like reduceErrs but only for returning
// values of maximally occurring errors validated against readQuorum.
func reduceReadQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, readQuorum int) (maxErr error) {
	return reduceQuorumErrs(ctx, errs, ignoredErrs, readQuorum, errXLReadQuorum)
}

// reduceWriteQuorumErrs behaves like reduceErrs but only for returning
// values of maximally occurring errors validated against writeQuorum.
func reduceWriteQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, writeQuorum int) (maxErr error) {
	return reduceQuorumErrs(ctx, errs, ignoredErrs, writeQuorum, errXLWriteQuorum)
}
// Similar to 'len(slice)' but returns the actual count of elements,
// skipping unallocated (nil) entries.
func diskCount(disks []StorageAPI) int {
	count := 0
	for _, disk := range disks {
		if disk != nil {
			count++
		}
	}
	return count
}
// hashOrder - hashes input key to return a consistent hashed integer
// slice. The returned order is salted with the input key, so the same
// key always yields the same order.
// NOTE: collisions are fine, we are not looking for uniqueness
// in the slices returned.
func hashOrder(key string, cardinality int) []int {
	if cardinality <= 0 {
		// Nothing to distribute over; also guards the modulo below.
		return nil
	}
	start := int(crc32.Checksum([]byte(key), crc32.IEEETable) % uint32(cardinality))
	nums := make([]int, cardinality)
	for i := range nums {
		// Values are 1-based and rotate around the ring, beginning just
		// after the key's hash position.
		nums[i] = 1 + ((start + i + 1) % cardinality)
	}
	return nums
}
// parseXLStat extracts the `stat` section (modification time and size)
// from a raw `xl.json` buffer using gjson path lookups.
func parseXLStat(xlMetaBuf []byte) (si statInfo, e error) {
	// obtain stat info.
	stat := statInfo{}
	// fetching modTime.
	modTime, err := time.Parse(time.RFC3339, gjson.GetBytes(xlMetaBuf, "stat.modTime").String())
	if err != nil {
		return si, err
	}
	stat.ModTime = modTime
	// obtain Stat.Size .
	stat.Size = gjson.GetBytes(xlMetaBuf, "stat.size").Int()
	return stat, nil
}

// parseXLVersion returns the `version` field of a raw `xl.json` buffer.
func parseXLVersion(xlMetaBuf []byte) string {
	return gjson.GetBytes(xlMetaBuf, "version").String()
}

// parseXLFormat returns the `format` field of a raw `xl.json` buffer.
func parseXLFormat(xlMetaBuf []byte) string {
	return gjson.GetBytes(xlMetaBuf, "format").String()
}

// parseXLRelease returns the `minio.release` field of a raw `xl.json` buffer.
func parseXLRelease(xlMetaBuf []byte) string {
	return gjson.GetBytes(xlMetaBuf, "minio.release").String()
}

// parseXLErasureInfo decodes the `erasure` section of `xl.json`:
// distribution, algorithm, data/parity block counts, block size, index
// and the per-part bitrot checksums. It returns errCorruptedFormat when
// the checksum list does not line up with the parts list or a checksum
// entry is missing its name, and errBitrotHashAlgoInvalid for an unknown
// checksum algorithm.
func parseXLErasureInfo(ctx context.Context, xlMetaBuf []byte) (ErasureInfo, error) {
	erasure := ErasureInfo{}
	erasureResult := gjson.GetBytes(xlMetaBuf, "erasure")
	// parse the xlV1Meta.Erasure.Distribution.
	disResult := erasureResult.Get("distribution").Array()
	distribution := make([]int, len(disResult))
	for i, dis := range disResult {
		distribution[i] = int(dis.Int())
	}
	erasure.Distribution = distribution
	erasure.Algorithm = erasureResult.Get("algorithm").String()
	erasure.DataBlocks = int(erasureResult.Get("data").Int())
	erasure.ParityBlocks = int(erasureResult.Get("parity").Int())
	erasure.BlockSize = erasureResult.Get("blockSize").Int()
	erasure.Index = int(erasureResult.Get("index").Int())
	checkSumsResult := erasureResult.Get("checksum").Array()
	// Check for scenario where checksum information missing for some parts.
	partsResult := gjson.GetBytes(xlMetaBuf, "parts").Array()
	if len(checkSumsResult) != len(partsResult) {
		return erasure, errCorruptedFormat
	}
	// Parse xlMetaV1.Erasure.Checksum array.
	checkSums := make([]ChecksumInfo, len(checkSumsResult))
	for i, v := range checkSumsResult {
		algorithm := BitrotAlgorithmFromString(v.Get("algorithm").String())
		if !algorithm.Available() {
			logger.LogIf(ctx, errBitrotHashAlgoInvalid)
			return erasure, errBitrotHashAlgoInvalid
		}
		hash, err := hex.DecodeString(v.Get("hash").String())
		if err != nil {
			logger.LogIf(ctx, err)
			return erasure, err
		}
		name := v.Get("name").String()
		if name == "" {
			return erasure, errCorruptedFormat
		}
		checkSums[i] = ChecksumInfo{Name: name, Algorithm: algorithm, Hash: hash}
	}
	erasure.Checksums = checkSums
	return erasure, nil
}

// parseXLParts decodes the `parts` array of `xl.json` into objectPartInfo
// values (number, name, etag, size and actual on-disk size).
func parseXLParts(xlMetaBuf []byte) []objectPartInfo {
	// Parse the XL Parts.
	partsResult := gjson.GetBytes(xlMetaBuf, "parts").Array()
	partInfo := make([]objectPartInfo, len(partsResult))
	for i, p := range partsResult {
		info := objectPartInfo{}
		info.Number = int(p.Get("number").Int())
		info.Name = p.Get("name").String()
		info.ETag = p.Get("etag").String()
		info.Size = p.Get("size").Int()
		info.ActualSize = p.Get("actualSize").Int()
		partInfo[i] = info
	}
	return partInfo
}

// parseXLMetaMap decodes the free-form `meta` object of `xl.json` into a
// string-to-string map.
func parseXLMetaMap(xlMetaBuf []byte) map[string]string {
	// Get xlMetaV1.Meta map.
	metaMapResult := gjson.GetBytes(xlMetaBuf, "meta").Map()
	metaMap := make(map[string]string)
	for key, valResult := range metaMapResult {
		metaMap[key] = valResult.String()
	}
	return metaMap
}

// Constructs XLMetaV1 using the `gjson` lib to retrieve each field,
// avoiding a full encoding/json unmarshal of the buffer.
func xlMetaV1UnmarshalJSON(ctx context.Context, xlMetaBuf []byte) (xlMeta xlMetaV1, e error) {
	// obtain version.
	xlMeta.Version = parseXLVersion(xlMetaBuf)
	// obtain format.
	xlMeta.Format = parseXLFormat(xlMetaBuf)
	// Parse xlMetaV1.Stat .
	stat, err := parseXLStat(xlMetaBuf)
	if err != nil {
		logger.LogIf(ctx, err)
		return xlMeta, err
	}
	xlMeta.Stat = stat
	// parse the xlV1Meta.Erasure fields.
	xlMeta.Erasure, err = parseXLErasureInfo(ctx, xlMetaBuf)
	if err != nil {
		return xlMeta, err
	}
	// Parse the XL Parts.
	xlMeta.Parts = parseXLParts(xlMetaBuf)
	// Get the xlMetaV1.Minio.Release field.
	xlMeta.Minio.Release = parseXLRelease(xlMetaBuf)
	// parse the user-defined metadata map.
	xlMeta.Meta = parseXLMetaMap(xlMetaBuf)
	return xlMeta, nil
}
// read xl.json from the given disk, parse and return xlV1MetaV1.Parts
// together with the user-defined metadata map.
func readXLMetaParts(ctx context.Context, disk StorageAPI, bucket string, object string) ([]objectPartInfo, map[string]string, error) {
	// Reads entire `xl.json`.
	xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
	if err != nil {
		logger.LogIf(ctx, err)
		return nil, nil, err
	}
	// obtain xlMetaV1{}.Parts using `github.com/tidwall/gjson`.
	xlMetaParts := parseXLParts(xlMetaBuf)
	xlMetaMap := parseXLMetaMap(xlMetaBuf)
	return xlMetaParts, xlMetaMap, nil
}

// read xl.json from the given disk and parse xlV1Meta.Stat and xlV1Meta.Meta using gjson.
func readXLMetaStat(ctx context.Context, disk StorageAPI, bucket string, object string) (si statInfo, mp map[string]string, e error) {
	// Reads entire `xl.json`.
	xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
	if err != nil {
		logger.LogIf(ctx, err)
		return si, nil, err
	}
	// obtain version.
	xlVersion := parseXLVersion(xlMetaBuf)
	// obtain format.
	xlFormat := parseXLFormat(xlMetaBuf)
	// Validate if the xl.json we read is sane, return corrupted format.
	if !isXLMetaFormatValid(xlVersion, xlFormat) {
		// For version mismatches and unrecognized format, return corrupted format.
		logger.LogIf(ctx, errCorruptedFormat)
		return si, nil, errCorruptedFormat
	}
	// obtain xlMetaV1{}.Meta using `github.com/tidwall/gjson`.
	xlMetaMap := parseXLMetaMap(xlMetaBuf)
	// obtain xlMetaV1{}.Stat using `github.com/tidwall/gjson`.
	xlStat, err := parseXLStat(xlMetaBuf)
	if err != nil {
		logger.LogIf(ctx, err)
		return si, nil, err
	}
	// Return structured `xl.json`.
	return xlStat, xlMetaMap, nil
}

// readXLMeta reads `xl.json` and returns back XL metadata structure.
// errFileNotFound is returned without logging — presumably because a
// missing xl.json is an expected probe result for callers (verify).
func readXLMeta(ctx context.Context, disk StorageAPI, bucket string, object string) (xlMeta xlMetaV1, err error) {
	// Reads entire `xl.json`.
	xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
	if err != nil {
		if err != errFileNotFound {
			logger.GetReqInfo(ctx).AppendTags("disk", disk.String())
			logger.LogIf(ctx, err)
		}
		return xlMetaV1{}, err
	}
	// obtain xlMetaV1{} using `github.com/tidwall/gjson`.
	xlMeta, err = xlMetaV1UnmarshalJSON(ctx, xlMetaBuf)
	if err != nil {
		logger.GetReqInfo(ctx).AppendTags("disk", disk.String())
		logger.LogIf(ctx, err)
		return xlMetaV1{}, err
	}
	// Return structured `xl.json`.
	return xlMeta, nil
}

// Reads all `xl.json` metadata as a xlMetaV1 slice, one entry per disk.
// Returns an error slice indicating the failed metadata reads; errs[i]
// corresponds to disks[i] (errDiskNotFound for nil disks).
func readAllXLMetadata(ctx context.Context, disks []StorageAPI, bucket, object string) ([]xlMetaV1, []error) {
	errs := make([]error, len(disks))
	metadataArray := make([]xlMetaV1, len(disks))
	var wg = &sync.WaitGroup{}
	// Read `xl.json` parallelly across disks; each goroutine writes only
	// its own index, so no extra synchronization is needed.
	for index, disk := range disks {
		if disk == nil {
			errs[index] = errDiskNotFound
			continue
		}
		wg.Add(1)
		// Read `xl.json` in routine.
		go func(index int, disk StorageAPI) {
			defer wg.Done()
			var err error
			metadataArray[index], err = readXLMeta(ctx, disk, bucket, object)
			if err != nil {
				errs[index] = err
				return
			}
		}(index, disk)
	}
	// Wait for all the routines to finish.
	wg.Wait()
	// Return all the metadata.
	return metadataArray, errs
}
// Return shuffled partsMetadata depending on distribution.
// A nil distribution means "no shuffling": the input is returned as-is.
func shufflePartsMetadata(partsMetadata []xlMetaV1, distribution []int) (shuffledPartsMetadata []xlMetaV1) {
	if distribution == nil {
		return partsMetadata
	}
	shuffledPartsMetadata = make([]xlMetaV1, len(partsMetadata))
	// Distribution entries are 1-based target positions.
	for i := range partsMetadata {
		shuffledPartsMetadata[distribution[i]-1] = partsMetadata[i]
	}
	return shuffledPartsMetadata
}

// shuffleDisks - shuffle input disks slice depending on the
// erasure distribution. Return shuffled slice of disks with
// their expected distribution. A nil distribution returns the
// input unchanged.
func shuffleDisks(disks []StorageAPI, distribution []int) (shuffledDisks []StorageAPI) {
	if distribution == nil {
		return disks
	}
	shuffledDisks = make([]StorageAPI, len(disks))
	for i := range disks {
		shuffledDisks[distribution[i]-1] = disks[i]
	}
	return shuffledDisks
}
// evalDisks - returns a new slice of disks where nil is set if
// the corresponding error in the errs slice is non-nil. A length
// mismatch between the two slices is a programming error: it is
// logged and nil is returned.
func evalDisks(disks []StorageAPI, errs []error) []StorageAPI {
	if len(errs) != len(disks) {
		logger.LogIf(context.Background(), errors.New("unexpected disks/errors slice length"))
		return nil
	}
	newDisks := make([]StorageAPI, len(disks))
	for i, err := range errs {
		if err == nil {
			newDisks[i] = disks[i]
		}
	}
	return newDisks
}
// Errors specifically generated by calculatePartSizeFromIdx function.
var (
	errPartSizeZero  = errors.New("Part size cannot be zero")
	errPartSizeIndex = errors.New("Part index cannot be smaller than 1")
)

// calculatePartSizeFromIdx calculates the part size according to input index.
// Returns an error if totalSize is less than -1, partSize is 0, or
// partIndex is less than 1. totalSize == -1 (unknown size) is accepted
// and yields a part size of 0.
// NOTE(review): when totalSize is an exact multiple of partSize, the last
// part index maps to size 0 (totalSize % partSize) — confirm that callers
// expect a trailing zero-length part in that case.
func calculatePartSizeFromIdx(ctx context.Context, totalSize int64, partSize int64, partIndex int) (currPartSize int64, err error) {
	if totalSize < -1 {
		logger.LogIf(ctx, errInvalidArgument)
		return 0, errInvalidArgument
	}
	if partSize == 0 {
		logger.LogIf(ctx, errPartSizeZero)
		return 0, errPartSizeZero
	}
	if partIndex < 1 {
		logger.LogIf(ctx, errPartSizeIndex)
		return 0, errPartSizeIndex
	}
	if totalSize > 0 {
		// Compute the total count of parts
		partsCount := totalSize/partSize + 1
		// Return the part's size
		switch {
		case int64(partIndex) < partsCount:
			currPartSize = partSize
		case int64(partIndex) == partsCount:
			// Size of last part
			currPartSize = totalSize % partSize
		default:
			// Requested index lies past the last part.
			currPartSize = 0
		}
	}
	return currPartSize, nil
}
| cmd/xl-v1-utils.go | 1 | https://github.com/minio/minio/commit/36990aeafd7b8d1846e5f66292eacde60c36f0fb | [
0.047296080738306046,
0.0019577296916395426,
0.00015562224143650383,
0.00017590048082638532,
0.007240649312734604
] |
{
"id": 2,
"code_window": [
"\tif err = checkDelObjArgs(ctx, bucket, object); err != nil {\n",
"\t\treturn err\n",
"\t}\n",
"\n",
"\tif !xl.isObject(bucket, object) && !xl.isObjectDir(bucket, object) {\n",
"\t\treturn ObjectNotFound{bucket, object}\n",
"\t}\n",
"\n",
"\tvar writeQuorum int\n",
"\tvar isObjectDir = hasSuffix(object, slashSeparator)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/xl-v1-object.go",
"type": "replace",
"edit_start_line_idx": 986
} | // Copyright 2013 Joshua Tacoma. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package uritemplates is a level 4 implementation of RFC 6570 (URI
// Template, http://tools.ietf.org/html/rfc6570).
//
// To use uritemplates, parse a template string and expand it with a value
// map:
//
// template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}")
// values := make(map[string]interface{})
// values["user"] = "jtacoma"
// values["repo"] = "uritemplates"
// expanded, _ := template.ExpandString(values)
// fmt.Printf(expanded)
//
package uritemplates
import (
"bytes"
"errors"
"fmt"
"reflect"
"regexp"
"strconv"
"strings"
)
var (
	// unreserved matches every byte that must be percent-encoded when
	// reserved URI characters are NOT allowed through.
	unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]")
	// reserved matches every byte that must be percent-encoded when
	// reserved URI characters ARE allowed through.
	reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]")
	// validname matches legal variable names inside a template expression.
	validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$")
	// hex holds the uppercase digits used by pctEncode.
	hex = []byte("0123456789ABCDEF")
)

// pctEncode percent-encodes every byte of src as "%XX" (uppercase hex).
func pctEncode(src []byte) []byte {
	dst := make([]byte, 0, len(src)*3)
	for _, b := range src {
		dst = append(dst, '%', hex[b>>4], hex[b&0x0f])
	}
	return dst
}

// escape percent-encodes s; when allowReserved is true, reserved URI
// characters (:/?#[]@!$&'()*+,;=) pass through unescaped.
func escape(s string, allowReserved bool) string {
	pattern := unreserved
	if allowReserved {
		pattern = reserved
	}
	return string(pattern.ReplaceAllFunc([]byte(s), pctEncode))
}
// A UriTemplate is a parsed representation of a URI template.
type UriTemplate struct {
	raw   string         // the original template string, kept for reference
	parts []templatePart // alternating literal and expression parts
}

// Parse parses a URI template string into a UriTemplate object.
// The template alternates literal text with `{expression}` sections;
// the resulting parts slice holds literals at even indices and parsed
// expressions at odd indices. Returns a nil template and an error for
// unbalanced or malformed braces.
func Parse(rawtemplate string) (template *UriTemplate, err error) {
	template = new(UriTemplate)
	template.raw = rawtemplate
	split := strings.Split(rawtemplate, "{")
	template.parts = make([]templatePart, len(split)*2-1)
	for i, s := range split {
		if i == 0 {
			// Text before the first '{' must not contain a stray '}'.
			if strings.Contains(s, "}") {
				err = errors.New("unexpected }")
				break
			}
			template.parts[i].raw = s
		} else {
			// Each remaining chunk must look like "<expression>}<literal>".
			subsplit := strings.Split(s, "}")
			if len(subsplit) != 2 {
				err = errors.New("malformed template")
				break
			}
			expression := subsplit[0]
			template.parts[i*2-1], err = parseExpression(expression)
			if err != nil {
				break
			}
			template.parts[i*2].raw = subsplit[1]
		}
	}
	if err != nil {
		template = nil
	}
	return template, err
}

// templatePart is either a raw literal (raw non-empty) or a parsed
// expression carrying its operator-dependent expansion settings.
type templatePart struct {
	raw           string         // literal text; empty for expression parts
	terms         []templateTerm // variable terms of an expression part
	first         string         // prefix written before the first defined term
	sep           string         // separator written between defined terms
	named         bool           // whether expansions are name=value pairs
	ifemp         string         // written after the name when a value is empty
	allowReserved bool           // whether reserved URI characters pass through unescaped
}

// templateTerm is a single variable reference inside an expression.
type templateTerm struct {
	name     string // variable name
	explode  bool   // '*' modifier: expand composite values element-wise
	truncate int    // ':n' modifier: maximum number of characters, 0 = unlimited
}
// parseExpression parses the body of a `{...}` expression. The leading
// operator character (one of + . / ; ? & #) selects the expansion style:
// prefix, separator, named form, empty-value marker and whether reserved
// characters pass through. The remainder is a comma-separated term list.
func parseExpression(expression string) (result templatePart, err error) {
	result.sep = ","
	switch expression[0] {
	case '+':
		result.allowReserved = true
		expression = expression[1:]
	case '#':
		result.first = "#"
		result.allowReserved = true
		expression = expression[1:]
	case '.', '/':
		result.first = string(expression[0])
		result.sep = result.first
		expression = expression[1:]
	case ';':
		result.first = ";"
		result.sep = ";"
		result.named = true
		expression = expression[1:]
	case '?', '&':
		result.first = string(expression[0])
		result.sep = "&"
		result.named = true
		result.ifemp = "="
		expression = expression[1:]
	}
	rawterms := strings.Split(expression, ",")
	result.terms = make([]templateTerm, len(rawterms))
	for i, raw := range rawterms {
		if result.terms[i], err = parseTerm(raw); err != nil {
			break
		}
	}
	return result, err
}
// parseTerm parses a single variable specifier inside an expression,
// e.g. "var", "var*" (explode modifier) or "var:3" (prefix/truncate
// modifier). The two modifiers are mutually exclusive, and the name must
// match validname. Note: a later validation error overwrites an earlier
// ParseInt error, preserving the original precedence.
func parseTerm(term string) (result templateTerm, err error) {
	if strings.HasSuffix(term, "*") {
		result.explode = true
		term = term[:len(term)-1]
	}
	split := strings.Split(term, ":")
	switch len(split) {
	case 1:
		result.name = term
	case 2:
		result.name = split[0]
		var parsed int64
		parsed, err = strconv.ParseInt(split[1], 10, 0)
		result.truncate = int(parsed)
	default:
		err = errors.New("multiple colons in same term")
	}
	if !validname.MatchString(result.name) {
		err = errors.New("not a valid name: " + result.name)
	}
	if result.explode && result.truncate > 0 {
		// Fixed typo in the error message ("modifers" -> "modifiers").
		err = errors.New("both explode and prefix modifiers on same term")
	}
	return result, err
}
// Expand expands a URI template with a set of values to produce a string.
// The value may be a map[string]interface{}, a struct, or a pointer to a
// struct; structs are converted to maps from their fields (honoring the
// "uri" tag) before expansion.
func (self *UriTemplate) Expand(value interface{}) (string, error) {
	values, ismap := value.(map[string]interface{})
	if !ismap {
		// Fall back to reflection for structs and struct pointers.
		// (Rewritten from an if/else with a shadowed `ismap` to an
		// early-return guard; behavior is unchanged.)
		m, ok := struct2map(value)
		if !ok {
			return "", errors.New("expected map[string]interface{}, struct, or pointer to struct.")
		}
		return self.Expand(m)
	}
	var buf bytes.Buffer
	for _, p := range self.parts {
		if err := p.expand(&buf, values); err != nil {
			return "", err
		}
	}
	return buf.String(), nil
}
// expand writes the expansion of this part into buf using the supplied
// values. Literal parts (raw non-empty) are copied through verbatim.
// Expression parts write the `first` prefix, then each defined term
// separated by `sep`; if no term produced any output, the whole
// expansion (including the prefix) is rolled back.
func (self *templatePart) expand(buf *bytes.Buffer, values map[string]interface{}) error {
	if len(self.raw) > 0 {
		buf.WriteString(self.raw)
		return nil
	}
	// Remember the buffer length so we can roll back if nothing expands.
	var zeroLen = buf.Len()
	buf.WriteString(self.first)
	var firstLen = buf.Len()
	for _, term := range self.terms {
		value, exists := values[term.name]
		if !exists {
			// Undefined variables are simply skipped.
			continue
		}
		if buf.Len() != firstLen {
			buf.WriteString(self.sep)
		}
		switch v := value.(type) {
		case string:
			self.expandString(buf, term, v)
		case []interface{}:
			self.expandArray(buf, term, v)
		case map[string]interface{}:
			// Prefix (':n') modifiers are not defined for composite values.
			if term.truncate > 0 {
				return errors.New("cannot truncate a map expansion")
			}
			self.expandMap(buf, term, v)
		default:
			// Fall back to reflection: structs expand like maps, anything
			// else is formatted with %v and expanded as a string.
			if m, ismap := struct2map(value); ismap {
				if term.truncate > 0 {
					return errors.New("cannot truncate a map expansion")
				}
				self.expandMap(buf, term, m)
			} else {
				str := fmt.Sprintf("%v", value)
				self.expandString(buf, term, str)
			}
		}
	}
	if buf.Len() == firstLen {
		// No term was defined: drop the prefix written above.
		original := buf.Bytes()[:zeroLen]
		buf.Reset()
		buf.Write(original)
	}
	return nil
}
// expandName writes the variable name followed by '=' (or by the
// if-empty marker when the value is empty) for named expansion styles;
// unnamed styles write nothing.
func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) {
	if !self.named {
		return
	}
	buf.WriteString(name)
	if empty {
		buf.WriteString(self.ifemp)
		return
	}
	buf.WriteString("=")
}
// expandString writes a single string value, applying the ':n' prefix
// modifier and percent-encoding before emitting it.
func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) {
	if t.truncate > 0 && len(s) > t.truncate {
		s = s[:t.truncate]
	}
	self.expandName(buf, t.name, s == "")
	buf.WriteString(escape(s, self.allowReserved))
}
// expandArray expands a list value for term t. With the explode ('*')
// modifier each element is separated by the part separator (and, for
// named styles, each element repeats the name); otherwise a single name
// is written followed by comma-joined elements.
func (self *templatePart) expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) {
	if len(a) == 0 {
		return
	} else if !t.explode {
		self.expandName(buf, t.name, false)
	}
	for i, value := range a {
		if t.explode && i > 0 {
			buf.WriteString(self.sep)
		} else if i > 0 {
			buf.WriteString(",")
		}
		var s string
		switch v := value.(type) {
		case string:
			s = v
		default:
			// Non-string elements are formatted with %v.
			s = fmt.Sprintf("%v", v)
		}
		// Apply the ':n' prefix modifier per element.
		if len(s) > t.truncate && t.truncate > 0 {
			s = s[:t.truncate]
		}
		if self.named && t.explode {
			self.expandName(buf, t.name, len(s) == 0)
		}
		buf.WriteString(escape(s, self.allowReserved))
	}
}

// expandMap expands a map value for term t. With the explode modifier
// entries are written as key=value separated by the part separator;
// otherwise a single name is followed by comma-separated key,value
// pairs. Go map iteration order is randomized, so the order of entries
// in the output is unspecified.
func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) {
	if len(m) == 0 {
		return
	}
	if !t.explode {
		self.expandName(buf, t.name, len(m) == 0)
	}
	// Track the starting length to decide when a separator is needed.
	var firstLen = buf.Len()
	for k, value := range m {
		if firstLen != buf.Len() {
			if t.explode {
				buf.WriteString(self.sep)
			} else {
				buf.WriteString(",")
			}
		}
		var s string
		switch v := value.(type) {
		case string:
			s = v
		default:
			s = fmt.Sprintf("%v", v)
		}
		if t.explode {
			buf.WriteString(escape(k, self.allowReserved))
			buf.WriteRune('=')
			buf.WriteString(escape(s, self.allowReserved))
		} else {
			buf.WriteString(escape(k, self.allowReserved))
			buf.WriteRune(',')
			buf.WriteString(escape(s, self.allowReserved))
		}
	}
}
// struct2map converts a struct (or pointer to struct) into a
// map[string]interface{}. Keys come from the `uri` field tag when the
// tag contains a colon, otherwise from the trimmed raw tag text, falling
// back to the field name. The second return value reports whether the
// conversion applied (false for non-struct inputs).
func struct2map(v interface{}) (map[string]interface{}, bool) {
	rv := reflect.ValueOf(v)
	switch rv.Type().Kind() {
	case reflect.Ptr:
		// Dereference and retry on the pointed-to value.
		return struct2map(rv.Elem().Interface())
	case reflect.Struct:
		out := make(map[string]interface{})
		rt := rv.Type()
		for i := 0; i < rv.NumField(); i++ {
			field := rt.Field(i)
			name := strings.TrimSpace(string(field.Tag))
			if strings.Contains(string(field.Tag), ":") {
				name = field.Tag.Get("uri")
			}
			if name == "" {
				name = field.Name
			}
			out[name] = rv.Field(i).Interface()
		}
		return out, true
	}
	return nil, false
}
| vendor/gopkg.in/olivere/elastic.v5/uritemplates/uritemplates.go | 0 | https://github.com/minio/minio/commit/36990aeafd7b8d1846e5f66292eacde60c36f0fb | [
0.0003407803305890411,
0.00017815057071857154,
0.00015811821504030377,
0.00017397697956766933,
0.000028380021831253543
] |
{
"id": 2,
"code_window": [
"\tif err = checkDelObjArgs(ctx, bucket, object); err != nil {\n",
"\t\treturn err\n",
"\t}\n",
"\n",
"\tif !xl.isObject(bucket, object) && !xl.isObjectDir(bucket, object) {\n",
"\t\treturn ObjectNotFound{bucket, object}\n",
"\t}\n",
"\n",
"\tvar writeQuorum int\n",
"\tvar isObjectDir = hasSuffix(object, slashSeparator)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/xl-v1-object.go",
"type": "replace",
"edit_start_line_idx": 986
} | //+build !noasm !appengine
// SHA256 implementation for AVX2
//
// Minio Cloud Storage, (C) 2016 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//
// This code is based on an Intel White-Paper:
// "Fast SHA-256 Implementations on Intel Architecture Processors"
//
// together with the reference implementation from the following authors:
// James Guilford <[email protected]>
// Kirk Yap <[email protected]>
// Tim Chen <[email protected]>
//
// For Golang it has been converted to Plan 9 assembly with the help of
// github.com/minio/asm2plan9s to assemble Intel instructions to their Plan9
// equivalents
//
#include "textflag.h"
DATA K256<>+0x000(SB)/8, $0x71374491428a2f98
DATA K256<>+0x008(SB)/8, $0xe9b5dba5b5c0fbcf
DATA K256<>+0x010(SB)/8, $0x71374491428a2f98
DATA K256<>+0x018(SB)/8, $0xe9b5dba5b5c0fbcf
DATA K256<>+0x020(SB)/8, $0x59f111f13956c25b
DATA K256<>+0x028(SB)/8, $0xab1c5ed5923f82a4
DATA K256<>+0x030(SB)/8, $0x59f111f13956c25b
DATA K256<>+0x038(SB)/8, $0xab1c5ed5923f82a4
DATA K256<>+0x040(SB)/8, $0x12835b01d807aa98
DATA K256<>+0x048(SB)/8, $0x550c7dc3243185be
DATA K256<>+0x050(SB)/8, $0x12835b01d807aa98
DATA K256<>+0x058(SB)/8, $0x550c7dc3243185be
DATA K256<>+0x060(SB)/8, $0x80deb1fe72be5d74
DATA K256<>+0x068(SB)/8, $0xc19bf1749bdc06a7
DATA K256<>+0x070(SB)/8, $0x80deb1fe72be5d74
DATA K256<>+0x078(SB)/8, $0xc19bf1749bdc06a7
DATA K256<>+0x080(SB)/8, $0xefbe4786e49b69c1
DATA K256<>+0x088(SB)/8, $0x240ca1cc0fc19dc6
DATA K256<>+0x090(SB)/8, $0xefbe4786e49b69c1
DATA K256<>+0x098(SB)/8, $0x240ca1cc0fc19dc6
DATA K256<>+0x0a0(SB)/8, $0x4a7484aa2de92c6f
DATA K256<>+0x0a8(SB)/8, $0x76f988da5cb0a9dc
DATA K256<>+0x0b0(SB)/8, $0x4a7484aa2de92c6f
DATA K256<>+0x0b8(SB)/8, $0x76f988da5cb0a9dc
DATA K256<>+0x0c0(SB)/8, $0xa831c66d983e5152
DATA K256<>+0x0c8(SB)/8, $0xbf597fc7b00327c8
DATA K256<>+0x0d0(SB)/8, $0xa831c66d983e5152
DATA K256<>+0x0d8(SB)/8, $0xbf597fc7b00327c8
DATA K256<>+0x0e0(SB)/8, $0xd5a79147c6e00bf3
DATA K256<>+0x0e8(SB)/8, $0x1429296706ca6351
DATA K256<>+0x0f0(SB)/8, $0xd5a79147c6e00bf3
DATA K256<>+0x0f8(SB)/8, $0x1429296706ca6351
DATA K256<>+0x100(SB)/8, $0x2e1b213827b70a85
DATA K256<>+0x108(SB)/8, $0x53380d134d2c6dfc
DATA K256<>+0x110(SB)/8, $0x2e1b213827b70a85
DATA K256<>+0x118(SB)/8, $0x53380d134d2c6dfc
DATA K256<>+0x120(SB)/8, $0x766a0abb650a7354
DATA K256<>+0x128(SB)/8, $0x92722c8581c2c92e
DATA K256<>+0x130(SB)/8, $0x766a0abb650a7354
DATA K256<>+0x138(SB)/8, $0x92722c8581c2c92e
DATA K256<>+0x140(SB)/8, $0xa81a664ba2bfe8a1
DATA K256<>+0x148(SB)/8, $0xc76c51a3c24b8b70
DATA K256<>+0x150(SB)/8, $0xa81a664ba2bfe8a1
DATA K256<>+0x158(SB)/8, $0xc76c51a3c24b8b70
DATA K256<>+0x160(SB)/8, $0xd6990624d192e819
DATA K256<>+0x168(SB)/8, $0x106aa070f40e3585
DATA K256<>+0x170(SB)/8, $0xd6990624d192e819
DATA K256<>+0x178(SB)/8, $0x106aa070f40e3585
DATA K256<>+0x180(SB)/8, $0x1e376c0819a4c116
DATA K256<>+0x188(SB)/8, $0x34b0bcb52748774c
DATA K256<>+0x190(SB)/8, $0x1e376c0819a4c116
DATA K256<>+0x198(SB)/8, $0x34b0bcb52748774c
DATA K256<>+0x1a0(SB)/8, $0x4ed8aa4a391c0cb3
DATA K256<>+0x1a8(SB)/8, $0x682e6ff35b9cca4f
DATA K256<>+0x1b0(SB)/8, $0x4ed8aa4a391c0cb3
DATA K256<>+0x1b8(SB)/8, $0x682e6ff35b9cca4f
DATA K256<>+0x1c0(SB)/8, $0x78a5636f748f82ee
DATA K256<>+0x1c8(SB)/8, $0x8cc7020884c87814
DATA K256<>+0x1d0(SB)/8, $0x78a5636f748f82ee
DATA K256<>+0x1d8(SB)/8, $0x8cc7020884c87814
DATA K256<>+0x1e0(SB)/8, $0xa4506ceb90befffa
DATA K256<>+0x1e8(SB)/8, $0xc67178f2bef9a3f7
DATA K256<>+0x1f0(SB)/8, $0xa4506ceb90befffa
DATA K256<>+0x1f8(SB)/8, $0xc67178f2bef9a3f7
DATA K256<>+0x200(SB)/8, $0x0405060700010203
DATA K256<>+0x208(SB)/8, $0x0c0d0e0f08090a0b
DATA K256<>+0x210(SB)/8, $0x0405060700010203
DATA K256<>+0x218(SB)/8, $0x0c0d0e0f08090a0b
DATA K256<>+0x220(SB)/8, $0x0b0a090803020100
DATA K256<>+0x228(SB)/8, $0xffffffffffffffff
DATA K256<>+0x230(SB)/8, $0x0b0a090803020100
DATA K256<>+0x238(SB)/8, $0xffffffffffffffff
DATA K256<>+0x240(SB)/8, $0xffffffffffffffff
DATA K256<>+0x248(SB)/8, $0x0b0a090803020100
DATA K256<>+0x250(SB)/8, $0xffffffffffffffff
DATA K256<>+0x258(SB)/8, $0x0b0a090803020100
GLOBL K256<>(SB), 8, $608
// func blockAvx2(h []uint32, message []uint8)
TEXT ·blockAvx2(SB), 7, $0
MOVQ ctx+0(FP), DI // DI: &h
MOVQ inp+24(FP), SI // SI: &message
MOVQ inplength+32(FP), DX // len(message)
ADDQ SI, DX // end pointer of input
MOVQ SP, R11 // copy stack pointer
SUBQ $0x220, SP // sp -= 0x220
ANDQ $0xfffffffffffffc00, SP // align stack frame
ADDQ $0x1c0, SP
MOVQ DI, 0x40(SP) // save ctx
MOVQ SI, 0x48(SP) // save input
MOVQ DX, 0x50(SP) // save end pointer
MOVQ R11, 0x58(SP) // save copy of stack pointer
WORD $0xf8c5; BYTE $0x77 // vzeroupper
ADDQ $0x40, SI // input++
MOVL (DI), AX
MOVQ SI, R12 // borrow $T1
MOVL 4(DI), BX
CMPQ SI, DX // $_end
MOVL 8(DI), CX
LONG $0xe4440f4c // cmove r12,rsp /* next block or random data */
MOVL 12(DI), DX
MOVL 16(DI), R8
MOVL 20(DI), R9
MOVL 24(DI), R10
MOVL 28(DI), R11
LEAQ K256<>(SB), BP
LONG $0x856f7dc5; LONG $0x00000220 // VMOVDQA YMM8, 0x220[rbp] /* vmovdqa ymm8,YMMWORD PTR [rip+0x220] */
LONG $0x8d6f7dc5; LONG $0x00000240 // VMOVDQA YMM9, 0x240[rbp] /* vmovdqa ymm9,YMMWORD PTR [rip+0x240] */
LONG $0x956f7dc5; LONG $0x00000200 // VMOVDQA YMM10, 0x200[rbp] /* vmovdqa ymm7,YMMWORD PTR [rip+0x200] */
loop0:
LONG $0x6f7dc1c4; BYTE $0xfa // VMOVDQA YMM7, YMM10
// Load first 16 dwords from two blocks
MOVOU -64(SI), X0 // vmovdqu xmm0,XMMWORD PTR [rsi-0x40]
MOVOU -48(SI), X1 // vmovdqu xmm1,XMMWORD PTR [rsi-0x30]
MOVOU -32(SI), X2 // vmovdqu xmm2,XMMWORD PTR [rsi-0x20]
MOVOU -16(SI), X3 // vmovdqu xmm3,XMMWORD PTR [rsi-0x10]
// Byte swap data and transpose data into high/low
LONG $0x387dc3c4; WORD $0x2404; BYTE $0x01 // vinserti128 ymm0,ymm0,[r12],0x1
LONG $0x3875c3c4; LONG $0x0110244c // vinserti128 ymm1,ymm1,0x10[r12],0x1
LONG $0x007de2c4; BYTE $0xc7 // vpshufb ymm0,ymm0,ymm7
LONG $0x386dc3c4; LONG $0x01202454 // vinserti128 ymm2,ymm2,0x20[r12],0x1
LONG $0x0075e2c4; BYTE $0xcf // vpshufb ymm1,ymm1,ymm7
LONG $0x3865c3c4; LONG $0x0130245c // vinserti128 ymm3,ymm3,0x30[r12],0x1
LEAQ K256<>(SB), BP
LONG $0x006de2c4; BYTE $0xd7 // vpshufb ymm2,ymm2,ymm7
LONG $0x65fefdc5; BYTE $0x00 // vpaddd ymm4,ymm0,[rbp]
LONG $0x0065e2c4; BYTE $0xdf // vpshufb ymm3,ymm3,ymm7
LONG $0x6dfef5c5; BYTE $0x20 // vpaddd ymm5,ymm1,0x20[rbp]
LONG $0x75feedc5; BYTE $0x40 // vpaddd ymm6,ymm2,0x40[rbp]
LONG $0x7dfee5c5; BYTE $0x60 // vpaddd ymm7,ymm3,0x60[rbp]
LONG $0x247ffdc5; BYTE $0x24 // vmovdqa [rsp],ymm4
XORQ R14, R14
LONG $0x6c7ffdc5; WORD $0x2024 // vmovdqa [rsp+0x20],ymm5
ADDQ $-0x40, SP
MOVQ BX, DI
LONG $0x347ffdc5; BYTE $0x24 // vmovdqa [rsp],ymm6
XORQ CX, DI // magic
LONG $0x7c7ffdc5; WORD $0x2024 // vmovdqa [rsp+0x20],ymm7
MOVQ R9, R12
ADDQ $0x80,BP
loop1:
// Schedule 48 input dwords, by doing 3 rounds of 12 each
// Note: SIMD instructions are interleaved with the SHA calculations
ADDQ $-0x40, SP
LONG $0x0f75e3c4; WORD $0x04e0 // vpalignr ymm4,ymm1,ymm0,0x4
// ROUND(AX, BX, CX, DX, R8, R9, R10, R11, R12, R13, R14, R15, DI, SP, 0x80)
LONG $0x249c0344; LONG $0x00000080 // add r11d,[rsp+0x80]
WORD $0x2145; BYTE $0xc4 // and r12d,r8d
LONG $0xf07b43c4; WORD $0x19e8 // rorx r13d,r8d,0x19
LONG $0x0f65e3c4; WORD $0x04fa // vpalignr ymm7,ymm3,ymm2,0x4
LONG $0xf07b43c4; WORD $0x0bf8 // rorx r15d,r8d,0xb
LONG $0x30048d42 // lea eax,[rax+r14*1]
LONG $0x231c8d47 // lea r11d,[r11+r12*1]
LONG $0xd472cdc5; BYTE $0x07 // vpsrld ymm6,ymm4,0x7
LONG $0xf23842c4; BYTE $0xe2 // andn r12d,r8d,r10d
WORD $0x3145; BYTE $0xfd // xor r13d,r15d
LONG $0xf07b43c4; WORD $0x06f0 // rorx r14d,r8d,0x6
LONG $0xc7fefdc5 // vpaddd ymm0,ymm0,ymm7
LONG $0x231c8d47 // lea r11d,[r11+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8941; BYTE $0xc7 // mov r15d,eax
LONG $0xd472c5c5; BYTE $0x03 // vpsrld ymm7,ymm4,0x3
LONG $0xf07b63c4; WORD $0x16e0 // rorx r12d,eax,0x16
LONG $0x2b1c8d47 // lea r11d,[r11+r13*1]
WORD $0x3141; BYTE $0xdf // xor r15d,ebx
LONG $0xf472d5c5; BYTE $0x0e // vpslld ymm5,ymm4,0xe
LONG $0xf07b63c4; WORD $0x0df0 // rorx r14d,eax,0xd
LONG $0xf07b63c4; WORD $0x02e8 // rorx r13d,eax,0x2
LONG $0x1a148d42 // lea edx,[rdx+r11*1]
LONG $0xe6efc5c5 // vpxor ymm4,ymm7,ymm6
WORD $0x2144; BYTE $0xff // and edi,r15d
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0xdf31 // xor edi,ebx
LONG $0xfb70fdc5; BYTE $0xfa // vpshufd ymm7,ymm3,0xfa
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x3b1c8d45 // lea r11d,[r11+rdi*1]
WORD $0x8945; BYTE $0xc4 // mov r12d,r8d
LONG $0xd672cdc5; BYTE $0x0b // vpsrld ymm6,ymm6,0xb
// ROUND(R11, AX, BX, CX, DX, R8, R9, R10, R12, R13, R14, DI, R15, SP, 0x84)
LONG $0x24940344; LONG $0x00000084 // add r10d,[rsp+0x84]
WORD $0x2141; BYTE $0xd4 // and r12d,edx
LONG $0xf07b63c4; WORD $0x19ea // rorx r13d,edx,0x19
LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5
LONG $0xf07be3c4; WORD $0x0bfa // rorx edi,edx,0xb
LONG $0x331c8d47 // lea r11d,[r11+r14*1]
LONG $0x22148d47 // lea r10d,[r10+r12*1]
LONG $0xf572d5c5; BYTE $0x0b // vpslld ymm5,ymm5,0xb
LONG $0xf26842c4; BYTE $0xe1 // andn r12d,edx,r9d
WORD $0x3141; BYTE $0xfd // xor r13d,edi
LONG $0xf07b63c4; WORD $0x06f2 // rorx r14d,edx,0x6
LONG $0xe6efddc5 // vpxor ymm4,ymm4,ymm6
LONG $0x22148d47 // lea r10d,[r10+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8944; BYTE $0xdf // mov edi,r11d
LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa
LONG $0xf07b43c4; WORD $0x16e3 // rorx r12d,r11d,0x16
LONG $0x2a148d47 // lea r10d,[r10+r13*1]
WORD $0xc731 // xor edi,eax
LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5
LONG $0xf07b43c4; WORD $0x0df3 // rorx r14d,r11d,0xd
LONG $0xf07b43c4; WORD $0x02eb // rorx r13d,r11d,0x2
LONG $0x110c8d42 // lea ecx,[rcx+r10*1]
LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11
WORD $0x2141; BYTE $0xff // and r15d,edi
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3141; BYTE $0xc7 // xor r15d,eax
LONG $0xc4fefdc5 // vpaddd ymm0,ymm0,ymm4
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x3a148d47 // lea r10d,[r10+r15*1]
WORD $0x8941; BYTE $0xd4 // mov r12d,edx
LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7
// ROUND(R10, R11, AX, BX, CX, DX, R8, R9, R12, R13, R14, R15, DI, SP, 0x88)
LONG $0x248c0344; LONG $0x00000088 // add r9d,[rsp+0x88]
WORD $0x2141; BYTE $0xcc // and r12d,ecx
LONG $0xf07b63c4; WORD $0x19e9 // rorx r13d,ecx,0x19
LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2
LONG $0xf07b63c4; WORD $0x0bf9 // rorx r15d,ecx,0xb
LONG $0x32148d47 // lea r10d,[r10+r14*1]
LONG $0x210c8d47 // lea r9d,[r9+r12*1]
LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7
LONG $0xf27042c4; BYTE $0xe0 // andn r12d,ecx,r8d
WORD $0x3145; BYTE $0xfd // xor r13d,r15d
LONG $0xf07b63c4; WORD $0x06f1 // rorx r14d,ecx,0x6
LONG $0x004dc2c4; BYTE $0xf0 // vpshufb ymm6,ymm6,ymm8
LONG $0x210c8d47 // lea r9d,[r9+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8945; BYTE $0xd7 // mov r15d,r10d
LONG $0xc6fefdc5 // vpaddd ymm0,ymm0,ymm6
LONG $0xf07b43c4; WORD $0x16e2 // rorx r12d,r10d,0x16
LONG $0x290c8d47 // lea r9d,[r9+r13*1]
WORD $0x3145; BYTE $0xdf // xor r15d,r11d
LONG $0xf870fdc5; BYTE $0x50 // vpshufd ymm7,ymm0,0x50
LONG $0xf07b43c4; WORD $0x0df2 // rorx r14d,r10d,0xd
LONG $0xf07b43c4; WORD $0x02ea // rorx r13d,r10d,0x2
LONG $0x0b1c8d42 // lea ebx,[rbx+r9*1]
LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa
WORD $0x2144; BYTE $0xff // and edi,r15d
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3144; BYTE $0xdf // xor edi,r11d
LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x390c8d45 // lea r9d,[r9+rdi*1]
WORD $0x8941; BYTE $0xcc // mov r12d,ecx
LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7
// ROUND(R9, R10, R11, AX, BX, CX, DX, R8, R12, R13, R14, DI, R15, SP, 0x8c)
LONG $0x24840344; LONG $0x0000008c // add r8d,[rsp+0x8c]
WORD $0x2141; BYTE $0xdc // and r12d,ebx
LONG $0xf07b63c4; WORD $0x19eb // rorx r13d,ebx,0x19
LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2
LONG $0xf07be3c4; WORD $0x0bfb // rorx edi,ebx,0xb
LONG $0x310c8d47 // lea r9d,[r9+r14*1]
LONG $0x20048d47 // lea r8d,[r8+r12*1]
LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7
LONG $0xf26062c4; BYTE $0xe2 // andn r12d,ebx,edx
WORD $0x3141; BYTE $0xfd // xor r13d,edi
LONG $0xf07b63c4; WORD $0x06f3 // rorx r14d,ebx,0x6
LONG $0x004dc2c4; BYTE $0xf1 // vpshufb ymm6,ymm6,ymm9
LONG $0x20048d47 // lea r8d,[r8+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8944; BYTE $0xcf // mov edi,r9d
LONG $0xc6fefdc5 // vpaddd ymm0,ymm0,ymm6
LONG $0xf07b43c4; WORD $0x16e1 // rorx r12d,r9d,0x16
LONG $0x28048d47 // lea r8d,[r8+r13*1]
WORD $0x3144; BYTE $0xd7 // xor edi,r10d
LONG $0x75fefdc5; BYTE $0x00 // vpaddd ymm6,ymm0,[rbp+0x0]
LONG $0xf07b43c4; WORD $0x0df1 // rorx r14d,r9d,0xd
LONG $0xf07b43c4; WORD $0x02e9 // rorx r13d,r9d,0x2
LONG $0x00048d42 // lea eax,[rax+r8*1]
WORD $0x2141; BYTE $0xff // and r15d,edi
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3145; BYTE $0xd7 // xor r15d,r10d
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x38048d47 // lea r8d,[r8+r15*1]
WORD $0x8941; BYTE $0xdc // mov r12d,ebx
LONG $0x347ffdc5; BYTE $0x24 // vmovdqa [rsp],ymm6
LONG $0x0f6de3c4; WORD $0x04e1 // vpalignr ymm4,ymm2,ymm1,0x4
// ROUND(R8, R9, R10, R11, AX, BX, CX, DX, R12, R13, R14, R15, DI, SP, 0xa0)
LONG $0xa0249403; WORD $0x0000; BYTE $0x00 // add edx,[rsp+0xa0]
WORD $0x2141; BYTE $0xc4 // and r12d,eax
LONG $0xf07b63c4; WORD $0x19e8 // rorx r13d,eax,0x19
LONG $0x0f7de3c4; WORD $0x04fb // vpalignr ymm7,ymm0,ymm3,0x4
LONG $0xf07b63c4; WORD $0x0bf8 // rorx r15d,eax,0xb
LONG $0x30048d47 // lea r8d,[r8+r14*1]
LONG $0x22148d42 // lea edx,[rdx+r12*1]
LONG $0xd472cdc5; BYTE $0x07 // vpsrld ymm6,ymm4,0x7
LONG $0xf27862c4; BYTE $0xe1 // andn r12d,eax,ecx
WORD $0x3145; BYTE $0xfd // xor r13d,r15d
LONG $0xf07b63c4; WORD $0x06f0 // rorx r14d,eax,0x6
LONG $0xcffef5c5 // vpaddd ymm1,ymm1,ymm7
LONG $0x22148d42 // lea edx,[rdx+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8945; BYTE $0xc7 // mov r15d,r8d
LONG $0xd472c5c5; BYTE $0x03 // vpsrld ymm7,ymm4,0x3
LONG $0xf07b43c4; WORD $0x16e0 // rorx r12d,r8d,0x16
LONG $0x2a148d42 // lea edx,[rdx+r13*1]
WORD $0x3145; BYTE $0xcf // xor r15d,r9d
LONG $0xf472d5c5; BYTE $0x0e // vpslld ymm5,ymm4,0xe
LONG $0xf07b43c4; WORD $0x0df0 // rorx r14d,r8d,0xd
LONG $0xf07b43c4; WORD $0x02e8 // rorx r13d,r8d,0x2
LONG $0x131c8d45 // lea r11d,[r11+rdx*1]
LONG $0xe6efc5c5 // vpxor ymm4,ymm7,ymm6
WORD $0x2144; BYTE $0xff // and edi,r15d
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3144; BYTE $0xcf // xor edi,r9d
LONG $0xf870fdc5; BYTE $0xfa // vpshufd ymm7,ymm0,0xfa
WORD $0x3145; BYTE $0xee // xor r14d,r13d
WORD $0x148d; BYTE $0x3a // lea edx,[rdx+rdi*1]
WORD $0x8941; BYTE $0xc4 // mov r12d,eax
LONG $0xd672cdc5; BYTE $0x0b // vpsrld ymm6,ymm6,0xb
// ROUND(DX, R8, R9, R10, R11, AX, BX, CX, R12, R13, R14, DI, R15, SP, 0xa4)
LONG $0xa4248c03; WORD $0x0000; BYTE $0x00 // add ecx,[rsp+0xa4]
WORD $0x2145; BYTE $0xdc // and r12d,r11d
LONG $0xf07b43c4; WORD $0x19eb // rorx r13d,r11d,0x19
LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5
LONG $0xf07bc3c4; WORD $0x0bfb // rorx edi,r11d,0xb
LONG $0x32148d42 // lea edx,[rdx+r14*1]
LONG $0x210c8d42 // lea ecx,[rcx+r12*1]
LONG $0xf572d5c5; BYTE $0x0b // vpslld ymm5,ymm5,0xb
LONG $0xf22062c4; BYTE $0xe3 // andn r12d,r11d,ebx
WORD $0x3141; BYTE $0xfd // xor r13d,edi
LONG $0xf07b43c4; WORD $0x06f3 // rorx r14d,r11d,0x6
LONG $0xe6efddc5 // vpxor ymm4,ymm4,ymm6
LONG $0x210c8d42 // lea ecx,[rcx+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0xd789 // mov edi,edx
LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa
LONG $0xf07b63c4; WORD $0x16e2 // rorx r12d,edx,0x16
LONG $0x290c8d42 // lea ecx,[rcx+r13*1]
WORD $0x3144; BYTE $0xc7 // xor edi,r8d
LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5
LONG $0xf07b63c4; WORD $0x0df2 // rorx r14d,edx,0xd
LONG $0xf07b63c4; WORD $0x02ea // rorx r13d,edx,0x2
LONG $0x0a148d45 // lea r10d,[r10+rcx*1]
LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11
WORD $0x2141; BYTE $0xff // and r15d,edi
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3145; BYTE $0xc7 // xor r15d,r8d
LONG $0xccfef5c5 // vpaddd ymm1,ymm1,ymm4
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x390c8d42 // lea ecx,[rcx+r15*1]
WORD $0x8945; BYTE $0xdc // mov r12d,r11d
LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7
// ROUND(CX, DX, R8, R9, R10, R11, AX, BX, R12, R13, R14, R15, DI, SP, 0xa8)
LONG $0xa8249c03; WORD $0x0000; BYTE $0x00 // add ebx,[rsp+0xa8]
WORD $0x2145; BYTE $0xd4 // and r12d,r10d
LONG $0xf07b43c4; WORD $0x19ea // rorx r13d,r10d,0x19
LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2
LONG $0xf07b43c4; WORD $0x0bfa // rorx r15d,r10d,0xb
LONG $0x310c8d42 // lea ecx,[rcx+r14*1]
LONG $0x231c8d42 // lea ebx,[rbx+r12*1]
LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7
LONG $0xf22862c4; BYTE $0xe0 // andn r12d,r10d,eax
WORD $0x3145; BYTE $0xfd // xor r13d,r15d
LONG $0xf07b43c4; WORD $0x06f2 // rorx r14d,r10d,0x6
LONG $0x004dc2c4; BYTE $0xf0 // vpshufb ymm6,ymm6,ymm8
LONG $0x231c8d42 // lea ebx,[rbx+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8941; BYTE $0xcf // mov r15d,ecx
LONG $0xcefef5c5 // vpaddd ymm1,ymm1,ymm6
LONG $0xf07b63c4; WORD $0x16e1 // rorx r12d,ecx,0x16
LONG $0x2b1c8d42 // lea ebx,[rbx+r13*1]
WORD $0x3141; BYTE $0xd7 // xor r15d,edx
LONG $0xf970fdc5; BYTE $0x50 // vpshufd ymm7,ymm1,0x50
LONG $0xf07b63c4; WORD $0x0df1 // rorx r14d,ecx,0xd
LONG $0xf07b63c4; WORD $0x02e9 // rorx r13d,ecx,0x2
LONG $0x190c8d45 // lea r9d,[r9+rbx*1]
LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa
WORD $0x2144; BYTE $0xff // and edi,r15d
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0xd731 // xor edi,edx
LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11
WORD $0x3145; BYTE $0xee // xor r14d,r13d
WORD $0x1c8d; BYTE $0x3b // lea ebx,[rbx+rdi*1]
WORD $0x8945; BYTE $0xd4 // mov r12d,r10d
LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7
// ROUND(BX, CX, DX, R8, R9, R10, R11, AX, R12, R13, R14, DI, R15, SP, 0xac)
LONG $0xac248403; WORD $0x0000; BYTE $0x00 // add eax,[rsp+0xac]
WORD $0x2145; BYTE $0xcc // and r12d,r9d
LONG $0xf07b43c4; WORD $0x19e9 // rorx r13d,r9d,0x19
LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2
LONG $0xf07bc3c4; WORD $0x0bf9 // rorx edi,r9d,0xb
LONG $0x331c8d42 // lea ebx,[rbx+r14*1]
LONG $0x20048d42 // lea eax,[rax+r12*1]
LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7
LONG $0xf23042c4; BYTE $0xe3 // andn r12d,r9d,r11d
WORD $0x3141; BYTE $0xfd // xor r13d,edi
LONG $0xf07b43c4; WORD $0x06f1 // rorx r14d,r9d,0x6
LONG $0x004dc2c4; BYTE $0xf1 // vpshufb ymm6,ymm6,ymm9
LONG $0x20048d42 // lea eax,[rax+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0xdf89 // mov edi,ebx
LONG $0xcefef5c5 // vpaddd ymm1,ymm1,ymm6
LONG $0xf07b63c4; WORD $0x16e3 // rorx r12d,ebx,0x16
LONG $0x28048d42 // lea eax,[rax+r13*1]
WORD $0xcf31 // xor edi,ecx
LONG $0x75fef5c5; BYTE $0x20 // vpaddd ymm6,ymm1,[rbp+0x20]
LONG $0xf07b63c4; WORD $0x0df3 // rorx r14d,ebx,0xd
LONG $0xf07b63c4; WORD $0x02eb // rorx r13d,ebx,0x2
LONG $0x00048d45 // lea r8d,[r8+rax*1]
WORD $0x2141; BYTE $0xff // and r15d,edi
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3141; BYTE $0xcf // xor r15d,ecx
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x38048d42 // lea eax,[rax+r15*1]
WORD $0x8945; BYTE $0xcc // mov r12d,r9d
LONG $0x747ffdc5; WORD $0x2024 // vmovdqa [rsp+0x20],ymm6
LONG $0x24648d48; BYTE $0xc0 // lea rsp,[rsp-0x40]
LONG $0x0f65e3c4; WORD $0x04e2 // vpalignr ymm4,ymm3,ymm2,0x4
// ROUND(AX, BX, CX, DX, R8, R9, R10, R11, R12, R13, R14, R15, DI, SP, 0x80)
LONG $0x249c0344; LONG $0x00000080 // add r11d,[rsp+0x80]
WORD $0x2145; BYTE $0xc4 // and r12d,r8d
LONG $0xf07b43c4; WORD $0x19e8 // rorx r13d,r8d,0x19
LONG $0x0f75e3c4; WORD $0x04f8 // vpalignr ymm7,ymm1,ymm0,0x4
LONG $0xf07b43c4; WORD $0x0bf8 // rorx r15d,r8d,0xb
LONG $0x30048d42 // lea eax,[rax+r14*1]
LONG $0x231c8d47 // lea r11d,[r11+r12*1]
LONG $0xd472cdc5; BYTE $0x07 // vpsrld ymm6,ymm4,0x7
LONG $0xf23842c4; BYTE $0xe2 // andn r12d,r8d,r10d
WORD $0x3145; BYTE $0xfd // xor r13d,r15d
LONG $0xf07b43c4; WORD $0x06f0 // rorx r14d,r8d,0x6
LONG $0xd7feedc5 // vpaddd ymm2,ymm2,ymm7
LONG $0x231c8d47 // lea r11d,[r11+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8941; BYTE $0xc7 // mov r15d,eax
LONG $0xd472c5c5; BYTE $0x03 // vpsrld ymm7,ymm4,0x3
LONG $0xf07b63c4; WORD $0x16e0 // rorx r12d,eax,0x16
LONG $0x2b1c8d47 // lea r11d,[r11+r13*1]
WORD $0x3141; BYTE $0xdf // xor r15d,ebx
LONG $0xf472d5c5; BYTE $0x0e // vpslld ymm5,ymm4,0xe
LONG $0xf07b63c4; WORD $0x0df0 // rorx r14d,eax,0xd
LONG $0xf07b63c4; WORD $0x02e8 // rorx r13d,eax,0x2
LONG $0x1a148d42 // lea edx,[rdx+r11*1]
LONG $0xe6efc5c5 // vpxor ymm4,ymm7,ymm6
WORD $0x2144; BYTE $0xff // and edi,r15d
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0xdf31 // xor edi,ebx
LONG $0xf970fdc5; BYTE $0xfa // vpshufd ymm7,ymm1,0xfa
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x3b1c8d45 // lea r11d,[r11+rdi*1]
WORD $0x8945; BYTE $0xc4 // mov r12d,r8d
LONG $0xd672cdc5; BYTE $0x0b // vpsrld ymm6,ymm6,0xb
// ROUND(R11, AX, BX, CX, DX, R8, R9, R10, R12, R13, R14, DI, R15, SP, 0x84)
LONG $0x24940344; LONG $0x00000084 // add r10d,[rsp+0x84]
WORD $0x2141; BYTE $0xd4 // and r12d,edx
LONG $0xf07b63c4; WORD $0x19ea // rorx r13d,edx,0x19
LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5
LONG $0xf07be3c4; WORD $0x0bfa // rorx edi,edx,0xb
LONG $0x331c8d47 // lea r11d,[r11+r14*1]
LONG $0x22148d47 // lea r10d,[r10+r12*1]
LONG $0xf572d5c5; BYTE $0x0b // vpslld ymm5,ymm5,0xb
LONG $0xf26842c4; BYTE $0xe1 // andn r12d,edx,r9d
WORD $0x3141; BYTE $0xfd // xor r13d,edi
LONG $0xf07b63c4; WORD $0x06f2 // rorx r14d,edx,0x6
LONG $0xe6efddc5 // vpxor ymm4,ymm4,ymm6
LONG $0x22148d47 // lea r10d,[r10+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8944; BYTE $0xdf // mov edi,r11d
LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa
LONG $0xf07b43c4; WORD $0x16e3 // rorx r12d,r11d,0x16
LONG $0x2a148d47 // lea r10d,[r10+r13*1]
WORD $0xc731 // xor edi,eax
LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5
LONG $0xf07b43c4; WORD $0x0df3 // rorx r14d,r11d,0xd
LONG $0xf07b43c4; WORD $0x02eb // rorx r13d,r11d,0x2
LONG $0x110c8d42 // lea ecx,[rcx+r10*1]
LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11
WORD $0x2141; BYTE $0xff // and r15d,edi
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3141; BYTE $0xc7 // xor r15d,eax
LONG $0xd4feedc5 // vpaddd ymm2,ymm2,ymm4
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x3a148d47 // lea r10d,[r10+r15*1]
WORD $0x8941; BYTE $0xd4 // mov r12d,edx
LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7
// ROUND(R10, R11, AX, BX, CX, DX, R8, R9, R12, R13, R14, R15, DI, SP, 0x88)
LONG $0x248c0344; LONG $0x00000088 // add r9d,[rsp+0x88]
WORD $0x2141; BYTE $0xcc // and r12d,ecx
LONG $0xf07b63c4; WORD $0x19e9 // rorx r13d,ecx,0x19
LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2
LONG $0xf07b63c4; WORD $0x0bf9 // rorx r15d,ecx,0xb
LONG $0x32148d47 // lea r10d,[r10+r14*1]
LONG $0x210c8d47 // lea r9d,[r9+r12*1]
LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7
LONG $0xf27042c4; BYTE $0xe0 // andn r12d,ecx,r8d
WORD $0x3145; BYTE $0xfd // xor r13d,r15d
LONG $0xf07b63c4; WORD $0x06f1 // rorx r14d,ecx,0x6
LONG $0x004dc2c4; BYTE $0xf0 // vpshufb ymm6,ymm6,ymm8
LONG $0x210c8d47 // lea r9d,[r9+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8945; BYTE $0xd7 // mov r15d,r10d
LONG $0xd6feedc5 // vpaddd ymm2,ymm2,ymm6
LONG $0xf07b43c4; WORD $0x16e2 // rorx r12d,r10d,0x16
LONG $0x290c8d47 // lea r9d,[r9+r13*1]
WORD $0x3145; BYTE $0xdf // xor r15d,r11d
LONG $0xfa70fdc5; BYTE $0x50 // vpshufd ymm7,ymm2,0x50
LONG $0xf07b43c4; WORD $0x0df2 // rorx r14d,r10d,0xd
LONG $0xf07b43c4; WORD $0x02ea // rorx r13d,r10d,0x2
LONG $0x0b1c8d42 // lea ebx,[rbx+r9*1]
LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa
WORD $0x2144; BYTE $0xff // and edi,r15d
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3144; BYTE $0xdf // xor edi,r11d
LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x390c8d45 // lea r9d,[r9+rdi*1]
WORD $0x8941; BYTE $0xcc // mov r12d,ecx
LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7
// ROUND(R9, R10, R11, AX, BX, CX, DX, R8, R12, R13, R14, DI, R15, SP, 0x8c)
LONG $0x24840344; LONG $0x0000008c // add r8d,[rsp+0x8c]
WORD $0x2141; BYTE $0xdc // and r12d,ebx
LONG $0xf07b63c4; WORD $0x19eb // rorx r13d,ebx,0x19
LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2
LONG $0xf07be3c4; WORD $0x0bfb // rorx edi,ebx,0xb
LONG $0x310c8d47 // lea r9d,[r9+r14*1]
LONG $0x20048d47 // lea r8d,[r8+r12*1]
LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7
LONG $0xf26062c4; BYTE $0xe2 // andn r12d,ebx,edx
WORD $0x3141; BYTE $0xfd // xor r13d,edi
LONG $0xf07b63c4; WORD $0x06f3 // rorx r14d,ebx,0x6
LONG $0x004dc2c4; BYTE $0xf1 // vpshufb ymm6,ymm6,ymm9
LONG $0x20048d47 // lea r8d,[r8+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8944; BYTE $0xcf // mov edi,r9d
LONG $0xd6feedc5 // vpaddd ymm2,ymm2,ymm6
LONG $0xf07b43c4; WORD $0x16e1 // rorx r12d,r9d,0x16
LONG $0x28048d47 // lea r8d,[r8+r13*1]
WORD $0x3144; BYTE $0xd7 // xor edi,r10d
LONG $0x75feedc5; BYTE $0x40 // vpaddd ymm6,ymm2,[rbp+0x40]
LONG $0xf07b43c4; WORD $0x0df1 // rorx r14d,r9d,0xd
LONG $0xf07b43c4; WORD $0x02e9 // rorx r13d,r9d,0x2
LONG $0x00048d42 // lea eax,[rax+r8*1]
WORD $0x2141; BYTE $0xff // and r15d,edi
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3145; BYTE $0xd7 // xor r15d,r10d
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x38048d47 // lea r8d,[r8+r15*1]
WORD $0x8941; BYTE $0xdc // mov r12d,ebx
LONG $0x347ffdc5; BYTE $0x24 // vmovdqa [rsp],ymm6
LONG $0x0f7de3c4; WORD $0x04e3 // vpalignr ymm4,ymm0,ymm3,0x4
// ROUND(R8, R9, R10, R11, AX, BX, CX, DX, R12, R13, R14, R15, DI, SP, 0xa0)
LONG $0xa0249403; WORD $0x0000; BYTE $0x00 // add edx,[rsp+0xa0]
WORD $0x2141; BYTE $0xc4 // and r12d,eax
LONG $0xf07b63c4; WORD $0x19e8 // rorx r13d,eax,0x19
LONG $0x0f6de3c4; WORD $0x04f9 // vpalignr ymm7,ymm2,ymm1,0x4
LONG $0xf07b63c4; WORD $0x0bf8 // rorx r15d,eax,0xb
LONG $0x30048d47 // lea r8d,[r8+r14*1]
LONG $0x22148d42 // lea edx,[rdx+r12*1]
LONG $0xd472cdc5; BYTE $0x07 // vpsrld ymm6,ymm4,0x7
LONG $0xf27862c4; BYTE $0xe1 // andn r12d,eax,ecx
WORD $0x3145; BYTE $0xfd // xor r13d,r15d
LONG $0xf07b63c4; WORD $0x06f0 // rorx r14d,eax,0x6
LONG $0xdffee5c5 // vpaddd ymm3,ymm3,ymm7
LONG $0x22148d42 // lea edx,[rdx+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8945; BYTE $0xc7 // mov r15d,r8d
LONG $0xd472c5c5; BYTE $0x03 // vpsrld ymm7,ymm4,0x3
LONG $0xf07b43c4; WORD $0x16e0 // rorx r12d,r8d,0x16
LONG $0x2a148d42 // lea edx,[rdx+r13*1]
WORD $0x3145; BYTE $0xcf // xor r15d,r9d
LONG $0xf472d5c5; BYTE $0x0e // vpslld ymm5,ymm4,0xe
LONG $0xf07b43c4; WORD $0x0df0 // rorx r14d,r8d,0xd
LONG $0xf07b43c4; WORD $0x02e8 // rorx r13d,r8d,0x2
LONG $0x131c8d45 // lea r11d,[r11+rdx*1]
LONG $0xe6efc5c5 // vpxor ymm4,ymm7,ymm6
WORD $0x2144; BYTE $0xff // and edi,r15d
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3144; BYTE $0xcf // xor edi,r9d
LONG $0xfa70fdc5; BYTE $0xfa // vpshufd ymm7,ymm2,0xfa
WORD $0x3145; BYTE $0xee // xor r14d,r13d
WORD $0x148d; BYTE $0x3a // lea edx,[rdx+rdi*1]
WORD $0x8941; BYTE $0xc4 // mov r12d,eax
LONG $0xd672cdc5; BYTE $0x0b // vpsrld ymm6,ymm6,0xb
// ROUND(DX, R8, R9, R10, R11, AX, BX, CX, R12, R13, R14, DI, R15, SP, 0xa4)
LONG $0xa4248c03; WORD $0x0000; BYTE $0x00 // add ecx,[rsp+0xa4]
WORD $0x2145; BYTE $0xdc // and r12d,r11d
LONG $0xf07b43c4; WORD $0x19eb // rorx r13d,r11d,0x19
LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5
LONG $0xf07bc3c4; WORD $0x0bfb // rorx edi,r11d,0xb
LONG $0x32148d42 // lea edx,[rdx+r14*1]
LONG $0x210c8d42 // lea ecx,[rcx+r12*1]
LONG $0xf572d5c5; BYTE $0x0b // vpslld ymm5,ymm5,0xb
LONG $0xf22062c4; BYTE $0xe3 // andn r12d,r11d,ebx
WORD $0x3141; BYTE $0xfd // xor r13d,edi
LONG $0xf07b43c4; WORD $0x06f3 // rorx r14d,r11d,0x6
LONG $0xe6efddc5 // vpxor ymm4,ymm4,ymm6
LONG $0x210c8d42 // lea ecx,[rcx+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0xd789 // mov edi,edx
LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa
LONG $0xf07b63c4; WORD $0x16e2 // rorx r12d,edx,0x16
LONG $0x290c8d42 // lea ecx,[rcx+r13*1]
WORD $0x3144; BYTE $0xc7 // xor edi,r8d
LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5
LONG $0xf07b63c4; WORD $0x0df2 // rorx r14d,edx,0xd
LONG $0xf07b63c4; WORD $0x02ea // rorx r13d,edx,0x2
LONG $0x0a148d45 // lea r10d,[r10+rcx*1]
LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11
WORD $0x2141; BYTE $0xff // and r15d,edi
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3145; BYTE $0xc7 // xor r15d,r8d
LONG $0xdcfee5c5 // vpaddd ymm3,ymm3,ymm4
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x390c8d42 // lea ecx,[rcx+r15*1]
WORD $0x8945; BYTE $0xdc // mov r12d,r11d
LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7
// ROUND(CX, DX, R8, R9, R10, R11, AX, BX, R12, R13, R14, R15, DI, SP, 0xa8)
LONG $0xa8249c03; WORD $0x0000; BYTE $0x00 // add ebx,[rsp+0xa8]
WORD $0x2145; BYTE $0xd4 // and r12d,r10d
LONG $0xf07b43c4; WORD $0x19ea // rorx r13d,r10d,0x19
LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2
LONG $0xf07b43c4; WORD $0x0bfa // rorx r15d,r10d,0xb
LONG $0x310c8d42 // lea ecx,[rcx+r14*1]
LONG $0x231c8d42 // lea ebx,[rbx+r12*1]
LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7
LONG $0xf22862c4; BYTE $0xe0 // andn r12d,r10d,eax
WORD $0x3145; BYTE $0xfd // xor r13d,r15d
LONG $0xf07b43c4; WORD $0x06f2 // rorx r14d,r10d,0x6
LONG $0x004dc2c4; BYTE $0xf0 // vpshufb ymm6,ymm6,ymm8
LONG $0x231c8d42 // lea ebx,[rbx+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8941; BYTE $0xcf // mov r15d,ecx
LONG $0xdefee5c5 // vpaddd ymm3,ymm3,ymm6
LONG $0xf07b63c4; WORD $0x16e1 // rorx r12d,ecx,0x16
LONG $0x2b1c8d42 // lea ebx,[rbx+r13*1]
WORD $0x3141; BYTE $0xd7 // xor r15d,edx
LONG $0xfb70fdc5; BYTE $0x50 // vpshufd ymm7,ymm3,0x50
LONG $0xf07b63c4; WORD $0x0df1 // rorx r14d,ecx,0xd
LONG $0xf07b63c4; WORD $0x02e9 // rorx r13d,ecx,0x2
LONG $0x190c8d45 // lea r9d,[r9+rbx*1]
LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa
WORD $0x2144; BYTE $0xff // and edi,r15d
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0xd731 // xor edi,edx
LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11
WORD $0x3145; BYTE $0xee // xor r14d,r13d
WORD $0x1c8d; BYTE $0x3b // lea ebx,[rbx+rdi*1]
WORD $0x8945; BYTE $0xd4 // mov r12d,r10d
LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7
// ROUND(BX, CX, DX, R8, R9, R10, R11, AX, R12, R13, R14, DI, R15, SP, 0xac)
LONG $0xac248403; WORD $0x0000; BYTE $0x00 // add eax,[rsp+0xac]
WORD $0x2145; BYTE $0xcc // and r12d,r9d
LONG $0xf07b43c4; WORD $0x19e9 // rorx r13d,r9d,0x19
LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2
LONG $0xf07bc3c4; WORD $0x0bf9 // rorx edi,r9d,0xb
LONG $0x331c8d42 // lea ebx,[rbx+r14*1]
LONG $0x20048d42 // lea eax,[rax+r12*1]
LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7
LONG $0xf23042c4; BYTE $0xe3 // andn r12d,r9d,r11d
WORD $0x3141; BYTE $0xfd // xor r13d,edi
LONG $0xf07b43c4; WORD $0x06f1 // rorx r14d,r9d,0x6
LONG $0x004dc2c4; BYTE $0xf1 // vpshufb ymm6,ymm6,ymm9
LONG $0x20048d42 // lea eax,[rax+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0xdf89 // mov edi,ebx
LONG $0xdefee5c5 // vpaddd ymm3,ymm3,ymm6
LONG $0xf07b63c4; WORD $0x16e3 // rorx r12d,ebx,0x16
LONG $0x28048d42 // lea eax,[rax+r13*1]
WORD $0xcf31 // xor edi,ecx
LONG $0x75fee5c5; BYTE $0x60 // vpaddd ymm6,ymm3,[rbp+0x60]
LONG $0xf07b63c4; WORD $0x0df3 // rorx r14d,ebx,0xd
LONG $0xf07b63c4; WORD $0x02eb // rorx r13d,ebx,0x2
LONG $0x00048d45 // lea r8d,[r8+rax*1]
WORD $0x2141; BYTE $0xff // and r15d,edi
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3141; BYTE $0xcf // xor r15d,ecx
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x38048d42 // lea eax,[rax+r15*1]
WORD $0x8945; BYTE $0xcc // mov r12d,r9d
LONG $0x747ffdc5; WORD $0x2024 // vmovdqa [rsp+0x20],ymm6
ADDQ $0x80, BP
CMPB 0x3(BP),$0x0
JNE loop1
// ROUND(AX, BX, CX, DX, R8, R9, R10, R11, R12, R13, R14, R15, DI, SP, 0x40)
LONG $0x245c0344; BYTE $0x40 // add r11d,[rsp+0x40]
WORD $0x2145; BYTE $0xc4 // and r12d,r8d
LONG $0xf07b43c4; WORD $0x19e8 // rorx r13d,r8d,0x19
LONG $0xf07b43c4; WORD $0x0bf8 // rorx r15d,r8d,0xb
LONG $0x30048d42 // lea eax,[rax+r14*1]
LONG $0x231c8d47 // lea r11d,[r11+r12*1]
LONG $0xf23842c4; BYTE $0xe2 // andn r12d,r8d,r10d
WORD $0x3145; BYTE $0xfd // xor r13d,r15d
LONG $0xf07b43c4; WORD $0x06f0 // rorx r14d,r8d,0x6
LONG $0x231c8d47 // lea r11d,[r11+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8941; BYTE $0xc7 // mov r15d,eax
LONG $0xf07b63c4; WORD $0x16e0 // rorx r12d,eax,0x16
LONG $0x2b1c8d47 // lea r11d,[r11+r13*1]
WORD $0x3141; BYTE $0xdf // xor r15d,ebx
LONG $0xf07b63c4; WORD $0x0df0 // rorx r14d,eax,0xd
LONG $0xf07b63c4; WORD $0x02e8 // rorx r13d,eax,0x2
LONG $0x1a148d42 // lea edx,[rdx+r11*1]
WORD $0x2144; BYTE $0xff // and edi,r15d
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0xdf31 // xor edi,ebx
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x3b1c8d45 // lea r11d,[r11+rdi*1]
WORD $0x8945; BYTE $0xc4 // mov r12d,r8d
// ROUND(R11, AX, BX, CX, DX, R8, R9, R10, R12, R13, R14, DI, R15, SP, 0x44)
LONG $0x24540344; BYTE $0x44 // add r10d,[rsp+0x44]
WORD $0x2141; BYTE $0xd4 // and r12d,edx
LONG $0xf07b63c4; WORD $0x19ea // rorx r13d,edx,0x19
LONG $0xf07be3c4; WORD $0x0bfa // rorx edi,edx,0xb
LONG $0x331c8d47 // lea r11d,[r11+r14*1]
LONG $0x22148d47 // lea r10d,[r10+r12*1]
LONG $0xf26842c4; BYTE $0xe1 // andn r12d,edx,r9d
WORD $0x3141; BYTE $0xfd // xor r13d,edi
LONG $0xf07b63c4; WORD $0x06f2 // rorx r14d,edx,0x6
LONG $0x22148d47 // lea r10d,[r10+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8944; BYTE $0xdf // mov edi,r11d
LONG $0xf07b43c4; WORD $0x16e3 // rorx r12d,r11d,0x16
LONG $0x2a148d47 // lea r10d,[r10+r13*1]
WORD $0xc731 // xor edi,eax
LONG $0xf07b43c4; WORD $0x0df3 // rorx r14d,r11d,0xd
LONG $0xf07b43c4; WORD $0x02eb // rorx r13d,r11d,0x2
LONG $0x110c8d42 // lea ecx,[rcx+r10*1]
WORD $0x2141; BYTE $0xff // and r15d,edi
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3141; BYTE $0xc7 // xor r15d,eax
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x3a148d47 // lea r10d,[r10+r15*1]
WORD $0x8941; BYTE $0xd4 // mov r12d,edx
// ROUND(R10, R11, AX, BX, CX, DX, R8, R9, R12, R13, R14, R15, DI, SP, 0x48)
LONG $0x244c0344; BYTE $0x48 // add r9d,[rsp+0x48]
WORD $0x2141; BYTE $0xcc // and r12d,ecx
LONG $0xf07b63c4; WORD $0x19e9 // rorx r13d,ecx,0x19
LONG $0xf07b63c4; WORD $0x0bf9 // rorx r15d,ecx,0xb
LONG $0x32148d47 // lea r10d,[r10+r14*1]
LONG $0x210c8d47 // lea r9d,[r9+r12*1]
LONG $0xf27042c4; BYTE $0xe0 // andn r12d,ecx,r8d
WORD $0x3145; BYTE $0xfd // xor r13d,r15d
LONG $0xf07b63c4; WORD $0x06f1 // rorx r14d,ecx,0x6
LONG $0x210c8d47 // lea r9d,[r9+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8945; BYTE $0xd7 // mov r15d,r10d
LONG $0xf07b43c4; WORD $0x16e2 // rorx r12d,r10d,0x16
LONG $0x290c8d47 // lea r9d,[r9+r13*1]
WORD $0x3145; BYTE $0xdf // xor r15d,r11d
LONG $0xf07b43c4; WORD $0x0df2 // rorx r14d,r10d,0xd
LONG $0xf07b43c4; WORD $0x02ea // rorx r13d,r10d,0x2
LONG $0x0b1c8d42 // lea ebx,[rbx+r9*1]
WORD $0x2144; BYTE $0xff // and edi,r15d
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3144; BYTE $0xdf // xor edi,r11d
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x390c8d45 // lea r9d,[r9+rdi*1]
WORD $0x8941; BYTE $0xcc // mov r12d,ecx
// ROUND(R9, R10, R11, AX, BX, CX, DX, R8, R12, R13, R14, DI, R15, SP, 0x4c)
LONG $0x24440344; BYTE $0x4c // add r8d,[rsp+0x4c]
WORD $0x2141; BYTE $0xdc // and r12d,ebx
LONG $0xf07b63c4; WORD $0x19eb // rorx r13d,ebx,0x19
LONG $0xf07be3c4; WORD $0x0bfb // rorx edi,ebx,0xb
LONG $0x310c8d47 // lea r9d,[r9+r14*1]
LONG $0x20048d47 // lea r8d,[r8+r12*1]
LONG $0xf26062c4; BYTE $0xe2 // andn r12d,ebx,edx
WORD $0x3141; BYTE $0xfd // xor r13d,edi
LONG $0xf07b63c4; WORD $0x06f3 // rorx r14d,ebx,0x6
LONG $0x20048d47 // lea r8d,[r8+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8944; BYTE $0xcf // mov edi,r9d
LONG $0xf07b43c4; WORD $0x16e1 // rorx r12d,r9d,0x16
LONG $0x28048d47 // lea r8d,[r8+r13*1]
WORD $0x3144; BYTE $0xd7 // xor edi,r10d
LONG $0xf07b43c4; WORD $0x0df1 // rorx r14d,r9d,0xd
LONG $0xf07b43c4; WORD $0x02e9 // rorx r13d,r9d,0x2
LONG $0x00048d42 // lea eax,[rax+r8*1]
WORD $0x2141; BYTE $0xff // and r15d,edi
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3145; BYTE $0xd7 // xor r15d,r10d
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x38048d47 // lea r8d,[r8+r15*1]
WORD $0x8941; BYTE $0xdc // mov r12d,ebx
// ROUND(R8, R9, R10, R11, AX, BX, CX, DX, R12, R13, R14, R15, DI, SP, 0x60)
LONG $0x60245403 // add edx,[rsp+0x60]
WORD $0x2141; BYTE $0xc4 // and r12d,eax
LONG $0xf07b63c4; WORD $0x19e8 // rorx r13d,eax,0x19
LONG $0xf07b63c4; WORD $0x0bf8 // rorx r15d,eax,0xb
LONG $0x30048d47 // lea r8d,[r8+r14*1]
LONG $0x22148d42 // lea edx,[rdx+r12*1]
LONG $0xf27862c4; BYTE $0xe1 // andn r12d,eax,ecx
WORD $0x3145; BYTE $0xfd // xor r13d,r15d
LONG $0xf07b63c4; WORD $0x06f0 // rorx r14d,eax,0x6
LONG $0x22148d42 // lea edx,[rdx+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8945; BYTE $0xc7 // mov r15d,r8d
LONG $0xf07b43c4; WORD $0x16e0 // rorx r12d,r8d,0x16
LONG $0x2a148d42 // lea edx,[rdx+r13*1]
WORD $0x3145; BYTE $0xcf // xor r15d,r9d
LONG $0xf07b43c4; WORD $0x0df0 // rorx r14d,r8d,0xd
LONG $0xf07b43c4; WORD $0x02e8 // rorx r13d,r8d,0x2
LONG $0x131c8d45 // lea r11d,[r11+rdx*1]
WORD $0x2144; BYTE $0xff // and edi,r15d
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3144; BYTE $0xcf // xor edi,r9d
WORD $0x3145; BYTE $0xee // xor r14d,r13d
WORD $0x148d; BYTE $0x3a // lea edx,[rdx+rdi*1]
WORD $0x8941; BYTE $0xc4 // mov r12d,eax
// ROUND(DX, R8, R9, R10, R11, AX, BX, CX, R12, R13, R14, DI, R15, SP, 0x64)
LONG $0x64244c03 // add ecx,[rsp+0x64]
WORD $0x2145; BYTE $0xdc // and r12d,r11d
LONG $0xf07b43c4; WORD $0x19eb // rorx r13d,r11d,0x19
LONG $0xf07bc3c4; WORD $0x0bfb // rorx edi,r11d,0xb
LONG $0x32148d42 // lea edx,[rdx+r14*1]
LONG $0x210c8d42 // lea ecx,[rcx+r12*1]
LONG $0xf22062c4; BYTE $0xe3 // andn r12d,r11d,ebx
WORD $0x3141; BYTE $0xfd // xor r13d,edi
LONG $0xf07b43c4; WORD $0x06f3 // rorx r14d,r11d,0x6
LONG $0x210c8d42 // lea ecx,[rcx+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0xd789 // mov edi,edx
LONG $0xf07b63c4; WORD $0x16e2 // rorx r12d,edx,0x16
LONG $0x290c8d42 // lea ecx,[rcx+r13*1]
WORD $0x3144; BYTE $0xc7 // xor edi,r8d
LONG $0xf07b63c4; WORD $0x0df2 // rorx r14d,edx,0xd
LONG $0xf07b63c4; WORD $0x02ea // rorx r13d,edx,0x2
LONG $0x0a148d45 // lea r10d,[r10+rcx*1]
WORD $0x2141; BYTE $0xff // and r15d,edi
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3145; BYTE $0xc7 // xor r15d,r8d
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x390c8d42 // lea ecx,[rcx+r15*1]
WORD $0x8945; BYTE $0xdc // mov r12d,r11d
// ROUND(CX, DX, R8, R9, R10, R11, AX, BX, R12, R13, R14, R15, DI, SP, 0x68)
LONG $0x68245c03 // add ebx,[rsp+0x68]
WORD $0x2145; BYTE $0xd4 // and r12d,r10d
LONG $0xf07b43c4; WORD $0x19ea // rorx r13d,r10d,0x19
LONG $0xf07b43c4; WORD $0x0bfa // rorx r15d,r10d,0xb
LONG $0x310c8d42 // lea ecx,[rcx+r14*1]
LONG $0x231c8d42 // lea ebx,[rbx+r12*1]
LONG $0xf22862c4; BYTE $0xe0 // andn r12d,r10d,eax
WORD $0x3145; BYTE $0xfd // xor r13d,r15d
LONG $0xf07b43c4; WORD $0x06f2 // rorx r14d,r10d,0x6
LONG $0x231c8d42 // lea ebx,[rbx+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8941; BYTE $0xcf // mov r15d,ecx
LONG $0xf07b63c4; WORD $0x16e1 // rorx r12d,ecx,0x16
LONG $0x2b1c8d42 // lea ebx,[rbx+r13*1]
WORD $0x3141; BYTE $0xd7 // xor r15d,edx
LONG $0xf07b63c4; WORD $0x0df1 // rorx r14d,ecx,0xd
LONG $0xf07b63c4; WORD $0x02e9 // rorx r13d,ecx,0x2
LONG $0x190c8d45 // lea r9d,[r9+rbx*1]
WORD $0x2144; BYTE $0xff // and edi,r15d
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0xd731 // xor edi,edx
WORD $0x3145; BYTE $0xee // xor r14d,r13d
WORD $0x1c8d; BYTE $0x3b // lea ebx,[rbx+rdi*1]
WORD $0x8945; BYTE $0xd4 // mov r12d,r10d
// ROUND(BX, CX, DX, R8, R9, R10, R11, AX, R12, R13, R14, DI, R15, SP, 0x6c)
LONG $0x6c244403 // add eax,[rsp+0x6c]
WORD $0x2145; BYTE $0xcc // and r12d,r9d
LONG $0xf07b43c4; WORD $0x19e9 // rorx r13d,r9d,0x19
LONG $0xf07bc3c4; WORD $0x0bf9 // rorx edi,r9d,0xb
LONG $0x331c8d42 // lea ebx,[rbx+r14*1]
LONG $0x20048d42 // lea eax,[rax+r12*1]
LONG $0xf23042c4; BYTE $0xe3 // andn r12d,r9d,r11d
WORD $0x3141; BYTE $0xfd // xor r13d,edi
LONG $0xf07b43c4; WORD $0x06f1 // rorx r14d,r9d,0x6
LONG $0x20048d42 // lea eax,[rax+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0xdf89 // mov edi,ebx
LONG $0xf07b63c4; WORD $0x16e3 // rorx r12d,ebx,0x16
LONG $0x28048d42 // lea eax,[rax+r13*1]
WORD $0xcf31 // xor edi,ecx
LONG $0xf07b63c4; WORD $0x0df3 // rorx r14d,ebx,0xd
LONG $0xf07b63c4; WORD $0x02eb // rorx r13d,ebx,0x2
LONG $0x00048d45 // lea r8d,[r8+rax*1]
WORD $0x2141; BYTE $0xff // and r15d,edi
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3141; BYTE $0xcf // xor r15d,ecx
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x38048d42 // lea eax,[rax+r15*1]
WORD $0x8945; BYTE $0xcc // mov r12d,r9d
// ROUND(AX, BX, CX, DX, R8, R9, R10, R11, R12, R13, R14, R15, DI, SP, 0x00)
LONG $0x241c0344 // add r11d,[rsp]
WORD $0x2145; BYTE $0xc4 // and r12d,r8d
LONG $0xf07b43c4; WORD $0x19e8 // rorx r13d,r8d,0x19
LONG $0xf07b43c4; WORD $0x0bf8 // rorx r15d,r8d,0xb
LONG $0x30048d42 // lea eax,[rax+r14*1]
LONG $0x231c8d47 // lea r11d,[r11+r12*1]
LONG $0xf23842c4; BYTE $0xe2 // andn r12d,r8d,r10d
WORD $0x3145; BYTE $0xfd // xor r13d,r15d
LONG $0xf07b43c4; WORD $0x06f0 // rorx r14d,r8d,0x6
LONG $0x231c8d47 // lea r11d,[r11+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8941; BYTE $0xc7 // mov r15d,eax
LONG $0xf07b63c4; WORD $0x16e0 // rorx r12d,eax,0x16
LONG $0x2b1c8d47 // lea r11d,[r11+r13*1]
WORD $0x3141; BYTE $0xdf // xor r15d,ebx
LONG $0xf07b63c4; WORD $0x0df0 // rorx r14d,eax,0xd
LONG $0xf07b63c4; WORD $0x02e8 // rorx r13d,eax,0x2
LONG $0x1a148d42 // lea edx,[rdx+r11*1]
WORD $0x2144; BYTE $0xff // and edi,r15d
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0xdf31 // xor edi,ebx
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x3b1c8d45 // lea r11d,[r11+rdi*1]
WORD $0x8945; BYTE $0xc4 // mov r12d,r8d
// ROUND(R11, AX, BX, CX, DX, R8, R9, R10, R12, R13, R14, DI, R15, SP, 0x04)
LONG $0x24540344; BYTE $0x04 // add r10d,[rsp+0x4]
WORD $0x2141; BYTE $0xd4 // and r12d,edx
LONG $0xf07b63c4; WORD $0x19ea // rorx r13d,edx,0x19
LONG $0xf07be3c4; WORD $0x0bfa // rorx edi,edx,0xb
LONG $0x331c8d47 // lea r11d,[r11+r14*1]
LONG $0x22148d47 // lea r10d,[r10+r12*1]
LONG $0xf26842c4; BYTE $0xe1 // andn r12d,edx,r9d
WORD $0x3141; BYTE $0xfd // xor r13d,edi
LONG $0xf07b63c4; WORD $0x06f2 // rorx r14d,edx,0x6
LONG $0x22148d47 // lea r10d,[r10+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8944; BYTE $0xdf // mov edi,r11d
LONG $0xf07b43c4; WORD $0x16e3 // rorx r12d,r11d,0x16
LONG $0x2a148d47 // lea r10d,[r10+r13*1]
WORD $0xc731 // xor edi,eax
LONG $0xf07b43c4; WORD $0x0df3 // rorx r14d,r11d,0xd
LONG $0xf07b43c4; WORD $0x02eb // rorx r13d,r11d,0x2
LONG $0x110c8d42 // lea ecx,[rcx+r10*1]
WORD $0x2141; BYTE $0xff // and r15d,edi
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3141; BYTE $0xc7 // xor r15d,eax
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x3a148d47 // lea r10d,[r10+r15*1]
WORD $0x8941; BYTE $0xd4 // mov r12d,edx
// ROUND(R10, R11, AX, BX, CX, DX, R8, R9, R12, R13, R14, R15, DI, SP, 0x08)
LONG $0x244c0344; BYTE $0x08 // add r9d,[rsp+0x8]
WORD $0x2141; BYTE $0xcc // and r12d,ecx
LONG $0xf07b63c4; WORD $0x19e9 // rorx r13d,ecx,0x19
LONG $0xf07b63c4; WORD $0x0bf9 // rorx r15d,ecx,0xb
LONG $0x32148d47 // lea r10d,[r10+r14*1]
LONG $0x210c8d47 // lea r9d,[r9+r12*1]
LONG $0xf27042c4; BYTE $0xe0 // andn r12d,ecx,r8d
WORD $0x3145; BYTE $0xfd // xor r13d,r15d
LONG $0xf07b63c4; WORD $0x06f1 // rorx r14d,ecx,0x6
LONG $0x210c8d47 // lea r9d,[r9+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8945; BYTE $0xd7 // mov r15d,r10d
LONG $0xf07b43c4; WORD $0x16e2 // rorx r12d,r10d,0x16
LONG $0x290c8d47 // lea r9d,[r9+r13*1]
WORD $0x3145; BYTE $0xdf // xor r15d,r11d
LONG $0xf07b43c4; WORD $0x0df2 // rorx r14d,r10d,0xd
LONG $0xf07b43c4; WORD $0x02ea // rorx r13d,r10d,0x2
LONG $0x0b1c8d42 // lea ebx,[rbx+r9*1]
WORD $0x2144; BYTE $0xff // and edi,r15d
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3144; BYTE $0xdf // xor edi,r11d
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x390c8d45 // lea r9d,[r9+rdi*1]
WORD $0x8941; BYTE $0xcc // mov r12d,ecx
// ROUND(R9, R10, R11, AX, BX, CX, DX, R8, R12, R13, R14, DI, R15, SP, 0x0c)
LONG $0x24440344; BYTE $0x0c // add r8d,[rsp+0xc]
WORD $0x2141; BYTE $0xdc // and r12d,ebx
LONG $0xf07b63c4; WORD $0x19eb // rorx r13d,ebx,0x19
LONG $0xf07be3c4; WORD $0x0bfb // rorx edi,ebx,0xb
LONG $0x310c8d47 // lea r9d,[r9+r14*1]
LONG $0x20048d47 // lea r8d,[r8+r12*1]
LONG $0xf26062c4; BYTE $0xe2 // andn r12d,ebx,edx
WORD $0x3141; BYTE $0xfd // xor r13d,edi
LONG $0xf07b63c4; WORD $0x06f3 // rorx r14d,ebx,0x6
LONG $0x20048d47 // lea r8d,[r8+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8944; BYTE $0xcf // mov edi,r9d
LONG $0xf07b43c4; WORD $0x16e1 // rorx r12d,r9d,0x16
LONG $0x28048d47 // lea r8d,[r8+r13*1]
WORD $0x3144; BYTE $0xd7 // xor edi,r10d
LONG $0xf07b43c4; WORD $0x0df1 // rorx r14d,r9d,0xd
LONG $0xf07b43c4; WORD $0x02e9 // rorx r13d,r9d,0x2
LONG $0x00048d42 // lea eax,[rax+r8*1]
WORD $0x2141; BYTE $0xff // and r15d,edi
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3145; BYTE $0xd7 // xor r15d,r10d
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x38048d47 // lea r8d,[r8+r15*1]
WORD $0x8941; BYTE $0xdc // mov r12d,ebx
// ROUND(R8, R9, R10, R11, AX, BX, CX, DX, R12, R13, R14, R15, DI, SP, 0x20)
LONG $0x20245403 // add edx,[rsp+0x20]
WORD $0x2141; BYTE $0xc4 // and r12d,eax
LONG $0xf07b63c4; WORD $0x19e8 // rorx r13d,eax,0x19
LONG $0xf07b63c4; WORD $0x0bf8 // rorx r15d,eax,0xb
LONG $0x30048d47 // lea r8d,[r8+r14*1]
LONG $0x22148d42 // lea edx,[rdx+r12*1]
LONG $0xf27862c4; BYTE $0xe1 // andn r12d,eax,ecx
WORD $0x3145; BYTE $0xfd // xor r13d,r15d
LONG $0xf07b63c4; WORD $0x06f0 // rorx r14d,eax,0x6
LONG $0x22148d42 // lea edx,[rdx+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8945; BYTE $0xc7 // mov r15d,r8d
LONG $0xf07b43c4; WORD $0x16e0 // rorx r12d,r8d,0x16
LONG $0x2a148d42 // lea edx,[rdx+r13*1]
WORD $0x3145; BYTE $0xcf // xor r15d,r9d
LONG $0xf07b43c4; WORD $0x0df0 // rorx r14d,r8d,0xd
LONG $0xf07b43c4; WORD $0x02e8 // rorx r13d,r8d,0x2
LONG $0x131c8d45 // lea r11d,[r11+rdx*1]
WORD $0x2144; BYTE $0xff // and edi,r15d
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3144; BYTE $0xcf // xor edi,r9d
WORD $0x3145; BYTE $0xee // xor r14d,r13d
WORD $0x148d; BYTE $0x3a // lea edx,[rdx+rdi*1]
WORD $0x8941; BYTE $0xc4 // mov r12d,eax
// ROUND(DX, R8, R9, R10, R11, AX, BX, CX, R12, R13, R14, DI, R15, SP, 0x24)
LONG $0x24244c03 // add ecx,[rsp+0x24]
WORD $0x2145; BYTE $0xdc // and r12d,r11d
LONG $0xf07b43c4; WORD $0x19eb // rorx r13d,r11d,0x19
LONG $0xf07bc3c4; WORD $0x0bfb // rorx edi,r11d,0xb
LONG $0x32148d42 // lea edx,[rdx+r14*1]
LONG $0x210c8d42 // lea ecx,[rcx+r12*1]
LONG $0xf22062c4; BYTE $0xe3 // andn r12d,r11d,ebx
WORD $0x3141; BYTE $0xfd // xor r13d,edi
LONG $0xf07b43c4; WORD $0x06f3 // rorx r14d,r11d,0x6
LONG $0x210c8d42 // lea ecx,[rcx+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0xd789 // mov edi,edx
LONG $0xf07b63c4; WORD $0x16e2 // rorx r12d,edx,0x16
LONG $0x290c8d42 // lea ecx,[rcx+r13*1]
WORD $0x3144; BYTE $0xc7 // xor edi,r8d
LONG $0xf07b63c4; WORD $0x0df2 // rorx r14d,edx,0xd
LONG $0xf07b63c4; WORD $0x02ea // rorx r13d,edx,0x2
LONG $0x0a148d45 // lea r10d,[r10+rcx*1]
WORD $0x2141; BYTE $0xff // and r15d,edi
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3145; BYTE $0xc7 // xor r15d,r8d
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x390c8d42 // lea ecx,[rcx+r15*1]
WORD $0x8945; BYTE $0xdc // mov r12d,r11d
// ROUND(CX, DX, R8, R9, R10, R11, AX, BX, R12, R13, R14, R15, DI, SP, 0x28)
LONG $0x28245c03 // add ebx,[rsp+0x28]
WORD $0x2145; BYTE $0xd4 // and r12d,r10d
LONG $0xf07b43c4; WORD $0x19ea // rorx r13d,r10d,0x19
LONG $0xf07b43c4; WORD $0x0bfa // rorx r15d,r10d,0xb
LONG $0x310c8d42 // lea ecx,[rcx+r14*1]
LONG $0x231c8d42 // lea ebx,[rbx+r12*1]
LONG $0xf22862c4; BYTE $0xe0 // andn r12d,r10d,eax
WORD $0x3145; BYTE $0xfd // xor r13d,r15d
LONG $0xf07b43c4; WORD $0x06f2 // rorx r14d,r10d,0x6
LONG $0x231c8d42 // lea ebx,[rbx+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8941; BYTE $0xcf // mov r15d,ecx
LONG $0xf07b63c4; WORD $0x16e1 // rorx r12d,ecx,0x16
LONG $0x2b1c8d42 // lea ebx,[rbx+r13*1]
WORD $0x3141; BYTE $0xd7 // xor r15d,edx
LONG $0xf07b63c4; WORD $0x0df1 // rorx r14d,ecx,0xd
LONG $0xf07b63c4; WORD $0x02e9 // rorx r13d,ecx,0x2
LONG $0x190c8d45 // lea r9d,[r9+rbx*1]
WORD $0x2144; BYTE $0xff // and edi,r15d
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0xd731 // xor edi,edx
WORD $0x3145; BYTE $0xee // xor r14d,r13d
WORD $0x1c8d; BYTE $0x3b // lea ebx,[rbx+rdi*1]
WORD $0x8945; BYTE $0xd4 // mov r12d,r10d
// ROUND(BX, CX, DX, R8, R9, R10, R11, AX, R12, R13, R14, DI, R15, SP, 0x2c)
LONG $0x2c244403 // add eax,[rsp+0x2c]
WORD $0x2145; BYTE $0xcc // and r12d,r9d
LONG $0xf07b43c4; WORD $0x19e9 // rorx r13d,r9d,0x19
LONG $0xf07bc3c4; WORD $0x0bf9 // rorx edi,r9d,0xb
LONG $0x331c8d42 // lea ebx,[rbx+r14*1]
LONG $0x20048d42 // lea eax,[rax+r12*1]
LONG $0xf23042c4; BYTE $0xe3 // andn r12d,r9d,r11d
WORD $0x3141; BYTE $0xfd // xor r13d,edi
LONG $0xf07b43c4; WORD $0x06f1 // rorx r14d,r9d,0x6
LONG $0x20048d42 // lea eax,[rax+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0xdf89 // mov edi,ebx
LONG $0xf07b63c4; WORD $0x16e3 // rorx r12d,ebx,0x16
LONG $0x28048d42 // lea eax,[rax+r13*1]
WORD $0xcf31 // xor edi,ecx
LONG $0xf07b63c4; WORD $0x0df3 // rorx r14d,ebx,0xd
LONG $0xf07b63c4; WORD $0x02eb // rorx r13d,ebx,0x2
LONG $0x00048d45 // lea r8d,[r8+rax*1]
WORD $0x2141; BYTE $0xff // and r15d,edi
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3141; BYTE $0xcf // xor r15d,ecx
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x38048d42 // lea eax,[rax+r15*1]
WORD $0x8945; BYTE $0xcc // mov r12d,r9d
MOVQ 0x200(SP), DI // $_ctx
ADDQ R14, AX
LEAQ 0x1c0(SP), BP
ADDL (DI), AX
ADDL 4(DI), BX
ADDL 8(DI), CX
ADDL 12(DI), DX
ADDL 16(DI), R8
ADDL 20(DI), R9
ADDL 24(DI), R10
ADDL 28(DI), R11
MOVL AX, (DI)
MOVL BX, 4(DI)
MOVL CX, 8(DI)
MOVL DX, 12(DI)
MOVL R8, 16(DI)
MOVL R9, 20(DI)
MOVL R10, 24(DI)
MOVL R11, 28(DI)
CMPQ SI, 0x50(BP) // $_end
JE done
XORQ R14, R14
MOVQ BX, DI
XORQ CX, DI // magic
MOVQ R9, R12
loop2:
// ROUND(AX, BX, CX, DX, R8, R9, R10, R11, R12, R13, R14, R15, DI, BP, 0x10)
LONG $0x105d0344 // add r11d,[rbp+0x10]
WORD $0x2145; BYTE $0xc4 // and r12d,r8d
LONG $0xf07b43c4; WORD $0x19e8 // rorx r13d,r8d,0x19
LONG $0xf07b43c4; WORD $0x0bf8 // rorx r15d,r8d,0xb
LONG $0x30048d42 // lea eax,[rax+r14*1]
LONG $0x231c8d47 // lea r11d,[r11+r12*1]
LONG $0xf23842c4; BYTE $0xe2 // andn r12d,r8d,r10d
WORD $0x3145; BYTE $0xfd // xor r13d,r15d
LONG $0xf07b43c4; WORD $0x06f0 // rorx r14d,r8d,0x6
LONG $0x231c8d47 // lea r11d,[r11+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8941; BYTE $0xc7 // mov r15d,eax
LONG $0xf07b63c4; WORD $0x16e0 // rorx r12d,eax,0x16
LONG $0x2b1c8d47 // lea r11d,[r11+r13*1]
WORD $0x3141; BYTE $0xdf // xor r15d,ebx
LONG $0xf07b63c4; WORD $0x0df0 // rorx r14d,eax,0xd
LONG $0xf07b63c4; WORD $0x02e8 // rorx r13d,eax,0x2
LONG $0x1a148d42 // lea edx,[rdx+r11*1]
WORD $0x2144; BYTE $0xff // and edi,r15d
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0xdf31 // xor edi,ebx
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x3b1c8d45 // lea r11d,[r11+rdi*1]
WORD $0x8945; BYTE $0xc4 // mov r12d,r8d
// ROUND(R11, AX, BX, CX, DX, R8, R9, R10, R12, R13, R14, DI, R15, BP, 0x14)
LONG $0x14550344 // add r10d,[rbp+0x14]
WORD $0x2141; BYTE $0xd4 // and r12d,edx
LONG $0xf07b63c4; WORD $0x19ea // rorx r13d,edx,0x19
LONG $0xf07be3c4; WORD $0x0bfa // rorx edi,edx,0xb
LONG $0x331c8d47 // lea r11d,[r11+r14*1]
LONG $0x22148d47 // lea r10d,[r10+r12*1]
LONG $0xf26842c4; BYTE $0xe1 // andn r12d,edx,r9d
WORD $0x3141; BYTE $0xfd // xor r13d,edi
LONG $0xf07b63c4; WORD $0x06f2 // rorx r14d,edx,0x6
LONG $0x22148d47 // lea r10d,[r10+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8944; BYTE $0xdf // mov edi,r11d
LONG $0xf07b43c4; WORD $0x16e3 // rorx r12d,r11d,0x16
LONG $0x2a148d47 // lea r10d,[r10+r13*1]
WORD $0xc731 // xor edi,eax
LONG $0xf07b43c4; WORD $0x0df3 // rorx r14d,r11d,0xd
LONG $0xf07b43c4; WORD $0x02eb // rorx r13d,r11d,0x2
LONG $0x110c8d42 // lea ecx,[rcx+r10*1]
WORD $0x2141; BYTE $0xff // and r15d,edi
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3141; BYTE $0xc7 // xor r15d,eax
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x3a148d47 // lea r10d,[r10+r15*1]
WORD $0x8941; BYTE $0xd4 // mov r12d,edx
// ROUND(R10, R11, AX, BX, CX, DX, R8, R9, R12, R13, R14, R15, DI, BP, 0x18)
LONG $0x184d0344 // add r9d,[rbp+0x18]
WORD $0x2141; BYTE $0xcc // and r12d,ecx
LONG $0xf07b63c4; WORD $0x19e9 // rorx r13d,ecx,0x19
LONG $0xf07b63c4; WORD $0x0bf9 // rorx r15d,ecx,0xb
LONG $0x32148d47 // lea r10d,[r10+r14*1]
LONG $0x210c8d47 // lea r9d,[r9+r12*1]
LONG $0xf27042c4; BYTE $0xe0 // andn r12d,ecx,r8d
WORD $0x3145; BYTE $0xfd // xor r13d,r15d
LONG $0xf07b63c4; WORD $0x06f1 // rorx r14d,ecx,0x6
LONG $0x210c8d47 // lea r9d,[r9+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8945; BYTE $0xd7 // mov r15d,r10d
LONG $0xf07b43c4; WORD $0x16e2 // rorx r12d,r10d,0x16
LONG $0x290c8d47 // lea r9d,[r9+r13*1]
WORD $0x3145; BYTE $0xdf // xor r15d,r11d
LONG $0xf07b43c4; WORD $0x0df2 // rorx r14d,r10d,0xd
LONG $0xf07b43c4; WORD $0x02ea // rorx r13d,r10d,0x2
LONG $0x0b1c8d42 // lea ebx,[rbx+r9*1]
WORD $0x2144; BYTE $0xff // and edi,r15d
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3144; BYTE $0xdf // xor edi,r11d
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x390c8d45 // lea r9d,[r9+rdi*1]
WORD $0x8941; BYTE $0xcc // mov r12d,ecx
// ROUND(R9, R10, R11, AX, BX, CX, DX, R8, R12, R13, R14, DI, R15, BP, 0x1c)
LONG $0x1c450344 // add r8d,[rbp+0x1c]
WORD $0x2141; BYTE $0xdc // and r12d,ebx
LONG $0xf07b63c4; WORD $0x19eb // rorx r13d,ebx,0x19
LONG $0xf07be3c4; WORD $0x0bfb // rorx edi,ebx,0xb
LONG $0x310c8d47 // lea r9d,[r9+r14*1]
LONG $0x20048d47 // lea r8d,[r8+r12*1]
LONG $0xf26062c4; BYTE $0xe2 // andn r12d,ebx,edx
WORD $0x3141; BYTE $0xfd // xor r13d,edi
LONG $0xf07b63c4; WORD $0x06f3 // rorx r14d,ebx,0x6
LONG $0x20048d47 // lea r8d,[r8+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8944; BYTE $0xcf // mov edi,r9d
LONG $0xf07b43c4; WORD $0x16e1 // rorx r12d,r9d,0x16
LONG $0x28048d47 // lea r8d,[r8+r13*1]
WORD $0x3144; BYTE $0xd7 // xor edi,r10d
LONG $0xf07b43c4; WORD $0x0df1 // rorx r14d,r9d,0xd
LONG $0xf07b43c4; WORD $0x02e9 // rorx r13d,r9d,0x2
LONG $0x00048d42 // lea eax,[rax+r8*1]
WORD $0x2141; BYTE $0xff // and r15d,edi
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3145; BYTE $0xd7 // xor r15d,r10d
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x38048d47 // lea r8d,[r8+r15*1]
WORD $0x8941; BYTE $0xdc // mov r12d,ebx
// ROUND(R8, R9, R10, R11, AX, BX, CX, DX, R12, R13, R14, R15, DI, BP, 0x30)
WORD $0x5503; BYTE $0x30 // add edx,[rbp+0x30]
WORD $0x2141; BYTE $0xc4 // and r12d,eax
LONG $0xf07b63c4; WORD $0x19e8 // rorx r13d,eax,0x19
LONG $0xf07b63c4; WORD $0x0bf8 // rorx r15d,eax,0xb
LONG $0x30048d47 // lea r8d,[r8+r14*1]
LONG $0x22148d42 // lea edx,[rdx+r12*1]
LONG $0xf27862c4; BYTE $0xe1 // andn r12d,eax,ecx
WORD $0x3145; BYTE $0xfd // xor r13d,r15d
LONG $0xf07b63c4; WORD $0x06f0 // rorx r14d,eax,0x6
LONG $0x22148d42 // lea edx,[rdx+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8945; BYTE $0xc7 // mov r15d,r8d
LONG $0xf07b43c4; WORD $0x16e0 // rorx r12d,r8d,0x16
LONG $0x2a148d42 // lea edx,[rdx+r13*1]
WORD $0x3145; BYTE $0xcf // xor r15d,r9d
LONG $0xf07b43c4; WORD $0x0df0 // rorx r14d,r8d,0xd
LONG $0xf07b43c4; WORD $0x02e8 // rorx r13d,r8d,0x2
LONG $0x131c8d45 // lea r11d,[r11+rdx*1]
WORD $0x2144; BYTE $0xff // and edi,r15d
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3144; BYTE $0xcf // xor edi,r9d
WORD $0x3145; BYTE $0xee // xor r14d,r13d
WORD $0x148d; BYTE $0x3a // lea edx,[rdx+rdi*1]
WORD $0x8941; BYTE $0xc4 // mov r12d,eax
// ROUND(DX, R8, R9, R10, R11, AX, BX, CX, R12, R13, R14, DI, R15, BP, 0x34)
WORD $0x4d03; BYTE $0x34 // add ecx,[rbp+0x34]
WORD $0x2145; BYTE $0xdc // and r12d,r11d
LONG $0xf07b43c4; WORD $0x19eb // rorx r13d,r11d,0x19
LONG $0xf07bc3c4; WORD $0x0bfb // rorx edi,r11d,0xb
LONG $0x32148d42 // lea edx,[rdx+r14*1]
LONG $0x210c8d42 // lea ecx,[rcx+r12*1]
LONG $0xf22062c4; BYTE $0xe3 // andn r12d,r11d,ebx
WORD $0x3141; BYTE $0xfd // xor r13d,edi
LONG $0xf07b43c4; WORD $0x06f3 // rorx r14d,r11d,0x6
LONG $0x210c8d42 // lea ecx,[rcx+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0xd789 // mov edi,edx
LONG $0xf07b63c4; WORD $0x16e2 // rorx r12d,edx,0x16
LONG $0x290c8d42 // lea ecx,[rcx+r13*1]
WORD $0x3144; BYTE $0xc7 // xor edi,r8d
LONG $0xf07b63c4; WORD $0x0df2 // rorx r14d,edx,0xd
LONG $0xf07b63c4; WORD $0x02ea // rorx r13d,edx,0x2
LONG $0x0a148d45 // lea r10d,[r10+rcx*1]
WORD $0x2141; BYTE $0xff // and r15d,edi
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3145; BYTE $0xc7 // xor r15d,r8d
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x390c8d42 // lea ecx,[rcx+r15*1]
WORD $0x8945; BYTE $0xdc // mov r12d,r11d
// ROUND(CX, DX, R8, R9, R10, R11, AX, BX, R12, R13, R14, R15, DI, BP, 0x38)
WORD $0x5d03; BYTE $0x38 // add ebx,[rbp+0x38]
WORD $0x2145; BYTE $0xd4 // and r12d,r10d
LONG $0xf07b43c4; WORD $0x19ea // rorx r13d,r10d,0x19
LONG $0xf07b43c4; WORD $0x0bfa // rorx r15d,r10d,0xb
LONG $0x310c8d42 // lea ecx,[rcx+r14*1]
LONG $0x231c8d42 // lea ebx,[rbx+r12*1]
LONG $0xf22862c4; BYTE $0xe0 // andn r12d,r10d,eax
WORD $0x3145; BYTE $0xfd // xor r13d,r15d
LONG $0xf07b43c4; WORD $0x06f2 // rorx r14d,r10d,0x6
LONG $0x231c8d42 // lea ebx,[rbx+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0x8941; BYTE $0xcf // mov r15d,ecx
LONG $0xf07b63c4; WORD $0x16e1 // rorx r12d,ecx,0x16
LONG $0x2b1c8d42 // lea ebx,[rbx+r13*1]
WORD $0x3141; BYTE $0xd7 // xor r15d,edx
LONG $0xf07b63c4; WORD $0x0df1 // rorx r14d,ecx,0xd
LONG $0xf07b63c4; WORD $0x02e9 // rorx r13d,ecx,0x2
LONG $0x190c8d45 // lea r9d,[r9+rbx*1]
WORD $0x2144; BYTE $0xff // and edi,r15d
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0xd731 // xor edi,edx
WORD $0x3145; BYTE $0xee // xor r14d,r13d
WORD $0x1c8d; BYTE $0x3b // lea ebx,[rbx+rdi*1]
WORD $0x8945; BYTE $0xd4 // mov r12d,r10d
// ROUND(BX, CX, DX, R8, R9, R10, R11, AX, R12, R13, R14, DI, R15, BP, 0x3c)
WORD $0x4503; BYTE $0x3c // add eax,[rbp+0x3c]
WORD $0x2145; BYTE $0xcc // and r12d,r9d
LONG $0xf07b43c4; WORD $0x19e9 // rorx r13d,r9d,0x19
LONG $0xf07bc3c4; WORD $0x0bf9 // rorx edi,r9d,0xb
LONG $0x331c8d42 // lea ebx,[rbx+r14*1]
LONG $0x20048d42 // lea eax,[rax+r12*1]
LONG $0xf23042c4; BYTE $0xe3 // andn r12d,r9d,r11d
WORD $0x3141; BYTE $0xfd // xor r13d,edi
LONG $0xf07b43c4; WORD $0x06f1 // rorx r14d,r9d,0x6
LONG $0x20048d42 // lea eax,[rax+r12*1]
WORD $0x3145; BYTE $0xf5 // xor r13d,r14d
WORD $0xdf89 // mov edi,ebx
LONG $0xf07b63c4; WORD $0x16e3 // rorx r12d,ebx,0x16
LONG $0x28048d42 // lea eax,[rax+r13*1]
WORD $0xcf31 // xor edi,ecx
LONG $0xf07b63c4; WORD $0x0df3 // rorx r14d,ebx,0xd
LONG $0xf07b63c4; WORD $0x02eb // rorx r13d,ebx,0x2
LONG $0x00048d45 // lea r8d,[r8+rax*1]
WORD $0x2141; BYTE $0xff // and r15d,edi
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d
WORD $0x3141; BYTE $0xcf // xor r15d,ecx
WORD $0x3145; BYTE $0xee // xor r14d,r13d
LONG $0x38048d42 // lea eax,[rax+r15*1]
WORD $0x8945; BYTE $0xcc // mov r12d,r9d
ADDQ $-0x40, BP
CMPQ BP, SP
JAE loop2
MOVQ 0x200(SP), DI // $_ctx
ADDQ R14, AX
ADDQ $0x1c0, SP
ADDL (DI), AX
ADDL 4(DI), BX
ADDL 8(DI), CX
ADDL 12(DI), DX
ADDL 16(DI), R8
ADDL 20(DI), R9
ADDQ $0x80, SI // input += 2
ADDL 24(DI), R10
MOVQ SI, R12
ADDL 28(DI), R11
CMPQ SI, 0x50(SP) // input == _end
MOVL AX, (DI)
LONG $0xe4440f4c // cmove r12,rsp /* next block or stale data */
MOVL AX, (DI)
MOVL BX, 4(DI)
MOVL CX, 8(DI)
MOVL DX, 12(DI)
MOVL R8, 16(DI)
MOVL R9, 20(DI)
MOVL R10, 24(DI)
MOVL R11, 28(DI)
JBE loop0
LEAQ (SP), BP
done:
MOVQ BP, SP
MOVQ 0x58(SP), SP
WORD $0xf8c5; BYTE $0x77 // vzeroupper
RET
| vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.s | 0 | https://github.com/minio/minio/commit/36990aeafd7b8d1846e5f66292eacde60c36f0fb | [
0.0001800235331756994,
0.00016917956236284226,
0.00016294384840875864,
0.00016907654935494065,
0.000002844600658136187
] |
{
"id": 2,
"code_window": [
"\tif err = checkDelObjArgs(ctx, bucket, object); err != nil {\n",
"\t\treturn err\n",
"\t}\n",
"\n",
"\tif !xl.isObject(bucket, object) && !xl.isObjectDir(bucket, object) {\n",
"\t\treturn ObjectNotFound{bucket, object}\n",
"\t}\n",
"\n",
"\tvar writeQuorum int\n",
"\tvar isObjectDir = hasSuffix(object, slashSeparator)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/xl-v1-object.go",
"type": "replace",
"edit_start_line_idx": 986
} | // Copyright 2012 Gary Burd
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package redis
import (
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"sync"
)
func ensureLen(d reflect.Value, n int) {
if n > d.Cap() {
d.Set(reflect.MakeSlice(d.Type(), n, n))
} else {
d.SetLen(n)
}
}
func cannotConvert(d reflect.Value, s interface{}) error {
var sname string
switch s.(type) {
case string:
sname = "Redis simple string"
case Error:
sname = "Redis error"
case int64:
sname = "Redis integer"
case []byte:
sname = "Redis bulk string"
case []interface{}:
sname = "Redis array"
default:
sname = reflect.TypeOf(s).String()
}
return fmt.Errorf("cannot convert from %s to %s", sname, d.Type())
}
func convertAssignBulkString(d reflect.Value, s []byte) (err error) {
switch d.Type().Kind() {
case reflect.Float32, reflect.Float64:
var x float64
x, err = strconv.ParseFloat(string(s), d.Type().Bits())
d.SetFloat(x)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
var x int64
x, err = strconv.ParseInt(string(s), 10, d.Type().Bits())
d.SetInt(x)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
var x uint64
x, err = strconv.ParseUint(string(s), 10, d.Type().Bits())
d.SetUint(x)
case reflect.Bool:
var x bool
x, err = strconv.ParseBool(string(s))
d.SetBool(x)
case reflect.String:
d.SetString(string(s))
case reflect.Slice:
if d.Type().Elem().Kind() != reflect.Uint8 {
err = cannotConvert(d, s)
} else {
d.SetBytes(s)
}
default:
err = cannotConvert(d, s)
}
return
}
func convertAssignInt(d reflect.Value, s int64) (err error) {
switch d.Type().Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
d.SetInt(s)
if d.Int() != s {
err = strconv.ErrRange
d.SetInt(0)
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
if s < 0 {
err = strconv.ErrRange
} else {
x := uint64(s)
d.SetUint(x)
if d.Uint() != x {
err = strconv.ErrRange
d.SetUint(0)
}
}
case reflect.Bool:
d.SetBool(s != 0)
default:
err = cannotConvert(d, s)
}
return
}
func convertAssignValue(d reflect.Value, s interface{}) (err error) {
switch s := s.(type) {
case []byte:
err = convertAssignBulkString(d, s)
case int64:
err = convertAssignInt(d, s)
default:
err = cannotConvert(d, s)
}
return err
}
func convertAssignArray(d reflect.Value, s []interface{}) error {
if d.Type().Kind() != reflect.Slice {
return cannotConvert(d, s)
}
ensureLen(d, len(s))
for i := 0; i < len(s); i++ {
if err := convertAssignValue(d.Index(i), s[i]); err != nil {
return err
}
}
return nil
}
func convertAssign(d interface{}, s interface{}) (err error) {
// Handle the most common destination types using type switches and
// fall back to reflection for all other types.
switch s := s.(type) {
case nil:
// ingore
case []byte:
switch d := d.(type) {
case *string:
*d = string(s)
case *int:
*d, err = strconv.Atoi(string(s))
case *bool:
*d, err = strconv.ParseBool(string(s))
case *[]byte:
*d = s
case *interface{}:
*d = s
case nil:
// skip value
default:
if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
err = cannotConvert(d, s)
} else {
err = convertAssignBulkString(d.Elem(), s)
}
}
case int64:
switch d := d.(type) {
case *int:
x := int(s)
if int64(x) != s {
err = strconv.ErrRange
x = 0
}
*d = x
case *bool:
*d = s != 0
case *interface{}:
*d = s
case nil:
// skip value
default:
if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
err = cannotConvert(d, s)
} else {
err = convertAssignInt(d.Elem(), s)
}
}
case string:
switch d := d.(type) {
case *string:
*d = string(s)
default:
err = cannotConvert(reflect.ValueOf(d), s)
}
case []interface{}:
switch d := d.(type) {
case *[]interface{}:
*d = s
case *interface{}:
*d = s
case nil:
// skip value
default:
if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
err = cannotConvert(d, s)
} else {
err = convertAssignArray(d.Elem(), s)
}
}
case Error:
err = s
default:
err = cannotConvert(reflect.ValueOf(d), s)
}
return
}
// Scan copies from src to the values pointed at by dest.
//
// The values pointed at by dest must be an integer, float, boolean, string,
// []byte, interface{} or slices of these types. Scan uses the standard strconv
// package to convert bulk strings to numeric and boolean types.
//
// If a dest value is nil, then the corresponding src value is skipped.
//
// If a src element is nil, then the corresponding dest value is not modified.
//
// To enable easy use of Scan in a loop, Scan returns the slice of src
// following the copied values.
func Scan(src []interface{}, dest ...interface{}) ([]interface{}, error) {
if len(src) < len(dest) {
return nil, errors.New("redigo.Scan: array short")
}
var err error
for i, d := range dest {
err = convertAssign(d, src[i])
if err != nil {
err = fmt.Errorf("redigo.Scan: cannot assign to dest %d: %v", i, err)
break
}
}
return src[len(dest):], err
}
type fieldSpec struct {
name string
index []int
omitEmpty bool
}
type structSpec struct {
m map[string]*fieldSpec
l []*fieldSpec
}
func (ss *structSpec) fieldSpec(name []byte) *fieldSpec {
return ss.m[string(name)]
}
func compileStructSpec(t reflect.Type, depth map[string]int, index []int, ss *structSpec) {
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
switch {
case f.PkgPath != "" && !f.Anonymous:
// Ignore unexported fields.
case f.Anonymous:
// TODO: Handle pointers. Requires change to decoder and
// protection against infinite recursion.
if f.Type.Kind() == reflect.Struct {
compileStructSpec(f.Type, depth, append(index, i), ss)
}
default:
fs := &fieldSpec{name: f.Name}
tag := f.Tag.Get("redis")
p := strings.Split(tag, ",")
if len(p) > 0 {
if p[0] == "-" {
continue
}
if len(p[0]) > 0 {
fs.name = p[0]
}
for _, s := range p[1:] {
switch s {
case "omitempty":
fs.omitEmpty = true
default:
panic(fmt.Errorf("redigo: unknown field tag %s for type %s", s, t.Name()))
}
}
}
d, found := depth[fs.name]
if !found {
d = 1 << 30
}
switch {
case len(index) == d:
// At same depth, remove from result.
delete(ss.m, fs.name)
j := 0
for i := 0; i < len(ss.l); i++ {
if fs.name != ss.l[i].name {
ss.l[j] = ss.l[i]
j += 1
}
}
ss.l = ss.l[:j]
case len(index) < d:
fs.index = make([]int, len(index)+1)
copy(fs.index, index)
fs.index[len(index)] = i
depth[fs.name] = len(index)
ss.m[fs.name] = fs
ss.l = append(ss.l, fs)
}
}
}
}
var (
structSpecMutex sync.RWMutex
structSpecCache = make(map[reflect.Type]*structSpec)
defaultFieldSpec = &fieldSpec{}
)
func structSpecForType(t reflect.Type) *structSpec {
structSpecMutex.RLock()
ss, found := structSpecCache[t]
structSpecMutex.RUnlock()
if found {
return ss
}
structSpecMutex.Lock()
defer structSpecMutex.Unlock()
ss, found = structSpecCache[t]
if found {
return ss
}
ss = &structSpec{m: make(map[string]*fieldSpec)}
compileStructSpec(t, make(map[string]int), nil, ss)
structSpecCache[t] = ss
return ss
}
var errScanStructValue = errors.New("redigo.ScanStruct: value must be non-nil pointer to a struct")
// ScanStruct scans alternating names and values from src into the struct
// pointed to by dest. Replies to HGETALL and CONFIG GET have this shape.
//
// Field matching uses exported field names; the 'redis' field tag overrides
// the name:
//
//	Field int `redis:"myName"`
//
// Fields tagged redis:"-" are ignored.
//
// Integer, float, boolean, string and []byte fields are supported; bulk
// string values are converted with the standard strconv package.
//
// A nil src element leaves the corresponding field unmodified.
func ScanStruct(src []interface{}, dest interface{}) error {
	dv := reflect.ValueOf(dest)
	if dv.Kind() != reflect.Ptr || dv.IsNil() {
		return errScanStructValue
	}
	dv = dv.Elem()
	if dv.Kind() != reflect.Struct {
		return errScanStructValue
	}
	spec := structSpecForType(dv.Type())

	if len(src)%2 != 0 {
		return errors.New("redigo.ScanStruct: number of values not a multiple of 2")
	}

	// src holds name/value pairs; walk them two at a time.
	for i := 0; i < len(src); i += 2 {
		value := src[i+1]
		if value == nil {
			// nil reply: leave the field as-is.
			continue
		}
		name, ok := src[i].([]byte)
		if !ok {
			return fmt.Errorf("redigo.ScanStruct: key %d not a bulk string value", i)
		}
		fs := spec.fieldSpec(name)
		if fs == nil {
			// Unknown name: silently skipped.
			continue
		}
		if err := convertAssignValue(dv.FieldByIndex(fs.index), value); err != nil {
			return fmt.Errorf("redigo.ScanStruct: cannot assign field %s: %v", fs.name, err)
		}
	}
	return nil
}
var (
	// errScanSliceValue is returned by ScanSlice when dest is not a
	// non-nil pointer to a slice.
	errScanSliceValue = errors.New("redigo.ScanSlice: dest must be non-nil pointer to a struct")
)
// ScanSlice scans src to the slice pointed to by dest. The elements the dest
// slice must be integer, float, boolean, string, struct or pointer to struct
// values.
//
// Struct fields must be integer, float, boolean or string values. All struct
// fields are used unless a subset is specified using fieldNames.
func ScanSlice(src []interface{}, dest interface{}, fieldNames ...string) error {
	d := reflect.ValueOf(dest)
	if d.Kind() != reflect.Ptr || d.IsNil() {
		return errScanSliceValue
	}
	d = d.Elem()
	if d.Kind() != reflect.Slice {
		return errScanSliceValue
	}

	// Determine the element type; *struct elements are allocated on
	// demand in the fill loop below.
	isPtr := false
	t := d.Type().Elem()
	if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {
		isPtr = true
		t = t.Elem()
	}

	// Scalar elements: one src value maps to one destination element.
	if t.Kind() != reflect.Struct {
		ensureLen(d, len(src))
		for i, s := range src {
			if s == nil {
				// nil reply leaves the element untouched.
				continue
			}
			if err := convertAssignValue(d.Index(i), s); err != nil {
				return fmt.Errorf("redigo.ScanSlice: cannot assign element %d: %v", i, err)
			}
		}
		return nil
	}

	// Struct elements: src holds consecutive groups of field values,
	// one group per destination element.
	ss := structSpecForType(t)
	fss := ss.l
	if len(fieldNames) > 0 {
		// Restrict to the caller-specified subset of fields,
		// preserving the caller's order.
		fss = make([]*fieldSpec, len(fieldNames))
		for i, name := range fieldNames {
			fss[i] = ss.m[name]
			if fss[i] == nil {
				return fmt.Errorf("redigo.ScanSlice: ScanSlice bad field name %s", name)
			}
		}
	}

	if len(fss) == 0 {
		return errors.New("redigo.ScanSlice: no struct fields")
	}

	// Each destination element consumes len(fss) values from src.
	n := len(src) / len(fss)
	if n*len(fss) != len(src) {
		return errors.New("redigo.ScanSlice: length not a multiple of struct field count")
	}

	ensureLen(d, n)
	for i := 0; i < n; i++ {
		d := d.Index(i)
		if isPtr {
			// Allocate the pointed-to struct if the slot is nil.
			if d.IsNil() {
				d.Set(reflect.New(t))
			}
			d = d.Elem()
		}
		for j, fs := range fss {
			s := src[i*len(fss)+j]
			if s == nil {
				continue
			}
			if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil {
				return fmt.Errorf("redigo.ScanSlice: cannot assign element %d to field %s: %v", i*len(fss)+j, fs.name, err)
			}
		}
	}
	return nil
}
// Args is a helper for constructing command arguments from structured values.
type Args []interface{}

// Add appends each of the given values to args and returns the result.
func (args Args) Add(value ...interface{}) Args {
	result := args
	for _, v := range value {
		result = append(result, v)
	}
	return result
}
// AddFlat returns the result of appending the flattened value of v to args.
//
// Maps flatten to alternating keys and values; slices flatten to their
// elements in order. Structs flatten to alternating exported-field names
// and values (honoring the 'redis' field tag; see ScanStruct); a nil
// struct pointer contributes nothing. Any other value is appended as-is.
func (args Args) AddFlat(v interface{}) Args {
	val := reflect.ValueOf(v)
	switch val.Kind() {
	case reflect.Struct:
		return flattenStruct(args, val)
	case reflect.Slice:
		for i := 0; i < val.Len(); i++ {
			args = append(args, val.Index(i).Interface())
		}
		return args
	case reflect.Map:
		for _, key := range val.MapKeys() {
			args = append(args, key.Interface(), val.MapIndex(key).Interface())
		}
		return args
	case reflect.Ptr:
		if val.Type().Elem().Kind() != reflect.Struct {
			// Non-struct pointers are appended verbatim.
			return append(args, v)
		}
		if val.IsNil() {
			// A nil struct pointer adds nothing.
			return args
		}
		return flattenStruct(args, val.Elem())
	default:
		return append(args, v)
	}
}
// flattenStruct appends the name/value pairs of v's scannable fields to
// args, using the names recorded in the compiled struct spec. Fields
// tagged omitempty are skipped when they hold their type's empty value.
func flattenStruct(args Args, v reflect.Value) Args {
	ss := structSpecForType(v.Type())
	for _, fs := range ss.l {
		fv := v.FieldByIndex(fs.index)
		if fs.omitEmpty {
			// Determine emptiness per kind; kinds not listed here
			// (e.g. structs) are never considered empty.
			var empty = false
			switch fv.Kind() {
			case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
				empty = fv.Len() == 0
			case reflect.Bool:
				empty = !fv.Bool()
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				empty = fv.Int() == 0
			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
				empty = fv.Uint() == 0
			case reflect.Float32, reflect.Float64:
				empty = fv.Float() == 0
			case reflect.Interface, reflect.Ptr:
				empty = fv.IsNil()
			}
			if empty {
				continue
			}
		}
		args = append(args, fs.name, fv.Interface())
	}
	return args
}
| vendor/github.com/garyburd/redigo/redis/scan.go | 0 | https://github.com/minio/minio/commit/36990aeafd7b8d1846e5f66292eacde60c36f0fb | [
0.0011624348117038608,
0.00019652176706586033,
0.00015993448323570192,
0.00017199241847265512,
0.00014045835996512324
] |
{
"id": 3,
"code_window": [
"\tvar writeQuorum int\n",
"\tvar isObjectDir = hasSuffix(object, slashSeparator)\n",
"\n",
"\tif isObjectDir {\n",
"\t\twriteQuorum = len(xl.getDisks())/2 + 1\n",
"\t} else {\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif isObjectDir && !xl.isObjectDir(bucket, object) {\n",
"\t\treturn toObjectErr(errFileNotFound, bucket, object)\n",
"\t}\n",
"\n"
],
"file_path": "cmd/xl-v1-object.go",
"type": "add",
"edit_start_line_idx": 993
} | /*
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"context"
"encoding/hex"
"io"
"net/http"
"path"
"strconv"
"strings"
"sync"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/mimedb"
)
// objectOpIgnoredErrs lists all errors which can be ignored in object
// operations when reducing per-disk errors to a single quorum verdict.
var objectOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied)
// putObjectDir hints the bottom layer to create a new directory. It issues
// MakeVol for the object path on every available disk in parallel and
// succeeds when at least writeQuorum disks do; errVolumeExists counts as
// success since the directory is already present.
func (xl xlObjects) putObjectDir(ctx context.Context, bucket, object string, writeQuorum int) error {
	var wg = &sync.WaitGroup{}

	errs := make([]error, len(xl.getDisks()))
	// Prepare object creation in all disks
	for index, disk := range xl.getDisks() {
		if disk == nil {
			continue
		}
		wg.Add(1)
		go func(index int, disk StorageAPI) {
			defer wg.Done()
			if err := disk.MakeVol(pathJoin(bucket, object)); err != nil && err != errVolumeExists {
				// Each goroutine writes only its own slot, so no
				// extra synchronization is needed on errs.
				errs[index] = err
			}
		}(index, disk)
	}
	wg.Wait()

	return reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
}
// prepareFile hints the bottom layer to optimize the creation of a new
// object by pre-allocating the erasure shard file on every online disk.
// Disks that fail pre-allocation are taken offline (set to nil) for the
// rest of the write; the call succeeds while writeQuorum disks remain.
func (xl xlObjects) prepareFile(ctx context.Context, bucket, object string, size int64, onlineDisks []StorageAPI, blockSize int64, dataBlocks, writeQuorum int) error {
	pErrs := make([]error, len(onlineDisks))
	// Calculate the real size of the part in one disk.
	actualSize := getErasureShardFileSize(blockSize, size, dataBlocks)
	// Prepare object creation in a all disks
	for index, disk := range onlineDisks {
		if disk != nil {
			if err := disk.PrepareFile(bucket, object, actualSize); err != nil {
				// Save error to reduce it later
				pErrs[index] = err
				// Ignore later access to disk which generated the error
				onlineDisks[index] = nil
			}
		}
	}
	return reduceWriteQuorumErrs(ctx, pErrs, objectOpIgnoredErrs, writeQuorum)
}
/// Object Operations
// CopyObject - copy object source object to destination object.
// if source object and destination object are same we only
// update metadata.
//
// For the same-object case only `xl.json` is rewritten (atomically via a
// temporary object); otherwise the source is streamed through a pipe into
// a regular putObject on the destination.
func (xl xlObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, e error) {
	cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))

	// Read metadata associated with the object from all disks.
	storageDisks := xl.getDisks()

	metaArr, errs := readAllXLMetadata(ctx, storageDisks, srcBucket, srcObject)

	// get Quorum for this object
	readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, xl, metaArr, errs)
	if err != nil {
		return oi, toObjectErr(err, srcBucket, srcObject)
	}

	if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
		return oi, toObjectErr(reducedErr, srcBucket, srcObject)
	}

	// List all online disks.
	_, modTime := listOnlineDisks(storageDisks, metaArr, errs)

	// Pick latest valid metadata.
	xlMeta, err := pickValidXLMeta(ctx, metaArr, modTime, readQuorum)
	if err != nil {
		return oi, toObjectErr(err, srcBucket, srcObject)
	}

	// Length of the file to read.
	length := xlMeta.Stat.Size

	// Check if this request is only metadata update.
	if cpSrcDstSame {
		// Update `xl.json` content on each disks.
		for index := range metaArr {
			metaArr[index].Meta = srcInfo.UserDefined
			metaArr[index].Meta["etag"] = srcInfo.ETag
		}

		var onlineDisks []StorageAPI

		tempObj := mustGetUUID()

		// Write unique `xl.json` for each disk.
		if onlineDisks, err = writeUniqueXLMetadata(ctx, storageDisks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil {
			return oi, toObjectErr(err, srcBucket, srcObject)
		}
		// Rename atomically `xl.json` from tmp location to destination for each disk.
		if _, err = renameXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempObj, srcBucket, srcObject, writeQuorum); err != nil {
			return oi, toObjectErr(err, srcBucket, srcObject)
		}
		return xlMeta.ToObjectInfo(srcBucket, srcObject), nil
	}

	// Initialize pipe: the reader side feeds putObject below while the
	// goroutine streams the source object into the writer side.
	pipeReader, pipeWriter := io.Pipe()

	go func() {
		var startOffset int64 // Read the whole file.
		if gerr := xl.getObject(ctx, srcBucket, srcObject, startOffset, length, pipeWriter, srcInfo.ETag, srcOpts); gerr != nil {
			// Propagate the read error to the pipe's reader.
			pipeWriter.CloseWithError(toObjectErr(gerr, srcBucket, srcObject))
			return
		}
		pipeWriter.Close() // Close writer explicitly signaling we wrote all data.
	}()

	hashReader, err := hash.NewReader(pipeReader, length, "", "", length)
	if err != nil {
		logger.LogIf(ctx, err)
		return oi, toObjectErr(err, dstBucket, dstObject)
	}

	objInfo, err := xl.putObject(ctx, dstBucket, dstObject, hashReader, srcInfo.UserDefined, dstOpts)
	if err != nil {
		return oi, toObjectErr(err, dstBucket, dstObject)
	}

	// Explicitly close the reader.
	pipeReader.Close()

	return objInfo, nil
}
// GetObjectNInfo - returns object info and an object
// Read(Closer). When err != nil, the returned reader is always nil.
//
// The requested lock (read, write or none) is held until the returned
// reader is closed; nsUnlocker is threaded through every early-return path
// so the lock is never leaked.
func (xl xlObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
	var nsUnlocker = func() {}

	// Acquire lock
	if lockType != noLock {
		lock := xl.nsMutex.NewNSLock(bucket, object)
		switch lockType {
		case writeLock:
			if err = lock.GetLock(globalObjectTimeout); err != nil {
				return nil, err
			}
			nsUnlocker = lock.Unlock
		case readLock:
			if err = lock.GetRLock(globalObjectTimeout); err != nil {
				return nil, err
			}
			nsUnlocker = lock.RUnlock
		}
	}

	if err = checkGetObjArgs(ctx, bucket, object); err != nil {
		nsUnlocker()
		return nil, err
	}

	// Handler directory request by returning a reader that
	// returns no bytes.
	if hasSuffix(object, slashSeparator) {
		if !xl.isObjectDir(bucket, object) {
			nsUnlocker()
			return nil, toObjectErr(errFileNotFound, bucket, object)
		}
		var objInfo ObjectInfo
		if objInfo, err = xl.getObjectInfoDir(ctx, bucket, object); err != nil {
			nsUnlocker()
			return nil, toObjectErr(err, bucket, object)
		}
		return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, nsUnlocker), nil
	}

	var objInfo ObjectInfo
	objInfo, err = xl.getObjectInfo(ctx, bucket, object)
	if err != nil {
		nsUnlocker()
		return nil, toObjectErr(err, bucket, object)
	}

	// fn wraps a raw reader into the GetObjectReader and takes over
	// ownership of nsUnlocker on success.
	fn, off, length, nErr := NewGetObjectReader(rs, objInfo, nsUnlocker)
	if nErr != nil {
		return nil, nErr
	}

	pr, pw := io.Pipe()
	go func() {
		err := xl.getObject(ctx, bucket, object, off, length, pw, "", opts)
		pw.CloseWithError(err)
	}()
	// Cleanup function to cause the go routine above to exit, in
	// case of incomplete read.
	pipeCloser := func() { pr.Close() }

	return fn(pr, h, pipeCloser)
}
// GetObject reads an object that is erasure coded across multiple disks,
// honoring the offset/length semantics of HTTP Range requests: startOffset
// is where the read begins and length is the total number of bytes to
// stream to writer. A shared lock is held for the duration of the read so
// concurrent writers cannot modify the object mid-stream.
func (xl xlObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error {
	lk := xl.nsMutex.NewNSLock(bucket, object)
	if err := lk.GetRLock(globalObjectTimeout); err != nil {
		return err
	}
	defer lk.RUnlock()

	return xl.getObject(ctx, bucket, object, startOffset, length, writer, etag, opts)
}
// getObject wrapper for xl GetObject
//
// It validates the request, establishes read quorum over the per-disk
// `xl.json` copies, then streams the requested byte range part by part,
// erasure-decoding each part from bitrot-verified shard readers.
func (xl xlObjects) getObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error {

	if err := checkGetObjArgs(ctx, bucket, object); err != nil {
		return err
	}

	// Start offset cannot be negative.
	if startOffset < 0 {
		logger.LogIf(ctx, errUnexpected)
		return errUnexpected
	}

	// Writer cannot be nil.
	if writer == nil {
		logger.LogIf(ctx, errUnexpected)
		return errUnexpected
	}

	// If its a directory request, we return an empty body.
	if hasSuffix(object, slashSeparator) {
		_, err := writer.Write([]byte(""))
		logger.LogIf(ctx, err)
		return toObjectErr(err, bucket, object)
	}

	// Read metadata associated with the object from all disks.
	metaArr, errs := readAllXLMetadata(ctx, xl.getDisks(), bucket, object)

	// get Quorum for this object
	readQuorum, _, err := objectQuorumFromMeta(ctx, xl, metaArr, errs)
	if err != nil {
		return toObjectErr(err, bucket, object)
	}

	if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
		return toObjectErr(reducedErr, bucket, object)
	}

	// List all online disks.
	onlineDisks, modTime := listOnlineDisks(xl.getDisks(), metaArr, errs)

	// Pick latest valid metadata.
	xlMeta, err := pickValidXLMeta(ctx, metaArr, modTime, readQuorum)
	if err != nil {
		return err
	}

	// Reorder online disks based on erasure distribution order.
	onlineDisks = shuffleDisks(onlineDisks, xlMeta.Erasure.Distribution)

	// Reorder parts metadata based on erasure distribution order.
	metaArr = shufflePartsMetadata(metaArr, xlMeta.Erasure.Distribution)

	// For negative length read everything.
	if length < 0 {
		length = xlMeta.Stat.Size - startOffset
	}

	// Reply back invalid range if the input offset and length fall out of range.
	if startOffset > xlMeta.Stat.Size || startOffset+length > xlMeta.Stat.Size {
		logger.LogIf(ctx, InvalidRange{startOffset, length, xlMeta.Stat.Size})
		return InvalidRange{startOffset, length, xlMeta.Stat.Size}
	}

	// Get start part index and offset.
	partIndex, partOffset, err := xlMeta.ObjectToPartOffset(ctx, startOffset)
	if err != nil {
		return InvalidRange{startOffset, length, xlMeta.Stat.Size}
	}

	// Calculate endOffset according to length
	endOffset := startOffset
	if length > 0 {
		endOffset += length - 1
	}

	// Get last part index to read given length.
	lastPartIndex, _, err := xlMeta.ObjectToPartOffset(ctx, endOffset)
	if err != nil {
		return InvalidRange{startOffset, length, xlMeta.Stat.Size}
	}

	var totalBytesRead int64
	erasure, err := NewErasure(ctx, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize)
	if err != nil {
		return toObjectErr(err, bucket, object)
	}

	// Decode the requested range one part at a time.
	for ; partIndex <= lastPartIndex; partIndex++ {
		if length == totalBytesRead {
			break
		}
		// Save the current part name and size.
		partName := xlMeta.Parts[partIndex].Name
		partSize := xlMeta.Parts[partIndex].Size

		partLength := partSize - partOffset
		// partLength should be adjusted so that we don't write more data than what was requested.
		if partLength > (length - totalBytesRead) {
			partLength = length - totalBytesRead
		}

		// Get the checksums of the current part.
		bitrotReaders := make([]*bitrotReader, len(onlineDisks))
		for index, disk := range onlineDisks {
			if disk == OfflineDisk {
				continue
			}
			checksumInfo := metaArr[index].Erasure.GetChecksumInfo(partName)
			endOffset := getErasureShardFileEndOffset(partOffset, partLength, partSize, xlMeta.Erasure.BlockSize, xlMeta.Erasure.DataBlocks)
			bitrotReaders[index] = newBitrotReader(disk, bucket, pathJoin(object, partName), checksumInfo.Algorithm, endOffset, checksumInfo.Hash)
		}

		err := erasure.Decode(ctx, writer, bitrotReaders, partOffset, partLength, partSize)
		if err != nil {
			return toObjectErr(err, bucket, object)
		}
		// Readers that Decode dropped (set to nil) mark their disks
		// offline for subsequent parts.
		for i, r := range bitrotReaders {
			if r == nil {
				onlineDisks[i] = OfflineDisk
			}
		}

		// Track total bytes read from disk and written to the client.
		totalBytesRead += partLength

		// partOffset will be valid only for the first part, hence reset it to 0 for
		// the remaining parts.
		partOffset = 0
	} // End of read all parts loop.

	// Return success.
	return nil
}
// getObjectInfoDir - This getObjectInfo is specific to object directory lookup.
// It issues StatVol on the directory path across all disks in parallel and
// requires N/2 read quorum for the directory to be considered present.
func (xl xlObjects) getObjectInfoDir(ctx context.Context, bucket, object string) (oi ObjectInfo, err error) {
	var wg = &sync.WaitGroup{}

	errs := make([]error, len(xl.getDisks()))
	// Prepare object creation in a all disks
	for index, disk := range xl.getDisks() {
		if disk == nil {
			continue
		}
		wg.Add(1)
		go func(index int, disk StorageAPI) {
			defer wg.Done()
			if _, err := disk.StatVol(pathJoin(bucket, object)); err != nil {
				// Since we are re-purposing StatVol, an object which
				// is a directory if it doesn't exist should be
				// returned as errFileNotFound instead, convert
				// the error right here accordingly.
				if err == errVolumeNotFound {
					err = errFileNotFound
				} else if err == errVolumeAccessDenied {
					err = errFileAccessDenied
				}

				// Save error to reduce it later
				errs[index] = err
			}
		}(index, disk)
	}

	wg.Wait()

	readQuorum := len(xl.getDisks()) / 2
	return dirObjectInfo(bucket, object, 0, map[string]string{}), reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum)
}
// GetObjectInfo - reads object metadata and replies back ObjectInfo.
// Directory requests (object names ending in "/") are served via the
// StatVol-based directory lookup; regular objects via `xl.json` metadata.
func (xl xlObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (oi ObjectInfo, e error) {
	// Lock the object before reading.
	objectLock := xl.nsMutex.NewNSLock(bucket, object)
	if err := objectLock.GetRLock(globalObjectTimeout); err != nil {
		return oi, err
	}
	defer objectLock.RUnlock()

	if err := checkGetObjArgs(ctx, bucket, object); err != nil {
		return oi, err
	}

	if hasSuffix(object, slashSeparator) {
		// Directory request: the directory must actually exist.
		if !xl.isObjectDir(bucket, object) {
			return oi, toObjectErr(errFileNotFound, bucket, object)
		}
		if oi, e = xl.getObjectInfoDir(ctx, bucket, object); e != nil {
			return oi, toObjectErr(e, bucket, object)
		}
		return oi, nil
	}

	info, err := xl.getObjectInfo(ctx, bucket, object)
	if err != nil {
		return oi, toObjectErr(err, bucket, object)
	}

	return info, nil
}
// isObjectCorrupted reports whether the object whose per-disk metadata and
// read errors are given must be considered unreliable: that is the case
// when `xl.json` is missing from so many disks that fewer than DataBlocks
// copies remain. It also returns the first valid metadata found, which
// callers use as the reference copy for recovery.
func (xl xlObjects) isObjectCorrupted(metaArr []xlMetaV1, errs []error) (validMeta xlMetaV1, ok bool) {
	// Count the disks on which xl.json was not found at all.
	missing := 0
	for _, readErr := range errs {
		if readErr == errFileNotFound {
			missing++
		}
	}

	// Pick the first parseable metadata as the reference copy.
	for _, meta := range metaArr {
		if meta.IsValid() {
			validMeta = meta
			break
		}
	}

	// Corrupted when the surviving copies cannot satisfy DataBlocks.
	return validMeta, len(xl.getDisks())-missing < validMeta.Erasure.DataBlocks
}
// xlCorruptedSuffix is appended to the name of an object whose data was
// detected as unrecoverable, making the corrupted copy visible so users
// can delete it.
const xlCorruptedSuffix = ".CORRUPTED"
// Renames the corrupted object and makes it visible.
//
// renameCorruptedObject moves the surviving pieces of the object under the
// xlCorruptedSuffix name, then fabricates an empty part plus matching
// checksum and `xl.json` on the disks where the object was missing, so the
// renamed corrupted object is fully visible (and deletable) everywhere.
func renameCorruptedObject(ctx context.Context, bucket, object string, validMeta xlMetaV1, disks []StorageAPI, errs []error) {
	writeQuorum := validMeta.Erasure.DataBlocks + 1

	// Move all existing objects into corrupted suffix.
	rename(ctx, disks, bucket, object, bucket, object+xlCorruptedSuffix, true, writeQuorum, []error{errFileNotFound})

	tempObj := mustGetUUID()

	// Get all the disks which do not have the file.
	var cdisks = make([]StorageAPI, len(disks))
	for i, merr := range errs {
		if merr == errFileNotFound {
			cdisks[i] = disks[i]
		}
	}

	for _, disk := range cdisks {
		if disk == nil {
			continue
		}

		// Write empty part file on missing disks.
		disk.AppendFile(minioMetaTmpBucket, pathJoin(tempObj, "part.1"), []byte{})

		// Write algorithm hash for empty part file.
		alg := validMeta.Erasure.Checksums[0].Algorithm.New()
		alg.Write([]byte{})

		// Update the checksums and part info.
		// NOTE(review): validMeta is mutated in place each iteration;
		// the values written are identical for every disk.
		validMeta.Erasure.Checksums[0] = ChecksumInfo{
			Name:      validMeta.Erasure.Checksums[0].Name,
			Algorithm: validMeta.Erasure.Checksums[0].Algorithm,
			Hash:      alg.Sum(nil),
		}

		validMeta.Parts[0] = objectPartInfo{
			Number: 1,
			Name:   "part.1",
		}

		// Write the `xl.json` with the newly calculated metadata.
		writeXLMetadata(ctx, disk, minioMetaTmpBucket, tempObj, validMeta)
	}

	// Finally rename all the parts into their respective locations.
	rename(ctx, cdisks, minioMetaTmpBucket, tempObj, bucket, object+xlCorruptedSuffix, true, writeQuorum, []error{errFileNotFound})
}
// getObjectInfo - wrapper for reading object metadata and constructs ObjectInfo.
// Objects detected as corrupted are renamed with xlCorruptedSuffix first;
// already-renamed corrupted objects are read with a relaxed N/2 quorum so
// they remain visible for deletion.
func (xl xlObjects) getObjectInfo(ctx context.Context, bucket, object string) (objInfo ObjectInfo, err error) {
	disks := xl.getDisks()

	// Read metadata associated with the object from all disks.
	metaArr, errs := readAllXLMetadata(ctx, disks, bucket, object)

	var readQuorum int
	// Having read quorum means we have xl.json in at least N/2 disks.
	if !strings.HasSuffix(object, xlCorruptedSuffix) {
		if validMeta, ok := xl.isObjectCorrupted(metaArr, errs); ok {
			renameCorruptedObject(ctx, bucket, object, validMeta, disks, errs)
			// Return err file not found since we renamed now the corrupted object
			return objInfo, errFileNotFound
		}

		// Not a corrupted object, attempt to get readquorum properly.
		readQuorum, _, err = objectQuorumFromMeta(ctx, xl, metaArr, errs)
		if err != nil {
			return objInfo, err
		}

	} else {
		// If this is a corrupted object, change read quorum to N/2 disks
		// so it will be visible to users, so they can delete it.
		readQuorum = len(xl.getDisks()) / 2

	}

	// List all the file commit ids from parts metadata.
	modTimes := listObjectModtimes(metaArr, errs)

	// Reduce list of UUIDs to a single common value.
	modTime, _ := commonTime(modTimes)

	// Pick latest valid metadata.
	xlMeta, err := pickValidXLMeta(ctx, metaArr, modTime, readQuorum)
	if err != nil {
		return objInfo, err
	}

	return xlMeta.ToObjectInfo(bucket, object), nil
}
// undoRename reverts a partially completed rename by renaming dstEntry back
// to srcEntry, in parallel, on every disk whose forward rename succeeded
// (i.e. errs[index] == nil). Errors from the undo itself are ignored:
// this is best-effort cleanup.
func undoRename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, isDir bool, errs []error) {
	var wg = &sync.WaitGroup{}
	// Undo rename object on disks where RenameFile succeeded.

	// If srcEntry/dstEntry are objects then add a trailing slash to copy
	// over all the parts inside the object directory
	if isDir {
		srcEntry = retainSlash(srcEntry)
		dstEntry = retainSlash(dstEntry)
	}
	for index, disk := range disks {
		if disk == nil {
			continue
		}
		// Undo rename object in parallel.
		wg.Add(1)
		go func(index int, disk StorageAPI) {
			defer wg.Done()
			if errs[index] != nil {
				// Forward rename failed on this disk; nothing to undo.
				return
			}
			_ = disk.RenameFile(dstBucket, dstEntry, srcBucket, srcEntry)
		}(index, disk)
	}
	wg.Wait()
}
// rename - common function that renamePart and renameObject use to rename
// the respective underlying storage layer representations.
//
// The rename runs on all disks in parallel; errors listed in ignoredErr are
// treated as success on that disk. If write quorum is lost, every partial
// rename is rolled back via undoRename. The returned disk slice has failed
// disks replaced with nil.
func rename(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, isDir bool, writeQuorum int, ignoredErr []error) ([]StorageAPI, error) {
	// Initialize sync waitgroup.
	var wg = &sync.WaitGroup{}

	// Initialize list of errors.
	var errs = make([]error, len(disks))

	if isDir {
		// Trailing slash makes the storage layer move the whole
		// object directory, parts included.
		dstEntry = retainSlash(dstEntry)
		srcEntry = retainSlash(srcEntry)
	}

	// Rename file on all underlying storage disks.
	for index, disk := range disks {
		if disk == nil {
			errs[index] = errDiskNotFound
			continue
		}
		wg.Add(1)
		go func(index int, disk StorageAPI) {
			defer wg.Done()
			if err := disk.RenameFile(srcBucket, srcEntry, dstBucket, dstEntry); err != nil {
				if !IsErrIgnored(err, ignoredErr...) {
					errs[index] = err
				}
			}
		}(index, disk)
	}

	// Wait for all renames to finish.
	wg.Wait()

	// We can safely allow RenameFile errors up to len(xl.getDisks()) - writeQuorum
	// otherwise return failure. Cleanup successful renames.
	err := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
	if err == errXLWriteQuorum {
		// Undo all the partial rename operations.
		undoRename(disks, srcBucket, srcEntry, dstBucket, dstEntry, isDir, errs)
	}
	return evalDisks(disks, errs), err
}
// PutObject creates an object from the input stream, erasure coding the
// data across all disks and writing the `xl.json` metadata needed for
// future object operations. It validates the arguments, serializes
// concurrent writers on the object via an exclusive namespace lock, and
// delegates the actual write to putObject.
func (xl xlObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	// Validate put object input args.
	if err = checkPutObjectArgs(ctx, bucket, object, xl, data.Size()); err != nil {
		return ObjectInfo{}, err
	}

	// Hold an exclusive lock for the duration of the write.
	lk := xl.nsMutex.NewNSLock(bucket, object)
	if err = lk.GetLock(globalObjectTimeout); err != nil {
		return objInfo, err
	}
	defer lk.Unlock()

	return xl.putObject(ctx, bucket, object, data, metadata, opts)
}
// putObject wrapper for xl PutObject
func (xl xlObjects) putObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
uniqueID := mustGetUUID()
tempObj := uniqueID
// No metadata is set, allocate a new one.
if metadata == nil {
metadata = make(map[string]string)
}
// Get parity and data drive count based on storage class metadata
dataDrives, parityDrives := getRedundancyCount(metadata[amzStorageClass], len(xl.getDisks()))
// we now know the number of blocks this object needs for data and parity.
// writeQuorum is dataBlocks + 1
writeQuorum := dataDrives + 1
// Delete temporary object in the event of failure.
// If PutObject succeeded there would be no temporary
// object to delete.
defer xl.deleteObject(ctx, minioMetaTmpBucket, tempObj, writeQuorum, false)
// This is a special case with size as '0' and object ends with
// a slash separator, we treat it like a valid operation and
// return success.
if isObjectDir(object, data.Size()) {
// Check if an object is present as one of the parent dir.
// -- FIXME. (needs a new kind of lock).
// -- FIXME (this also causes performance issue when disks are down).
if xl.parentDirIsObject(ctx, bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(errFileAccessDenied, bucket, object)
}
if err = xl.putObjectDir(ctx, minioMetaTmpBucket, tempObj, writeQuorum); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
// Rename the successfully written temporary object to final location. Ignore errFileAccessDenied
// error because it means that the target object dir exists and we want to be close to S3 specification.
if _, err = rename(ctx, xl.getDisks(), minioMetaTmpBucket, tempObj, bucket, object, true, writeQuorum, []error{errFileAccessDenied}); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
return dirObjectInfo(bucket, object, data.Size(), metadata), nil
}
// Validate put object input args.
if err = checkPutObjectArgs(ctx, bucket, object, xl, data.Size()); err != nil {
return ObjectInfo{}, err
}
// Validate input data size and it can never be less than zero.
if data.Size() < -1 {
logger.LogIf(ctx, errInvalidArgument)
return ObjectInfo{}, toObjectErr(errInvalidArgument)
}
// Check if an object is present as one of the parent dir.
// -- FIXME. (needs a new kind of lock).
// -- FIXME (this also causes performance issue when disks are down).
if xl.parentDirIsObject(ctx, bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(errFileAccessDenied, bucket, object)
}
// Limit the reader to its provided size if specified.
var reader io.Reader = data
// Initialize parts metadata
partsMetadata := make([]xlMetaV1, len(xl.getDisks()))
xlMeta := newXLMetaV1(object, dataDrives, parityDrives)
// Initialize xl meta.
for index := range partsMetadata {
partsMetadata[index] = xlMeta
}
// Order disks according to erasure distribution
onlineDisks := shuffleDisks(xl.getDisks(), partsMetadata[0].Erasure.Distribution)
// Total size of the written object
var sizeWritten int64
erasure, err := NewErasure(ctx, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize)
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
// Fetch buffer for I/O, returns from the pool if not allocates a new one and returns.
var buffer []byte
switch size := data.Size(); {
case size == 0:
buffer = make([]byte, 1) // Allocate atleast a byte to reach EOF
case size == -1 || size >= blockSizeV1:
buffer = xl.bp.Get()
defer xl.bp.Put(buffer)
case size < blockSizeV1:
// No need to allocate fully blockSizeV1 buffer if the incoming data is smaller.
buffer = make([]byte, size, 2*size)
}
if len(buffer) > int(xlMeta.Erasure.BlockSize) {
buffer = buffer[:xlMeta.Erasure.BlockSize]
}
// Read data and split into parts - similar to multipart mechanism
for partIdx := 1; ; partIdx++ {
// Compute part name
partName := "part." + strconv.Itoa(partIdx)
// Compute the path of current part
tempErasureObj := pathJoin(uniqueID, partName)
// Calculate the size of the current part.
var curPartSize int64
curPartSize, err = calculatePartSizeFromIdx(ctx, data.Size(), globalPutPartSize, partIdx)
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
// Hint the filesystem to pre-allocate one continuous large block.
// This is only an optimization.
var curPartReader io.Reader
if curPartSize >= 0 {
pErr := xl.prepareFile(ctx, minioMetaTmpBucket, tempErasureObj, curPartSize, onlineDisks, xlMeta.Erasure.BlockSize, xlMeta.Erasure.DataBlocks, writeQuorum)
if pErr != nil {
return ObjectInfo{}, toObjectErr(pErr, bucket, object)
}
}
if curPartSize < data.Size() {
curPartReader = io.LimitReader(reader, curPartSize)
} else {
curPartReader = reader
}
writers := make([]*bitrotWriter, len(onlineDisks))
for i, disk := range onlineDisks {
if disk == nil {
continue
}
writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tempErasureObj, DefaultBitrotAlgorithm)
}
n, erasureErr := erasure.Encode(ctx, curPartReader, writers, buffer, erasure.dataBlocks+1)
if erasureErr != nil {
return ObjectInfo{}, toObjectErr(erasureErr, minioMetaTmpBucket, tempErasureObj)
}
// Should return IncompleteBody{} error when reader has fewer bytes
// than specified in request header.
if n < curPartSize && data.Size() > 0 {
logger.LogIf(ctx, IncompleteBody{})
return ObjectInfo{}, IncompleteBody{}
}
if n == 0 && data.Size() == -1 {
// The last part of a compressed object will always be empty
// Since the compressed size is unpredictable.
// Hence removing the last (empty) part from all `xl.disks`.
dErr := xl.deleteObject(ctx, minioMetaTmpBucket, tempErasureObj, writeQuorum, true)
if dErr != nil {
return ObjectInfo{}, toObjectErr(dErr, minioMetaTmpBucket, tempErasureObj)
}
break
}
// Update the total written size
sizeWritten += n
for i, w := range writers {
if w == nil {
onlineDisks[i] = nil
continue
}
partsMetadata[i].AddObjectPart(partIdx, partName, "", n, data.ActualSize())
partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{partName, DefaultBitrotAlgorithm, w.Sum()})
}
// We wrote everything, break out.
if sizeWritten == data.Size() {
break
}
}
// Save additional erasureMetadata.
modTime := UTCNow()
metadata["etag"] = hex.EncodeToString(data.MD5Current())
// Guess content-type from the extension if possible.
if metadata["content-type"] == "" {
metadata["content-type"] = mimedb.TypeByExtension(path.Ext(object))
}
if xl.isObject(bucket, object) {
// Deny if WORM is enabled
if globalWORMEnabled {
return ObjectInfo{}, ObjectAlreadyExists{Bucket: bucket, Object: object}
}
// Rename if an object already exists to temporary location.
newUniqueID := mustGetUUID()
// Delete successfully renamed object.
defer xl.deleteObject(ctx, minioMetaTmpBucket, newUniqueID, writeQuorum, false)
// NOTE: Do not use online disks slice here: the reason is that existing object should be purged
// regardless of `xl.json` status and rolled back in case of errors. Also allow renaming the
// existing object if it is not present in quorum disks so users can overwrite stale objects.
_, err = rename(ctx, xl.getDisks(), bucket, object, minioMetaTmpBucket, newUniqueID, true, writeQuorum, []error{errFileNotFound})
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
}
// Fill all the necessary metadata.
// Update `xl.json` content on each disks.
for index := range partsMetadata {
partsMetadata[index].Meta = metadata
partsMetadata[index].Stat.Size = sizeWritten
partsMetadata[index].Stat.ModTime = modTime
}
// Write unique `xl.json` for each disk.
if onlineDisks, err = writeUniqueXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, writeQuorum); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
// Rename the successfully written temporary object to final location.
if _, err = rename(ctx, onlineDisks, minioMetaTmpBucket, tempObj, bucket, object, true, writeQuorum, nil); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
// Object info is the same in all disks, so we can pick the first meta
// of the first disk
xlMeta = partsMetadata[0]
objInfo = ObjectInfo{
IsDir: false,
Bucket: bucket,
Name: object,
Size: xlMeta.Stat.Size,
ModTime: xlMeta.Stat.ModTime,
ETag: xlMeta.Meta["etag"],
ContentType: xlMeta.Meta["content-type"],
ContentEncoding: xlMeta.Meta["content-encoding"],
UserDefined: xlMeta.Meta,
}
// Success, return object info.
return objInfo, nil
}
// deleteObject - wrapper for delete object, deletes an object from
// all the disks in parallel, including `xl.json` associated with the
// object.
func (xl xlObjects) deleteObject(ctx context.Context, bucket, object string, writeQuorum int, isDir bool) error {
var disks []StorageAPI
var err error
tmpObj := mustGetUUID()
if bucket == minioMetaTmpBucket {
tmpObj = object
disks = xl.getDisks()
} else {
// Rename the current object while requiring write quorum, but also consider
// that a non found object in a given disk as a success since it already
// confirms that the object doesn't have a part in that disk (already removed)
if isDir {
disks, err = rename(ctx, xl.getDisks(), bucket, object, minioMetaTmpBucket, tmpObj, true, writeQuorum,
[]error{errFileNotFound, errFileAccessDenied})
} else {
disks, err = rename(ctx, xl.getDisks(), bucket, object, minioMetaTmpBucket, tmpObj, true, writeQuorum,
[]error{errFileNotFound})
}
if err != nil {
return toObjectErr(err, bucket, object)
}
}
// Initialize sync waitgroup.
var wg = &sync.WaitGroup{}
// Initialize list of errors.
var dErrs = make([]error, len(disks))
for index, disk := range disks {
if disk == nil {
dErrs[index] = errDiskNotFound
continue
}
wg.Add(1)
go func(index int, disk StorageAPI, isDir bool) {
defer wg.Done()
var e error
if isDir {
// DeleteFile() simply tries to remove a directory
// and will succeed only if that directory is empty.
e = disk.DeleteFile(minioMetaTmpBucket, tmpObj)
} else {
e = cleanupDir(ctx, disk, minioMetaTmpBucket, tmpObj)
}
if e != nil && e != errVolumeNotFound {
dErrs[index] = e
}
}(index, disk, isDir)
}
// Wait for all routines to finish.
wg.Wait()
// return errors if any during deletion
return reduceWriteQuorumErrs(ctx, dErrs, objectOpIgnoredErrs, writeQuorum)
}
// DeleteObject - deletes an object, this call doesn't necessary reply
// any error as it is not necessary for the handler to reply back a
// response to the client request.
func (xl xlObjects) DeleteObject(ctx context.Context, bucket, object string) (err error) {
// Acquire a write lock before deleting the object.
objectLock := xl.nsMutex.NewNSLock(bucket, object)
if perr := objectLock.GetLock(globalOperationTimeout); perr != nil {
return perr
}
defer objectLock.Unlock()
if err = checkDelObjArgs(ctx, bucket, object); err != nil {
return err
}
if !xl.isObject(bucket, object) && !xl.isObjectDir(bucket, object) {
return ObjectNotFound{bucket, object}
}
var writeQuorum int
var isObjectDir = hasSuffix(object, slashSeparator)
if isObjectDir {
writeQuorum = len(xl.getDisks())/2 + 1
} else {
// Read metadata associated with the object from all disks.
partsMetadata, errs := readAllXLMetadata(ctx, xl.getDisks(), bucket, object)
// get Quorum for this object
_, writeQuorum, err = objectQuorumFromMeta(ctx, xl, partsMetadata, errs)
if err != nil {
return toObjectErr(err, bucket, object)
}
}
// Delete the object on all disks.
if err = xl.deleteObject(ctx, bucket, object, writeQuorum, isObjectDir); err != nil {
return toObjectErr(err, bucket, object)
}
// Success.
return nil
}
// ListObjectsV2 lists all blobs in bucket filtered by prefix
func (xl xlObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
marker := continuationToken
if marker == "" {
marker = startAfter
}
loi, err := xl.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
return result, err
}
listObjectsV2Info := ListObjectsV2Info{
IsTruncated: loi.IsTruncated,
ContinuationToken: continuationToken,
NextContinuationToken: loi.NextMarker,
Objects: loi.Objects,
Prefixes: loi.Prefixes,
}
return listObjectsV2Info, err
}
| cmd/xl-v1-object.go | 1 | https://github.com/minio/minio/commit/36990aeafd7b8d1846e5f66292eacde60c36f0fb | [
0.9991239905357361,
0.2670118808746338,
0.0001633867941563949,
0.0019881001207977533,
0.4301723837852478
] |
{
"id": 3,
"code_window": [
"\tvar writeQuorum int\n",
"\tvar isObjectDir = hasSuffix(object, slashSeparator)\n",
"\n",
"\tif isObjectDir {\n",
"\t\twriteQuorum = len(xl.getDisks())/2 + 1\n",
"\t} else {\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif isObjectDir && !xl.isObjectDir(bucket, object) {\n",
"\t\treturn toObjectErr(errFileNotFound, bucket, object)\n",
"\t}\n",
"\n"
],
"file_path": "cmd/xl-v1-object.go",
"type": "add",
"edit_start_line_idx": 993
} | // Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build arm,freebsd
package unix
import (
"syscall"
"unsafe"
)
func setTimespec(sec, nsec int64) Timespec {
return Timespec{Sec: sec, Nsec: int32(nsec)}
}
func setTimeval(sec, usec int64) Timeval {
return Timeval{Sec: sec, Usec: int32(usec)}
}
func SetKevent(k *Kevent_t, fd, mode, flags int) {
k.Ident = uint32(fd)
k.Filter = int16(mode)
k.Flags = uint16(flags)
}
func (iov *Iovec) SetLen(length int) {
iov.Len = uint32(length)
}
func (msghdr *Msghdr) SetControllen(length int) {
msghdr.Controllen = uint32(length)
}
func (cmsg *Cmsghdr) SetLen(length int) {
cmsg.Len = uint32(length)
}
func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
var writtenOut uint64 = 0
_, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0)
written = int(writtenOut)
if e1 != 0 {
err = e1
}
return
}
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
| vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go | 0 | https://github.com/minio/minio/commit/36990aeafd7b8d1846e5f66292eacde60c36f0fb | [
0.00023725471692159772,
0.00018399204418528825,
0.00016839613090269268,
0.00017404509708285332,
0.000024046226826612838
] |
{
"id": 3,
"code_window": [
"\tvar writeQuorum int\n",
"\tvar isObjectDir = hasSuffix(object, slashSeparator)\n",
"\n",
"\tif isObjectDir {\n",
"\t\twriteQuorum = len(xl.getDisks())/2 + 1\n",
"\t} else {\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif isObjectDir && !xl.isObjectDir(bucket, object) {\n",
"\t\treturn toObjectErr(errFileNotFound, bucket, object)\n",
"\t}\n",
"\n"
],
"file_path": "cmd/xl-v1-object.go",
"type": "add",
"edit_start_line_idx": 993
} | package update
import (
"io"
"github.com/inconshreveable/go-update/internal/binarydist"
)
// Patcher defines an interface for applying binary patches to an old item to get an updated item.
type Patcher interface {
Patch(old io.Reader, new io.Writer, patch io.Reader) error
}
type patchFn func(io.Reader, io.Writer, io.Reader) error
func (fn patchFn) Patch(old io.Reader, new io.Writer, patch io.Reader) error {
return fn(old, new, patch)
}
// NewBSDifferPatcher returns a new Patcher that applies binary patches using
// the bsdiff algorithm. See http://www.daemonology.net/bsdiff/
func NewBSDiffPatcher() Patcher {
return patchFn(binarydist.Patch)
}
| vendor/github.com/inconshreveable/go-update/patcher.go | 0 | https://github.com/minio/minio/commit/36990aeafd7b8d1846e5f66292eacde60c36f0fb | [
0.00031208249856717885,
0.00022653209452982992,
0.00016664927534293383,
0.00020086449512746185,
0.00006208502600202337
] |
{
"id": 3,
"code_window": [
"\tvar writeQuorum int\n",
"\tvar isObjectDir = hasSuffix(object, slashSeparator)\n",
"\n",
"\tif isObjectDir {\n",
"\t\twriteQuorum = len(xl.getDisks())/2 + 1\n",
"\t} else {\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif isObjectDir && !xl.isObjectDir(bucket, object) {\n",
"\t\treturn toObjectErr(errFileNotFound, bucket, object)\n",
"\t}\n",
"\n"
],
"file_path": "cmd/xl-v1-object.go",
"type": "add",
"edit_start_line_idx": 993
} | // Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build openbsd
// +build 386 amd64 arm
package unix
import (
"syscall"
"unsafe"
)
const (
SYS_PLEDGE = 108
)
// Pledge implements the pledge syscall. For more information see pledge(2).
func Pledge(promises string, paths []string) error {
promisesPtr, err := syscall.BytePtrFromString(promises)
if err != nil {
return err
}
promisesUnsafe, pathsUnsafe := unsafe.Pointer(promisesPtr), unsafe.Pointer(nil)
if paths != nil {
var pathsPtr []*byte
if pathsPtr, err = syscall.SlicePtrFromStrings(paths); err != nil {
return err
}
pathsUnsafe = unsafe.Pointer(&pathsPtr[0])
}
_, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(promisesUnsafe), uintptr(pathsUnsafe), 0)
if e != 0 {
return e
}
return nil
}
| vendor/golang.org/x/sys/unix/openbsd_pledge.go | 0 | https://github.com/minio/minio/commit/36990aeafd7b8d1846e5f66292eacde60c36f0fb | [
0.0005277733434922993,
0.0002609146758913994,
0.00016654745559208095,
0.00017466895224060863,
0.00015413067012559623
] |
{
"id": 4,
"code_window": [
"func readXLMeta(ctx context.Context, disk StorageAPI, bucket string, object string) (xlMeta xlMetaV1, err error) {\n",
"\t// Reads entire `xl.json`.\n",
"\txlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))\n",
"\tif err != nil {\n",
"\t\tif err != errFileNotFound {\n",
"\t\t\tlogger.GetReqInfo(ctx).AppendTags(\"disk\", disk.String())\n",
"\t\t\tlogger.LogIf(ctx, err)\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif err != errFileNotFound && err != errVolumeNotFound {\n"
],
"file_path": "cmd/xl-v1-utils.go",
"type": "replace",
"edit_start_line_idx": 307
} | /*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"encoding/hex"
"errors"
"hash/crc32"
"path"
"sync"
"time"
"github.com/minio/minio/cmd/logger"
"github.com/tidwall/gjson"
)
// Returns number of errors that occurred the most (incl. nil) and the
// corresponding error value. NB When there is more than one error value that
// occurs maximum number of times, the error value returned depends on how
// golang's map orders keys. This doesn't affect correctness as long as quorum
// value is greater than or equal to simple majority, since none of the equally
// maximal values would occur quorum or more number of times.
func reduceErrs(errs []error, ignoredErrs []error) (maxCount int, maxErr error) {
errorCounts := make(map[error]int)
for _, err := range errs {
if IsErrIgnored(err, ignoredErrs...) {
continue
}
errorCounts[err]++
}
max := 0
for err, count := range errorCounts {
switch {
case max < count:
max = count
maxErr = err
// Prefer `nil` over other error values with the same
// number of occurrences.
case max == count && err == nil:
maxErr = err
}
}
return max, maxErr
}
// reduceQuorumErrs behaves like reduceErrs by only for returning
// values of maximally occurring errors validated against a generic
// quorum number that can be read or write quorum depending on usage.
func reduceQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, quorum int, quorumErr error) error {
maxCount, maxErr := reduceErrs(errs, ignoredErrs)
if maxCount >= quorum {
return maxErr
}
return quorumErr
}
// reduceReadQuorumErrs behaves like reduceErrs but only for returning
// values of maximally occurring errors validated against readQuorum.
func reduceReadQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, readQuorum int) (maxErr error) {
return reduceQuorumErrs(ctx, errs, ignoredErrs, readQuorum, errXLReadQuorum)
}
// reduceWriteQuorumErrs behaves like reduceErrs but only for returning
// values of maximally occurring errors validated against writeQuorum.
func reduceWriteQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, writeQuorum int) (maxErr error) {
return reduceQuorumErrs(ctx, errs, ignoredErrs, writeQuorum, errXLWriteQuorum)
}
// Similar to 'len(slice)' but returns the actual elements count
// skipping the unallocated elements.
func diskCount(disks []StorageAPI) int {
diskCount := 0
for _, disk := range disks {
if disk == nil {
continue
}
diskCount++
}
return diskCount
}
// hashOrder - hashes input key to return consistent
// hashed integer slice. Returned integer order is salted
// with an input key. This results in consistent order.
// NOTE: collisions are fine, we are not looking for uniqueness
// in the slices returned.
func hashOrder(key string, cardinality int) []int {
if cardinality <= 0 {
// Returns an empty int slice for cardinality < 0.
return nil
}
nums := make([]int, cardinality)
keyCrc := crc32.Checksum([]byte(key), crc32.IEEETable)
start := int(keyCrc % uint32(cardinality))
for i := 1; i <= cardinality; i++ {
nums[i-1] = 1 + ((start + i) % cardinality)
}
return nums
}
func parseXLStat(xlMetaBuf []byte) (si statInfo, e error) {
// obtain stat info.
stat := statInfo{}
// fetching modTime.
modTime, err := time.Parse(time.RFC3339, gjson.GetBytes(xlMetaBuf, "stat.modTime").String())
if err != nil {
return si, err
}
stat.ModTime = modTime
// obtain Stat.Size .
stat.Size = gjson.GetBytes(xlMetaBuf, "stat.size").Int()
return stat, nil
}
func parseXLVersion(xlMetaBuf []byte) string {
return gjson.GetBytes(xlMetaBuf, "version").String()
}
func parseXLFormat(xlMetaBuf []byte) string {
return gjson.GetBytes(xlMetaBuf, "format").String()
}
func parseXLRelease(xlMetaBuf []byte) string {
return gjson.GetBytes(xlMetaBuf, "minio.release").String()
}
func parseXLErasureInfo(ctx context.Context, xlMetaBuf []byte) (ErasureInfo, error) {
erasure := ErasureInfo{}
erasureResult := gjson.GetBytes(xlMetaBuf, "erasure")
// parse the xlV1Meta.Erasure.Distribution.
disResult := erasureResult.Get("distribution").Array()
distribution := make([]int, len(disResult))
for i, dis := range disResult {
distribution[i] = int(dis.Int())
}
erasure.Distribution = distribution
erasure.Algorithm = erasureResult.Get("algorithm").String()
erasure.DataBlocks = int(erasureResult.Get("data").Int())
erasure.ParityBlocks = int(erasureResult.Get("parity").Int())
erasure.BlockSize = erasureResult.Get("blockSize").Int()
erasure.Index = int(erasureResult.Get("index").Int())
checkSumsResult := erasureResult.Get("checksum").Array()
// Check for scenario where checksum information missing for some parts.
partsResult := gjson.GetBytes(xlMetaBuf, "parts").Array()
if len(checkSumsResult) != len(partsResult) {
return erasure, errCorruptedFormat
}
// Parse xlMetaV1.Erasure.Checksum array.
checkSums := make([]ChecksumInfo, len(checkSumsResult))
for i, v := range checkSumsResult {
algorithm := BitrotAlgorithmFromString(v.Get("algorithm").String())
if !algorithm.Available() {
logger.LogIf(ctx, errBitrotHashAlgoInvalid)
return erasure, errBitrotHashAlgoInvalid
}
hash, err := hex.DecodeString(v.Get("hash").String())
if err != nil {
logger.LogIf(ctx, err)
return erasure, err
}
name := v.Get("name").String()
if name == "" {
return erasure, errCorruptedFormat
}
checkSums[i] = ChecksumInfo{Name: name, Algorithm: algorithm, Hash: hash}
}
erasure.Checksums = checkSums
return erasure, nil
}
func parseXLParts(xlMetaBuf []byte) []objectPartInfo {
// Parse the XL Parts.
partsResult := gjson.GetBytes(xlMetaBuf, "parts").Array()
partInfo := make([]objectPartInfo, len(partsResult))
for i, p := range partsResult {
info := objectPartInfo{}
info.Number = int(p.Get("number").Int())
info.Name = p.Get("name").String()
info.ETag = p.Get("etag").String()
info.Size = p.Get("size").Int()
info.ActualSize = p.Get("actualSize").Int()
partInfo[i] = info
}
return partInfo
}
func parseXLMetaMap(xlMetaBuf []byte) map[string]string {
// Get xlMetaV1.Meta map.
metaMapResult := gjson.GetBytes(xlMetaBuf, "meta").Map()
metaMap := make(map[string]string)
for key, valResult := range metaMapResult {
metaMap[key] = valResult.String()
}
return metaMap
}
// Constructs XLMetaV1 using `gjson` lib to retrieve each field.
func xlMetaV1UnmarshalJSON(ctx context.Context, xlMetaBuf []byte) (xlMeta xlMetaV1, e error) {
// obtain version.
xlMeta.Version = parseXLVersion(xlMetaBuf)
// obtain format.
xlMeta.Format = parseXLFormat(xlMetaBuf)
// Parse xlMetaV1.Stat .
stat, err := parseXLStat(xlMetaBuf)
if err != nil {
logger.LogIf(ctx, err)
return xlMeta, err
}
xlMeta.Stat = stat
// parse the xlV1Meta.Erasure fields.
xlMeta.Erasure, err = parseXLErasureInfo(ctx, xlMetaBuf)
if err != nil {
return xlMeta, err
}
// Parse the XL Parts.
xlMeta.Parts = parseXLParts(xlMetaBuf)
// Get the xlMetaV1.Realse field.
xlMeta.Minio.Release = parseXLRelease(xlMetaBuf)
// parse xlMetaV1.
xlMeta.Meta = parseXLMetaMap(xlMetaBuf)
return xlMeta, nil
}
// read xl.json from the given disk, parse and return xlV1MetaV1.Parts.
func readXLMetaParts(ctx context.Context, disk StorageAPI, bucket string, object string) ([]objectPartInfo, map[string]string, error) {
// Reads entire `xl.json`.
xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
if err != nil {
logger.LogIf(ctx, err)
return nil, nil, err
}
// obtain xlMetaV1{}.Partsusing `github.com/tidwall/gjson`.
xlMetaParts := parseXLParts(xlMetaBuf)
xlMetaMap := parseXLMetaMap(xlMetaBuf)
return xlMetaParts, xlMetaMap, nil
}
// read xl.json from the given disk and parse xlV1Meta.Stat and xlV1Meta.Meta using gjson.
func readXLMetaStat(ctx context.Context, disk StorageAPI, bucket string, object string) (si statInfo, mp map[string]string, e error) {
// Reads entire `xl.json`.
xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
if err != nil {
logger.LogIf(ctx, err)
return si, nil, err
}
// obtain version.
xlVersion := parseXLVersion(xlMetaBuf)
// obtain format.
xlFormat := parseXLFormat(xlMetaBuf)
// Validate if the xl.json we read is sane, return corrupted format.
if !isXLMetaFormatValid(xlVersion, xlFormat) {
// For version mismatchs and unrecognized format, return corrupted format.
logger.LogIf(ctx, errCorruptedFormat)
return si, nil, errCorruptedFormat
}
// obtain xlMetaV1{}.Meta using `github.com/tidwall/gjson`.
xlMetaMap := parseXLMetaMap(xlMetaBuf)
// obtain xlMetaV1{}.Stat using `github.com/tidwall/gjson`.
xlStat, err := parseXLStat(xlMetaBuf)
if err != nil {
logger.LogIf(ctx, err)
return si, nil, err
}
// Return structured `xl.json`.
return xlStat, xlMetaMap, nil
}
// readXLMeta reads `xl.json` and returns back XL metadata structure.
func readXLMeta(ctx context.Context, disk StorageAPI, bucket string, object string) (xlMeta xlMetaV1, err error) {
// Reads entire `xl.json`.
xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
if err != nil {
if err != errFileNotFound {
logger.GetReqInfo(ctx).AppendTags("disk", disk.String())
logger.LogIf(ctx, err)
}
return xlMetaV1{}, err
}
// obtain xlMetaV1{} using `github.com/tidwall/gjson`.
xlMeta, err = xlMetaV1UnmarshalJSON(ctx, xlMetaBuf)
if err != nil {
logger.GetReqInfo(ctx).AppendTags("disk", disk.String())
logger.LogIf(ctx, err)
return xlMetaV1{}, err
}
// Return structured `xl.json`.
return xlMeta, nil
}
// Reads all `xl.json` metadata as a xlMetaV1 slice.
// Returns error slice indicating the failed metadata reads.
func readAllXLMetadata(ctx context.Context, disks []StorageAPI, bucket, object string) ([]xlMetaV1, []error) {
errs := make([]error, len(disks))
metadataArray := make([]xlMetaV1, len(disks))
var wg = &sync.WaitGroup{}
// Read `xl.json` parallelly across disks.
for index, disk := range disks {
if disk == nil {
errs[index] = errDiskNotFound
continue
}
wg.Add(1)
// Read `xl.json` in routine.
go func(index int, disk StorageAPI) {
defer wg.Done()
var err error
metadataArray[index], err = readXLMeta(ctx, disk, bucket, object)
if err != nil {
errs[index] = err
return
}
}(index, disk)
}
// Wait for all the routines to finish.
wg.Wait()
// Return all the metadata.
return metadataArray, errs
}
// Return shuffled partsMetadata depending on distribution.
func shufflePartsMetadata(partsMetadata []xlMetaV1, distribution []int) (shuffledPartsMetadata []xlMetaV1) {
if distribution == nil {
return partsMetadata
}
shuffledPartsMetadata = make([]xlMetaV1, len(partsMetadata))
// Shuffle slice xl metadata for expected distribution.
for index := range partsMetadata {
blockIndex := distribution[index]
shuffledPartsMetadata[blockIndex-1] = partsMetadata[index]
}
return shuffledPartsMetadata
}
// shuffleDisks - shuffle input disks slice depending on the
// erasure distribution. Return shuffled slice of disks with
// their expected distribution.
func shuffleDisks(disks []StorageAPI, distribution []int) (shuffledDisks []StorageAPI) {
if distribution == nil {
return disks
}
shuffledDisks = make([]StorageAPI, len(disks))
// Shuffle disks for expected distribution.
for index := range disks {
blockIndex := distribution[index]
shuffledDisks[blockIndex-1] = disks[index]
}
return shuffledDisks
}
// evalDisks - returns a new slice of disks where nil is set if
// the corresponding error in errs slice is not nil
func evalDisks(disks []StorageAPI, errs []error) []StorageAPI {
if len(errs) != len(disks) {
logger.LogIf(context.Background(), errors.New("unexpected disks/errors slice length"))
return nil
}
newDisks := make([]StorageAPI, len(disks))
for index := range errs {
if errs[index] == nil {
newDisks[index] = disks[index]
} else {
newDisks[index] = nil
}
}
return newDisks
}
// Errors specifically generated by calculatePartSizeFromIdx function.
var (
errPartSizeZero = errors.New("Part size cannot be zero")
errPartSizeIndex = errors.New("Part index cannot be smaller than 1")
)
// calculatePartSizeFromIdx calculates the part size according to input index.
// returns error if totalSize is -1, partSize is 0, partIndex is 0.
func calculatePartSizeFromIdx(ctx context.Context, totalSize int64, partSize int64, partIndex int) (currPartSize int64, err error) {
if totalSize < -1 {
logger.LogIf(ctx, errInvalidArgument)
return 0, errInvalidArgument
}
if partSize == 0 {
logger.LogIf(ctx, errPartSizeZero)
return 0, errPartSizeZero
}
if partIndex < 1 {
logger.LogIf(ctx, errPartSizeIndex)
return 0, errPartSizeIndex
}
if totalSize > 0 {
// Compute the total count of parts
partsCount := totalSize/partSize + 1
// Return the part's size
switch {
case int64(partIndex) < partsCount:
currPartSize = partSize
case int64(partIndex) == partsCount:
// Size of last part
currPartSize = totalSize % partSize
default:
currPartSize = 0
}
}
return currPartSize, nil
}
| cmd/xl-v1-utils.go | 1 | https://github.com/minio/minio/commit/36990aeafd7b8d1846e5f66292eacde60c36f0fb | [
0.9988172650337219,
0.4603522717952728,
0.00016415867139585316,
0.02153266966342926,
0.490678608417511
] |
{
"id": 4,
"code_window": [
"func readXLMeta(ctx context.Context, disk StorageAPI, bucket string, object string) (xlMeta xlMetaV1, err error) {\n",
"\t// Reads entire `xl.json`.\n",
"\txlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))\n",
"\tif err != nil {\n",
"\t\tif err != errFileNotFound {\n",
"\t\t\tlogger.GetReqInfo(ctx).AppendTags(\"disk\", disk.String())\n",
"\t\t\tlogger.LogIf(ctx, err)\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif err != errFileNotFound && err != errVolumeNotFound {\n"
],
"file_path": "cmd/xl-v1-utils.go",
"type": "replace",
"edit_start_line_idx": 307
} | // Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.10
package storage
import "google.golang.org/api/googleapi"
func shouldRetry(err error) bool {
switch e := err.(type) {
case *googleapi.Error:
// Retry on 429 and 5xx, according to
// https://cloud.google.com/storage/docs/exponential-backoff.
return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
case interface{ Temporary() bool }:
return e.Temporary()
default:
return false
}
}
| vendor/cloud.google.com/go/storage/go110.go | 0 | https://github.com/minio/minio/commit/36990aeafd7b8d1846e5f66292eacde60c36f0fb | [
0.00021330142044462264,
0.000184704054845497,
0.0001731951633701101,
0.00017615982505958527,
0.00001660018097027205
] |
{
"id": 4,
"code_window": [
"func readXLMeta(ctx context.Context, disk StorageAPI, bucket string, object string) (xlMeta xlMetaV1, err error) {\n",
"\t// Reads entire `xl.json`.\n",
"\txlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))\n",
"\tif err != nil {\n",
"\t\tif err != errFileNotFound {\n",
"\t\t\tlogger.GetReqInfo(ctx).AppendTags(\"disk\", disk.String())\n",
"\t\t\tlogger.LogIf(ctx, err)\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif err != errFileNotFound && err != errVolumeNotFound {\n"
],
"file_path": "cmd/xl-v1-utils.go",
"type": "replace",
"edit_start_line_idx": 307
} | package jsoniter
import (
"fmt"
"reflect"
"unsafe"
"github.com/modern-go/reflect2"
)
// ValDecoder is an internal type registered to cache as needed.
// Don't confuse jsoniter.ValDecoder with json.Decoder.
// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link).
//
// Reflection on type to create decoders, which is then cached
// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions
// 1. create instance of new value, for example *int will need a int to be allocated
// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New
// 3. assignment to map, both key and value will be reflect.Value
// For a simple struct binding, it will be reflect.Value free and allocation free
type ValDecoder interface {
Decode(ptr unsafe.Pointer, iter *Iterator)
}
// ValEncoder is an internal type registered to cache as needed.
// Don't confuse jsoniter.ValEncoder with json.Encoder.
// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link).
type ValEncoder interface {
IsEmpty(ptr unsafe.Pointer) bool
Encode(ptr unsafe.Pointer, stream *Stream)
}
type checkIsEmpty interface {
IsEmpty(ptr unsafe.Pointer) bool
}
type ctx struct {
*frozenConfig
prefix string
encoders map[reflect2.Type]ValEncoder
decoders map[reflect2.Type]ValDecoder
}
func (b *ctx) caseSensitive() bool {
if b.frozenConfig == nil {
// default is case-insensitive
return false
}
return b.frozenConfig.caseSensitive
}
func (b *ctx) append(prefix string) *ctx {
return &ctx{
frozenConfig: b.frozenConfig,
prefix: b.prefix + " " + prefix,
encoders: b.encoders,
decoders: b.decoders,
}
}
// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal
func (iter *Iterator) ReadVal(obj interface{}) {
cacheKey := reflect2.RTypeOf(obj)
decoder := iter.cfg.getDecoderFromCache(cacheKey)
if decoder == nil {
typ := reflect2.TypeOf(obj)
if typ.Kind() != reflect.Ptr {
iter.ReportError("ReadVal", "can only unmarshal into pointer")
return
}
decoder = iter.cfg.DecoderOf(typ)
}
ptr := reflect2.PtrOf(obj)
if ptr == nil {
iter.ReportError("ReadVal", "can not read into nil pointer")
return
}
decoder.Decode(ptr, iter)
}
// WriteVal copy the go interface into underlying JSON, same as json.Marshal
func (stream *Stream) WriteVal(val interface{}) {
if nil == val {
stream.WriteNil()
return
}
cacheKey := reflect2.RTypeOf(val)
encoder := stream.cfg.getEncoderFromCache(cacheKey)
if encoder == nil {
typ := reflect2.TypeOf(val)
encoder = stream.cfg.EncoderOf(typ)
}
encoder.Encode(reflect2.PtrOf(val), stream)
}
func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder {
cacheKey := typ.RType()
decoder := cfg.getDecoderFromCache(cacheKey)
if decoder != nil {
return decoder
}
ctx := &ctx{
frozenConfig: cfg,
prefix: "",
decoders: map[reflect2.Type]ValDecoder{},
encoders: map[reflect2.Type]ValEncoder{},
}
ptrType := typ.(*reflect2.UnsafePtrType)
decoder = decoderOfType(ctx, ptrType.Elem())
cfg.addDecoderToCache(cacheKey, decoder)
return decoder
}
func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
decoder := getTypeDecoderFromExtension(ctx, typ)
if decoder != nil {
return decoder
}
decoder = createDecoderOfType(ctx, typ)
for _, extension := range extensions {
decoder = extension.DecorateDecoder(typ, decoder)
}
decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder)
for _, extension := range ctx.extraExtensions {
decoder = extension.DecorateDecoder(typ, decoder)
}
return decoder
}
func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
decoder := ctx.decoders[typ]
if decoder != nil {
return decoder
}
placeholder := &placeholderDecoder{}
ctx.decoders[typ] = placeholder
decoder = _createDecoderOfType(ctx, typ)
placeholder.decoder = decoder
return decoder
}
func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
decoder := createDecoderOfJsonRawMessage(ctx, typ)
if decoder != nil {
return decoder
}
decoder = createDecoderOfJsonNumber(ctx, typ)
if decoder != nil {
return decoder
}
decoder = createDecoderOfMarshaler(ctx, typ)
if decoder != nil {
return decoder
}
decoder = createDecoderOfAny(ctx, typ)
if decoder != nil {
return decoder
}
decoder = createDecoderOfNative(ctx, typ)
if decoder != nil {
return decoder
}
switch typ.Kind() {
case reflect.Interface:
ifaceType, isIFace := typ.(*reflect2.UnsafeIFaceType)
if isIFace {
return &ifaceDecoder{valType: ifaceType}
}
return &efaceDecoder{}
case reflect.Struct:
return decoderOfStruct(ctx, typ)
case reflect.Array:
return decoderOfArray(ctx, typ)
case reflect.Slice:
return decoderOfSlice(ctx, typ)
case reflect.Map:
return decoderOfMap(ctx, typ)
case reflect.Ptr:
return decoderOfOptional(ctx, typ)
default:
return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())}
}
}
func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder {
cacheKey := typ.RType()
encoder := cfg.getEncoderFromCache(cacheKey)
if encoder != nil {
return encoder
}
ctx := &ctx{
frozenConfig: cfg,
prefix: "",
decoders: map[reflect2.Type]ValDecoder{},
encoders: map[reflect2.Type]ValEncoder{},
}
encoder = encoderOfType(ctx, typ)
if typ.LikePtr() {
encoder = &onePtrEncoder{encoder}
}
cfg.addEncoderToCache(cacheKey, encoder)
return encoder
}
type onePtrEncoder struct {
encoder ValEncoder
}
func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr))
}
func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
encoder.encoder.Encode(unsafe.Pointer(&ptr), stream)
}
func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
encoder := getTypeEncoderFromExtension(ctx, typ)
if encoder != nil {
return encoder
}
encoder = createEncoderOfType(ctx, typ)
for _, extension := range extensions {
encoder = extension.DecorateEncoder(typ, encoder)
}
encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder)
for _, extension := range ctx.extraExtensions {
encoder = extension.DecorateEncoder(typ, encoder)
}
return encoder
}
func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
encoder := ctx.encoders[typ]
if encoder != nil {
return encoder
}
placeholder := &placeholderEncoder{}
ctx.encoders[typ] = placeholder
encoder = _createEncoderOfType(ctx, typ)
placeholder.encoder = encoder
return encoder
}
func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
encoder := createEncoderOfJsonRawMessage(ctx, typ)
if encoder != nil {
return encoder
}
encoder = createEncoderOfJsonNumber(ctx, typ)
if encoder != nil {
return encoder
}
encoder = createEncoderOfMarshaler(ctx, typ)
if encoder != nil {
return encoder
}
encoder = createEncoderOfAny(ctx, typ)
if encoder != nil {
return encoder
}
encoder = createEncoderOfNative(ctx, typ)
if encoder != nil {
return encoder
}
kind := typ.Kind()
switch kind {
case reflect.Interface:
return &dynamicEncoder{typ}
case reflect.Struct:
return encoderOfStruct(ctx, typ)
case reflect.Array:
return encoderOfArray(ctx, typ)
case reflect.Slice:
return encoderOfSlice(ctx, typ)
case reflect.Map:
return encoderOfMap(ctx, typ)
case reflect.Ptr:
return encoderOfOptional(ctx, typ)
default:
return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())}
}
}
type lazyErrorDecoder struct {
err error
}
func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
if iter.WhatIsNext() != NilValue {
if iter.Error == nil {
iter.Error = decoder.err
}
} else {
iter.Skip()
}
}
type lazyErrorEncoder struct {
err error
}
func (encoder *lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
if ptr == nil {
stream.WriteNil()
} else if stream.Error == nil {
stream.Error = encoder.err
}
}
func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return false
}
type placeholderDecoder struct {
decoder ValDecoder
}
func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
decoder.decoder.Decode(ptr, iter)
}
type placeholderEncoder struct {
encoder ValEncoder
}
func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
encoder.encoder.Encode(ptr, stream)
}
func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return encoder.encoder.IsEmpty(ptr)
}
| vendor/github.com/json-iterator/go/reflect.go | 0 | https://github.com/minio/minio/commit/36990aeafd7b8d1846e5f66292eacde60c36f0fb | [
0.0017384471138939261,
0.00022583025565836579,
0.00016265107842627913,
0.00016899840557016432,
0.0002664722269400954
] |
{
"id": 4,
"code_window": [
"func readXLMeta(ctx context.Context, disk StorageAPI, bucket string, object string) (xlMeta xlMetaV1, err error) {\n",
"\t// Reads entire `xl.json`.\n",
"\txlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))\n",
"\tif err != nil {\n",
"\t\tif err != errFileNotFound {\n",
"\t\t\tlogger.GetReqInfo(ctx).AppendTags(\"disk\", disk.String())\n",
"\t\t\tlogger.LogIf(ctx, err)\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif err != errFileNotFound && err != errVolumeNotFound {\n"
],
"file_path": "cmd/xl-v1-utils.go",
"type": "replace",
"edit_start_line_idx": 307
} | package reflect2
import (
"reflect"
"unsafe"
)
type eface struct {
rtype unsafe.Pointer
data unsafe.Pointer
}
func unpackEFace(obj interface{}) *eface {
return (*eface)(unsafe.Pointer(&obj))
}
func packEFace(rtype unsafe.Pointer, data unsafe.Pointer) interface{} {
var i interface{}
e := (*eface)(unsafe.Pointer(&i))
e.rtype = rtype
e.data = data
return i
}
type UnsafeEFaceType struct {
unsafeType
}
func newUnsafeEFaceType(cfg *frozenConfig, type1 reflect.Type) *UnsafeEFaceType {
return &UnsafeEFaceType{
unsafeType: *newUnsafeType(cfg, type1),
}
}
func (type2 *UnsafeEFaceType) IsNil(obj interface{}) bool {
if obj == nil {
return true
}
objEFace := unpackEFace(obj)
assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
return type2.UnsafeIsNil(objEFace.data)
}
func (type2 *UnsafeEFaceType) UnsafeIsNil(ptr unsafe.Pointer) bool {
if ptr == nil {
return true
}
return unpackEFace(*(*interface{})(ptr)).data == nil
}
func (type2 *UnsafeEFaceType) Indirect(obj interface{}) interface{} {
objEFace := unpackEFace(obj)
assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
return type2.UnsafeIndirect(objEFace.data)
}
func (type2 *UnsafeEFaceType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
return *(*interface{})(ptr)
}
| vendor/github.com/modern-go/reflect2/unsafe_eface.go | 0 | https://github.com/minio/minio/commit/36990aeafd7b8d1846e5f66292eacde60c36f0fb | [
0.00017356168245896697,
0.0001683085720287636,
0.00016489758854731917,
0.00016725729801692069,
0.0000033624976367718773
] |
{
"id": 0,
"code_window": [
"\tif ndvFactor > sampleFactor {\n",
"\t\tndvFactor = sampleFactor\n",
"\t}\n",
"\tbucketIdx := 0\n",
"\tvar lastCount int64\n",
"\thg.Buckets[0].LowerBound = samples[0]\n",
"\tfor i := int64(0); i < int64(len(samples)); i++ {\n",
"\t\tcmp, err := hg.Buckets[bucketIdx].UpperBound.CompareDatum(sc, &samples[i])\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, errors.Trace(err)\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\thg.Buckets[0] = Bucket{\n",
"\t\tLowerBound: samples[0],\n",
"\t\tUpperBound: samples[0],\n",
"\t\tCount: int64(sampleFactor),\n",
"\t\tRepeats: int64(ndvFactor),\n",
"\t}\n",
"\tfor i := int64(1); i < int64(len(samples)); i++ {\n"
],
"file_path": "statistics/builder.go",
"type": "replace",
"edit_start_line_idx": 140
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package statistics
import (
"github.com/juju/errors"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/types"
)
// SortedBuilder is used to build histograms for PK and index.
type SortedBuilder struct {
sc *stmtctx.StatementContext
numBuckets int64
valuesPerBucket int64
lastNumber int64
bucketIdx int64
Count int64
hist *Histogram
}
// NewSortedBuilder creates a new SortedBuilder.
func NewSortedBuilder(sc *stmtctx.StatementContext, numBuckets, id int64) *SortedBuilder {
return &SortedBuilder{
sc: sc,
numBuckets: numBuckets,
valuesPerBucket: 1,
hist: &Histogram{
ID: id,
Buckets: make([]Bucket, 1, numBuckets),
},
}
}
// Hist returns the histogram built by SortedBuilder.
func (b *SortedBuilder) Hist() *Histogram {
if b.Count == 0 {
return &Histogram{ID: b.hist.ID}
}
return b.hist
}
// Iterate updates the histogram incrementally.
func (b *SortedBuilder) Iterate(data types.Datum) error {
cmp, err := b.hist.Buckets[b.bucketIdx].UpperBound.CompareDatum(b.sc, &data)
if err != nil {
return errors.Trace(err)
}
b.Count++
if cmp == 0 {
// The new item has the same value as current bucket value, to ensure that
// a same value only stored in a single bucket, we do not increase bucketIdx even if it exceeds
// valuesPerBucket.
b.hist.Buckets[b.bucketIdx].Count++
b.hist.Buckets[b.bucketIdx].Repeats++
} else if b.hist.Buckets[b.bucketIdx].Count+1-b.lastNumber <= b.valuesPerBucket {
// The bucket still have room to store a new item, update the bucket.
b.hist.Buckets[b.bucketIdx].Count++
b.hist.Buckets[b.bucketIdx].UpperBound = data
b.hist.Buckets[b.bucketIdx].Repeats = 1
if b.bucketIdx == 0 && b.hist.Buckets[0].Count == 1 {
b.hist.Buckets[0].LowerBound = data
}
b.hist.NDV++
} else {
// All buckets are full, we should merge buckets.
if b.bucketIdx+1 == b.numBuckets {
b.hist.mergeBuckets(b.bucketIdx)
b.valuesPerBucket *= 2
b.bucketIdx = b.bucketIdx / 2
if b.bucketIdx == 0 {
b.lastNumber = 0
} else {
b.lastNumber = b.hist.Buckets[b.bucketIdx-1].Count
}
}
// We may merge buckets, so we should check it again.
if b.hist.Buckets[b.bucketIdx].Count+1-b.lastNumber <= b.valuesPerBucket {
b.hist.Buckets[b.bucketIdx].Count++
b.hist.Buckets[b.bucketIdx].UpperBound = data
b.hist.Buckets[b.bucketIdx].Repeats = 1
} else {
b.lastNumber = b.hist.Buckets[b.bucketIdx].Count
b.bucketIdx++
b.hist.Buckets = append(b.hist.Buckets, Bucket{
Count: b.lastNumber + 1,
UpperBound: data,
LowerBound: data,
Repeats: 1,
})
}
b.hist.NDV++
}
return nil
}
// BuildColumn builds histogram from samples for column.
func BuildColumn(ctx context.Context, numBuckets, id int64, collector *SampleCollector) (*Histogram, error) {
count := collector.Count
if count == 0 {
return &Histogram{ID: id, NullCount: collector.NullCount}, nil
}
sc := ctx.GetSessionVars().StmtCtx
samples := collector.Samples
err := types.SortDatums(sc, samples)
if err != nil {
return nil, errors.Trace(err)
}
ndv := collector.FMSketch.NDV()
if ndv > count {
ndv = count
}
hg := &Histogram{
ID: id,
NDV: ndv,
NullCount: collector.NullCount,
Buckets: make([]Bucket, 1, numBuckets),
}
valuesPerBucket := float64(count)/float64(numBuckets) + 1
// As we use samples to build the histogram, the bucket number and repeat should multiply a factor.
sampleFactor := float64(count) / float64(len(samples))
ndvFactor := float64(count) / float64(hg.NDV)
if ndvFactor > sampleFactor {
ndvFactor = sampleFactor
}
bucketIdx := 0
var lastCount int64
hg.Buckets[0].LowerBound = samples[0]
for i := int64(0); i < int64(len(samples)); i++ {
cmp, err := hg.Buckets[bucketIdx].UpperBound.CompareDatum(sc, &samples[i])
if err != nil {
return nil, errors.Trace(err)
}
totalCount := float64(i+1) * sampleFactor
if cmp == 0 {
// The new item has the same value as current bucket value, to ensure that
// a same value only stored in a single bucket, we do not increase bucketIdx even if it exceeds
// valuesPerBucket.
hg.Buckets[bucketIdx].Count = int64(totalCount)
if float64(hg.Buckets[bucketIdx].Repeats) == ndvFactor {
hg.Buckets[bucketIdx].Repeats = int64(2 * sampleFactor)
} else {
hg.Buckets[bucketIdx].Repeats += int64(sampleFactor)
}
} else if totalCount-float64(lastCount) <= valuesPerBucket {
// The bucket still have room to store a new item, update the bucket.
hg.Buckets[bucketIdx].Count = int64(totalCount)
hg.Buckets[bucketIdx].UpperBound = samples[i]
hg.Buckets[bucketIdx].Repeats = int64(ndvFactor)
} else {
lastCount = hg.Buckets[bucketIdx].Count
// The bucket is full, store the item in the next bucket.
bucketIdx++
hg.Buckets = append(hg.Buckets, Bucket{
Count: int64(totalCount),
UpperBound: samples[i],
LowerBound: samples[i],
Repeats: int64(ndvFactor),
})
}
}
return hg, nil
}
// AnalyzeResult is used to represent analyze result.
type AnalyzeResult struct {
TableID int64
Hist []*Histogram
Cms []*CMSketch
Count int64
IsIndex int
Err error
}
| statistics/builder.go | 1 | https://github.com/pingcap/tidb/commit/835b764db34b9d6469686397a6415845ff4bbd1e | [
0.9989116191864014,
0.26467064023017883,
0.00016424823843408376,
0.004047462251037359,
0.43383172154426575
] |
{
"id": 0,
"code_window": [
"\tif ndvFactor > sampleFactor {\n",
"\t\tndvFactor = sampleFactor\n",
"\t}\n",
"\tbucketIdx := 0\n",
"\tvar lastCount int64\n",
"\thg.Buckets[0].LowerBound = samples[0]\n",
"\tfor i := int64(0); i < int64(len(samples)); i++ {\n",
"\t\tcmp, err := hg.Buckets[bucketIdx].UpperBound.CompareDatum(sc, &samples[i])\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, errors.Trace(err)\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\thg.Buckets[0] = Bucket{\n",
"\t\tLowerBound: samples[0],\n",
"\t\tUpperBound: samples[0],\n",
"\t\tCount: int64(sampleFactor),\n",
"\t\tRepeats: int64(ndvFactor),\n",
"\t}\n",
"\tfor i := int64(1); i < int64(len(samples)); i++ {\n"
],
"file_path": "statistics/builder.go",
"type": "replace",
"edit_start_line_idx": 140
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package plan_test
import (
. "github.com/pingcap/check"
"github.com/pingcap/tidb"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/parser"
"github.com/pingcap/tidb/plan"
"github.com/pingcap/tidb/util/testleak"
goctx "golang.org/x/net/context"
)
// Register the suite with gocheck so its Test* methods are picked up and run.
var _ = Suite(&testPlanSuite{})
// testPlanSuite carries the fixtures shared by the DAG plan builder tests:
// an embedded SQL parser and a mock info schema holding the test table.
type testPlanSuite struct {
	// Parser turns each test case's SQL text into an AST statement.
	*parser.Parser
	// is is the mock info schema the optimizer resolves tables against.
	is infoschema.InfoSchema
}
// SetUpSuite runs once before any test in the suite. It builds the mock info
// schema from plan.MockTable() and creates a fresh SQL parser.
func (s *testPlanSuite) SetUpSuite(c *C) {
	s.is = infoschema.MockInfoSchema([]*model.TableInfo{plan.MockTable()})
	s.Parser = parser.New()
}
// TestDAGPlanBuilderSimpleCase checks the physical plan chosen for
// single-table queries: index hints, null ranges, TopN/Limit push-down, and
// index single read vs. index-lookup double read. Each case pairs a SQL
// statement with the exact string form of the expected plan tree.
func (s *testPlanSuite) TestDAGPlanBuilderSimpleCase(c *C) {
	defer testleak.AfterTest(c)()
	store, dom, err := newStoreWithBootstrap()
	c.Assert(err, IsNil)
	defer func() {
		dom.Close()
		store.Close()
	}()
	se, err := tidb.CreateSession4Test(store)
	c.Assert(err, IsNil)
	_, err = se.Execute(goctx.Background(), "use test")
	c.Assert(err, IsNil)
	tests := []struct {
		sql  string
		best string
	}{
		// Test index hint.
		{
			sql:  "select * from t t1 use index(c_d_e)",
			best: "IndexLookUp(Index(t.c_d_e)[[<nil>,+inf]], Table(t))",
		},
		// Test ts + Sort vs. DoubleRead + filter.
		{
			sql:  "select a from t where a between 1 and 2 order by c",
			best: "TableReader(Table(t))->Sort->Projection",
		},
		// Test DNF condition + Double Read.
		// FIXME: Some bugs still exist in selectivity.
		//{
		//	sql:  "select * from t where (t.c > 0 and t.c < 1) or (t.c > 2 and t.c < 3) or (t.c > 4 and t.c < 5) or (t.c > 6 and t.c < 7) or (t.c > 9 and t.c < 10)",
		//	best: "IndexLookUp(Index(t.c_d_e)[(0 +inf,1 <nil>) (2 +inf,3 <nil>) (4 +inf,5 <nil>) (6 +inf,7 <nil>) (9 +inf,10 <nil>)], Table(t))",
		//},
		// Test TopN to table branch in double read.
		{
			sql:  "select * from t where t.c = 1 and t.e = 1 order by t.b limit 1",
			best: "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.e, 1)]), Table(t)->TopN([test.t.b],0,1))->TopN([test.t.b],0,1)",
		},
		// Test Null Range
		{
			sql:  "select * from t where t.c is null",
			best: "IndexLookUp(Index(t.c_d_e)[[<nil>,<nil>]], Table(t))",
		},
		// Test TopN to index branch in double read.
		{
			sql:  "select * from t where t.c = 1 and t.e = 1 order by t.e limit 1",
			best: "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.e, 1)])->TopN([test.t.e],0,1), Table(t))->TopN([test.t.e],0,1)",
		},
		// Test TopN to Limit in double read.
		{
			sql:  "select * from t where t.c = 1 and t.e = 1 order by t.d limit 1",
			best: "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.e, 1)])->Limit, Table(t))->Limit",
		},
		// Test TopN to Limit in index single read.
		{
			sql:  "select c from t where t.c = 1 and t.e = 1 order by t.d limit 1",
			best: "IndexReader(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.e, 1)])->Limit)->Limit->Projection",
		},
		// Test TopN to Limit in table single read.
		{
			sql:  "select c from t order by t.a limit 1",
			best: "TableReader(Table(t)->Limit)->Limit->Projection",
		},
		// Test TopN push down in table single read.
		{
			sql:  "select c from t order by t.a + t.b limit 1",
			best: "TableReader(Table(t)->TopN([plus(test.t.a, test.t.b)],0,1))->TopN([plus(test.t.a, test.t.b)],0,1)->Projection",
		},
		// Test Limit push down in table single read.
		{
			sql:  "select c from t limit 1",
			best: "TableReader(Table(t)->Limit)->Limit",
		},
		// Test Limit push down in index single read.
		{
			sql:  "select c from t where c = 1 limit 1",
			best: "IndexReader(Index(t.c_d_e)[[1,1]]->Limit)->Limit",
		},
		// Test index single read and Selection.
		{
			sql:  "select c from t where c = 1",
			best: "IndexReader(Index(t.c_d_e)[[1,1]])",
		},
		// Test index single read and Sort.
		{
			sql:  "select c from t order by c",
			best: "IndexReader(Index(t.c_d_e)[[<nil>,+inf]])",
		},
		// Test index single read and Sort.
		{
			sql:  "select c from t where c = 1 order by e",
			best: "IndexReader(Index(t.c_d_e)[[1,1]])->Sort->Projection",
		},
		// Test Limit push down in double single read.
		{
			sql:  "select c, b from t where c = 1 limit 1",
			best: "IndexLookUp(Index(t.c_d_e)[[1,1]]->Limit, Table(t))->Limit->Projection",
		},
		// Test Selection + Limit push down in double single read.
		{
			sql:  "select c, b from t where c = 1 and e = 1 and b = 1 limit 1",
			best: "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.e, 1)]), Table(t)->Sel([eq(test.t.b, 1)])->Limit)->Limit->Projection",
		},
		// Test Order by multi columns.
		{
			sql:  "select c from t where c = 1 order by d, c",
			best: "IndexReader(Index(t.c_d_e)[[1,1]])->Sort->Projection",
		},
		// Test for index with length.
		{
			sql:  "select c_str from t where e_str = '1' order by d_str, c_str",
			best: "IndexLookUp(Index(t.e_d_c_str_prefix)[[1,1]], Table(t))->Sort->Projection",
		},
		// Test PK in index single read.
		{
			sql:  "select c from t where t.c = 1 and t.a = 1 order by t.d limit 1",
			best: "IndexReader(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.a, 1)])->Limit)->Limit->Projection",
		},
		// Test composed index.
		// FIXME: The TopN didn't be pushed.
		{
			sql:  "select c from t where t.c = 1 and t.d = 1 order by t.a limit 1",
			best: "IndexReader(Index(t.c_d_e)[[1 1,1 1]])->TopN([test.t.a],0,1)->Projection",
		},
		// Test PK in index double read.
		{
			sql:  "select * from t where t.c = 1 and t.a = 1 order by t.d limit 1",
			best: "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.a, 1)])->Limit, Table(t))->Limit",
		},
		// Test index filter condition push down.
		{
			sql:  "select * from t use index(e_d_c_str_prefix) where t.c_str = 'abcdefghijk' and t.d_str = 'd' and t.e_str = 'e'",
			best: "IndexLookUp(Index(t.e_d_c_str_prefix)[[e d [97 98 99 100 101 102 103 104 105 106],e d [97 98 99 100 101 102 103 104 105 106]]], Table(t)->Sel([eq(test.t.c_str, abcdefghijk)]))",
		},
	}
	// Optimize every statement in a fresh transaction and compare the plan's
	// string form against the expected tree.
	for _, tt := range tests {
		comment := Commentf("for %s", tt.sql)
		stmt, err := s.ParseOneStmt(tt.sql, "", "")
		c.Assert(err, IsNil, comment)
		err = se.NewTxn()
		c.Assert(err, IsNil)
		p, err := plan.Optimize(se, stmt, s.is)
		c.Assert(err, IsNil)
		c.Assert(plan.ToString(p), Equals, tt.best, Commentf("for %s", tt.sql))
	}
}
// TestDAGPlanBuilderJoin checks which physical join operator (hash, merge, or
// index join) the planner picks for two- and three-table joins, including the
// TIDB_SMJ / TIDB_INLJ / TIDB_HJ hints and cases where a hint cannot take
// effect. Each case pairs a SQL statement with the expected plan-tree string.
func (s *testPlanSuite) TestDAGPlanBuilderJoin(c *C) {
	defer testleak.AfterTest(c)()
	store, dom, err := newStoreWithBootstrap()
	c.Assert(err, IsNil)
	defer func() {
		dom.Close()
		store.Close()
	}()
	se, err := tidb.CreateSession4Test(store)
	c.Assert(err, IsNil)
	_, err = se.Execute(goctx.Background(), "use test")
	c.Assert(err, IsNil)
	tests := []struct {
		sql  string
		best string
	}{
		{
			sql:  "select * from t t1 join t t2 on t1.a = t2.c_str",
			best: "LeftHashJoin{TableReader(Table(t))->Projection->TableReader(Table(t))->Projection}(cast(t1.a),cast(t2.c_str))->Projection",
		},
		{
			sql:  "select * from t t1 join t t2 on t1.b = t2.a",
			best: "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(t1.b,t2.a)",
		},
		{
			sql:  "select * from t t1 join t t2 on t1.a = t2.a join t t3 on t1.a = t3.a",
			best: "MergeInnerJoin{MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(t1.a,t2.a)->TableReader(Table(t))}(t1.a,t3.a)",
		},
		{
			sql:  "select * from t t1 join t t2 on t1.a = t2.a join t t3 on t1.b = t3.a",
			best: "LeftHashJoin{MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(t1.a,t2.a)->TableReader(Table(t))}(t1.b,t3.a)",
		},
		{
			sql:  "select * from t t1 join t t2 on t1.b = t2.a order by t1.a",
			best: "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(t1.b,t2.a)->Sort",
		},
		{
			sql:  "select * from t t1 join t t2 on t1.b = t2.a order by t1.a limit 1",
			best: "IndexJoin{TableReader(Table(t))->TableReader(Table(t))}(t1.b,t2.a)->Limit",
		},
		// Test hash join's hint.
		{
			sql:  "select /*+ TIDB_HJ(t1, t2) */ * from t t1 join t t2 on t1.b = t2.a order by t1.a limit 1",
			best: "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(t1.b,t2.a)->TopN([t1.a],0,1)",
		},
		{
			sql:  "select * from t t1 left join t t2 on t1.b = t2.a where 1 = 1 limit 1",
			best: "IndexJoin{TableReader(Table(t)->Limit)->TableReader(Table(t))}(t1.b,t2.a)->Limit",
		},
		{
			sql:  "select * from t t1 join t t2 on t1.b = t2.a and t1.c = 1 and t1.d = 1 and t1.e = 1 order by t1.a limit 1",
			best: "IndexJoin{IndexLookUp(Index(t.c_d_e)[[1 1 1,1 1 1]], Table(t))->TableReader(Table(t))}(t1.b,t2.a)->TopN([t1.a],0,1)",
		},
		{
			sql:  "select * from t t1 join t t2 on t1.b = t2.b join t t3 on t1.b = t3.b",
			best: "LeftHashJoin{LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(t1.b,t2.b)->TableReader(Table(t))}(t1.b,t3.b)",
		},
		{
			sql:  "select * from t t1 join t t2 on t1.a = t2.a order by t1.a",
			best: "MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(t1.a,t2.a)",
		},
		{
			sql:  "select * from t t1 left outer join t t2 on t1.a = t2.a right outer join t t3 on t1.a = t3.a",
			best: "MergeRightOuterJoin{MergeLeftOuterJoin{TableReader(Table(t))->TableReader(Table(t))}(t1.a,t2.a)->TableReader(Table(t))}(t1.a,t3.a)",
		},
		{
			sql:  "select * from t t1 join t t2 on t1.a = t2.a join t t3 on t1.a = t3.a and t1.b = 1 and t3.c = 1",
			best: "LeftHashJoin{IndexJoin{TableReader(Table(t)->Sel([eq(t1.b, 1)]))->TableReader(Table(t))}(t1.a,t2.a)->IndexLookUp(Index(t.c_d_e)[[1,1]], Table(t))}(t1.a,t3.a)",
		},
		{
			sql:  "select * from t where t.c in (select b from t s where s.a = t.a)",
			best: "MergeSemiJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,s.a)",
		},
		{
			sql:  "select t.c in (select b from t s where s.a = t.a) from t",
			best: "MergeLeftOuterSemiJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,s.a)->Projection",
		},
		// Test Single Merge Join.
		// Merge Join will no longer enforce a sort. If a hint doesn't take effect, we will choose other types of join.
		{
			sql:  "select /*+ TIDB_SMJ(t1,t2)*/ * from t t1, t t2 where t1.a = t2.b",
			best: "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(t1.a,t2.b)",
		},
		// Test Single Merge Join + Sort.
		{
			sql:  "select /*+ TIDB_SMJ(t1,t2)*/ * from t t1, t t2 where t1.a = t2.a order by t2.a",
			best: "MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(t1.a,t2.a)",
		},
		// Test Single Merge Join + Sort + desc.
		{
			sql:  "select /*+ TIDB_SMJ(t1,t2)*/ * from t t1, t t2 where t1.a = t2.a order by t2.a desc",
			best: "MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(t1.a,t2.a)->Sort",
		},
		// Test Multi Merge Join.
		{
			sql:  "select /*+ TIDB_SMJ(t1,t2,t3)*/ * from t t1, t t2, t t3 where t1.a = t2.a and t2.a = t3.a",
			best: "MergeInnerJoin{MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(t1.a,t2.a)->TableReader(Table(t))}(t2.a,t3.a)",
		},
		// Test Multi Merge Join with multi keys.
		// TODO: More tests should be added.
		{
			sql:  "select /*+ TIDB_SMJ(t1,t2,t3)*/ * from t t1, t t2, t t3 where t1.c = t2.c and t1.d = t2.d and t3.c = t1.c and t3.d = t1.d",
			best: "MergeInnerJoin{MergeInnerJoin{IndexLookUp(Index(t.c_d_e)[[<nil>,+inf]], Table(t))->IndexLookUp(Index(t.c_d_e)[[<nil>,+inf]], Table(t))}(t1.c,t2.c)(t1.d,t2.d)->IndexLookUp(Index(t.c_d_e)[[<nil>,+inf]], Table(t))}(t1.c,t3.c)(t1.d,t3.d)",
		},
		{
			sql:  "select /*+ TIDB_SMJ(t1,t2,t3)*/ * from t t1, t t2, t t3 where t1.c = t2.c and t1.d = t2.d and t3.c = t1.c and t3.d = t1.d order by t1.c",
			best: "MergeInnerJoin{MergeInnerJoin{IndexLookUp(Index(t.c_d_e)[[<nil>,+inf]], Table(t))->IndexLookUp(Index(t.c_d_e)[[<nil>,+inf]], Table(t))}(t1.c,t2.c)(t1.d,t2.d)->IndexLookUp(Index(t.c_d_e)[[<nil>,+inf]], Table(t))}(t1.c,t3.c)(t1.d,t3.d)",
		},
		// Test Multi Merge Join + Outer Join.
		{
			sql:  "select /*+ TIDB_SMJ(t1,t2,t3)*/ * from t t1 left outer join t t2 on t1.a = t2.a left outer join t t3 on t2.a = t3.a",
			best: "LeftHashJoin{MergeLeftOuterJoin{TableReader(Table(t))->TableReader(Table(t))}(t1.a,t2.a)->TableReader(Table(t))}(t2.a,t3.a)",
		},
		{
			sql:  "select /*+ TIDB_SMJ(t1,t2,t3)*/ * from t t1 left outer join t t2 on t1.a = t2.a left outer join t t3 on t1.a = t3.a",
			best: "MergeLeftOuterJoin{MergeLeftOuterJoin{TableReader(Table(t))->TableReader(Table(t))}(t1.a,t2.a)->TableReader(Table(t))}(t1.a,t3.a)",
		},
		// Test Index Join + TableScan.
		{
			sql:  "select /*+ TIDB_INLJ(t1, t2) */ * from t t1, t t2 where t1.a = t2.a",
			best: "IndexJoin{TableReader(Table(t))->TableReader(Table(t))}(t1.a,t2.a)",
		},
		// Test Index Join + DoubleRead.
		{
			sql:  "select /*+ TIDB_INLJ(t1, t2) */ * from t t1, t t2 where t1.a = t2.c",
			best: "IndexJoin{TableReader(Table(t))->IndexLookUp(Index(t.c_d_e)[[<nil>,+inf]], Table(t))}(t1.a,t2.c)",
		},
		// Test Index Join + SingleRead.
		{
			sql:  "select /*+ TIDB_INLJ(t1, t2) */ t1.a , t2.a from t t1, t t2 where t1.a = t2.c",
			best: "IndexJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[<nil>,+inf]])}(t1.a,t2.c)->Projection",
		},
		// Test Index Join + Order by.
		{
			sql:  "select /*+ TIDB_INLJ(t1, t2) */ t1.a, t2.a from t t1, t t2 where t1.a = t2.a order by t1.c",
			best: "IndexJoin{TableReader(Table(t))->TableReader(Table(t))}(t1.a,t2.a)->Sort->Projection",
		},
		// Test Index Join + Order by.
		{
			sql:  "select /*+ TIDB_INLJ(t1, t2) */ t1.a, t2.a from t t1, t t2 where t1.a = t2.a order by t2.c",
			best: "IndexJoin{TableReader(Table(t))->TableReader(Table(t))}(t1.a,t2.a)->Sort->Projection",
		},
		// Test Index Join + TableScan + Rotate.
		{
			sql:  "select /*+ TIDB_INLJ(t2) */ t1.a , t2.a from t t1, t t2 where t1.a = t2.c",
			best: "IndexJoin{TableReader(Table(t))->TableReader(Table(t))}(t2.c,t1.a)->Projection",
		},
		// Test Index Join + OuterJoin + TableScan.
		{
			sql:  "select /*+ TIDB_INLJ(t1, t2) */ * from t t1 left outer join t t2 on t1.a = t2.a and t2.b < 1",
			best: "IndexJoin{TableReader(Table(t))->TableReader(Table(t)->Sel([lt(t2.b, 1)]))}(t1.a,t2.a)",
		},
		{
			sql:  "select /*+ TIDB_INLJ(t1, t2) */ * from t t1 join t t2 on t1.d=t2.d and t2.c = 1",
			best: "IndexJoin{TableReader(Table(t))->IndexLookUp(Index(t.c_d_e)[[<nil>,+inf]], Table(t))}(t1.d,t2.d)",
		},
		// Test Index Join failed.
		{
			sql:  "select /*+ TIDB_INLJ(t1, t2) */ * from t t1 left outer join t t2 on t1.a = t2.b",
			best: "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(t1.a,t2.b)",
		},
		// Test Index Join failed.
		{
			sql:  "select /*+ TIDB_INLJ(t1) */ * from t t1 right outer join t t2 on t1.a = t2.b",
			best: "RightHashJoin{TableReader(Table(t))->TableReader(Table(t))}(t1.a,t2.b)",
		},
		// Test Semi Join hint success.
		{
			sql:  "select /*+ TIDB_INLJ(t1) */ * from t t1 where t1.a in (select a from t t2)",
			best: "IndexJoin{TableReader(Table(t))->TableReader(Table(t))}(t1.a,t2.a)",
		},
		// Test Semi Join hint fail.
		{
			sql:  "select /*+ TIDB_INLJ(t2) */ * from t t1 where t1.a in (select a from t t2)",
			best: "MergeSemiJoin{TableReader(Table(t))->TableReader(Table(t))}(t1.a,t2.a)",
		},
	}
	// Optimize every statement and compare the plan's string form against the
	// expected tree.
	for _, tt := range tests {
		comment := Commentf("for %s", tt.sql)
		stmt, err := s.ParseOneStmt(tt.sql, "", "")
		c.Assert(err, IsNil, comment)
		p, err := plan.Optimize(se, stmt, s.is)
		c.Assert(err, IsNil)
		c.Assert(plan.ToString(p), Equals, tt.best, Commentf("for %s", tt.sql))
	}
}
// TestDAGPlanBuilderSubquery checks the physical plans produced for queries
// containing subqueries: EXISTS/IN rewritten to (semi) joins, nested
// subqueries, and correlated subqueries turned into Apply operators. Each case
// pairs a SQL statement with the expected plan-tree string.
func (s *testPlanSuite) TestDAGPlanBuilderSubquery(c *C) {
	defer testleak.AfterTest(c)()
	store, dom, err := newStoreWithBootstrap()
	c.Assert(err, IsNil)
	defer func() {
		dom.Close()
		store.Close()
	}()
	se, err := tidb.CreateSession4Test(store)
	c.Assert(err, IsNil)
	_, err = se.Execute(goctx.Background(), "use test")
	c.Assert(err, IsNil)
	tests := []struct {
		sql  string
		best string
	}{
		// Test join key with cast.
		{
			sql:  "select * from t where exists (select s.a from t s having sum(s.a) = t.a )",
			best: "LeftHashJoin{TableReader(Table(t))->Projection->TableReader(Table(t)->StreamAgg)->StreamAgg}(cast(test.t.a),sel_agg_1)->Projection",
		},
		{
			sql:  "select * from t where exists (select s.a from t s having sum(s.a) = t.a ) order by t.a",
			best: "LeftHashJoin{TableReader(Table(t))->Projection->TableReader(Table(t)->StreamAgg)->StreamAgg}(cast(test.t.a),sel_agg_1)->Projection->Sort",
		},
		// FIXME: Report error by resolver.
		//{
		//	sql:  "select * from t where exists (select s.a from t s having s.a = t.a ) order by t.a",
		//	best: "SemiJoin{TableReader(Table(t))->Projection->TableReader(Table(t)->HashAgg)->HashAgg}(cast(test.t.a),sel_agg_1)->Projection->Sort",
		//},
		{
			sql:  "select * from t where a in (select s.a from t s) order by t.a",
			best: "MergeSemiJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,s.a)",
		},
		// Test Nested sub query.
		{
			sql:  "select * from t where exists (select s.a from t s where s.c in (select c from t as k where k.d = s.d) having sum(s.a) = t.a )",
			best: "LeftHashJoin{TableReader(Table(t))->Projection->MergeSemiJoin{IndexReader(Index(t.c_d_e)[[<nil>,+inf]])->IndexReader(Index(t.c_d_e)[[<nil>,+inf]])}(s.d,k.d)(s.c,k.c)->StreamAgg}(cast(test.t.a),sel_agg_1)->Projection",
		},
		// Test Semi Join + Order by.
		{
			sql:  "select * from t where a in (select a from t) order by b",
			best: "MergeSemiJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->Sort",
		},
		// Test Apply.
		{
			sql:  "select t.c in (select count(*) from t s , t t1 where s.a = t.a and s.a = t1.a) from t",
			best: "Apply{TableReader(Table(t))->MergeInnerJoin{TableReader(Table(t))->Sel([eq(s.a, test.t.a)])->TableReader(Table(t))}(s.a,t1.a)->StreamAgg}->Projection",
		},
		{
			sql:  "select (select count(*) from t s , t t1 where s.a = t.a and s.a = t1.a) from t",
			best: "Apply{TableReader(Table(t))->MergeInnerJoin{TableReader(Table(t))->Sel([eq(s.a, test.t.a)])->TableReader(Table(t))}(s.a,t1.a)->StreamAgg}->Projection",
		},
		{
			sql:  "select (select count(*) from t s , t t1 where s.a = t.a and s.a = t1.a) from t order by t.a",
			best: "Apply{TableReader(Table(t))->MergeInnerJoin{TableReader(Table(t))->Sel([eq(s.a, test.t.a)])->TableReader(Table(t))}(s.a,t1.a)->StreamAgg}->Projection",
		},
	}
	for _, tt := range tests {
		// Attach the SQL text to every assertion so a failure names the
		// offending case instead of only reporting a bare mismatch.
		comment := Commentf("for %s", tt.sql)
		stmt, err := s.ParseOneStmt(tt.sql, "", "")
		c.Assert(err, IsNil, comment)
		p, err := plan.Optimize(se, stmt, s.is)
		c.Assert(err, IsNil, comment)
		c.Assert(plan.ToString(p), Equals, tt.best, comment)
	}
}
// TestDAGPlanTopN checks that Limit/TopN operators are pushed through outer
// joins and down into TableReader/IndexReader where possible.
func (s *testPlanSuite) TestDAGPlanTopN(c *C) {
	defer testleak.AfterTest(c)()
	store, dom, err := newStoreWithBootstrap()
	c.Assert(err, IsNil)
	defer func() {
		dom.Close()
		store.Close()
	}()
	se, err := tidb.CreateSession4Test(store)
	c.Assert(err, IsNil)
	_, err = se.Execute(goctx.Background(), "use test")
	c.Assert(err, IsNil)
	// Each case maps a SQL statement to its expected physical plan string.
	tests := []struct {
		sql  string
		best string
	}{
		{
			sql:  "select * from t t1 left join t t2 on t1.b = t2.b left join t t3 on t2.b = t3.b order by t1.a limit 1",
			best: "LeftHashJoin{LeftHashJoin{TableReader(Table(t)->Limit)->TableReader(Table(t))}(t1.b,t2.b)->TableReader(Table(t))}(t2.b,t3.b)->TopN([t1.a],0,1)",
		},
		{
			sql:  "select * from t t1 left join t t2 on t1.b = t2.b left join t t3 on t2.b = t3.b order by t1.b limit 1",
			best: "LeftHashJoin{LeftHashJoin{TableReader(Table(t)->TopN([t1.b],0,1))->TableReader(Table(t))}(t1.b,t2.b)->TableReader(Table(t))}(t2.b,t3.b)->TopN([t1.b],0,1)",
		},
		{
			sql:  "select * from t t1 left join t t2 on t1.b = t2.b left join t t3 on t2.b = t3.b limit 1",
			best: "LeftHashJoin{LeftHashJoin{TableReader(Table(t)->Limit)->TableReader(Table(t))}(t1.b,t2.b)->TableReader(Table(t))}(t2.b,t3.b)->Limit",
		},
		{
			sql:  "select * from t where b = 1 and c = 1 order by c limit 1",
			best: "IndexLookUp(Index(t.c_d_e)[[1,1]], Table(t)->Sel([eq(test.t.b, 1)]))->Limit",
		},
		{
			sql:  "select * from t where c = 1 order by c limit 1",
			best: "IndexLookUp(Index(t.c_d_e)[[1,1]]->Limit, Table(t))->Limit",
		},
		{
			sql:  "select * from t order by a limit 1",
			best: "TableReader(Table(t)->Limit)->Limit",
		},
		{
			sql:  "select c from t order by c limit 1",
			best: "IndexReader(Index(t.c_d_e)[[<nil>,+inf]]->Limit)->Limit",
		},
	}
	for _, tt := range tests {
		comment := Commentf("for %s", tt.sql)
		stmt, err := s.ParseOneStmt(tt.sql, "", "")
		c.Assert(err, IsNil, comment)
		p, err := plan.Optimize(se, stmt, s.is)
		c.Assert(err, IsNil)
		c.Assert(plan.ToString(p), Equals, tt.best, Commentf("for %s", tt.sql))
	}
}
// TestDAGPlanBuilderBasePhysicalPlan covers non-SELECT statements
// (update/delete/insert/show) plus dual-table and locking reads.
func (s *testPlanSuite) TestDAGPlanBuilderBasePhysicalPlan(c *C) {
	defer testleak.AfterTest(c)()
	store, dom, err := newStoreWithBootstrap()
	c.Assert(err, IsNil)
	defer func() {
		dom.Close()
		store.Close()
	}()
	se, err := tidb.CreateSession4Test(store)
	c.Assert(err, IsNil)
	_, err = se.Execute(goctx.Background(), "use test")
	c.Assert(err, IsNil)
	// Each case maps a SQL statement to its expected physical plan string.
	tests := []struct {
		sql  string
		best string
	}{
		// Test for update.
		{
			sql: "select * from t order by b limit 1 for update",
			// TODO: This is not reasonable. Mysql do like this because the limit of InnoDB, should TiDB keep consistency with MySQL?
			best: "TableReader(Table(t))->Lock->TopN([test.t.b],0,1)",
		},
		// Test complex update.
		{
			sql:  "update t set a = 5 where b < 1 order by d limit 1",
			best: "TableReader(Table(t)->Sel([lt(test.t.b, 1)])->TopN([test.t.d],0,1))->TopN([test.t.d],0,1)->Update",
		},
		// Test simple update.
		{
			sql:  "update t set a = 5",
			best: "TableReader(Table(t))->Update",
		},
		// TODO: Test delete/update with join.
		// Test complex delete.
		{
			sql:  "delete from t where b < 1 order by d limit 1",
			best: "TableReader(Table(t)->Sel([lt(test.t.b, 1)])->TopN([test.t.d],0,1))->TopN([test.t.d],0,1)->Delete",
		},
		// Test simple delete.
		{
			sql:  "delete from t",
			best: "TableReader(Table(t))->Delete",
		},
		// Test complex insert.
		{
			sql:  "insert into t select * from t where b < 1 order by d limit 1",
			best: "TableReader(Table(t)->Sel([lt(test.t.b, 1)])->TopN([test.t.d],0,1))->TopN([test.t.d],0,1)->Insert",
		},
		// Test simple insert.
		{
			sql:  "insert into t values(0,0,0,0,0,0,0)",
			best: "Insert",
		},
		// Test dual.
		{
			sql:  "select 1",
			best: "Dual->Projection",
		},
		{
			sql:  "select * from t where false",
			best: "Dual",
		},
		// Test show.
		{
			sql:  "show tables",
			best: "Show",
		},
	}
	for _, tt := range tests {
		comment := Commentf("for %s", tt.sql)
		stmt, err := s.ParseOneStmt(tt.sql, "", "")
		c.Assert(err, IsNil, comment)
		// Preprocess resolves names; insert/update/delete plans require it.
		plan.Preprocess(se, stmt, s.is, false)
		p, err := plan.Optimize(se, stmt, s.is)
		c.Assert(err, IsNil)
		c.Assert(plan.ToString(p), Equals, tt.best, Commentf("for %s", tt.sql))
	}
}
// TestDAGPlanBuilderUnion checks UNION ALL plans, including how ORDER BY,
// LIMIT and TopN are placed relative to the union.
func (s *testPlanSuite) TestDAGPlanBuilderUnion(c *C) {
	defer testleak.AfterTest(c)()
	store, dom, err := newStoreWithBootstrap()
	c.Assert(err, IsNil)
	defer func() {
		dom.Close()
		store.Close()
	}()
	se, err := tidb.CreateSession4Test(store)
	c.Assert(err, IsNil)
	_, err = se.Execute(goctx.Background(), "use test")
	c.Assert(err, IsNil)
	// Each case maps a SQL statement to its expected physical plan string.
	tests := []struct {
		sql  string
		best string
	}{
		// Test simple union.
		{
			sql:  "select * from t union all select * from t",
			best: "UnionAll{TableReader(Table(t))->Projection->TableReader(Table(t))->Projection}",
		},
		// Test Order by + Union.
		{
			sql:  "select * from t union all (select * from t) order by a ",
			best: "UnionAll{TableReader(Table(t))->Projection->TableReader(Table(t))->Projection}->Sort",
		},
		// Test Limit + Union.
		{
			sql:  "select * from t union all (select * from t) limit 1",
			best: "UnionAll{TableReader(Table(t)->Limit)->Projection->TableReader(Table(t)->Limit)->Projection}->Limit",
		},
		// Test TopN + Union.
		{
			sql:  "select a from t union all (select c from t) order by a limit 1",
			best: "UnionAll{TableReader(Table(t))->Projection->TableReader(Table(t))->Projection}->TopN([t.a],0,1)",
		},
	}
	for _, tt := range tests {
		comment := Commentf("for %s", tt.sql)
		stmt, err := s.ParseOneStmt(tt.sql, "", "")
		c.Assert(err, IsNil, comment)
		p, err := plan.Optimize(se, stmt, s.is)
		c.Assert(err, IsNil)
		c.Assert(plan.ToString(p), Equals, tt.best, Commentf("for %s", tt.sql))
	}
}
// TestDAGPlanBuilderUnionScan verifies that a UnionScan operator is added on
// top of readers when the statement runs inside a dirty (non-read-only) txn.
func (s *testPlanSuite) TestDAGPlanBuilderUnionScan(c *C) {
	defer testleak.AfterTest(c)()
	store, dom, err := newStoreWithBootstrap()
	c.Assert(err, IsNil)
	defer func() {
		dom.Close()
		store.Close()
	}()
	se, err := tidb.CreateSession4Test(store)
	c.Assert(err, IsNil)
	_, err = se.Execute(goctx.Background(), "use test")
	c.Assert(err, IsNil)
	// Each case maps a SQL statement to its expected physical plan string.
	tests := []struct {
		sql  string
		best string
	}{
		// Read table.
		{
			sql:  "select * from t",
			best: "TableReader(Table(t))->UnionScan([])",
		},
		{
			sql:  "select * from t where b = 1",
			best: "TableReader(Table(t)->Sel([eq(test.t.b, 1)]))->UnionScan([eq(test.t.b, 1)])",
		},
		{
			sql:  "select * from t where a = 1",
			best: "TableReader(Table(t))->UnionScan([eq(test.t.a, 1)])",
		},
		{
			sql:  "select * from t where a = 1 order by a",
			best: "TableReader(Table(t))->UnionScan([eq(test.t.a, 1)])",
		},
		{
			sql:  "select * from t where a = 1 order by b",
			best: "TableReader(Table(t))->UnionScan([eq(test.t.a, 1)])->Sort",
		},
		{
			sql:  "select * from t where a = 1 limit 1",
			best: "TableReader(Table(t))->UnionScan([eq(test.t.a, 1)])->Limit",
		},
		{
			sql:  "select * from t where c = 1",
			best: "IndexLookUp(Index(t.c_d_e)[[1,1]], Table(t))->UnionScan([eq(test.t.c, 1)])",
		},
		{
			sql:  "select c from t where c = 1",
			best: "IndexReader(Index(t.c_d_e)[[1,1]])->UnionScan([eq(test.t.c, 1)])->Projection",
		},
	}
	for _, tt := range tests {
		comment := Commentf("for %s", tt.sql)
		stmt, err := s.ParseOneStmt(tt.sql, "", "")
		c.Assert(err, IsNil, comment)
		// A fresh txn per case; a dummy write marks it dirty so the planner
		// must add UnionScan.
		err = se.NewTxn()
		c.Assert(err, IsNil)
		// Make txn not read only.
		se.Txn().Set(nil, nil)
		p, err := plan.Optimize(se, stmt, s.is)
		c.Assert(err, IsNil)
		c.Assert(plan.ToString(p), Equals, tt.best, Commentf("for %s", tt.sql))
	}
}
// TestDAGPlanBuilderAgg checks aggregation planning: hash vs. stream agg,
// pushing aggregation into TableReader/IndexReader, and interaction with
// sort, limit and joins.
func (s *testPlanSuite) TestDAGPlanBuilderAgg(c *C) {
	defer testleak.AfterTest(c)()
	store, dom, err := newStoreWithBootstrap()
	c.Assert(err, IsNil)
	defer func() {
		dom.Close()
		store.Close()
	}()
	se, err := tidb.CreateSession4Test(store)
	c.Assert(err, IsNil)
	// Capture the Execute error; previously it was discarded and the assert
	// below re-checked the stale error from CreateSession4Test.
	_, err = se.Execute(goctx.Background(), "use test")
	c.Assert(err, IsNil)
	// Each case maps a SQL statement to its expected physical plan string.
	tests := []struct {
		sql  string
		best string
	}{
		// Test distinct.
		{
			sql:  "select distinct b from t",
			best: "TableReader(Table(t)->HashAgg)->HashAgg",
		},
		// Test agg + table.
		{
			sql:  "select sum(a), avg(b + c) from t group by d",
			best: "TableReader(Table(t)->HashAgg)->HashAgg",
		},
		{
			sql:  "select sum(distinct a), avg(b + c) from t group by d",
			best: "TableReader(Table(t))->HashAgg",
		},
		// Test group by (c + d)
		{
			sql:  "select sum(e), avg(e + c) from t where c = 1 group by (c + d)",
			best: "IndexReader(Index(t.c_d_e)[[1,1]]->HashAgg)->HashAgg",
		},
		// Test stream agg + index single.
		{
			sql:  "select sum(e), avg(e + c) from t where c = 1 group by c",
			best: "IndexReader(Index(t.c_d_e)[[1,1]]->StreamAgg)->StreamAgg",
		},
		// Test hash agg + index single.
		{
			sql:  "select sum(e), avg(e + c) from t where c = 1 group by d",
			best: "IndexReader(Index(t.c_d_e)[[1,1]]->HashAgg)->HashAgg",
		},
		// Test hash agg + index double.
		{
			sql:  "select sum(e), avg(b + c) from t where c = 1 and e = 1 group by d",
			best: "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.e, 1)]), Table(t)->HashAgg)->HashAgg",
		},
		// Test stream agg + index double.
		{
			sql:  "select sum(e), avg(b + c) from t where c = 1 and e = 1 group by c",
			best: "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.e, 1)]), Table(t))->StreamAgg",
		},
		// Test hash agg + order.
		{
			sql:  "select sum(e) as k, avg(b + c) from t where c = 1 and b = 1 and e = 1 group by d order by k",
			best: "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.e, 1)]), Table(t)->Sel([eq(test.t.b, 1)])->HashAgg)->HashAgg->Sort",
		},
		// Test stream agg + order.
		{
			sql:  "select sum(e) as k, avg(b + c) from t where c = 1 and b = 1 and e = 1 group by c order by k",
			best: "IndexLookUp(Index(t.c_d_e)[[1,1]]->Sel([eq(test.t.e, 1)]), Table(t)->Sel([eq(test.t.b, 1)]))->StreamAgg->Sort",
		},
		// Test agg can't push down.
		{
			sql:  "select sum(to_base64(e)) from t where c = 1",
			best: "IndexReader(Index(t.c_d_e)[[1,1]])->StreamAgg",
		},
		{
			sql:  "select (select count(1) k from t s where s.a = t.a having k != 0) from t",
			best: "Apply{TableReader(Table(t))->TableReader(Table(t))->Sel([eq(s.a, test.t.a)])->StreamAgg->Sel([ne(k, 0)])}->Projection",
		},
		// Test stream agg with multi group by columns.
		{
			sql:  "select sum(to_base64(e)) from t group by e,d,c order by c",
			best: "IndexReader(Index(t.c_d_e)[[<nil>,+inf]])->StreamAgg->Projection",
		},
		{
			sql:  "select sum(e+1) from t group by e,d,c order by c",
			best: "IndexReader(Index(t.c_d_e)[[<nil>,+inf]]->StreamAgg)->StreamAgg->Projection",
		},
		{
			sql:  "select sum(to_base64(e)) from t group by e,d,c order by c,e",
			best: "IndexReader(Index(t.c_d_e)[[<nil>,+inf]])->StreamAgg->Sort->Projection",
		},
		{
			sql:  "select sum(e+1) from t group by e,d,c order by c,e",
			best: "IndexReader(Index(t.c_d_e)[[<nil>,+inf]]->StreamAgg)->StreamAgg->Sort->Projection",
		},
		// Test stream agg + limit or sort
		{
			sql:  "select count(*) from t group by g order by g limit 10",
			best: "IndexReader(Index(t.g)[[<nil>,+inf]]->StreamAgg)->StreamAgg->Limit->Projection",
		},
		{
			sql:  "select count(*) from t group by g limit 10",
			best: "IndexReader(Index(t.g)[[<nil>,+inf]]->StreamAgg)->StreamAgg->Limit",
		},
		{
			sql:  "select count(*) from t group by g order by g",
			best: "IndexReader(Index(t.g)[[<nil>,+inf]]->StreamAgg)->StreamAgg->Projection",
		},
		{
			sql:  "select count(*) from t group by g order by g desc limit 1",
			best: "IndexReader(Index(t.g)[[<nil>,+inf]]->StreamAgg)->StreamAgg->Limit->Projection",
		},
		// Test hash agg + limit or sort
		{
			sql:  "select count(*) from t group by b order by b limit 10",
			best: "TableReader(Table(t)->HashAgg)->HashAgg->TopN([test.t.b],0,10)->Projection",
		},
		{
			sql:  "select count(*) from t group by b order by b",
			best: "TableReader(Table(t)->HashAgg)->HashAgg->Sort->Projection",
		},
		{
			sql:  "select count(*) from t group by b limit 10",
			best: "TableReader(Table(t)->HashAgg)->HashAgg->Limit",
		},
		// Test merge join + stream agg
		{
			sql:  "select sum(a.g), sum(b.g) from t a join t b on a.g = b.g group by a.g",
			best: "MergeInnerJoin{IndexReader(Index(t.g)[[<nil>,+inf]])->IndexReader(Index(t.g)[[<nil>,+inf]])}(a.g,b.g)->StreamAgg",
		},
		// Test index join + stream agg
		{
			sql:  "select /*+ tidb_inlj(a,b) */ sum(a.g), sum(b.g) from t a join t b on a.g = b.g and a.g > 60 group by a.g order by a.g limit 1",
			best: "IndexJoin{IndexReader(Index(t.g)[(60,+inf]])->IndexReader(Index(t.g)[[<nil>,+inf]]->Sel([gt(b.g, 60)]))}(a.g,b.g)->StreamAgg->Limit->Projection",
		},
		{
			sql:  "select sum(a.g), sum(b.g) from t a join t b on a.g = b.g and a.a>5 group by a.g order by a.g limit 1",
			best: "IndexJoin{IndexReader(Index(t.g)[[<nil>,+inf]]->Sel([gt(a.a, 5)]))->IndexReader(Index(t.g)[[<nil>,+inf]])}(a.g,b.g)->StreamAgg->Limit->Projection",
		},
	}
	for _, tt := range tests {
		comment := Commentf("for %s", tt.sql)
		stmt, err := s.ParseOneStmt(tt.sql, "", "")
		c.Assert(err, IsNil, comment)
		p, err := plan.Optimize(se, stmt, s.is)
		// Reuse the precomputed comment so failures always identify the SQL.
		c.Assert(err, IsNil, comment)
		c.Assert(plan.ToString(p), Equals, tt.best, comment)
	}
}
// TestRefine checks index range refinement: how predicates on the c_d_e and
// c_d_e_str indexes are turned into index ranges, including IN lists, LIKE
// patterns with escapes, and implicit type conversion of constants.
func (s *testPlanSuite) TestRefine(c *C) {
	defer testleak.AfterTest(c)()
	store, dom, err := newStoreWithBootstrap()
	c.Assert(err, IsNil)
	defer func() {
		dom.Close()
		store.Close()
	}()
	se, err := tidb.CreateSession4Test(store)
	c.Assert(err, IsNil)
	_, err = se.Execute(goctx.Background(), "use test")
	c.Assert(err, IsNil)
	// Each case maps a SQL statement to its expected physical plan string.
	tests := []struct {
		sql  string
		best string
	}{
		{
			sql:  "select a from t where c is not null",
			best: "IndexReader(Index(t.c_d_e)[[-inf,+inf]])->Projection",
		},
		{
			sql:  "select a from t where c >= 4",
			best: "IndexReader(Index(t.c_d_e)[[4,+inf]])->Projection",
		},
		{
			sql:  "select a from t where c <= 4",
			best: "IndexReader(Index(t.c_d_e)[[-inf,4]])->Projection",
		},
		{
			sql:  "select a from t where c = 4 and d = 5 and e = 6",
			best: "IndexReader(Index(t.c_d_e)[[4 5 6,4 5 6]])->Projection",
		},
		{
			sql:  "select a from t where d = 4 and c = 5",
			best: "IndexReader(Index(t.c_d_e)[[5 4,5 4]])->Projection",
		},
		{
			sql:  "select a from t where c = 4 and e < 5",
			best: "IndexReader(Index(t.c_d_e)[[4,4]]->Sel([lt(test.t.e, 5)]))->Projection",
		},
		{
			sql:  "select a from t where c = 4 and d <= 5 and d > 3",
			best: "IndexReader(Index(t.c_d_e)[(4 3,4 5]])->Projection",
		},
		{
			sql:  "select a from t where d <= 5 and d > 3",
			best: "TableReader(Table(t)->Sel([le(test.t.d, 5) gt(test.t.d, 3)]))->Projection",
		},
		{
			sql:  "select a from t where c between 1 and 2",
			best: "IndexReader(Index(t.c_d_e)[[1,2]])->Projection",
		},
		{
			sql:  "select a from t where c not between 1 and 2",
			best: "IndexReader(Index(t.c_d_e)[[-inf,1) (2,+inf]])->Projection",
		},
		{
			sql:  "select a from t where c <= 5 and c >= 3 and d = 1",
			best: "IndexReader(Index(t.c_d_e)[[3,5]]->Sel([eq(test.t.d, 1)]))->Projection",
		},
		{
			sql:  "select a from t where c = 1 or c = 2 or c = 3",
			best: "IndexReader(Index(t.c_d_e)[[1,1] [2,2] [3,3]])->Projection",
		},
		{
			sql:  "select b from t where c = 1 or c = 2 or c = 3 or c = 4 or c = 5",
			best: "IndexLookUp(Index(t.c_d_e)[[1,1] [2,2] [3,3] [4,4] [5,5]], Table(t))->Projection",
		},
		{
			sql:  "select a from t where c = 5",
			best: "IndexReader(Index(t.c_d_e)[[5,5]])->Projection",
		},
		{
			sql:  "select a from t where c = 5 and b = 1",
			best: "IndexLookUp(Index(t.c_d_e)[[5,5]], Table(t)->Sel([eq(test.t.b, 1)]))->Projection",
		},
		{
			sql:  "select a from t where not a",
			best: "TableReader(Table(t)->Sel([not(test.t.a)]))",
		},
		{
			sql:  "select a from t where c in (1)",
			best: "IndexReader(Index(t.c_d_e)[[1,1]])->Projection",
		},
		{
			sql:  "select a from t where c in ('1')",
			best: "IndexReader(Index(t.c_d_e)[[1,1]])->Projection",
		},
		{
			sql:  "select a from t where c = 1.0",
			best: "IndexReader(Index(t.c_d_e)[[1,1]])->Projection",
		},
		{
			sql:  "select a from t where c in (1) and d > 3",
			best: "IndexReader(Index(t.c_d_e)[(1 3,1 +inf]])->Projection",
		},
		{
			sql:  "select a from t where c in (1, 2, 3) and (d > 3 and d < 4 or d > 5 and d < 6)",
			best: "IndexReader(Index(t.c_d_e)[(1 3,1 4) (1 5,1 6) (2 3,2 4) (2 5,2 6) (3 3,3 4) (3 5,3 6)])->Projection",
		},
		{
			sql:  "select a from t where c in (1, 2, 3)",
			best: "IndexReader(Index(t.c_d_e)[[1,1] [2,2] [3,3]])->Projection",
		},
		{
			sql:  "select a from t where c in (1, 2, 3) and d in (1,2) and e = 1",
			best: "IndexReader(Index(t.c_d_e)[[1 1 1,1 1 1] [1 2 1,1 2 1] [2 1 1,2 1 1] [2 2 1,2 2 1] [3 1 1,3 1 1] [3 2 1,3 2 1]])->Projection",
		},
		{
			sql:  "select a from t where d in (1, 2, 3)",
			best: "TableReader(Table(t)->Sel([in(test.t.d, 1, 2, 3)]))->Projection",
		},
		// TODO: func in is rewritten to DNF which will influence the extraction behavior of accessCondition.
		//{
		//	sql:  "select a from t where c not in (1)",
		//	best: "Table(t)->Projection",
		//},
		// test like
		{
			sql:  "select a from t use index(c_d_e) where c != 1",
			best: "IndexReader(Index(t.c_d_e)[[-inf,1) (1,+inf]])->Projection",
		},
		{
			sql:  "select a from t where c_str like ''",
			best: "IndexReader(Index(t.c_d_e_str)[[,]])->Projection",
		},
		{
			sql:  "select a from t where c_str like 'abc'",
			best: "IndexReader(Index(t.c_d_e_str)[[abc,abc]])->Projection",
		},
		{
			sql:  "select a from t where c_str not like 'abc'",
			best: "TableReader(Table(t)->Sel([not(like(test.t.c_str, abc, 92))]))->Projection",
		},
		{
			sql:  "select a from t where not (c_str like 'abc' or c_str like 'abd')",
			best: "TableReader(Table(t)->Sel([and(not(like(test.t.c_str, abc, 92)), not(like(test.t.c_str, abd, 92)))]))->Projection",
		},
		{
			sql:  "select a from t where c_str like '_abc'",
			best: "TableReader(Table(t)->Sel([like(test.t.c_str, _abc, 92)]))->Projection",
		},
		{
			sql:  "select a from t where c_str like 'abc%'",
			best: "IndexReader(Index(t.c_d_e_str)[[abc,abd)])->Projection",
		},
		{
			sql:  "select a from t where c_str like 'abc_'",
			best: "IndexReader(Index(t.c_d_e_str)[(abc,abd)]->Sel([like(test.t.c_str, abc_, 92)]))->Projection",
		},
		{
			sql:  "select a from t where c_str like 'abc%af'",
			best: "IndexReader(Index(t.c_d_e_str)[[abc,abd)]->Sel([like(test.t.c_str, abc%af, 92)]))->Projection",
		},
		{
			sql:  `select a from t where c_str like 'abc\\_' escape ''`,
			best: "IndexReader(Index(t.c_d_e_str)[[abc_,abc_]])->Projection",
		},
		{
			sql:  `select a from t where c_str like 'abc\\_'`,
			best: "IndexReader(Index(t.c_d_e_str)[[abc_,abc_]])->Projection",
		},
		// {
		// 	sql:  `select a from t where c_str like 'abc\\\\_'`,
		// 	best: "IndexReader(Index(t.c_d_e_str)[(abc\\ +inf,abc] <nil>)])->Selection->Projection",
		// },
		{
			sql:  `select a from t where c_str like 'abc\\_%'`,
			best: "IndexReader(Index(t.c_d_e_str)[[abc_,abc`)])->Projection",
		},
		{
			sql:  `select a from t where c_str like 'abc=_%' escape '='`,
			best: "IndexReader(Index(t.c_d_e_str)[[abc_,abc`)])->Projection",
		},
		{
			sql:  `select a from t where c_str like 'abc\\__'`,
			best: "IndexReader(Index(t.c_d_e_str)[(abc_,abc`)]->Sel([like(test.t.c_str, abc\\__, 92)]))->Projection",
		},
		{
			// Check that 123 is converted to string '123'. index can be used.
			sql:  `select a from t where c_str like 123`,
			best: "IndexReader(Index(t.c_d_e_str)[[123,123]])->Projection",
		},
		// c is type int which will be added cast to specified type when building function signature, no index can be used.
		{
			sql:  `select a from t where c like '1'`,
			best: "TableReader(Table(t))->Sel([like(cast(test.t.c), 1, 92)])->Projection",
		},
		//{
		//	sql:  `select a from t where c = 1.9 and d > 3`,
		//	best: "Index(t.c_d_e)[]->Projection",
		//},
		{
			sql:  `select a from t where c < 1.1`,
			best: "IndexReader(Index(t.c_d_e)[[-inf,2)])->Projection",
		},
		{
			sql:  `select a from t where c <= 1.9`,
			best: "IndexReader(Index(t.c_d_e)[[-inf,1]])->Projection",
		},
		{
			sql:  `select a from t where c >= 1.1`,
			best: "IndexReader(Index(t.c_d_e)[[2,+inf]])->Projection",
		},
		{
			sql:  `select a from t where c > 1.9`,
			best: "IndexReader(Index(t.c_d_e)[(1,+inf]])->Projection",
		},
		{
			sql:  `select a from t where c = 123456789098765432101234`,
			best: "TableReader(Table(t))->Sel([eq(cast(test.t.c), 123456789098765432101234)])->Projection",
		},
		{
			sql:  `select a from t where c = 'hanfei'`,
			best: "TableReader(Table(t))->Sel([eq(cast(test.t.c), cast(hanfei))])->Projection",
		},
	}
	for _, tt := range tests {
		comment := Commentf("for %s", tt.sql)
		stmt, err := s.ParseOneStmt(tt.sql, "", "")
		c.Assert(err, IsNil, comment)
		// Disable truncate-as-error so constant conversion behaves as in
		// normal execution.
		sc := se.(context.Context).GetSessionVars().StmtCtx
		sc.IgnoreTruncate = false
		p, err := plan.Optimize(se, stmt, s.is)
		c.Assert(err, IsNil)
		c.Assert(plan.ToString(p), Equals, tt.best, Commentf("for %s", tt.sql))
	}
}
// TestAggEliminater checks the max/min elimination rule that rewrites
// max(...)/min(...) into Limit plus sort where a single row suffices.
func (s *testPlanSuite) TestAggEliminater(c *C) {
	defer testleak.AfterTest(c)()
	store, dom, err := newStoreWithBootstrap()
	c.Assert(err, IsNil)
	defer func() {
		dom.Close()
		store.Close()
	}()
	se, err := tidb.CreateSession4Test(store)
	c.Assert(err, IsNil)
	_, err = se.Execute(goctx.Background(), "use test")
	c.Assert(err, IsNil)
	// Each case maps a SQL statement to its expected physical plan string.
	tests := []struct {
		sql  string
		best string
	}{
		// Max to Limit + Sort-Desc.
		{
			sql:  "select max(a) from t;",
			best: "TableReader(Table(t)->Limit)->Limit->HashAgg",
		},
		// Min to Limit + Sort.
		{
			sql:  "select min(a) from t;",
			best: "TableReader(Table(t)->Limit)->Limit->HashAgg",
		},
		// Min to Limit + Sort, and isnull() should be added.
		{
			sql:  "select min(c_str) from t;",
			best: "IndexReader(Index(t.c_d_e_str)[[-inf,+inf]]->Limit)->Limit->HashAgg",
		},
		// Do nothing to max + firstrow.
		{
			sql:  "select max(a), b from t;",
			best: "TableReader(Table(t)->StreamAgg)->StreamAgg",
		},
		// If max/min contains scalar function, we can still do transformation.
		{
			sql:  "select max(a+1) from t;",
			best: "TableReader(Table(t)->Sel([not(isnull(plus(test.t.a, 1)))])->TopN([plus(test.t.a, 1) true],0,1))->TopN([plus(test.t.a, 1) true],0,1)->HashAgg",
		},
		// Do nothing to max+min.
		{
			sql:  "select max(a), min(a) from t;",
			best: "TableReader(Table(t)->StreamAgg)->StreamAgg",
		},
		// Do nothing to max with groupby.
		{
			sql:  "select max(a) from t group by b;",
			best: "TableReader(Table(t)->HashAgg)->HashAgg",
		},
		// If inner is not a data source, we can still do transformation.
		{
			sql:  "select max(a) from (select t1.a from t t1 join t t2 on t1.a=t2.a) t",
			best: "IndexJoin{TableReader(Table(t))->TableReader(Table(t))}(t1.a,t2.a)->Limit->HashAgg",
		},
	}
	for _, tt := range tests {
		comment := Commentf("for %s", tt.sql)
		stmt, err := s.ParseOneStmt(tt.sql, "", "")
		c.Assert(err, IsNil, comment)
		// Disable truncate-as-error so constant conversion behaves as in
		// normal execution.
		sc := se.(context.Context).GetSessionVars().StmtCtx
		sc.IgnoreTruncate = false
		p, err := plan.Optimize(se, stmt, s.is)
		c.Assert(err, IsNil)
		c.Assert(plan.ToString(p), Equals, tt.best, Commentf("for %s", tt.sql))
	}
}
| plan/physical_plan_test.go | 0 | https://github.com/pingcap/tidb/commit/835b764db34b9d6469686397a6415845ff4bbd1e | [
0.0008557334658689797,
0.00017941241094376892,
0.000163900142069906,
0.00017305105575360358,
0.00006381401180988178
] |
{
"id": 0,
"code_window": [
"\tif ndvFactor > sampleFactor {\n",
"\t\tndvFactor = sampleFactor\n",
"\t}\n",
"\tbucketIdx := 0\n",
"\tvar lastCount int64\n",
"\thg.Buckets[0].LowerBound = samples[0]\n",
"\tfor i := int64(0); i < int64(len(samples)); i++ {\n",
"\t\tcmp, err := hg.Buckets[bucketIdx].UpperBound.CompareDatum(sc, &samples[i])\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, errors.Trace(err)\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\thg.Buckets[0] = Bucket{\n",
"\t\tLowerBound: samples[0],\n",
"\t\tUpperBound: samples[0],\n",
"\t\tCount: int64(sampleFactor),\n",
"\t\tRepeats: int64(ndvFactor),\n",
"\t}\n",
"\tfor i := int64(1); i < int64(len(samples)); i++ {\n"
],
"file_path": "statistics/builder.go",
"type": "replace",
"edit_start_line_idx": 140
} | Copyright (c) 2014 The sortutil Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the names of the authors nor the names of the
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| _vendor/src/github.com/cznic/sortutil/LICENSE | 0 | https://github.com/pingcap/tidb/commit/835b764db34b9d6469686397a6415845ff4bbd1e | [
0.00017427907732781023,
0.0001695406244834885,
0.00016133445024024695,
0.00017300831677857786,
0.000005825779226142913
] |
{
"id": 0,
"code_window": [
"\tif ndvFactor > sampleFactor {\n",
"\t\tndvFactor = sampleFactor\n",
"\t}\n",
"\tbucketIdx := 0\n",
"\tvar lastCount int64\n",
"\thg.Buckets[0].LowerBound = samples[0]\n",
"\tfor i := int64(0); i < int64(len(samples)); i++ {\n",
"\t\tcmp, err := hg.Buckets[bucketIdx].UpperBound.CompareDatum(sc, &samples[i])\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, errors.Trace(err)\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\thg.Buckets[0] = Bucket{\n",
"\t\tLowerBound: samples[0],\n",
"\t\tUpperBound: samples[0],\n",
"\t\tCount: int64(sampleFactor),\n",
"\t\tRepeats: int64(ndvFactor),\n",
"\t}\n",
"\tfor i := int64(1); i < int64(len(samples)); i++ {\n"
],
"file_path": "statistics/builder.go",
"type": "replace",
"edit_start_line_idx": 140
} | // Extensions to the go-check unittest framework.
//
// NOTE: see https://github.com/go-check/check/pull/6 for reasons why these
// checkers live here.
package check
import (
"bytes"
"reflect"
)
// -----------------------------------------------------------------------
// IsTrue / IsFalse checker.
// isBoolValueChecker asserts that the obtained value equals a fixed boolean.
type isBoolValueChecker struct {
	*CheckerInfo
	expected bool
}

// Check reports whether the single obtained parameter equals the expected
// boolean; a non-bool argument yields an explanatory error string.
func (checker *isBoolValueChecker) Check(params []interface{}, names []string) (result bool, error string) {
	value, isBool := params[0].(bool)
	if !isBool {
		return false, "Argument to " + checker.Name + " must be bool"
	}
	return value == checker.expected, ""
}
// The IsTrue checker verifies that the obtained value is true.
//
// For example:
//
//     c.Assert(value, IsTrue)
//
// Shared instance; safe to reuse because the checker is stateless.
var IsTrue Checker = &isBoolValueChecker{
	&CheckerInfo{Name: "IsTrue", Params: []string{"obtained"}},
	true,
}

// The IsFalse checker verifies that the obtained value is false.
//
// For example:
//
//     c.Assert(value, IsFalse)
//
// Shared instance; safe to reuse because the checker is stateless.
var IsFalse Checker = &isBoolValueChecker{
	&CheckerInfo{Name: "IsFalse", Params: []string{"obtained"}},
	false,
}
// -----------------------------------------------------------------------
// BytesEquals checker.
type bytesEquals struct{}
func (b *bytesEquals) Check(params []interface{}, names []string) (bool, string) {
if len(params) != 2 {
return false, "BytesEqual takes 2 bytestring arguments"
}
b1, ok1 := params[0].([]byte)
b2, ok2 := params[1].([]byte)
if !(ok1 && ok2) {
return false, "Arguments to BytesEqual must both be bytestrings"
}
return bytes.Equal(b1, b2), ""
}
func (b *bytesEquals) Info() *CheckerInfo {
return &CheckerInfo{
Name: "BytesEquals",
Params: []string{"bytes_one", "bytes_two"},
}
}
// BytesEquals checker compares two bytes sequence using bytes.Equal.
//
// For example:
//
// c.Assert(b, BytesEquals, []byte("bar"))
//
// Main difference between DeepEquals and BytesEquals is that BytesEquals treats
// `nil` as empty byte sequence while DeepEquals doesn't.
//
// c.Assert(nil, BytesEquals, []byte("")) // succeeds
// c.Assert(nil, DeepEquals, []byte("")) // fails
var BytesEquals = &bytesEquals{}
// -----------------------------------------------------------------------
// HasKey checker.
type hasKey struct{}

// Check verifies that the first parameter is a map and that the second is
// present in it as a key.
func (h *hasKey) Check(params []interface{}, names []string) (bool, string) {
	if len(params) != 2 {
		return false, "HasKey takes 2 arguments: a map and a key"
	}
	m := reflect.ValueOf(params[0])
	if m.Kind() != reflect.Map {
		return false, "First argument to HasKey must be a map"
	}
	key := reflect.ValueOf(params[1])
	if !key.Type().AssignableTo(m.Type().Key()) {
		return false, "Second argument must be assignable to the map key type"
	}
	return m.MapIndex(key).IsValid(), ""
}

// Info describes the checker for go-check's error reporting.
func (h *hasKey) Info() *CheckerInfo {
	return &CheckerInfo{
		Name:   "HasKey",
		Params: []string{"obtained", "key"},
	}
}

// The HasKey checker verifies that the obtained map contains the given key.
//
// For example:
//
//     c.Assert(myMap, HasKey, "foo")
//
var HasKey = &hasKey{}
| _vendor/src/github.com/pingcap/check/checkers2.go | 0 | https://github.com/pingcap/tidb/commit/835b764db34b9d6469686397a6415845ff4bbd1e | [
0.0001741006999509409,
0.00017047856817953289,
0.00016161419625859708,
0.000171768493601121,
0.000003702160029206425
] |
{
"id": 1,
"code_window": [
"\t\"github.com/pingcap/tidb/model\"\n",
"\t\"github.com/pingcap/tidb/mysql\"\n",
"\t\"github.com/pingcap/tidb/sessionctx/stmtctx\"\n",
"\t\"github.com/pingcap/tidb/types\"\n",
"\t\"github.com/pingcap/tidb/util/chunk\"\n",
"\t\"github.com/pingcap/tidb/util/codec\"\n",
"\t\"github.com/pingcap/tidb/util/mock\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/pingcap/tidb/types/json\"\n"
],
"file_path": "statistics/statistics_test.go",
"type": "add",
"edit_start_line_idx": 28
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package statistics
import (
"bytes"
"math"
"testing"
"github.com/juju/errors"
. "github.com/pingcap/check"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/ranger"
goctx "golang.org/x/net/context"
)
// TestT hooks the gocheck test suites into the standard "go test" runner.
func TestT(t *testing.T) {
	TestingT(t)
}
// Register the suite with gocheck.
var _ = Suite(&testStatisticsSuite{})

// testStatisticsSuite holds fixtures built once in SetUpSuite and shared by
// all tests in this file.
type testStatisticsSuite struct {
	count   int64         // number of rows in rc and pk
	samples []types.Datum // sorted sample datums
	rc      ast.RecordSet // record set containing duplicated values
	pk      ast.RecordSet // record set of distinct increasing values
}
// dataTable pairs a row count with its sample values.
// NOTE(review): not referenced anywhere in this chunk — confirm it is used
// elsewhere in the package before removing.
type dataTable struct {
	count   int64
	samples []types.Datum
}
// recordSet is an in-memory ast.RecordSet backed by a slice of datums; each
// Next call yields one single-column row.
type recordSet struct {
	data   []types.Datum
	count  int64
	cursor int64
	fields []*ast.ResultField
}
// Fields implements ast.RecordSet.
func (r *recordSet) Fields() []*ast.ResultField {
	return r.fields
}

// setFields creates one result field per given mysql type code.
func (r *recordSet) setFields(tps ...uint8) {
	r.fields = make([]*ast.ResultField, len(tps))
	for i := 0; i < len(tps); i++ {
		rf := new(ast.ResultField)
		rf.Column = new(model.ColumnInfo)
		rf.Column.FieldType = *types.NewFieldType(tps[i])
		r.fields[i] = rf
	}
}
// Next returns the next single-datum row, or nil once all rows are consumed.
func (r *recordSet) Next(goctx.Context) (types.Row, error) {
	if r.cursor == r.count {
		return nil, nil
	}
	r.cursor++
	return types.DatumRow{r.data[r.cursor-1]}, nil
}

// NextChunk is a no-op: this record set does not support chunk iteration.
func (r *recordSet) NextChunk(goCtx goctx.Context, chk *chunk.Chunk) error {
	return nil
}

// NewChunk is a no-op: this record set does not support chunk iteration.
func (r *recordSet) NewChunk() *chunk.Chunk {
	return nil
}

// SupportChunk reports that chunk-based iteration is unavailable.
func (r *recordSet) SupportChunk() bool {
	return false
}

// Close rewinds the cursor so the record set can be iterated again.
func (r *recordSet) Close() error {
	r.cursor = 0
	return nil
}
// SetUpSuite builds the shared fixtures: a sorted 10000-element sample set, a
// sorted 100000-row record set with duplicates (rc), and a distinct
// increasing one (pk). All three are drawn from the same skewed
// distribution: one 0, many 2s, then mostly-distinct values perturbed at
// strides 3 and 5.
func (s *testStatisticsSuite) SetUpSuite(c *C) {
	s.count = 100000
	samples := make([]types.Datum, 10000)
	// The first `start` entries are heavily duplicated.
	start := 1000
	samples[0].SetInt64(0)
	for i := 1; i < start; i++ {
		samples[i].SetInt64(2)
	}
	for i := start; i < len(samples); i++ {
		samples[i].SetInt64(int64(i))
	}
	// Perturb at strides 3 and 5 to introduce gaps and repeats.
	for i := start; i < len(samples); i += 3 {
		samples[i].SetInt64(samples[i].GetInt64() + 1)
	}
	for i := start; i < len(samples); i += 5 {
		samples[i].SetInt64(samples[i].GetInt64() + 2)
	}
	sc := new(stmtctx.StatementContext)
	err := types.SortDatums(sc, samples)
	c.Check(err, IsNil)
	s.samples = samples

	// rc repeats the same construction over the full row count.
	rc := &recordSet{
		data:   make([]types.Datum, s.count),
		count:  s.count,
		cursor: 0,
	}
	rc.setFields(mysql.TypeLonglong)
	rc.data[0].SetInt64(0)
	for i := 1; i < start; i++ {
		rc.data[i].SetInt64(2)
	}
	for i := int64(start); i < rc.count; i++ {
		rc.data[i].SetInt64(int64(i))
	}
	for i := int64(start); i < rc.count; i += 3 {
		rc.data[i].SetInt64(rc.data[i].GetInt64() + 1)
	}
	for i := int64(start); i < rc.count; i += 5 {
		rc.data[i].SetInt64(rc.data[i].GetInt64() + 2)
	}
	err = types.SortDatums(sc, rc.data)
	c.Check(err, IsNil)
	s.rc = rc

	// pk holds strictly increasing distinct values 0..count-1.
	pk := &recordSet{
		data:   make([]types.Datum, s.count),
		count:  s.count,
		cursor: 0,
	}
	pk.setFields(mysql.TypeLonglong)
	for i := int64(0); i < rc.count; i++ {
		pk.data[i].SetInt64(int64(i))
	}
	s.pk = pk
}
func encodeKey(key types.Datum) types.Datum {
bytes, _ := codec.EncodeKey(nil, key)
return types.NewBytesDatum(bytes)
}
func buildPK(ctx context.Context, numBuckets, id int64, records ast.RecordSet) (int64, *Histogram, error) {
b := NewSortedBuilder(ctx.GetSessionVars().StmtCtx, numBuckets, id)
goCtx := goctx.Background()
for {
row, err := records.Next(goCtx)
if err != nil {
return 0, nil, errors.Trace(err)
}
if row == nil {
break
}
datums := ast.RowToDatums(row, records.Fields())
err = b.Iterate(datums[0])
if err != nil {
return 0, nil, errors.Trace(err)
}
}
return b.Count, b.hist, nil
}
func buildIndex(ctx context.Context, numBuckets, id int64, records ast.RecordSet) (int64, *Histogram, *CMSketch, error) {
b := NewSortedBuilder(ctx.GetSessionVars().StmtCtx, numBuckets, id)
cms := NewCMSketch(8, 2048)
goCtx := goctx.Background()
for {
row, err := records.Next(goCtx)
if err != nil {
return 0, nil, nil, errors.Trace(err)
}
if row == nil {
break
}
datums := ast.RowToDatums(row, records.Fields())
bytes, err := codec.EncodeKey(nil, datums...)
if err != nil {
return 0, nil, nil, errors.Trace(err)
}
data := types.NewBytesDatum(bytes)
err = b.Iterate(data)
if err != nil {
return 0, nil, nil, errors.Trace(err)
}
cms.InsertBytes(bytes)
}
return b.Count, b.Hist(), cms, nil
}
func calculateScalar(hist *Histogram) {
for i, bkt := range hist.Buckets {
bkt.lowerScalar, bkt.upperScalar, bkt.commonPfxLen = preCalculateDatumScalar(&bkt.LowerBound, &bkt.UpperBound)
hist.Buckets[i] = bkt
}
}
func checkRepeats(c *C, hg *Histogram) {
for _, bkt := range hg.Buckets {
c.Assert(bkt.Repeats, Greater, int64(0))
}
}
func (s *testStatisticsSuite) TestBuild(c *C) {
bucketCount := int64(256)
sketch, _, _ := buildFMSketch(s.rc.(*recordSet).data, 1000)
ctx := mock.NewContext()
sc := ctx.GetSessionVars().StmtCtx
collector := &SampleCollector{
Count: s.count,
NullCount: 0,
Samples: s.samples,
FMSketch: sketch,
}
col, err := BuildColumn(ctx, bucketCount, 2, collector)
checkRepeats(c, col)
calculateScalar(col)
c.Check(err, IsNil)
c.Check(len(col.Buckets), Equals, 232)
count, err := col.equalRowCount(sc, types.NewIntDatum(1000))
c.Check(err, IsNil)
c.Check(int(count), Equals, 0)
count, err = col.lessRowCount(sc, types.NewIntDatum(1000))
c.Check(err, IsNil)
c.Check(int(count), Equals, 10000)
count, err = col.lessRowCount(sc, types.NewIntDatum(2000))
c.Check(err, IsNil)
c.Check(int(count), Equals, 19995)
count, err = col.greaterRowCount(sc, types.NewIntDatum(2000))
c.Check(err, IsNil)
c.Check(int(count), Equals, 80003)
count, err = col.lessRowCount(sc, types.NewIntDatum(200000000))
c.Check(err, IsNil)
c.Check(int(count), Equals, 100000)
count, err = col.greaterRowCount(sc, types.NewIntDatum(200000000))
c.Check(err, IsNil)
c.Check(count, Equals, 0.0)
count, err = col.equalRowCount(sc, types.NewIntDatum(200000000))
c.Check(err, IsNil)
c.Check(count, Equals, 0.0)
count, err = col.betweenRowCount(sc, types.NewIntDatum(3000), types.NewIntDatum(3500))
c.Check(err, IsNil)
c.Check(int(count), Equals, 5008)
count, err = col.lessRowCount(sc, types.NewIntDatum(1))
c.Check(err, IsNil)
c.Check(int(count), Equals, 9)
builder := SampleBuilder{
Sc: mock.NewContext().GetSessionVars().StmtCtx,
RecordSet: s.pk,
ColLen: 1,
PkID: -1,
MaxSampleSize: 1000,
MaxFMSketchSize: 1000,
}
s.pk.Close()
collectors, _, err := builder.CollectColumnStats()
c.Assert(err, IsNil)
c.Assert(len(collectors), Equals, 1)
col, err = BuildColumn(mock.NewContext(), 256, 2, collectors[0])
c.Assert(err, IsNil)
checkRepeats(c, col)
tblCount, col, _, err := buildIndex(ctx, bucketCount, 1, ast.RecordSet(s.rc))
checkRepeats(c, col)
calculateScalar(col)
c.Check(err, IsNil)
c.Check(int(tblCount), Equals, 100000)
count, err = col.equalRowCount(sc, encodeKey(types.NewIntDatum(10000)))
c.Check(err, IsNil)
c.Check(int(count), Equals, 1)
count, err = col.lessRowCount(sc, encodeKey(types.NewIntDatum(20000)))
c.Check(err, IsNil)
c.Check(int(count), Equals, 19999)
count, err = col.betweenRowCount(sc, encodeKey(types.NewIntDatum(30000)), encodeKey(types.NewIntDatum(35000)))
c.Check(err, IsNil)
c.Check(int(count), Equals, 4999)
count, err = col.lessRowCount(sc, encodeKey(types.NewIntDatum(0)))
c.Check(err, IsNil)
c.Check(int(count), Equals, 0)
s.pk.(*recordSet).cursor = 0
tblCount, col, err = buildPK(ctx, bucketCount, 4, ast.RecordSet(s.pk))
checkRepeats(c, col)
calculateScalar(col)
c.Check(err, IsNil)
c.Check(int(tblCount), Equals, 100000)
count, err = col.equalRowCount(sc, types.NewIntDatum(10000))
c.Check(err, IsNil)
c.Check(int(count), Equals, 1)
count, err = col.lessRowCount(sc, types.NewIntDatum(20000))
c.Check(err, IsNil)
c.Check(int(count), Equals, 20000)
count, err = col.betweenRowCount(sc, types.NewIntDatum(30000), types.NewIntDatum(35000))
c.Check(err, IsNil)
c.Check(int(count), Equals, 5000)
count, err = col.greaterAndEqRowCount(sc, types.NewIntDatum(1001))
c.Check(err, IsNil)
c.Check(int(count), Equals, 98999)
count, err = col.lessAndEqRowCount(sc, types.NewIntDatum(99999))
c.Check(err, IsNil)
c.Check(int(count), Equals, 100000)
count, err = col.lessAndEqRowCount(sc, types.Datum{})
c.Check(err, IsNil)
c.Check(int(count), Equals, 0)
count, err = col.greaterRowCount(sc, types.NewIntDatum(1001))
c.Check(err, IsNil)
c.Check(int(count), Equals, 98998)
count, err = col.lessRowCount(sc, types.NewIntDatum(99999))
c.Check(err, IsNil)
c.Check(int(count), Equals, 99999)
}
func (s *testStatisticsSuite) TestHistogramProtoConversion(c *C) {
ctx := mock.NewContext()
s.rc.Close()
tblCount, col, _, err := buildIndex(ctx, 256, 1, ast.RecordSet(s.rc))
c.Check(err, IsNil)
c.Check(int(tblCount), Equals, 100000)
p := HistogramToProto(col)
h := HistogramFromProto(p)
c.Assert(col.NDV, Equals, h.NDV)
c.Assert(len(col.Buckets), Equals, len(h.Buckets))
for i, bkt := range col.Buckets {
c.Assert(bkt.Count, Equals, h.Buckets[i].Count)
c.Assert(bkt.Repeats, Equals, h.Buckets[i].Repeats)
c.Assert(bytes.Equal(bkt.LowerBound.GetBytes(), h.Buckets[i].LowerBound.GetBytes()), IsTrue)
c.Assert(bytes.Equal(bkt.UpperBound.GetBytes(), h.Buckets[i].UpperBound.GetBytes()), IsTrue)
}
}
func mockHistogram(lower, num int64) *Histogram {
h := &Histogram{
NDV: num,
}
for i := int64(0); i < num; i++ {
bkt := Bucket{
LowerBound: types.NewIntDatum(lower + i),
UpperBound: types.NewIntDatum(lower + i),
Count: i + 1,
Repeats: 1,
}
h.Buckets = append(h.Buckets, bkt)
}
return h
}
func (s *testStatisticsSuite) TestMergeHistogram(c *C) {
tests := []struct {
leftLower int64
leftNum int64
rightLower int64
rightNum int64
bucketNum int
ndv int64
}{
{
leftLower: 0,
leftNum: 0,
rightLower: 0,
rightNum: 1,
bucketNum: 1,
ndv: 1,
},
{
leftLower: 0,
leftNum: 200,
rightLower: 200,
rightNum: 200,
bucketNum: 200,
ndv: 400,
},
{
leftLower: 0,
leftNum: 200,
rightLower: 199,
rightNum: 200,
bucketNum: 200,
ndv: 399,
},
}
sc := mock.NewContext().GetSessionVars().StmtCtx
bucketCount := 256
for _, t := range tests {
lh := mockHistogram(t.leftLower, t.leftNum)
rh := mockHistogram(t.rightLower, t.rightNum)
h, err := MergeHistograms(sc, lh, rh, bucketCount)
c.Assert(err, IsNil)
c.Assert(h.NDV, Equals, t.ndv)
c.Assert(len(h.Buckets), Equals, t.bucketNum)
c.Assert(h.Buckets[len(h.Buckets)-1].Count, Equals, t.leftNum+t.rightNum)
expectLower := types.NewIntDatum(t.leftLower)
cmp, err := h.Buckets[0].LowerBound.CompareDatum(sc, &expectLower)
c.Assert(err, IsNil)
c.Assert(cmp, Equals, 0)
expectUpper := types.NewIntDatum(t.rightLower + t.rightNum - 1)
cmp, err = h.Buckets[len(h.Buckets)-1].UpperBound.CompareDatum(sc, &expectUpper)
c.Assert(err, IsNil)
c.Assert(cmp, Equals, 0)
}
}
func (s *testStatisticsSuite) TestPseudoTable(c *C) {
ti := &model.TableInfo{}
colInfo := &model.ColumnInfo{
ID: 1,
FieldType: *types.NewFieldType(mysql.TypeLonglong),
}
ti.Columns = append(ti.Columns, colInfo)
tbl := PseudoTable(ti.ID)
c.Assert(tbl.Count, Greater, int64(0))
sc := new(stmtctx.StatementContext)
count, err := tbl.ColumnLessRowCount(sc, types.NewIntDatum(100), colInfo.ID)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 3333)
count, err = tbl.ColumnEqualRowCount(sc, types.NewIntDatum(1000), colInfo.ID)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 10)
count, err = tbl.ColumnBetweenRowCount(sc, types.NewIntDatum(1000), types.NewIntDatum(5000), colInfo.ID)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 250)
}
func buildCMSketch(values []types.Datum) *CMSketch {
cms := NewCMSketch(8, 2048)
for _, val := range values {
cms.insert(&val)
}
return cms
}
func (s *testStatisticsSuite) TestColumnRange(c *C) {
bucketCount := int64(256)
sketch, _, _ := buildFMSketch(s.rc.(*recordSet).data, 1000)
ctx := mock.NewContext()
sc := ctx.GetSessionVars().StmtCtx
collector := &SampleCollector{
Count: s.count,
NullCount: 0,
Samples: s.samples,
FMSketch: sketch,
}
hg, err := BuildColumn(ctx, bucketCount, 2, collector)
calculateScalar(hg)
c.Check(err, IsNil)
col := &Column{Histogram: *hg, CMSketch: buildCMSketch(s.rc.(*recordSet).data)}
tbl := &Table{
Count: int64(col.totalRowCount()),
Columns: make(map[int64]*Column),
}
ran := []*ranger.NewRange{{
LowVal: []types.Datum{{}},
HighVal: []types.Datum{types.MaxValueDatum()},
}}
count, err := tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 100000)
ran[0].LowVal[0] = types.MinNotNullDatum()
count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 99900)
ran[0].LowVal[0] = types.NewIntDatum(1000)
ran[0].LowExclude = true
ran[0].HighVal[0] = types.NewIntDatum(2000)
ran[0].HighExclude = true
count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 2500)
ran[0].LowExclude = false
ran[0].HighExclude = false
count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 2500)
ran[0].LowVal[0] = ran[0].HighVal[0]
count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 100)
tbl.Columns[0] = col
ran[0].LowVal[0] = types.Datum{}
ran[0].HighVal[0] = types.MaxValueDatum()
count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 100000)
ran[0].LowVal[0] = types.NewIntDatum(1000)
ran[0].LowExclude = true
ran[0].HighVal[0] = types.NewIntDatum(2000)
ran[0].HighExclude = true
count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 9994)
ran[0].LowExclude = false
ran[0].HighExclude = false
count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 9996)
ran[0].LowVal[0] = ran[0].HighVal[0]
count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 1)
}
func (s *testStatisticsSuite) TestIntColumnRanges(c *C) {
bucketCount := int64(256)
ctx := mock.NewContext()
sc := ctx.GetSessionVars().StmtCtx
s.pk.(*recordSet).cursor = 0
rowCount, hg, err := buildPK(ctx, bucketCount, 0, s.pk)
calculateScalar(hg)
c.Check(err, IsNil)
c.Check(rowCount, Equals, int64(100000))
col := &Column{Histogram: *hg}
tbl := &Table{
Count: int64(col.totalRowCount()),
Columns: make(map[int64]*Column),
}
ran := []ranger.IntColumnRange{{
LowVal: math.MinInt64,
HighVal: math.MaxInt64,
}}
count, err := tbl.GetRowCountByIntColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 100000)
ran[0].LowVal = 1000
ran[0].HighVal = 2000
count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 1000)
ran[0].LowVal = 1001
ran[0].HighVal = 1999
count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 998)
ran[0].LowVal = 1000
ran[0].HighVal = 1000
count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 100)
tbl.Columns[0] = col
ran[0].LowVal = math.MinInt64
ran[0].HighVal = math.MaxInt64
count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 100000)
ran[0].LowVal = 1000
ran[0].HighVal = 2000
count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 1000)
ran[0].LowVal = 1001
ran[0].HighVal = 1999
count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 998)
ran[0].LowVal = 1000
ran[0].HighVal = 1000
count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 1)
}
func (s *testStatisticsSuite) TestIndexRanges(c *C) {
bucketCount := int64(256)
ctx := mock.NewContext()
sc := ctx.GetSessionVars().StmtCtx
s.rc.(*recordSet).cursor = 0
rowCount, hg, cms, err := buildIndex(ctx, bucketCount, 0, s.rc)
calculateScalar(hg)
c.Check(err, IsNil)
c.Check(rowCount, Equals, int64(100000))
idxInfo := &model.IndexInfo{Columns: []*model.IndexColumn{{Offset: 0}}}
idx := &Index{Histogram: *hg, CMSketch: cms, Info: idxInfo}
tbl := &Table{
Count: int64(idx.totalRowCount()),
Indices: make(map[int64]*Index),
}
ran := []*ranger.NewRange{{
LowVal: []types.Datum{types.MinNotNullDatum()},
HighVal: []types.Datum{types.MaxValueDatum()},
}}
count, err := tbl.GetRowCountByIndexRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 99900)
ran[0].LowVal[0] = types.NewIntDatum(1000)
ran[0].HighVal[0] = types.NewIntDatum(2000)
count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 2500)
ran[0].LowVal[0] = types.NewIntDatum(1001)
ran[0].HighVal[0] = types.NewIntDatum(1999)
count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 2500)
ran[0].LowVal[0] = types.NewIntDatum(1000)
ran[0].HighVal[0] = types.NewIntDatum(1000)
count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 100)
tbl.Indices[0] = idx
ran[0].LowVal[0] = types.MinNotNullDatum()
ran[0].HighVal[0] = types.MaxValueDatum()
count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 100000)
ran[0].LowVal[0] = types.NewIntDatum(1000)
ran[0].HighVal[0] = types.NewIntDatum(2000)
count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 1000)
ran[0].LowVal[0] = types.NewIntDatum(1001)
ran[0].HighVal[0] = types.NewIntDatum(1990)
count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 989)
ran[0].LowVal[0] = types.NewIntDatum(1000)
ran[0].HighVal[0] = types.NewIntDatum(1000)
count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 0)
}
| statistics/statistics_test.go | 1 | https://github.com/pingcap/tidb/commit/835b764db34b9d6469686397a6415845ff4bbd1e | [
0.07896126806735992,
0.001579968724399805,
0.00016344895993825048,
0.0001721120934234932,
0.009710323996841908
] |
{
"id": 1,
"code_window": [
"\t\"github.com/pingcap/tidb/model\"\n",
"\t\"github.com/pingcap/tidb/mysql\"\n",
"\t\"github.com/pingcap/tidb/sessionctx/stmtctx\"\n",
"\t\"github.com/pingcap/tidb/types\"\n",
"\t\"github.com/pingcap/tidb/util/chunk\"\n",
"\t\"github.com/pingcap/tidb/util/codec\"\n",
"\t\"github.com/pingcap/tidb/util/mock\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/pingcap/tidb/types/json\"\n"
],
"file_path": "statistics/statistics_test.go",
"type": "add",
"edit_start_line_idx": 28
} | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package jaeger
import (
"github.com/opentracing/opentracing-go"
)
// TODO this file should not be needed after TChannel PR.
type formatKey int
// SpanContextFormat is a constant used as OpenTracing Format.
// Requires *SpanContext as carrier.
// This format is intended for interop with TChannel or other Zipkin-like tracers.
const SpanContextFormat formatKey = iota
type jaegerTraceContextPropagator struct {
tracer *Tracer
}
func (p *jaegerTraceContextPropagator) Inject(
ctx SpanContext,
abstractCarrier interface{},
) error {
carrier, ok := abstractCarrier.(*SpanContext)
if !ok {
return opentracing.ErrInvalidCarrier
}
carrier.CopyFrom(&ctx)
return nil
}
func (p *jaegerTraceContextPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
carrier, ok := abstractCarrier.(*SpanContext)
if !ok {
return emptyContext, opentracing.ErrInvalidCarrier
}
ctx := new(SpanContext)
ctx.CopyFrom(carrier)
return *ctx, nil
}
| _vendor/src/github.com/uber/jaeger-client-go/interop.go | 0 | https://github.com/pingcap/tidb/commit/835b764db34b9d6469686397a6415845ff4bbd1e | [
0.0002763659867923707,
0.0002046412118943408,
0.00016584368131589144,
0.00017328964895568788,
0.00004216310480842367
] |
{
"id": 1,
"code_window": [
"\t\"github.com/pingcap/tidb/model\"\n",
"\t\"github.com/pingcap/tidb/mysql\"\n",
"\t\"github.com/pingcap/tidb/sessionctx/stmtctx\"\n",
"\t\"github.com/pingcap/tidb/types\"\n",
"\t\"github.com/pingcap/tidb/util/chunk\"\n",
"\t\"github.com/pingcap/tidb/util/codec\"\n",
"\t\"github.com/pingcap/tidb/util/mock\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/pingcap/tidb/types/json\"\n"
],
"file_path": "statistics/statistics_test.go",
"type": "add",
"edit_start_line_idx": 28
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package mvmap
import (
"bytes"
)
type entry struct {
addr dataAddr
keyLen uint32
valLen uint32
next entryAddr
}
type entryStore struct {
slices [][]entry
sliceIdx uint32
sliceLen uint32
}
type dataStore struct {
slices [][]byte
sliceIdx uint32
sliceLen uint32
}
type entryAddr struct {
sliceIdx uint32
offset uint32
}
type dataAddr struct {
sliceIdx uint32
offset uint32
}
const (
maxDataSliceLen = 64 * 1024
maxEntrySliceLen = 8 * 1024
)
func (ds *dataStore) put(key, value []byte) dataAddr {
dataLen := uint32(len(key) + len(value))
if ds.sliceLen != 0 && ds.sliceLen+dataLen > maxDataSliceLen {
ds.slices = append(ds.slices, make([]byte, 0, max(maxDataSliceLen, int(dataLen))))
ds.sliceLen = 0
ds.sliceIdx++
}
addr := dataAddr{sliceIdx: ds.sliceIdx, offset: ds.sliceLen}
slice := ds.slices[ds.sliceIdx]
slice = append(slice, key...)
slice = append(slice, value...)
ds.slices[ds.sliceIdx] = slice
ds.sliceLen += dataLen
return addr
}
func max(a, b int) int {
if a > b {
return a
}
return b
}
func (ds *dataStore) get(e entry, key []byte) []byte {
slice := ds.slices[e.addr.sliceIdx]
valOffset := e.addr.offset + e.keyLen
if bytes.Compare(key, slice[e.addr.offset:valOffset]) != 0 {
return nil
}
return slice[valOffset : valOffset+e.valLen]
}
func (ds *dataStore) getEntryData(e entry) (key, value []byte) {
slice := ds.slices[e.addr.sliceIdx]
keyOffset := e.addr.offset
key = slice[keyOffset : keyOffset+e.keyLen]
valOffset := e.addr.offset + e.keyLen
value = slice[valOffset : valOffset+e.valLen]
return
}
var nullEntryAddr = entryAddr{}
func (es *entryStore) put(e entry) entryAddr {
if es.sliceLen == maxEntrySliceLen {
es.slices = append(es.slices, make([]entry, 0, maxEntrySliceLen))
es.sliceLen = 0
es.sliceIdx++
}
addr := entryAddr{sliceIdx: es.sliceIdx, offset: es.sliceLen}
slice := es.slices[es.sliceIdx]
slice = append(slice, e)
es.slices[es.sliceIdx] = slice
es.sliceLen++
return addr
}
func (es *entryStore) get(addr entryAddr) entry {
return es.slices[addr.sliceIdx][addr.offset]
}
// MVMap stores multiple value for a given key with minimum GC overhead.
// A given key can store multiple values.
// It is not thread-safe, should only be used in one goroutine.
type MVMap struct {
entryStore entryStore
dataStore dataStore
hashTable map[uint64]entryAddr
length int
}
// NewMVMap creates a new multi-value map.
func NewMVMap() *MVMap {
m := new(MVMap)
m.hashTable = make(map[uint64]entryAddr)
m.entryStore.slices = [][]entry{make([]entry, 0, 64)}
// Append the first empty entry, so the zero entryAddr can represent null.
m.entryStore.put(entry{})
m.dataStore.slices = [][]byte{make([]byte, 0, 1024)}
return m
}
// Put puts the key/value pairs to the MVMap, if the key already exists, old value will not be overwritten,
// values are stored in a list.
func (m *MVMap) Put(key, value []byte) {
hashKey := fnvHash64(key)
oldEntryAddr := m.hashTable[hashKey]
dataAddr := m.dataStore.put(key, value)
e := entry{
addr: dataAddr,
keyLen: uint32(len(key)),
valLen: uint32(len(value)),
next: oldEntryAddr,
}
newEntryAddr := m.entryStore.put(e)
m.hashTable[hashKey] = newEntryAddr
m.length++
}
// Get gets the values of the key.
func (m *MVMap) Get(key []byte) [][]byte {
var values [][]byte
hashKey := fnvHash64(key)
entryAddr := m.hashTable[hashKey]
for entryAddr != nullEntryAddr {
e := m.entryStore.get(entryAddr)
entryAddr = e.next
val := m.dataStore.get(e, key)
if val == nil {
continue
}
values = append(values, val)
}
// Keep the order of input.
for i := 0; i < len(values)/2; i++ {
j := len(values) - 1 - i
values[i], values[j] = values[j], values[i]
}
return values
}
// Len returns the number of values in th mv map, the number of keys may be less than Len
// if the same key is put more than once.
func (m *MVMap) Len() int {
return m.length
}
// Iterator is used to iterate the MVMap.
type Iterator struct {
m *MVMap
sliceCur int
entryCur int
}
// Next returns the next key/value pair of the MVMap.
// It returns (nil, nil) when there is no more entries to iterate.
func (i *Iterator) Next() (key, value []byte) {
for {
if i.sliceCur >= len(i.m.entryStore.slices) {
return nil, nil
}
entrySlice := i.m.entryStore.slices[i.sliceCur]
if i.entryCur >= len(entrySlice) {
i.sliceCur++
i.entryCur = 0
continue
}
entry := entrySlice[i.entryCur]
key, value = i.m.dataStore.getEntryData(entry)
i.entryCur++
return
}
}
// NewIterator creates a iterator for the MVMap.
func (m *MVMap) NewIterator() *Iterator {
// The first entry is empty, so init entryCur to 1.
return &Iterator{m: m, entryCur: 1}
}
| util/mvmap/mvmap.go | 0 | https://github.com/pingcap/tidb/commit/835b764db34b9d6469686397a6415845ff4bbd1e | [
0.00018853895016945899,
0.0001693194208201021,
0.00016279725241474807,
0.00016765977488830686,
0.000006322248282231158
] |
{
"id": 1,
"code_window": [
"\t\"github.com/pingcap/tidb/model\"\n",
"\t\"github.com/pingcap/tidb/mysql\"\n",
"\t\"github.com/pingcap/tidb/sessionctx/stmtctx\"\n",
"\t\"github.com/pingcap/tidb/types\"\n",
"\t\"github.com/pingcap/tidb/util/chunk\"\n",
"\t\"github.com/pingcap/tidb/util/codec\"\n",
"\t\"github.com/pingcap/tidb/util/mock\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/pingcap/tidb/types/json\"\n"
],
"file_path": "statistics/statistics_test.go",
"type": "add",
"edit_start_line_idx": 28
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bufio"
"bytes"
"encoding/binary"
. "github.com/pingcap/check"
"github.com/pingcap/tidb/mysql"
)
type ConnTestSuite struct{}
var _ = Suite(ConnTestSuite{})
func (ts ConnTestSuite) TestMalformHandshakeHeader(c *C) {
c.Parallel()
data := []byte{0x00}
var p handshakeResponse41
_, err := parseHandshakeResponseHeader(&p, data)
c.Assert(err, NotNil)
}
func (ts ConnTestSuite) TestParseHandshakeResponse(c *C) {
c.Parallel()
// test data from http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse41
data := []byte{
0x85, 0xa2, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x40, 0x08, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x72, 0x6f, 0x6f, 0x74, 0x00, 0x14, 0x22, 0x50, 0x79, 0xa2, 0x12, 0xd4,
0xe8, 0x82, 0xe5, 0xb3, 0xf4, 0x1a, 0x97, 0x75, 0x6b, 0xc8, 0xbe, 0xdb, 0x9f, 0x80, 0x6d, 0x79,
0x73, 0x71, 0x6c, 0x5f, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x70, 0x61, 0x73, 0x73, 0x77,
0x6f, 0x72, 0x64, 0x00, 0x61, 0x03, 0x5f, 0x6f, 0x73, 0x09, 0x64, 0x65, 0x62, 0x69, 0x61, 0x6e,
0x36, 0x2e, 0x30, 0x0c, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
0x08, 0x6c, 0x69, 0x62, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x04, 0x5f, 0x70, 0x69, 0x64, 0x05, 0x32,
0x32, 0x33, 0x34, 0x34, 0x0f, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72,
0x73, 0x69, 0x6f, 0x6e, 0x08, 0x35, 0x2e, 0x36, 0x2e, 0x36, 0x2d, 0x6d, 0x39, 0x09, 0x5f, 0x70,
0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x06, 0x78, 0x38, 0x36, 0x5f, 0x36, 0x34, 0x03, 0x66,
0x6f, 0x6f, 0x03, 0x62, 0x61, 0x72,
}
var p handshakeResponse41
offset, err := parseHandshakeResponseHeader(&p, data)
c.Assert(err, IsNil)
c.Assert(p.Capability&mysql.ClientConnectAtts, Equals, mysql.ClientConnectAtts)
err = parseHandshakeResponseBody(&p, data, offset)
c.Assert(err, IsNil)
eq := mapIdentical(p.Attrs, map[string]string{
"_client_version": "5.6.6-m9",
"_platform": "x86_64",
"foo": "bar",
"_os": "debian6.0",
"_client_name": "libmysql",
"_pid": "22344"})
c.Assert(eq, IsTrue)
data = []byte{
0x8d, 0xa6, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x01, 0x08, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x70, 0x61, 0x6d, 0x00, 0x14, 0xab, 0x09, 0xee, 0xf6, 0xbc, 0xb1, 0x32,
0x3e, 0x61, 0x14, 0x38, 0x65, 0xc0, 0x99, 0x1d, 0x95, 0x7d, 0x75, 0xd4, 0x47, 0x74, 0x65, 0x73,
0x74, 0x00, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x5f, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x70,
0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x00,
}
p = handshakeResponse41{}
offset, err = parseHandshakeResponseHeader(&p, data)
c.Assert(err, IsNil)
capability := mysql.ClientProtocol41 |
mysql.ClientPluginAuth |
mysql.ClientSecureConnection |
mysql.ClientConnectWithDB
c.Assert(p.Capability&capability, Equals, capability)
err = parseHandshakeResponseBody(&p, data, offset)
c.Assert(err, IsNil)
c.Assert(p.User, Equals, "pam")
c.Assert(p.DBName, Equals, "test")
}
func (ts ConnTestSuite) TestIssue1768(c *C) {
c.Parallel()
// this data is from captured handshake packet, using mysql client.
// TiDB should handle authorization correctly, even mysql client set
// the ClientPluginAuthLenencClientData capability.
data := []byte{
0x85, 0xa6, 0xff, 0x01, 0x00, 0x00, 0x00, 0x01, 0x21, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x74, 0x65, 0x73, 0x74, 0x00, 0x14, 0xe9, 0x7a, 0x2b, 0xec, 0x4a, 0xa8,
0xea, 0x67, 0x8a, 0xc2, 0x46, 0x4d, 0x32, 0xa4, 0xda, 0x39, 0x77, 0xe5, 0x61, 0x1a, 0x65, 0x03,
0x5f, 0x6f, 0x73, 0x05, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x0c, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e,
0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x08, 0x6c, 0x69, 0x62, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x04,
0x5f, 0x70, 0x69, 0x64, 0x04, 0x39, 0x30, 0x33, 0x30, 0x0f, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e,
0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x06, 0x35, 0x2e, 0x37, 0x2e, 0x31, 0x34,
0x09, 0x5f, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x06, 0x78, 0x38, 0x36, 0x5f, 0x36,
0x34, 0x0c, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x05, 0x6d,
0x79, 0x73, 0x71, 0x6c,
}
p := handshakeResponse41{}
offset, err := parseHandshakeResponseHeader(&p, data)
c.Assert(err, IsNil)
c.Assert(p.Capability&mysql.ClientPluginAuthLenencClientData, Equals, mysql.ClientPluginAuthLenencClientData)
err = parseHandshakeResponseBody(&p, data, offset)
c.Assert(err, IsNil)
c.Assert(len(p.Auth) > 0, IsTrue)
}
func (ts ConnTestSuite) TestInitialHandshake(c *C) {
c.Parallel()
var outBuffer bytes.Buffer
cc := &clientConn{
connectionID: 1,
salt: []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14},
server: &Server{
capability: defaultCapability,
},
pkt: &packetIO{
bufWriter: bufio.NewWriter(&outBuffer),
},
}
err := cc.writeInitialHandshake()
c.Assert(err, IsNil)
expected := new(bytes.Buffer)
expected.WriteByte(0x0a) // Protocol
expected.WriteString(mysql.ServerVersion) // Version
expected.WriteByte(0x00) // NULL
binary.Write(expected, binary.LittleEndian, uint32(1)) // Connection ID
expected.Write([]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00}) // Salt
binary.Write(expected, binary.LittleEndian, uint16(defaultCapability&0xFFFF)) // Server Capability
expected.WriteByte(uint8(mysql.DefaultCollationID)) // Server Language
binary.Write(expected, binary.LittleEndian, mysql.ServerStatusAutocommit) // Server Status
binary.Write(expected, binary.LittleEndian, uint16((defaultCapability>>16)&0xFFFF)) // Extended Server Capability
expected.WriteByte(0x15) // Authentication Plugin Length
expected.Write([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}) // Unused
expected.Write([]byte{0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x00}) // Salt
expected.WriteString("mysql_native_password") // Authentication Plugin
expected.WriteByte(0x00) // NULL
c.Assert(outBuffer.Bytes()[4:], DeepEquals, expected.Bytes())
}
func mapIdentical(m1, m2 map[string]string) bool {
return mapBelong(m1, m2) && mapBelong(m2, m1)
}
func mapBelong(m1, m2 map[string]string) bool {
for k1, v1 := range m1 {
v2, ok := m2[k1]
if !ok && v1 != v2 {
return false
}
}
return true
}
| server/conn_test.go | 0 | https://github.com/pingcap/tidb/commit/835b764db34b9d6469686397a6415845ff4bbd1e | [
0.009747028350830078,
0.0008931922493502498,
0.00016557522758375853,
0.0001742142776492983,
0.0022505803499370813
] |
{
"id": 2,
"code_window": [
"\tsamples []types.Datum\n",
"\trc ast.RecordSet\n",
"\tpk ast.RecordSet\n",
"}\n",
"\n",
"type dataTable struct {\n",
"\tcount int64\n",
"\tsamples []types.Datum\n",
"}\n",
"\n",
"type recordSet struct {\n",
"\tdata []types.Datum\n",
"\tcount int64\n",
"\tcursor int64\n",
"\tfields []*ast.ResultField\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "statistics/statistics_test.go",
"type": "replace",
"edit_start_line_idx": 48
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package statistics
import (
"bytes"
"math"
"testing"
"github.com/juju/errors"
. "github.com/pingcap/check"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/ranger"
goctx "golang.org/x/net/context"
)
func TestT(t *testing.T) {
TestingT(t)
}
var _ = Suite(&testStatisticsSuite{})
type testStatisticsSuite struct {
count int64
samples []types.Datum
rc ast.RecordSet
pk ast.RecordSet
}
type dataTable struct {
count int64
samples []types.Datum
}
type recordSet struct {
data []types.Datum
count int64
cursor int64
fields []*ast.ResultField
}
func (r *recordSet) Fields() []*ast.ResultField {
return r.fields
}
func (r *recordSet) setFields(tps ...uint8) {
r.fields = make([]*ast.ResultField, len(tps))
for i := 0; i < len(tps); i++ {
rf := new(ast.ResultField)
rf.Column = new(model.ColumnInfo)
rf.Column.FieldType = *types.NewFieldType(tps[i])
r.fields[i] = rf
}
}
func (r *recordSet) Next(goctx.Context) (types.Row, error) {
if r.cursor == r.count {
return nil, nil
}
r.cursor++
return types.DatumRow{r.data[r.cursor-1]}, nil
}
func (r *recordSet) NextChunk(goCtx goctx.Context, chk *chunk.Chunk) error {
return nil
}
func (r *recordSet) NewChunk() *chunk.Chunk {
return nil
}
func (r *recordSet) SupportChunk() bool {
return false
}
func (r *recordSet) Close() error {
r.cursor = 0
return nil
}
func (s *testStatisticsSuite) SetUpSuite(c *C) {
s.count = 100000
samples := make([]types.Datum, 10000)
start := 1000
samples[0].SetInt64(0)
for i := 1; i < start; i++ {
samples[i].SetInt64(2)
}
for i := start; i < len(samples); i++ {
samples[i].SetInt64(int64(i))
}
for i := start; i < len(samples); i += 3 {
samples[i].SetInt64(samples[i].GetInt64() + 1)
}
for i := start; i < len(samples); i += 5 {
samples[i].SetInt64(samples[i].GetInt64() + 2)
}
sc := new(stmtctx.StatementContext)
err := types.SortDatums(sc, samples)
c.Check(err, IsNil)
s.samples = samples
rc := &recordSet{
data: make([]types.Datum, s.count),
count: s.count,
cursor: 0,
}
rc.setFields(mysql.TypeLonglong)
rc.data[0].SetInt64(0)
for i := 1; i < start; i++ {
rc.data[i].SetInt64(2)
}
for i := int64(start); i < rc.count; i++ {
rc.data[i].SetInt64(int64(i))
}
for i := int64(start); i < rc.count; i += 3 {
rc.data[i].SetInt64(rc.data[i].GetInt64() + 1)
}
for i := int64(start); i < rc.count; i += 5 {
rc.data[i].SetInt64(rc.data[i].GetInt64() + 2)
}
err = types.SortDatums(sc, rc.data)
c.Check(err, IsNil)
s.rc = rc
pk := &recordSet{
data: make([]types.Datum, s.count),
count: s.count,
cursor: 0,
}
pk.setFields(mysql.TypeLonglong)
for i := int64(0); i < rc.count; i++ {
pk.data[i].SetInt64(int64(i))
}
s.pk = pk
}
func encodeKey(key types.Datum) types.Datum {
bytes, _ := codec.EncodeKey(nil, key)
return types.NewBytesDatum(bytes)
}
func buildPK(ctx context.Context, numBuckets, id int64, records ast.RecordSet) (int64, *Histogram, error) {
b := NewSortedBuilder(ctx.GetSessionVars().StmtCtx, numBuckets, id)
goCtx := goctx.Background()
for {
row, err := records.Next(goCtx)
if err != nil {
return 0, nil, errors.Trace(err)
}
if row == nil {
break
}
datums := ast.RowToDatums(row, records.Fields())
err = b.Iterate(datums[0])
if err != nil {
return 0, nil, errors.Trace(err)
}
}
return b.Count, b.hist, nil
}
func buildIndex(ctx context.Context, numBuckets, id int64, records ast.RecordSet) (int64, *Histogram, *CMSketch, error) {
b := NewSortedBuilder(ctx.GetSessionVars().StmtCtx, numBuckets, id)
cms := NewCMSketch(8, 2048)
goCtx := goctx.Background()
for {
row, err := records.Next(goCtx)
if err != nil {
return 0, nil, nil, errors.Trace(err)
}
if row == nil {
break
}
datums := ast.RowToDatums(row, records.Fields())
bytes, err := codec.EncodeKey(nil, datums...)
if err != nil {
return 0, nil, nil, errors.Trace(err)
}
data := types.NewBytesDatum(bytes)
err = b.Iterate(data)
if err != nil {
return 0, nil, nil, errors.Trace(err)
}
cms.InsertBytes(bytes)
}
return b.Count, b.Hist(), cms, nil
}
func calculateScalar(hist *Histogram) {
for i, bkt := range hist.Buckets {
bkt.lowerScalar, bkt.upperScalar, bkt.commonPfxLen = preCalculateDatumScalar(&bkt.LowerBound, &bkt.UpperBound)
hist.Buckets[i] = bkt
}
}
func checkRepeats(c *C, hg *Histogram) {
for _, bkt := range hg.Buckets {
c.Assert(bkt.Repeats, Greater, int64(0))
}
}
func (s *testStatisticsSuite) TestBuild(c *C) {
bucketCount := int64(256)
sketch, _, _ := buildFMSketch(s.rc.(*recordSet).data, 1000)
ctx := mock.NewContext()
sc := ctx.GetSessionVars().StmtCtx
collector := &SampleCollector{
Count: s.count,
NullCount: 0,
Samples: s.samples,
FMSketch: sketch,
}
col, err := BuildColumn(ctx, bucketCount, 2, collector)
checkRepeats(c, col)
calculateScalar(col)
c.Check(err, IsNil)
c.Check(len(col.Buckets), Equals, 232)
count, err := col.equalRowCount(sc, types.NewIntDatum(1000))
c.Check(err, IsNil)
c.Check(int(count), Equals, 0)
count, err = col.lessRowCount(sc, types.NewIntDatum(1000))
c.Check(err, IsNil)
c.Check(int(count), Equals, 10000)
count, err = col.lessRowCount(sc, types.NewIntDatum(2000))
c.Check(err, IsNil)
c.Check(int(count), Equals, 19995)
count, err = col.greaterRowCount(sc, types.NewIntDatum(2000))
c.Check(err, IsNil)
c.Check(int(count), Equals, 80003)
count, err = col.lessRowCount(sc, types.NewIntDatum(200000000))
c.Check(err, IsNil)
c.Check(int(count), Equals, 100000)
count, err = col.greaterRowCount(sc, types.NewIntDatum(200000000))
c.Check(err, IsNil)
c.Check(count, Equals, 0.0)
count, err = col.equalRowCount(sc, types.NewIntDatum(200000000))
c.Check(err, IsNil)
c.Check(count, Equals, 0.0)
count, err = col.betweenRowCount(sc, types.NewIntDatum(3000), types.NewIntDatum(3500))
c.Check(err, IsNil)
c.Check(int(count), Equals, 5008)
count, err = col.lessRowCount(sc, types.NewIntDatum(1))
c.Check(err, IsNil)
c.Check(int(count), Equals, 9)
builder := SampleBuilder{
Sc: mock.NewContext().GetSessionVars().StmtCtx,
RecordSet: s.pk,
ColLen: 1,
PkID: -1,
MaxSampleSize: 1000,
MaxFMSketchSize: 1000,
}
s.pk.Close()
collectors, _, err := builder.CollectColumnStats()
c.Assert(err, IsNil)
c.Assert(len(collectors), Equals, 1)
col, err = BuildColumn(mock.NewContext(), 256, 2, collectors[0])
c.Assert(err, IsNil)
checkRepeats(c, col)
tblCount, col, _, err := buildIndex(ctx, bucketCount, 1, ast.RecordSet(s.rc))
checkRepeats(c, col)
calculateScalar(col)
c.Check(err, IsNil)
c.Check(int(tblCount), Equals, 100000)
count, err = col.equalRowCount(sc, encodeKey(types.NewIntDatum(10000)))
c.Check(err, IsNil)
c.Check(int(count), Equals, 1)
count, err = col.lessRowCount(sc, encodeKey(types.NewIntDatum(20000)))
c.Check(err, IsNil)
c.Check(int(count), Equals, 19999)
count, err = col.betweenRowCount(sc, encodeKey(types.NewIntDatum(30000)), encodeKey(types.NewIntDatum(35000)))
c.Check(err, IsNil)
c.Check(int(count), Equals, 4999)
count, err = col.lessRowCount(sc, encodeKey(types.NewIntDatum(0)))
c.Check(err, IsNil)
c.Check(int(count), Equals, 0)
s.pk.(*recordSet).cursor = 0
tblCount, col, err = buildPK(ctx, bucketCount, 4, ast.RecordSet(s.pk))
checkRepeats(c, col)
calculateScalar(col)
c.Check(err, IsNil)
c.Check(int(tblCount), Equals, 100000)
count, err = col.equalRowCount(sc, types.NewIntDatum(10000))
c.Check(err, IsNil)
c.Check(int(count), Equals, 1)
count, err = col.lessRowCount(sc, types.NewIntDatum(20000))
c.Check(err, IsNil)
c.Check(int(count), Equals, 20000)
count, err = col.betweenRowCount(sc, types.NewIntDatum(30000), types.NewIntDatum(35000))
c.Check(err, IsNil)
c.Check(int(count), Equals, 5000)
count, err = col.greaterAndEqRowCount(sc, types.NewIntDatum(1001))
c.Check(err, IsNil)
c.Check(int(count), Equals, 98999)
count, err = col.lessAndEqRowCount(sc, types.NewIntDatum(99999))
c.Check(err, IsNil)
c.Check(int(count), Equals, 100000)
count, err = col.lessAndEqRowCount(sc, types.Datum{})
c.Check(err, IsNil)
c.Check(int(count), Equals, 0)
count, err = col.greaterRowCount(sc, types.NewIntDatum(1001))
c.Check(err, IsNil)
c.Check(int(count), Equals, 98998)
count, err = col.lessRowCount(sc, types.NewIntDatum(99999))
c.Check(err, IsNil)
c.Check(int(count), Equals, 99999)
}
func (s *testStatisticsSuite) TestHistogramProtoConversion(c *C) {
ctx := mock.NewContext()
s.rc.Close()
tblCount, col, _, err := buildIndex(ctx, 256, 1, ast.RecordSet(s.rc))
c.Check(err, IsNil)
c.Check(int(tblCount), Equals, 100000)
p := HistogramToProto(col)
h := HistogramFromProto(p)
c.Assert(col.NDV, Equals, h.NDV)
c.Assert(len(col.Buckets), Equals, len(h.Buckets))
for i, bkt := range col.Buckets {
c.Assert(bkt.Count, Equals, h.Buckets[i].Count)
c.Assert(bkt.Repeats, Equals, h.Buckets[i].Repeats)
c.Assert(bytes.Equal(bkt.LowerBound.GetBytes(), h.Buckets[i].LowerBound.GetBytes()), IsTrue)
c.Assert(bytes.Equal(bkt.UpperBound.GetBytes(), h.Buckets[i].UpperBound.GetBytes()), IsTrue)
}
}
func mockHistogram(lower, num int64) *Histogram {
h := &Histogram{
NDV: num,
}
for i := int64(0); i < num; i++ {
bkt := Bucket{
LowerBound: types.NewIntDatum(lower + i),
UpperBound: types.NewIntDatum(lower + i),
Count: i + 1,
Repeats: 1,
}
h.Buckets = append(h.Buckets, bkt)
}
return h
}
func (s *testStatisticsSuite) TestMergeHistogram(c *C) {
tests := []struct {
leftLower int64
leftNum int64
rightLower int64
rightNum int64
bucketNum int
ndv int64
}{
{
leftLower: 0,
leftNum: 0,
rightLower: 0,
rightNum: 1,
bucketNum: 1,
ndv: 1,
},
{
leftLower: 0,
leftNum: 200,
rightLower: 200,
rightNum: 200,
bucketNum: 200,
ndv: 400,
},
{
leftLower: 0,
leftNum: 200,
rightLower: 199,
rightNum: 200,
bucketNum: 200,
ndv: 399,
},
}
sc := mock.NewContext().GetSessionVars().StmtCtx
bucketCount := 256
for _, t := range tests {
lh := mockHistogram(t.leftLower, t.leftNum)
rh := mockHistogram(t.rightLower, t.rightNum)
h, err := MergeHistograms(sc, lh, rh, bucketCount)
c.Assert(err, IsNil)
c.Assert(h.NDV, Equals, t.ndv)
c.Assert(len(h.Buckets), Equals, t.bucketNum)
c.Assert(h.Buckets[len(h.Buckets)-1].Count, Equals, t.leftNum+t.rightNum)
expectLower := types.NewIntDatum(t.leftLower)
cmp, err := h.Buckets[0].LowerBound.CompareDatum(sc, &expectLower)
c.Assert(err, IsNil)
c.Assert(cmp, Equals, 0)
expectUpper := types.NewIntDatum(t.rightLower + t.rightNum - 1)
cmp, err = h.Buckets[len(h.Buckets)-1].UpperBound.CompareDatum(sc, &expectUpper)
c.Assert(err, IsNil)
c.Assert(cmp, Equals, 0)
}
}
func (s *testStatisticsSuite) TestPseudoTable(c *C) {
ti := &model.TableInfo{}
colInfo := &model.ColumnInfo{
ID: 1,
FieldType: *types.NewFieldType(mysql.TypeLonglong),
}
ti.Columns = append(ti.Columns, colInfo)
tbl := PseudoTable(ti.ID)
c.Assert(tbl.Count, Greater, int64(0))
sc := new(stmtctx.StatementContext)
count, err := tbl.ColumnLessRowCount(sc, types.NewIntDatum(100), colInfo.ID)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 3333)
count, err = tbl.ColumnEqualRowCount(sc, types.NewIntDatum(1000), colInfo.ID)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 10)
count, err = tbl.ColumnBetweenRowCount(sc, types.NewIntDatum(1000), types.NewIntDatum(5000), colInfo.ID)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 250)
}
func buildCMSketch(values []types.Datum) *CMSketch {
cms := NewCMSketch(8, 2048)
for _, val := range values {
cms.insert(&val)
}
return cms
}
func (s *testStatisticsSuite) TestColumnRange(c *C) {
bucketCount := int64(256)
sketch, _, _ := buildFMSketch(s.rc.(*recordSet).data, 1000)
ctx := mock.NewContext()
sc := ctx.GetSessionVars().StmtCtx
collector := &SampleCollector{
Count: s.count,
NullCount: 0,
Samples: s.samples,
FMSketch: sketch,
}
hg, err := BuildColumn(ctx, bucketCount, 2, collector)
calculateScalar(hg)
c.Check(err, IsNil)
col := &Column{Histogram: *hg, CMSketch: buildCMSketch(s.rc.(*recordSet).data)}
tbl := &Table{
Count: int64(col.totalRowCount()),
Columns: make(map[int64]*Column),
}
ran := []*ranger.NewRange{{
LowVal: []types.Datum{{}},
HighVal: []types.Datum{types.MaxValueDatum()},
}}
count, err := tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 100000)
ran[0].LowVal[0] = types.MinNotNullDatum()
count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 99900)
ran[0].LowVal[0] = types.NewIntDatum(1000)
ran[0].LowExclude = true
ran[0].HighVal[0] = types.NewIntDatum(2000)
ran[0].HighExclude = true
count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 2500)
ran[0].LowExclude = false
ran[0].HighExclude = false
count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 2500)
ran[0].LowVal[0] = ran[0].HighVal[0]
count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 100)
tbl.Columns[0] = col
ran[0].LowVal[0] = types.Datum{}
ran[0].HighVal[0] = types.MaxValueDatum()
count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 100000)
ran[0].LowVal[0] = types.NewIntDatum(1000)
ran[0].LowExclude = true
ran[0].HighVal[0] = types.NewIntDatum(2000)
ran[0].HighExclude = true
count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 9994)
ran[0].LowExclude = false
ran[0].HighExclude = false
count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 9996)
ran[0].LowVal[0] = ran[0].HighVal[0]
count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 1)
}
func (s *testStatisticsSuite) TestIntColumnRanges(c *C) {
bucketCount := int64(256)
ctx := mock.NewContext()
sc := ctx.GetSessionVars().StmtCtx
s.pk.(*recordSet).cursor = 0
rowCount, hg, err := buildPK(ctx, bucketCount, 0, s.pk)
calculateScalar(hg)
c.Check(err, IsNil)
c.Check(rowCount, Equals, int64(100000))
col := &Column{Histogram: *hg}
tbl := &Table{
Count: int64(col.totalRowCount()),
Columns: make(map[int64]*Column),
}
ran := []ranger.IntColumnRange{{
LowVal: math.MinInt64,
HighVal: math.MaxInt64,
}}
count, err := tbl.GetRowCountByIntColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 100000)
ran[0].LowVal = 1000
ran[0].HighVal = 2000
count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 1000)
ran[0].LowVal = 1001
ran[0].HighVal = 1999
count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 998)
ran[0].LowVal = 1000
ran[0].HighVal = 1000
count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 100)
tbl.Columns[0] = col
ran[0].LowVal = math.MinInt64
ran[0].HighVal = math.MaxInt64
count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 100000)
ran[0].LowVal = 1000
ran[0].HighVal = 2000
count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 1000)
ran[0].LowVal = 1001
ran[0].HighVal = 1999
count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 998)
ran[0].LowVal = 1000
ran[0].HighVal = 1000
count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 1)
}
func (s *testStatisticsSuite) TestIndexRanges(c *C) {
bucketCount := int64(256)
ctx := mock.NewContext()
sc := ctx.GetSessionVars().StmtCtx
s.rc.(*recordSet).cursor = 0
rowCount, hg, cms, err := buildIndex(ctx, bucketCount, 0, s.rc)
calculateScalar(hg)
c.Check(err, IsNil)
c.Check(rowCount, Equals, int64(100000))
idxInfo := &model.IndexInfo{Columns: []*model.IndexColumn{{Offset: 0}}}
idx := &Index{Histogram: *hg, CMSketch: cms, Info: idxInfo}
tbl := &Table{
Count: int64(idx.totalRowCount()),
Indices: make(map[int64]*Index),
}
ran := []*ranger.NewRange{{
LowVal: []types.Datum{types.MinNotNullDatum()},
HighVal: []types.Datum{types.MaxValueDatum()},
}}
count, err := tbl.GetRowCountByIndexRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 99900)
ran[0].LowVal[0] = types.NewIntDatum(1000)
ran[0].HighVal[0] = types.NewIntDatum(2000)
count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 2500)
ran[0].LowVal[0] = types.NewIntDatum(1001)
ran[0].HighVal[0] = types.NewIntDatum(1999)
count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 2500)
ran[0].LowVal[0] = types.NewIntDatum(1000)
ran[0].HighVal[0] = types.NewIntDatum(1000)
count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 100)
tbl.Indices[0] = idx
ran[0].LowVal[0] = types.MinNotNullDatum()
ran[0].HighVal[0] = types.MaxValueDatum()
count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 100000)
ran[0].LowVal[0] = types.NewIntDatum(1000)
ran[0].HighVal[0] = types.NewIntDatum(2000)
count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 1000)
ran[0].LowVal[0] = types.NewIntDatum(1001)
ran[0].HighVal[0] = types.NewIntDatum(1990)
count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 989)
ran[0].LowVal[0] = types.NewIntDatum(1000)
ran[0].HighVal[0] = types.NewIntDatum(1000)
count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 0)
}
| statistics/statistics_test.go | 1 | https://github.com/pingcap/tidb/commit/835b764db34b9d6469686397a6415845ff4bbd1e | [
0.9973347783088684,
0.10905873775482178,
0.00016296000103466213,
0.00017702124023344368,
0.30564793944358826
] |
{
"id": 2,
"code_window": [
"\tsamples []types.Datum\n",
"\trc ast.RecordSet\n",
"\tpk ast.RecordSet\n",
"}\n",
"\n",
"type dataTable struct {\n",
"\tcount int64\n",
"\tsamples []types.Datum\n",
"}\n",
"\n",
"type recordSet struct {\n",
"\tdata []types.Datum\n",
"\tcount int64\n",
"\tcursor int64\n",
"\tfields []*ast.ResultField\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "statistics/statistics_test.go",
"type": "replace",
"edit_start_line_idx": 48
} | // mksyscall.pl -l32 -arm syscall_bsd.go syscall_netbsd.go syscall_netbsd_arm.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
// +build arm,netbsd
package unix
import (
"syscall"
"unsafe"
)
var _ syscall.Errno
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func setgroups(ngid int, gid *_Gid_t) (err error) {
_, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
wpid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
_, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
_, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func socket(domain int, typ int, proto int) (fd int, err error) {
r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
_, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
_, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
_, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
_, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Shutdown(s int, how int) (err error) {
_, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
_, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) {
r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
var _p0 unsafe.Pointer
if len(mib) > 0 {
_p0 = unsafe.Pointer(&mib[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
use(_p0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func utimes(path string, timeval *[2]Timeval) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func futimes(fd int, timeval *[2]Timeval) (err error) {
_, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func fcntl(fd int, cmd int, arg int) (val int, err error) {
r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
val = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe() (fd1 int, fd2 int, err error) {
r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0)
fd1 = int(r0)
fd2 = int(r1)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getdents(fd int, buf []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Access(path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Adjtime(delta *Timeval, olddelta *Timeval) (err error) {
_, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chflags(path string, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chmod(path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chown(path string, uid int, gid int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chroot(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Dup(fd int) (nfd int, err error) {
r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0)
nfd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Dup2(from int, to int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Exit(code int) {
Syscall(SYS_EXIT, uintptr(code), 0, 0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchdir(fd int) (err error) {
_, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchflags(fd int, flags int) (err error) {
_, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchmod(fd int, mode uint32) (err error) {
_, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchown(fd int, uid int, gid int) (err error) {
_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Flock(fd int, how int) (err error) {
_, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fpathconf(fd int, name int) (val int, err error) {
r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0)
val = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstat(fd int, stat *Stat_t) (err error) {
_, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fsync(fd int) (err error) {
_, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Ftruncate(fd int, length int64) (err error) {
_, _, e1 := Syscall6(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getegid() (egid int) {
r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0)
egid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Geteuid() (uid int) {
r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0)
uid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getgid() (gid int) {
r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0)
gid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpgid(pid int) (pgid int, err error) {
r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0)
pgid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpgrp() (pgrp int) {
r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0)
pgrp = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpid() (pid int) {
r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0)
pid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getppid() (ppid int) {
r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0)
ppid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpriority(which int, who int) (prio int, err error) {
r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0)
prio = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getrlimit(which int, lim *Rlimit) (err error) {
_, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getrusage(who int, rusage *Rusage) (err error) {
_, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getsid(pid int) (sid int, err error) {
r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0)
sid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Gettimeofday(tv *Timeval) (err error) {
_, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getuid() (uid int) {
r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0)
uid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Issetugid() (tainted bool) {
r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0)
tainted = bool(r0 != 0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Kill(pid int, signum syscall.Signal) (err error) {
_, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Kqueue() (fd int, err error) {
r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0)
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Lchown(path string, uid int, gid int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Link(path string, link string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(link)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Listen(s int, backlog int) (err error) {
_, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Lstat(path string, stat *Stat_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkdir(path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkfifo(path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mknod(path string, mode uint32, dev int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev))
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mlock(b []byte) (err error) {
var _p0 unsafe.Pointer
if len(b) > 0 {
_p0 = unsafe.Pointer(&b[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mlockall(flags int) (err error) {
_, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mprotect(b []byte, prot int) (err error) {
var _p0 unsafe.Pointer
if len(b) > 0 {
_p0 = unsafe.Pointer(&b[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Munlock(b []byte) (err error) {
var _p0 unsafe.Pointer
if len(b) > 0 {
_p0 = unsafe.Pointer(&b[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Munlockall() (err error) {
_, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
_, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Open(path string, mode int, perm uint32) (fd int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
use(unsafe.Pointer(_p0))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Pathconf(path string, name int) (val int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0)
use(unsafe.Pointer(_p0))
val = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Pread(fd int, p []byte, offset int64) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func read(fd int, p []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Readlink(path string, buf []byte) (n int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
var _p1 unsafe.Pointer
if len(buf) > 0 {
_p1 = unsafe.Pointer(&buf[0])
} else {
_p1 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)))
use(unsafe.Pointer(_p0))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Rename(from string, to string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(from)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(to)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Revoke(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Rmdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0)
newoffset = int64(int64(r1)<<32 | int64(r0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) {
_, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setegid(egid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Seteuid(euid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setgid(gid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setpgid(pid int, pgid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setpriority(which int, who int, prio int) (err error) {
_, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setregid(rgid int, egid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setreuid(ruid int, euid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setrlimit(which int, lim *Rlimit) (err error) {
_, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setsid() (pid int, err error) {
r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0)
pid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Settimeofday(tp *Timeval) (err error) {
_, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setuid(uid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Stat(path string, stat *Stat_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Symlink(path string, link string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(link)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Sync() (err error) {
_, _, e1 := Syscall(SYS_SYNC, 0, 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Truncate(path string, length int64) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Umask(newmask int) (oldmask int) {
r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0)
oldmask = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Unlink(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Unmount(path string, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func write(fd int, p []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) {
r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0)
ret = uintptr(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func munmap(addr uintptr, length uintptr) (err error) {
_, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
| _vendor/src/golang.org/x/sys/unix/zsyscall_netbsd_arm.go | 0 | https://github.com/pingcap/tidb/commit/835b764db34b9d6469686397a6415845ff4bbd1e | [
0.00022682287089992315,
0.00017000491789076477,
0.000161401680088602,
0.00017000413208734244,
0.000005650183084071614
] |
{
"id": 2,
"code_window": [
"\tsamples []types.Datum\n",
"\trc ast.RecordSet\n",
"\tpk ast.RecordSet\n",
"}\n",
"\n",
"type dataTable struct {\n",
"\tcount int64\n",
"\tsamples []types.Datum\n",
"}\n",
"\n",
"type recordSet struct {\n",
"\tdata []types.Datum\n",
"\tcount int64\n",
"\tcursor int64\n",
"\tfields []*ast.ResultField\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "statistics/statistics_test.go",
"type": "replace",
"edit_start_line_idx": 48
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tables
import (
"bytes"
"encoding/binary"
"io"
"github.com/juju/errors"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/codec"
)
func encodeHandle(h int64) []byte {
buf := &bytes.Buffer{}
err := binary.Write(buf, binary.BigEndian, h)
if err != nil {
panic(err)
}
return buf.Bytes()
}
func decodeHandle(data []byte) (int64, error) {
var h int64
buf := bytes.NewBuffer(data)
err := binary.Read(buf, binary.BigEndian, &h)
return h, errors.Trace(err)
}
// indexIter is for KV store index iterator.
type indexIter struct {
it kv.Iterator
idx *index
prefix kv.Key
}
// Close does the clean up works when KV store index iterator is closed.
func (c *indexIter) Close() {
if c.it != nil {
c.it.Close()
c.it = nil
}
}
// Next returns current key and moves iterator to the next step.
func (c *indexIter) Next() (val []types.Datum, h int64, err error) {
if !c.it.Valid() {
return nil, 0, errors.Trace(io.EOF)
}
if !c.it.Key().HasPrefix(c.prefix) {
return nil, 0, errors.Trace(io.EOF)
}
// get indexedValues
buf := c.it.Key()[len(c.prefix):]
vv, err := codec.Decode(buf, len(c.idx.idxInfo.Columns))
if err != nil {
return nil, 0, errors.Trace(err)
}
// if index is *not* unique, the handle is in keybuf
if !c.idx.idxInfo.Unique {
h = vv[len(vv)-1].GetInt64()
val = vv[0 : len(vv)-1]
} else {
// otherwise handle is value
h, err = decodeHandle(c.it.Value())
if err != nil {
return nil, 0, errors.Trace(err)
}
val = vv
}
// update new iter to next
err = c.it.Next()
if err != nil {
return nil, 0, errors.Trace(err)
}
return
}
// index is the data structure for index data in the KV store.
type index struct {
tblInfo *model.TableInfo
idxInfo *model.IndexInfo
prefix kv.Key
buffer []byte // It's used reduce the number of new slice when multiple index keys are created.
}
// NewIndexWithBuffer builds a new Index object whit the buffer.
func NewIndexWithBuffer(tableInfo *model.TableInfo, indexInfo *model.IndexInfo) table.Index {
idxPrefix := tablecodec.EncodeTableIndexPrefix(tableInfo.ID, indexInfo.ID)
index := &index{
tblInfo: tableInfo,
idxInfo: indexInfo,
prefix: idxPrefix,
buffer: make([]byte, 0, len(idxPrefix)+len(indexInfo.Columns)*9+9),
}
return index
}
// NewIndex builds a new Index object.
func NewIndex(tableInfo *model.TableInfo, indexInfo *model.IndexInfo) table.Index {
index := &index{
tblInfo: tableInfo,
idxInfo: indexInfo,
prefix: tablecodec.EncodeTableIndexPrefix(tableInfo.ID, indexInfo.ID),
}
return index
}
// Meta returns index info.
func (c *index) Meta() *model.IndexInfo {
return c.idxInfo
}
// GenIndexKey generates storage key for index values. Returned distinct indicates whether the
// indexed values should be distinct in storage (i.e. whether handle is encoded in the key).
func (c *index) GenIndexKey(indexedValues []types.Datum, h int64) (key []byte, distinct bool, err error) {
if c.idxInfo.Unique {
// See https://dev.mysql.com/doc/refman/5.7/en/create-index.html
// A UNIQUE index creates a constraint such that all values in the index must be distinct.
// An error occurs if you try to add a new row with a key value that matches an existing row.
// For all engines, a UNIQUE index permits multiple NULL values for columns that can contain NULL.
distinct = true
for _, cv := range indexedValues {
if cv.IsNull() {
distinct = false
break
}
}
}
// For string columns, indexes can be created that use only the leading part of column values,
// using col_name(length) syntax to specify an index prefix length.
for i := 0; i < len(indexedValues); i++ {
v := &indexedValues[i]
if v.Kind() == types.KindString || v.Kind() == types.KindBytes {
ic := c.idxInfo.Columns[i]
if ic.Length != types.UnspecifiedLength && len(v.GetBytes()) > ic.Length {
// truncate value and limit its length
v.SetBytes(v.GetBytes()[:ic.Length])
}
}
}
if c.buffer != nil {
key = c.buffer[:0]
} else {
key = make([]byte, 0, len(c.prefix)+len(indexedValues)*9+9)
}
key = append(key, []byte(c.prefix)...)
key, err = codec.EncodeKey(key, indexedValues...)
if !distinct && err == nil {
key, err = codec.EncodeKey(key, types.NewDatum(h))
}
if err != nil {
return nil, false, errors.Trace(err)
}
return
}
// Create creates a new entry in the kvIndex data.
// If the index is unique and there is an existing entry with the same key,
// Create will return the existing entry's handle as the first return value, ErrKeyExists as the second return value.
func (c *index) Create(ctx context.Context, rm kv.RetrieverMutator, indexedValues []types.Datum, h int64) (int64, error) {
importData := ctx.GetSessionVars().ImportingData
key, distinct, err := c.GenIndexKey(indexedValues, h)
if err != nil {
return 0, errors.Trace(err)
}
if !distinct {
// non-unique index doesn't need store value, write a '0' to reduce space
err = rm.Set(key, []byte{'0'})
return 0, errors.Trace(err)
}
var value []byte
if !importData {
value, err = rm.Get(key)
}
if importData || kv.IsErrNotFound(err) {
err = rm.Set(key, encodeHandle(h))
return 0, errors.Trace(err)
}
handle, err := decodeHandle(value)
if err != nil {
return 0, errors.Trace(err)
}
return handle, kv.ErrKeyExists
}
// Delete removes the entry for handle h and indexdValues from KV index.
func (c *index) Delete(m kv.Mutator, indexedValues []types.Datum, h int64) error {
key, _, err := c.GenIndexKey(indexedValues, h)
if err != nil {
return errors.Trace(err)
}
err = m.Delete(key)
return errors.Trace(err)
}
// Drop removes the KV index from store.
func (c *index) Drop(rm kv.RetrieverMutator) error {
it, err := rm.Seek(c.prefix)
if err != nil {
return errors.Trace(err)
}
defer it.Close()
// remove all indices
for it.Valid() {
if !it.Key().HasPrefix(c.prefix) {
break
}
err := rm.Delete(it.Key())
if err != nil {
return errors.Trace(err)
}
err = it.Next()
if err != nil {
return errors.Trace(err)
}
}
return nil
}
// Seek searches KV index for the entry with indexedValues.
func (c *index) Seek(r kv.Retriever, indexedValues []types.Datum) (iter table.IndexIterator, hit bool, err error) {
key, _, err := c.GenIndexKey(indexedValues, 0)
if err != nil {
return nil, false, errors.Trace(err)
}
it, err := r.Seek(key)
if err != nil {
return nil, false, errors.Trace(err)
}
// check if hit
hit = false
if it.Valid() && it.Key().Cmp(key) == 0 {
hit = true
}
return &indexIter{it: it, idx: c, prefix: c.prefix}, hit, nil
}
// SeekFirst returns an iterator which points to the first entry of the KV index.
func (c *index) SeekFirst(r kv.Retriever) (iter table.IndexIterator, err error) {
it, err := r.Seek(c.prefix)
if err != nil {
return nil, errors.Trace(err)
}
return &indexIter{it: it, idx: c, prefix: c.prefix}, nil
}
func (c *index) Exist(rm kv.RetrieverMutator, indexedValues []types.Datum, h int64) (bool, int64, error) {
key, distinct, err := c.GenIndexKey(indexedValues, h)
if err != nil {
return false, 0, errors.Trace(err)
}
value, err := rm.Get(key)
if kv.IsErrNotFound(err) {
return false, 0, nil
}
if err != nil {
return false, 0, errors.Trace(err)
}
// For distinct index, the value of key is handle.
if distinct {
handle, err := decodeHandle(value)
if err != nil {
return false, 0, errors.Trace(err)
}
if handle != h {
return true, handle, errors.Trace(kv.ErrKeyExists)
}
return true, handle, nil
}
return true, h, nil
}
func (c *index) FetchValues(r []types.Datum) ([]types.Datum, error) {
vals := make([]types.Datum, len(c.idxInfo.Columns))
for i, ic := range c.idxInfo.Columns {
if ic.Offset < 0 || ic.Offset >= len(r) {
return nil, table.ErrIndexOutBound.Gen("Index column %s offset out of bound, offset: %d, row: %v",
ic.Name, ic.Offset, r)
}
vals[i] = r[ic.Offset]
}
return vals, nil
}
| table/tables/index.go | 0 | https://github.com/pingcap/tidb/commit/835b764db34b9d6469686397a6415845ff4bbd1e | [
0.0003918422444257885,
0.00018674027523957193,
0.00016482482897117734,
0.0001718662679195404,
0.00004984505721949972
] |
{
"id": 2,
"code_window": [
"\tsamples []types.Datum\n",
"\trc ast.RecordSet\n",
"\tpk ast.RecordSet\n",
"}\n",
"\n",
"type dataTable struct {\n",
"\tcount int64\n",
"\tsamples []types.Datum\n",
"}\n",
"\n",
"type recordSet struct {\n",
"\tdata []types.Datum\n",
"\tcount int64\n",
"\tcursor int64\n",
"\tfields []*ast.ResultField\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "statistics/statistics_test.go",
"type": "replace",
"edit_start_line_idx": 48
} | // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
package idna
// This file contains definitions for interpreting the trie value of the idna
// trie generated by "go run gen*.go". It is shared by both the generator
// program and the resultant package. Sharing is achieved by the generator
// copying gen_trieval.go to trieval.go and changing what's above this comment.
// info holds information from the IDNA mapping table for a single rune. It is
// the value returned by a trie lookup. In most cases, all information fits in
// a 16-bit value. For mappings, this value may contain an index into a slice
// with the mapped string. Such mappings can consist of the actual mapped value
// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the
// input rune. This technique is used by the cases packages and reduces the
// table size significantly.
//
// The per-rune values have the following format:
//
// if mapped {
// if inlinedXOR {
// 15..13 inline XOR marker
// 12..11 unused
// 10..3 inline XOR mask
// } else {
// 15..3 index into xor or mapping table
// }
// } else {
// 15..13 unused
// 12 modifier (including virama)
// 11 virama modifier
// 10..8 joining type
// 7..3 category type
// }
// 2 use xor pattern
// 1..0 mapped category
//
// See the definitions below for a more detailed description of the various
// bits.
type info uint16
const (
catSmallMask = 0x3
catBigMask = 0xF8
indexShift = 3
xorBit = 0x4 // interpret the index as an xor pattern
inlineXOR = 0xE000 // These bits are set if the XOR pattern is inlined.
joinShift = 8
joinMask = 0x07
viramaModifier = 0x0800
modifier = 0x1000
)
// A category corresponds to a category defined in the IDNA mapping table.
type category uint16
const (
unknown category = 0 // not defined currently in unicode.
mapped category = 1
disallowedSTD3Mapped category = 2
deviation category = 3
)
const (
valid category = 0x08
validNV8 category = 0x18
validXV8 category = 0x28
disallowed category = 0x40
disallowedSTD3Valid category = 0x80
ignored category = 0xC0
)
// join types and additional rune information
const (
joiningL = (iota + 1)
joiningD
joiningT
joiningR
//the following types are derived during processing
joinZWJ
joinZWNJ
joinVirama
numJoinTypes
)
func (c info) isMapped() bool {
return c&0x3 != 0
}
func (c info) category() category {
small := c & catSmallMask
if small != 0 {
return category(small)
}
return category(c & catBigMask)
}
func (c info) joinType() info {
if c.isMapped() {
return 0
}
return (c >> joinShift) & joinMask
}
func (c info) isModifier() bool {
return c&(modifier|catSmallMask) == modifier
}
func (c info) isViramaModifier() bool {
return c&(viramaModifier|catSmallMask) == viramaModifier
}
| _vendor/src/golang.org/x/net/idna/trieval.go | 0 | https://github.com/pingcap/tidb/commit/835b764db34b9d6469686397a6415845ff4bbd1e | [
0.00017570536874700338,
0.0001710641081444919,
0.00016559338837396353,
0.00017075706273317337,
0.00000277336312137777
] |
{
"id": 3,
"code_window": [
"\tc.Check(err, IsNil)\n",
"\tc.Check(int(count), Equals, 98998)\n",
"\tcount, err = col.lessRowCount(sc, types.NewIntDatum(99999))\n",
"\tc.Check(err, IsNil)\n",
"\tc.Check(int(count), Equals, 99999)\n",
"}\n",
"\n",
"func (s *testStatisticsSuite) TestHistogramProtoConversion(c *C) {\n",
"\tctx := mock.NewContext()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\n",
"\tdatum := types.Datum{}\n",
"\tdatum.SetMysqlJSON(json.BinaryJSON{TypeCode: json.TypeCodeLiteral})\n",
"\tcollector = &SampleCollector{\n",
"\t\tCount: 1,\n",
"\t\tNullCount: 0,\n",
"\t\tSamples: []types.Datum{datum},\n",
"\t\tFMSketch: sketch,\n",
"\t}\n",
"\tcol, err = BuildColumn(ctx, bucketCount, 2, collector)\n",
"\tc.Assert(err, IsNil)\n",
"\tc.Assert(len(col.Buckets), Equals, 1)\n",
"\tc.Assert(col.Buckets[0].LowerBound, DeepEquals, col.Buckets[0].UpperBound)\n"
],
"file_path": "statistics/statistics_test.go",
"type": "add",
"edit_start_line_idx": 330
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package statistics
import (
"bytes"
"math"
"testing"
"github.com/juju/errors"
. "github.com/pingcap/check"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/ranger"
goctx "golang.org/x/net/context"
)
func TestT(t *testing.T) {
TestingT(t)
}
var _ = Suite(&testStatisticsSuite{})
type testStatisticsSuite struct {
count int64
samples []types.Datum
rc ast.RecordSet
pk ast.RecordSet
}
type dataTable struct {
count int64
samples []types.Datum
}
type recordSet struct {
data []types.Datum
count int64
cursor int64
fields []*ast.ResultField
}
func (r *recordSet) Fields() []*ast.ResultField {
return r.fields
}
func (r *recordSet) setFields(tps ...uint8) {
r.fields = make([]*ast.ResultField, len(tps))
for i := 0; i < len(tps); i++ {
rf := new(ast.ResultField)
rf.Column = new(model.ColumnInfo)
rf.Column.FieldType = *types.NewFieldType(tps[i])
r.fields[i] = rf
}
}
func (r *recordSet) Next(goctx.Context) (types.Row, error) {
if r.cursor == r.count {
return nil, nil
}
r.cursor++
return types.DatumRow{r.data[r.cursor-1]}, nil
}
func (r *recordSet) NextChunk(goCtx goctx.Context, chk *chunk.Chunk) error {
return nil
}
func (r *recordSet) NewChunk() *chunk.Chunk {
return nil
}
func (r *recordSet) SupportChunk() bool {
return false
}
func (r *recordSet) Close() error {
r.cursor = 0
return nil
}
func (s *testStatisticsSuite) SetUpSuite(c *C) {
s.count = 100000
samples := make([]types.Datum, 10000)
start := 1000
samples[0].SetInt64(0)
for i := 1; i < start; i++ {
samples[i].SetInt64(2)
}
for i := start; i < len(samples); i++ {
samples[i].SetInt64(int64(i))
}
for i := start; i < len(samples); i += 3 {
samples[i].SetInt64(samples[i].GetInt64() + 1)
}
for i := start; i < len(samples); i += 5 {
samples[i].SetInt64(samples[i].GetInt64() + 2)
}
sc := new(stmtctx.StatementContext)
err := types.SortDatums(sc, samples)
c.Check(err, IsNil)
s.samples = samples
rc := &recordSet{
data: make([]types.Datum, s.count),
count: s.count,
cursor: 0,
}
rc.setFields(mysql.TypeLonglong)
rc.data[0].SetInt64(0)
for i := 1; i < start; i++ {
rc.data[i].SetInt64(2)
}
for i := int64(start); i < rc.count; i++ {
rc.data[i].SetInt64(int64(i))
}
for i := int64(start); i < rc.count; i += 3 {
rc.data[i].SetInt64(rc.data[i].GetInt64() + 1)
}
for i := int64(start); i < rc.count; i += 5 {
rc.data[i].SetInt64(rc.data[i].GetInt64() + 2)
}
err = types.SortDatums(sc, rc.data)
c.Check(err, IsNil)
s.rc = rc
pk := &recordSet{
data: make([]types.Datum, s.count),
count: s.count,
cursor: 0,
}
pk.setFields(mysql.TypeLonglong)
for i := int64(0); i < rc.count; i++ {
pk.data[i].SetInt64(int64(i))
}
s.pk = pk
}
func encodeKey(key types.Datum) types.Datum {
bytes, _ := codec.EncodeKey(nil, key)
return types.NewBytesDatum(bytes)
}
func buildPK(ctx context.Context, numBuckets, id int64, records ast.RecordSet) (int64, *Histogram, error) {
b := NewSortedBuilder(ctx.GetSessionVars().StmtCtx, numBuckets, id)
goCtx := goctx.Background()
for {
row, err := records.Next(goCtx)
if err != nil {
return 0, nil, errors.Trace(err)
}
if row == nil {
break
}
datums := ast.RowToDatums(row, records.Fields())
err = b.Iterate(datums[0])
if err != nil {
return 0, nil, errors.Trace(err)
}
}
return b.Count, b.hist, nil
}
func buildIndex(ctx context.Context, numBuckets, id int64, records ast.RecordSet) (int64, *Histogram, *CMSketch, error) {
b := NewSortedBuilder(ctx.GetSessionVars().StmtCtx, numBuckets, id)
cms := NewCMSketch(8, 2048)
goCtx := goctx.Background()
for {
row, err := records.Next(goCtx)
if err != nil {
return 0, nil, nil, errors.Trace(err)
}
if row == nil {
break
}
datums := ast.RowToDatums(row, records.Fields())
bytes, err := codec.EncodeKey(nil, datums...)
if err != nil {
return 0, nil, nil, errors.Trace(err)
}
data := types.NewBytesDatum(bytes)
err = b.Iterate(data)
if err != nil {
return 0, nil, nil, errors.Trace(err)
}
cms.InsertBytes(bytes)
}
return b.Count, b.Hist(), cms, nil
}
func calculateScalar(hist *Histogram) {
for i, bkt := range hist.Buckets {
bkt.lowerScalar, bkt.upperScalar, bkt.commonPfxLen = preCalculateDatumScalar(&bkt.LowerBound, &bkt.UpperBound)
hist.Buckets[i] = bkt
}
}
func checkRepeats(c *C, hg *Histogram) {
for _, bkt := range hg.Buckets {
c.Assert(bkt.Repeats, Greater, int64(0))
}
}
func (s *testStatisticsSuite) TestBuild(c *C) {
bucketCount := int64(256)
sketch, _, _ := buildFMSketch(s.rc.(*recordSet).data, 1000)
ctx := mock.NewContext()
sc := ctx.GetSessionVars().StmtCtx
collector := &SampleCollector{
Count: s.count,
NullCount: 0,
Samples: s.samples,
FMSketch: sketch,
}
col, err := BuildColumn(ctx, bucketCount, 2, collector)
checkRepeats(c, col)
calculateScalar(col)
c.Check(err, IsNil)
c.Check(len(col.Buckets), Equals, 232)
count, err := col.equalRowCount(sc, types.NewIntDatum(1000))
c.Check(err, IsNil)
c.Check(int(count), Equals, 0)
count, err = col.lessRowCount(sc, types.NewIntDatum(1000))
c.Check(err, IsNil)
c.Check(int(count), Equals, 10000)
count, err = col.lessRowCount(sc, types.NewIntDatum(2000))
c.Check(err, IsNil)
c.Check(int(count), Equals, 19995)
count, err = col.greaterRowCount(sc, types.NewIntDatum(2000))
c.Check(err, IsNil)
c.Check(int(count), Equals, 80003)
count, err = col.lessRowCount(sc, types.NewIntDatum(200000000))
c.Check(err, IsNil)
c.Check(int(count), Equals, 100000)
count, err = col.greaterRowCount(sc, types.NewIntDatum(200000000))
c.Check(err, IsNil)
c.Check(count, Equals, 0.0)
count, err = col.equalRowCount(sc, types.NewIntDatum(200000000))
c.Check(err, IsNil)
c.Check(count, Equals, 0.0)
count, err = col.betweenRowCount(sc, types.NewIntDatum(3000), types.NewIntDatum(3500))
c.Check(err, IsNil)
c.Check(int(count), Equals, 5008)
count, err = col.lessRowCount(sc, types.NewIntDatum(1))
c.Check(err, IsNil)
c.Check(int(count), Equals, 9)
builder := SampleBuilder{
Sc: mock.NewContext().GetSessionVars().StmtCtx,
RecordSet: s.pk,
ColLen: 1,
PkID: -1,
MaxSampleSize: 1000,
MaxFMSketchSize: 1000,
}
s.pk.Close()
collectors, _, err := builder.CollectColumnStats()
c.Assert(err, IsNil)
c.Assert(len(collectors), Equals, 1)
col, err = BuildColumn(mock.NewContext(), 256, 2, collectors[0])
c.Assert(err, IsNil)
checkRepeats(c, col)
tblCount, col, _, err := buildIndex(ctx, bucketCount, 1, ast.RecordSet(s.rc))
checkRepeats(c, col)
calculateScalar(col)
c.Check(err, IsNil)
c.Check(int(tblCount), Equals, 100000)
count, err = col.equalRowCount(sc, encodeKey(types.NewIntDatum(10000)))
c.Check(err, IsNil)
c.Check(int(count), Equals, 1)
count, err = col.lessRowCount(sc, encodeKey(types.NewIntDatum(20000)))
c.Check(err, IsNil)
c.Check(int(count), Equals, 19999)
count, err = col.betweenRowCount(sc, encodeKey(types.NewIntDatum(30000)), encodeKey(types.NewIntDatum(35000)))
c.Check(err, IsNil)
c.Check(int(count), Equals, 4999)
count, err = col.lessRowCount(sc, encodeKey(types.NewIntDatum(0)))
c.Check(err, IsNil)
c.Check(int(count), Equals, 0)
s.pk.(*recordSet).cursor = 0
tblCount, col, err = buildPK(ctx, bucketCount, 4, ast.RecordSet(s.pk))
checkRepeats(c, col)
calculateScalar(col)
c.Check(err, IsNil)
c.Check(int(tblCount), Equals, 100000)
count, err = col.equalRowCount(sc, types.NewIntDatum(10000))
c.Check(err, IsNil)
c.Check(int(count), Equals, 1)
count, err = col.lessRowCount(sc, types.NewIntDatum(20000))
c.Check(err, IsNil)
c.Check(int(count), Equals, 20000)
count, err = col.betweenRowCount(sc, types.NewIntDatum(30000), types.NewIntDatum(35000))
c.Check(err, IsNil)
c.Check(int(count), Equals, 5000)
count, err = col.greaterAndEqRowCount(sc, types.NewIntDatum(1001))
c.Check(err, IsNil)
c.Check(int(count), Equals, 98999)
count, err = col.lessAndEqRowCount(sc, types.NewIntDatum(99999))
c.Check(err, IsNil)
c.Check(int(count), Equals, 100000)
count, err = col.lessAndEqRowCount(sc, types.Datum{})
c.Check(err, IsNil)
c.Check(int(count), Equals, 0)
count, err = col.greaterRowCount(sc, types.NewIntDatum(1001))
c.Check(err, IsNil)
c.Check(int(count), Equals, 98998)
count, err = col.lessRowCount(sc, types.NewIntDatum(99999))
c.Check(err, IsNil)
c.Check(int(count), Equals, 99999)
}
func (s *testStatisticsSuite) TestHistogramProtoConversion(c *C) {
ctx := mock.NewContext()
s.rc.Close()
tblCount, col, _, err := buildIndex(ctx, 256, 1, ast.RecordSet(s.rc))
c.Check(err, IsNil)
c.Check(int(tblCount), Equals, 100000)
p := HistogramToProto(col)
h := HistogramFromProto(p)
c.Assert(col.NDV, Equals, h.NDV)
c.Assert(len(col.Buckets), Equals, len(h.Buckets))
for i, bkt := range col.Buckets {
c.Assert(bkt.Count, Equals, h.Buckets[i].Count)
c.Assert(bkt.Repeats, Equals, h.Buckets[i].Repeats)
c.Assert(bytes.Equal(bkt.LowerBound.GetBytes(), h.Buckets[i].LowerBound.GetBytes()), IsTrue)
c.Assert(bytes.Equal(bkt.UpperBound.GetBytes(), h.Buckets[i].UpperBound.GetBytes()), IsTrue)
}
}
func mockHistogram(lower, num int64) *Histogram {
h := &Histogram{
NDV: num,
}
for i := int64(0); i < num; i++ {
bkt := Bucket{
LowerBound: types.NewIntDatum(lower + i),
UpperBound: types.NewIntDatum(lower + i),
Count: i + 1,
Repeats: 1,
}
h.Buckets = append(h.Buckets, bkt)
}
return h
}
func (s *testStatisticsSuite) TestMergeHistogram(c *C) {
tests := []struct {
leftLower int64
leftNum int64
rightLower int64
rightNum int64
bucketNum int
ndv int64
}{
{
leftLower: 0,
leftNum: 0,
rightLower: 0,
rightNum: 1,
bucketNum: 1,
ndv: 1,
},
{
leftLower: 0,
leftNum: 200,
rightLower: 200,
rightNum: 200,
bucketNum: 200,
ndv: 400,
},
{
leftLower: 0,
leftNum: 200,
rightLower: 199,
rightNum: 200,
bucketNum: 200,
ndv: 399,
},
}
sc := mock.NewContext().GetSessionVars().StmtCtx
bucketCount := 256
for _, t := range tests {
lh := mockHistogram(t.leftLower, t.leftNum)
rh := mockHistogram(t.rightLower, t.rightNum)
h, err := MergeHistograms(sc, lh, rh, bucketCount)
c.Assert(err, IsNil)
c.Assert(h.NDV, Equals, t.ndv)
c.Assert(len(h.Buckets), Equals, t.bucketNum)
c.Assert(h.Buckets[len(h.Buckets)-1].Count, Equals, t.leftNum+t.rightNum)
expectLower := types.NewIntDatum(t.leftLower)
cmp, err := h.Buckets[0].LowerBound.CompareDatum(sc, &expectLower)
c.Assert(err, IsNil)
c.Assert(cmp, Equals, 0)
expectUpper := types.NewIntDatum(t.rightLower + t.rightNum - 1)
cmp, err = h.Buckets[len(h.Buckets)-1].UpperBound.CompareDatum(sc, &expectUpper)
c.Assert(err, IsNil)
c.Assert(cmp, Equals, 0)
}
}
func (s *testStatisticsSuite) TestPseudoTable(c *C) {
ti := &model.TableInfo{}
colInfo := &model.ColumnInfo{
ID: 1,
FieldType: *types.NewFieldType(mysql.TypeLonglong),
}
ti.Columns = append(ti.Columns, colInfo)
tbl := PseudoTable(ti.ID)
c.Assert(tbl.Count, Greater, int64(0))
sc := new(stmtctx.StatementContext)
count, err := tbl.ColumnLessRowCount(sc, types.NewIntDatum(100), colInfo.ID)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 3333)
count, err = tbl.ColumnEqualRowCount(sc, types.NewIntDatum(1000), colInfo.ID)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 10)
count, err = tbl.ColumnBetweenRowCount(sc, types.NewIntDatum(1000), types.NewIntDatum(5000), colInfo.ID)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 250)
}
func buildCMSketch(values []types.Datum) *CMSketch {
cms := NewCMSketch(8, 2048)
for _, val := range values {
cms.insert(&val)
}
return cms
}
func (s *testStatisticsSuite) TestColumnRange(c *C) {
bucketCount := int64(256)
sketch, _, _ := buildFMSketch(s.rc.(*recordSet).data, 1000)
ctx := mock.NewContext()
sc := ctx.GetSessionVars().StmtCtx
collector := &SampleCollector{
Count: s.count,
NullCount: 0,
Samples: s.samples,
FMSketch: sketch,
}
hg, err := BuildColumn(ctx, bucketCount, 2, collector)
calculateScalar(hg)
c.Check(err, IsNil)
col := &Column{Histogram: *hg, CMSketch: buildCMSketch(s.rc.(*recordSet).data)}
tbl := &Table{
Count: int64(col.totalRowCount()),
Columns: make(map[int64]*Column),
}
ran := []*ranger.NewRange{{
LowVal: []types.Datum{{}},
HighVal: []types.Datum{types.MaxValueDatum()},
}}
count, err := tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 100000)
ran[0].LowVal[0] = types.MinNotNullDatum()
count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 99900)
ran[0].LowVal[0] = types.NewIntDatum(1000)
ran[0].LowExclude = true
ran[0].HighVal[0] = types.NewIntDatum(2000)
ran[0].HighExclude = true
count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 2500)
ran[0].LowExclude = false
ran[0].HighExclude = false
count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 2500)
ran[0].LowVal[0] = ran[0].HighVal[0]
count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 100)
tbl.Columns[0] = col
ran[0].LowVal[0] = types.Datum{}
ran[0].HighVal[0] = types.MaxValueDatum()
count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 100000)
ran[0].LowVal[0] = types.NewIntDatum(1000)
ran[0].LowExclude = true
ran[0].HighVal[0] = types.NewIntDatum(2000)
ran[0].HighExclude = true
count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 9994)
ran[0].LowExclude = false
ran[0].HighExclude = false
count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 9996)
ran[0].LowVal[0] = ran[0].HighVal[0]
count, err = tbl.GetRowCountByColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 1)
}
func (s *testStatisticsSuite) TestIntColumnRanges(c *C) {
bucketCount := int64(256)
ctx := mock.NewContext()
sc := ctx.GetSessionVars().StmtCtx
s.pk.(*recordSet).cursor = 0
rowCount, hg, err := buildPK(ctx, bucketCount, 0, s.pk)
calculateScalar(hg)
c.Check(err, IsNil)
c.Check(rowCount, Equals, int64(100000))
col := &Column{Histogram: *hg}
tbl := &Table{
Count: int64(col.totalRowCount()),
Columns: make(map[int64]*Column),
}
ran := []ranger.IntColumnRange{{
LowVal: math.MinInt64,
HighVal: math.MaxInt64,
}}
count, err := tbl.GetRowCountByIntColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 100000)
ran[0].LowVal = 1000
ran[0].HighVal = 2000
count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 1000)
ran[0].LowVal = 1001
ran[0].HighVal = 1999
count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 998)
ran[0].LowVal = 1000
ran[0].HighVal = 1000
count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 100)
tbl.Columns[0] = col
ran[0].LowVal = math.MinInt64
ran[0].HighVal = math.MaxInt64
count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 100000)
ran[0].LowVal = 1000
ran[0].HighVal = 2000
count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 1000)
ran[0].LowVal = 1001
ran[0].HighVal = 1999
count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 998)
ran[0].LowVal = 1000
ran[0].HighVal = 1000
count, err = tbl.GetRowCountByIntColumnRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 1)
}
func (s *testStatisticsSuite) TestIndexRanges(c *C) {
bucketCount := int64(256)
ctx := mock.NewContext()
sc := ctx.GetSessionVars().StmtCtx
s.rc.(*recordSet).cursor = 0
rowCount, hg, cms, err := buildIndex(ctx, bucketCount, 0, s.rc)
calculateScalar(hg)
c.Check(err, IsNil)
c.Check(rowCount, Equals, int64(100000))
idxInfo := &model.IndexInfo{Columns: []*model.IndexColumn{{Offset: 0}}}
idx := &Index{Histogram: *hg, CMSketch: cms, Info: idxInfo}
tbl := &Table{
Count: int64(idx.totalRowCount()),
Indices: make(map[int64]*Index),
}
ran := []*ranger.NewRange{{
LowVal: []types.Datum{types.MinNotNullDatum()},
HighVal: []types.Datum{types.MaxValueDatum()},
}}
count, err := tbl.GetRowCountByIndexRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 99900)
ran[0].LowVal[0] = types.NewIntDatum(1000)
ran[0].HighVal[0] = types.NewIntDatum(2000)
count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 2500)
ran[0].LowVal[0] = types.NewIntDatum(1001)
ran[0].HighVal[0] = types.NewIntDatum(1999)
count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 2500)
ran[0].LowVal[0] = types.NewIntDatum(1000)
ran[0].HighVal[0] = types.NewIntDatum(1000)
count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 100)
tbl.Indices[0] = idx
ran[0].LowVal[0] = types.MinNotNullDatum()
ran[0].HighVal[0] = types.MaxValueDatum()
count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 100000)
ran[0].LowVal[0] = types.NewIntDatum(1000)
ran[0].HighVal[0] = types.NewIntDatum(2000)
count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 1000)
ran[0].LowVal[0] = types.NewIntDatum(1001)
ran[0].HighVal[0] = types.NewIntDatum(1990)
count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 989)
ran[0].LowVal[0] = types.NewIntDatum(1000)
ran[0].HighVal[0] = types.NewIntDatum(1000)
count, err = tbl.GetRowCountByIndexRanges(sc, 0, ran)
c.Assert(err, IsNil)
c.Assert(int(count), Equals, 0)
}
| statistics/statistics_test.go | 1 | https://github.com/pingcap/tidb/commit/835b764db34b9d6469686397a6415845ff4bbd1e | [
0.9955436587333679,
0.06795785576105118,
0.00016090550343506038,
0.002312174066901207,
0.23749224841594696
] |
{
"id": 3,
"code_window": [
"\tc.Check(err, IsNil)\n",
"\tc.Check(int(count), Equals, 98998)\n",
"\tcount, err = col.lessRowCount(sc, types.NewIntDatum(99999))\n",
"\tc.Check(err, IsNil)\n",
"\tc.Check(int(count), Equals, 99999)\n",
"}\n",
"\n",
"func (s *testStatisticsSuite) TestHistogramProtoConversion(c *C) {\n",
"\tctx := mock.NewContext()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\n",
"\tdatum := types.Datum{}\n",
"\tdatum.SetMysqlJSON(json.BinaryJSON{TypeCode: json.TypeCodeLiteral})\n",
"\tcollector = &SampleCollector{\n",
"\t\tCount: 1,\n",
"\t\tNullCount: 0,\n",
"\t\tSamples: []types.Datum{datum},\n",
"\t\tFMSketch: sketch,\n",
"\t}\n",
"\tcol, err = BuildColumn(ctx, bucketCount, 2, collector)\n",
"\tc.Assert(err, IsNil)\n",
"\tc.Assert(len(col.Buckets), Equals, 1)\n",
"\tc.Assert(col.Buckets[0].LowerBound, DeepEquals, col.Buckets[0].UpperBound)\n"
],
"file_path": "statistics/statistics_test.go",
"type": "add",
"edit_start_line_idx": 330
} | // mksysnum_freebsd.pl
// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
// +build arm,freebsd
package unix
const (
// SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int
SYS_EXIT = 1 // { void sys_exit(int rval); } exit \
SYS_FORK = 2 // { int fork(void); }
SYS_READ = 3 // { ssize_t read(int fd, void *buf, \
SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, \
SYS_OPEN = 5 // { int open(char *path, int flags, int mode); }
SYS_CLOSE = 6 // { int close(int fd); }
SYS_WAIT4 = 7 // { int wait4(int pid, int *status, \
SYS_LINK = 9 // { int link(char *path, char *link); }
SYS_UNLINK = 10 // { int unlink(char *path); }
SYS_CHDIR = 12 // { int chdir(char *path); }
SYS_FCHDIR = 13 // { int fchdir(int fd); }
SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); }
SYS_CHMOD = 15 // { int chmod(char *path, int mode); }
SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); }
SYS_OBREAK = 17 // { int obreak(char *nsize); } break \
SYS_GETPID = 20 // { pid_t getpid(void); }
SYS_MOUNT = 21 // { int mount(char *type, char *path, \
SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); }
SYS_SETUID = 23 // { int setuid(uid_t uid); }
SYS_GETUID = 24 // { uid_t getuid(void); }
SYS_GETEUID = 25 // { uid_t geteuid(void); }
SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, \
SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, \
SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, \
SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, \
SYS_ACCEPT = 30 // { int accept(int s, \
SYS_GETPEERNAME = 31 // { int getpeername(int fdes, \
SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, \
SYS_ACCESS = 33 // { int access(char *path, int amode); }
SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); }
SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); }
SYS_SYNC = 36 // { int sync(void); }
SYS_KILL = 37 // { int kill(int pid, int signum); }
SYS_GETPPID = 39 // { pid_t getppid(void); }
SYS_DUP = 41 // { int dup(u_int fd); }
SYS_PIPE = 42 // { int pipe(void); }
SYS_GETEGID = 43 // { gid_t getegid(void); }
SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \
SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, \
SYS_GETGID = 47 // { gid_t getgid(void); }
SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int \
SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); }
SYS_ACCT = 51 // { int acct(char *path); }
SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, \
SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, \
SYS_REBOOT = 55 // { int reboot(int opt); }
SYS_REVOKE = 56 // { int revoke(char *path); }
SYS_SYMLINK = 57 // { int symlink(char *path, char *link); }
SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, \
SYS_EXECVE = 59 // { int execve(char *fname, char **argv, \
SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args \
SYS_CHROOT = 61 // { int chroot(char *path); }
SYS_MSYNC = 65 // { int msync(void *addr, size_t len, \
SYS_VFORK = 66 // { int vfork(void); }
SYS_SBRK = 69 // { int sbrk(int incr); }
SYS_SSTK = 70 // { int sstk(int incr); }
SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise \
SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); }
SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, \
SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, \
SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \
SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, \
SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, \
SYS_GETPGRP = 81 // { int getpgrp(void); }
SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); }
SYS_SETITIMER = 83 // { int setitimer(u_int which, struct \
SYS_SWAPON = 85 // { int swapon(char *name); }
SYS_GETITIMER = 86 // { int getitimer(u_int which, \
SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); }
SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); }
SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); }
SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \
SYS_FSYNC = 95 // { int fsync(int fd); }
SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, \
SYS_SOCKET = 97 // { int socket(int domain, int type, \
SYS_CONNECT = 98 // { int connect(int s, caddr_t name, \
SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); }
SYS_BIND = 104 // { int bind(int s, caddr_t name, \
SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \
SYS_LISTEN = 106 // { int listen(int s, int backlog); }
SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \
SYS_GETRUSAGE = 117 // { int getrusage(int who, \
SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \
SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, \
SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \
SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \
SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); }
SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); }
SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); }
SYS_SETREGID = 127 // { int setregid(int rgid, int egid); }
SYS_RENAME = 128 // { int rename(char *from, char *to); }
SYS_FLOCK = 131 // { int flock(int fd, int how); }
SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); }
SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \
SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); }
SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, \
SYS_MKDIR = 136 // { int mkdir(char *path, int mode); }
SYS_RMDIR = 137 // { int rmdir(char *path); }
SYS_UTIMES = 138 // { int utimes(char *path, \
SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \
SYS_SETSID = 147 // { int setsid(void); }
SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \
SYS_LGETFH = 160 // { int lgetfh(char *fname, \
SYS_GETFH = 161 // { int getfh(char *fname, \
SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); }
SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \
SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, \
SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, \
SYS_SETFIB = 175 // { int setfib(int fibnum); }
SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); }
SYS_SETGID = 181 // { int setgid(gid_t gid); }
SYS_SETEGID = 182 // { int setegid(gid_t egid); }
SYS_SETEUID = 183 // { int seteuid(uid_t euid); }
SYS_STAT = 188 // { int stat(char *path, struct stat *ub); }
SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); }
SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); }
SYS_PATHCONF = 191 // { int pathconf(char *path, int name); }
SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); }
SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \
SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \
SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, \
SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, \
SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, \
SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, \
SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, \
SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, \
SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); }
SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); }
SYS_UNDELETE = 205 // { int undelete(char *path); }
SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); }
SYS_GETPGID = 207 // { int getpgid(pid_t pid); }
SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \
SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \
SYS_CLOCK_SETTIME = 233 // { int clock_settime( \
SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \
SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, \
SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); }
SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, \
SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct \
SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); }
SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \
SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); }
SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( \
SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( \
SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,\
SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); }
SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, \
SYS_RFORK = 251 // { int rfork(int flags); }
SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, \
SYS_ISSETUGID = 253 // { int issetugid(void); }
SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); }
SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, \
SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); }
SYS_LUTIMES = 276 // { int lutimes(char *path, \
SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); }
SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); }
SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); }
SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, \
SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, \
SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, \
SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, \
SYS_MODNEXT = 300 // { int modnext(int modid); }
SYS_MODSTAT = 301 // { int modstat(int modid, \
SYS_MODFNEXT = 302 // { int modfnext(int modid); }
SYS_MODFIND = 303 // { int modfind(const char *name); }
SYS_KLDLOAD = 304 // { int kldload(const char *file); }
SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); }
SYS_KLDFIND = 306 // { int kldfind(const char *file); }
SYS_KLDNEXT = 307 // { int kldnext(int fileid); }
SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct \
SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); }
SYS_GETSID = 310 // { int getsid(pid_t pid); }
SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, \
SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, \
SYS_YIELD = 321 // { int yield(void); }
SYS_MLOCKALL = 324 // { int mlockall(int how); }
SYS_MUNLOCKALL = 325 // { int munlockall(void); }
SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); }
SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, \
SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct \
SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int \
SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); }
SYS_SCHED_YIELD = 331 // { int sched_yield (void); }
SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); }
SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); }
SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, \
SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); }
SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, \
SYS_JAIL = 338 // { int jail(struct jail *jail); }
SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, \
SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); }
SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); }
SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, \
SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, \
SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \
SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \
SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, \
SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, \
SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \
SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, \
SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \
SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, \
SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \
SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( \
SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( \
SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \
SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, \
SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, \
SYS_KQUEUE = 362 // { int kqueue(void); }
SYS_KEVENT = 363 // { int kevent(int fd, \
SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, \
SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, \
SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, \
SYS___SETUGID = 374 // { int __setugid(int flag); }
SYS_EACCESS = 376 // { int eaccess(char *path, int amode); }
SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, \
SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); }
SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); }
SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, \
SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, \
SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, \
SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, \
SYS_KENV = 390 // { int kenv(int what, const char *name, \
SYS_LCHFLAGS = 391 // { int lchflags(const char *path, \
SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, \
SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, \
SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, \
SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, \
SYS_STATFS = 396 // { int statfs(char *path, \
SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); }
SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, \
SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, \
SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, \
SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, \
SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( \
SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( \
SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( \
SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, \
SYS_SIGACTION = 416 // { int sigaction(int sig, \
SYS_SIGRETURN = 417 // { int sigreturn( \
SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); }
SYS_SETCONTEXT = 422 // { int setcontext( \
SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, \
SYS_SWAPOFF = 424 // { int swapoff(const char *name); }
SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, \
SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, \
SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, \
SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, \
SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, \
SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, \
SYS_THR_EXIT = 431 // { void thr_exit(long *state); }
SYS_THR_SELF = 432 // { int thr_self(long *id); }
SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); }
SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); }
SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); }
SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); }
SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, \
SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( \
SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( \
SYS_THR_SUSPEND = 442 // { int thr_suspend( \
SYS_THR_WAKE = 443 // { int thr_wake(long id); }
SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); }
SYS_AUDIT = 445 // { int audit(const void *record, \
SYS_AUDITON = 446 // { int auditon(int cmd, void *data, \
SYS_GETAUID = 447 // { int getauid(uid_t *auid); }
SYS_SETAUID = 448 // { int setauid(uid_t *auid); }
SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); }
SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); }
SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( \
SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( \
SYS_AUDITCTL = 453 // { int auditctl(char *path); }
SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, \
SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, \
SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); }
SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); }
SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); }
SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, \
SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); }
SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, \
SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, \
SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, \
SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, \
SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, \
SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, \
SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, \
SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); }
SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); }
SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); }
SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, \
SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); }
SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); }
SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, \
SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, \
SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, \
SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, \
SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, \
SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, \
SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, \
SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, \
SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, \
SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, \
SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, \
SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); }
SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); }
SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, \
SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, \
SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, \
SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, \
SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, \
SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); }
SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); }
SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, \
SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, \
SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); }
SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); }
SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); }
SYS_CAP_NEW = 514 // { int cap_new(int fd, uint64_t rights); }
SYS_CAP_GETRIGHTS = 515 // { int cap_getrights(int fd, \
SYS_CAP_ENTER = 516 // { int cap_enter(void); }
SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); }
SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); }
SYS_PDKILL = 519 // { int pdkill(int fd, int signum); }
SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); }
SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, \
SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, \
SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); }
SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, \
SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, \
SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, \
SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, \
SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, \
SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, \
SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, \
SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, \
SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, \
SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, \
SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, \
SYS_ACCEPT4 = 541 // { int accept4(int s, \
SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); }
SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, \
SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, \
)
| _vendor/src/golang.org/x/sys/unix/zsysnum_freebsd_arm.go | 0 | https://github.com/pingcap/tidb/commit/835b764db34b9d6469686397a6415845ff4bbd1e | [
0.0005215994897298515,
0.00018592312699183822,
0.00016586820129305124,
0.00016948350821621716,
0.00005976685861242004
] |
{
"id": 3,
"code_window": [
"\tc.Check(err, IsNil)\n",
"\tc.Check(int(count), Equals, 98998)\n",
"\tcount, err = col.lessRowCount(sc, types.NewIntDatum(99999))\n",
"\tc.Check(err, IsNil)\n",
"\tc.Check(int(count), Equals, 99999)\n",
"}\n",
"\n",
"func (s *testStatisticsSuite) TestHistogramProtoConversion(c *C) {\n",
"\tctx := mock.NewContext()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\n",
"\tdatum := types.Datum{}\n",
"\tdatum.SetMysqlJSON(json.BinaryJSON{TypeCode: json.TypeCodeLiteral})\n",
"\tcollector = &SampleCollector{\n",
"\t\tCount: 1,\n",
"\t\tNullCount: 0,\n",
"\t\tSamples: []types.Datum{datum},\n",
"\t\tFMSketch: sketch,\n",
"\t}\n",
"\tcol, err = BuildColumn(ctx, bucketCount, 2, collector)\n",
"\tc.Assert(err, IsNil)\n",
"\tc.Assert(len(col.Buckets), Equals, 1)\n",
"\tc.Assert(col.Buckets[0].LowerBound, DeepEquals, col.Buckets[0].UpperBound)\n"
],
"file_path": "statistics/statistics_test.go",
"type": "add",
"edit_start_line_idx": 330
} | Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| _vendor/src/golang.org/x/text/LICENSE | 0 | https://github.com/pingcap/tidb/commit/835b764db34b9d6469686397a6415845ff4bbd1e | [
0.00017340255726594478,
0.000171152176335454,
0.00016781155136413872,
0.0001722424349281937,
0.0000024091943942039507
] |
{
"id": 3,
"code_window": [
"\tc.Check(err, IsNil)\n",
"\tc.Check(int(count), Equals, 98998)\n",
"\tcount, err = col.lessRowCount(sc, types.NewIntDatum(99999))\n",
"\tc.Check(err, IsNil)\n",
"\tc.Check(int(count), Equals, 99999)\n",
"}\n",
"\n",
"func (s *testStatisticsSuite) TestHistogramProtoConversion(c *C) {\n",
"\tctx := mock.NewContext()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\n",
"\tdatum := types.Datum{}\n",
"\tdatum.SetMysqlJSON(json.BinaryJSON{TypeCode: json.TypeCodeLiteral})\n",
"\tcollector = &SampleCollector{\n",
"\t\tCount: 1,\n",
"\t\tNullCount: 0,\n",
"\t\tSamples: []types.Datum{datum},\n",
"\t\tFMSketch: sketch,\n",
"\t}\n",
"\tcol, err = BuildColumn(ctx, bucketCount, 2, collector)\n",
"\tc.Assert(err, IsNil)\n",
"\tc.Assert(len(col.Buckets), Equals, 1)\n",
"\tc.Assert(col.Buckets[0].LowerBound, DeepEquals, col.Buckets[0].UpperBound)\n"
],
"file_path": "statistics/statistics_test.go",
"type": "add",
"edit_start_line_idx": 330
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl
import (
"os"
"testing"
"time"
"github.com/coreos/etcd/clientv3"
. "github.com/pingcap/check"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/mock"
goctx "golang.org/x/net/context"
)
func TestT(t *testing.T) {
CustomVerboseFlag = true
logLevel := os.Getenv("log_level")
logutil.InitLogger(&logutil.LogConfig{
Level: logLevel,
Format: "highlight",
})
TestingT(t)
}
func testCreateStore(c *C, name string) kv.Storage {
store, err := tikv.NewMockTikvStore()
c.Assert(err, IsNil)
return store
}
func testNewContext(d *ddl) context.Context {
ctx := mock.NewContext()
ctx.Store = d.store
return ctx
}
func testNewDDL(ctx goctx.Context, etcdCli *clientv3.Client, store kv.Storage,
infoHandle *infoschema.Handle, hook Callback, lease time.Duration) *ddl {
return newDDL(ctx, etcdCli, store, infoHandle, hook, lease, nil)
}
func getSchemaVer(c *C, ctx context.Context) int64 {
err := ctx.NewTxn()
c.Assert(err, IsNil)
m := meta.NewMeta(ctx.Txn())
ver, err := m.GetSchemaVersion()
c.Assert(err, IsNil)
return ver
}
type historyJobArgs struct {
ver int64
db *model.DBInfo
tbl *model.TableInfo
tblIDs map[int64]struct{}
}
func checkEqualTable(c *C, t1, t2 *model.TableInfo) {
c.Assert(t1.ID, Equals, t2.ID)
c.Assert(t1.Name, Equals, t2.Name)
c.Assert(t1.Charset, Equals, t2.Charset)
c.Assert(t1.Collate, Equals, t2.Collate)
c.Assert(t1.PKIsHandle, DeepEquals, t2.PKIsHandle)
c.Assert(t1.Comment, DeepEquals, t2.Comment)
c.Assert(t1.AutoIncID, DeepEquals, t2.AutoIncID)
}
func checkHistoryJob(c *C, job *model.Job) {
c.Assert(job.State, Equals, model.JobStateSynced)
}
func checkHistoryJobArgs(c *C, ctx context.Context, id int64, args *historyJobArgs) {
c.Assert(ctx.NewTxn(), IsNil)
t := meta.NewMeta(ctx.Txn())
historyJob, err := t.GetHistoryDDLJob(id)
c.Assert(err, IsNil)
if args.tbl != nil {
c.Assert(historyJob.BinlogInfo.SchemaVersion, Equals, args.ver)
checkEqualTable(c, historyJob.BinlogInfo.TableInfo, args.tbl)
return
}
// for handling schema job
c.Assert(historyJob.BinlogInfo.SchemaVersion, Equals, args.ver)
c.Assert(historyJob.BinlogInfo.DBInfo, DeepEquals, args.db)
// only for creating schema job
if args.db != nil && len(args.tblIDs) == 0 {
return
}
}
func testCreateIndex(c *C, ctx context.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, unique bool, indexName string, colName string) *model.Job {
job := &model.Job{
SchemaID: dbInfo.ID,
TableID: tblInfo.ID,
Type: model.ActionAddIndex,
BinlogInfo: &model.HistoryInfo{},
Args: []interface{}{unique, model.NewCIStr(indexName),
[]*ast.IndexColName{{
Column: &ast.ColumnName{Name: model.NewCIStr(colName)},
Length: types.UnspecifiedLength}}},
}
err := d.doDDLJob(ctx, job)
c.Assert(err, IsNil)
v := getSchemaVer(c, ctx)
checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo})
return job
}
func testDropIndex(c *C, ctx context.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, indexName string) *model.Job {
job := &model.Job{
SchemaID: dbInfo.ID,
TableID: tblInfo.ID,
Type: model.ActionDropIndex,
BinlogInfo: &model.HistoryInfo{},
Args: []interface{}{model.NewCIStr(indexName)},
}
err := d.doDDLJob(ctx, job)
c.Assert(err, IsNil)
v := getSchemaVer(c, ctx)
checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo})
return job
}
| ddl/ddl_test.go | 0 | https://github.com/pingcap/tidb/commit/835b764db34b9d6469686397a6415845ff4bbd1e | [
0.0007236324017867446,
0.00029402601649053395,
0.00016331576625816524,
0.00017937022494152188,
0.00018113115220330656
] |
{
"id": 0,
"code_window": [
"\tdefer leaktest.AfterTest(t)()\n",
"\tdefer log.Scope(t).Close(t)\n",
"\n",
"\tparams := base.TestClusterArgs{\n",
"\t\tServerArgs: base.TestServerArgs{\n",
"\t\t\tDisableSpanConfigs: true, // TODO(irfansharif): #75060.\n",
"\t\t\tKnobs: base.TestingKnobs{\n",
"\t\t\t\tSpanConfig: &spanconfig.TestingKnobs{\n",
"\t\t\t\t\t// We compare job progress before and after a restore. Disable\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/ccl/backupccl/full_cluster_backup_restore_test.go",
"type": "replace",
"edit_start_line_idx": 52
} | // Copyright 2020 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package backupccl
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
_ "github.com/cockroachdb/cockroach/pkg/ccl/partitionccl"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/spanconfig"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/bootstrap"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
)
// Large test to ensure that all of the system table data is being restored in
// the new cluster. Ensures that all the moving pieces are working together.
func TestFullClusterBackup(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
params := base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
DisableSpanConfigs: true, // TODO(irfansharif): #75060.
Knobs: base.TestingKnobs{
SpanConfig: &spanconfig.TestingKnobs{
// We compare job progress before and after a restore. Disable
// the automatic jobs checkpointing which could possibly mutate
// the progress data during the backup/restore process.
JobDisablePersistingCheckpoints: true,
},
GCJob: &sql.GCJobTestingKnobs{
DisableNewProtectedTimestampSubsystemCheck: true,
},
},
}}
const numAccounts = 10
tcBackup, sqlDB, tempDir, cleanupFn := backupRestoreTestSetupWithParams(t, singleNode, numAccounts, InitManualReplication, params)
tcRestore, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, params)
defer cleanupFn()
defer cleanupEmptyCluster()
backupKVDB := tcBackup.Server(0).DB()
// Closed when the restore is allowed to progress with the rest of the backup.
allowProgressAfterPreRestore := make(chan struct{})
// Closed to signal the the zones have been restored.
restoredZones := make(chan struct{})
for _, server := range tcRestore.Servers {
registry := server.JobRegistry().(*jobs.Registry)
registry.TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
jobspb.TypeRestore: func(raw jobs.Resumer) jobs.Resumer {
r := raw.(*restoreResumer)
r.testingKnobs.afterPreRestore = func() error {
close(restoredZones)
<-allowProgressAfterPreRestore
return nil
}
return r
},
}
}
// The claim_session_id field in jobs is a uuid and so needs to be excluded
// when comparing jobs pre/post restore.
const jobsQuery = `
SELECT id, status, created, payload, progress, created_by_type, created_by_id, claim_instance_id
FROM system.jobs
`
// Pause SQL Stats compaction job to ensure the test is deterministic.
sqlDB.Exec(t, `PAUSE SCHEDULES SELECT id FROM [SHOW SCHEDULES FOR SQL STATISTICS]`)
// Disable automatic stats collection on the backup and restoring clusters to ensure
// the test is deterministic.
sqlDB.Exec(t, `SET CLUSTER SETTING sql.stats.automatic_collection.enabled=false`)
sqlDBRestore.Exec(t, `SET CLUSTER SETTING sql.stats.automatic_collection.enabled=false`)
// Create some other descriptors as well.
sqlDB.Exec(t, `
USE data;
CREATE SCHEMA test_data_schema;
CREATE TABLE data.test_data_schema.test_table (a int);
INSERT INTO data.test_data_schema.test_table VALUES (1), (2);
USE defaultdb;
CREATE SCHEMA test_schema;
CREATE TABLE defaultdb.test_schema.test_table (a int);
INSERT INTO defaultdb.test_schema.test_table VALUES (1), (2);
CREATE TABLE defaultdb.foo (a int);
CREATE TYPE greeting AS ENUM ('hi');
CREATE TABLE welcomes (a greeting);
CREATE DATABASE data2;
USE data2;
CREATE SCHEMA empty_schema;
CREATE TABLE data2.foo (a int);
`)
tableDesc := desctestutils.TestingGetPublicTableDescriptor(backupKVDB, keys.SystemSQLCodec, "data2", "foo")
// Store the highest user-table ID for later assertions.
maxBackupTableID := tableDesc.GetID()
// Setup the system systemTablesToVerify to ensure that they are copied to the new cluster.
// Populate system.users.
numUsers := 1000
if util.RaceEnabled {
numUsers = 10
}
for i := 0; i < numUsers; i++ {
sqlDB.Exec(t, fmt.Sprintf("CREATE USER maxroach%d", i))
sqlDB.Exec(t, fmt.Sprintf("ALTER USER maxroach%d CREATEDB", i))
}
// Populate system.zones.
sqlDB.Exec(t, `ALTER TABLE data.bank CONFIGURE ZONE USING gc.ttlseconds = 3600`)
sqlDB.Exec(t, `ALTER TABLE defaultdb.foo CONFIGURE ZONE USING gc.ttlseconds = 45`)
sqlDB.Exec(t, `ALTER DATABASE data2 CONFIGURE ZONE USING gc.ttlseconds = 900`)
// Populate system.jobs.
// Note: this is not the backup under test, this just serves as a job which
// should appear in the restore.
// This job will eventually fail since it will run from a new cluster.
sqlDB.Exec(t, `BACKUP data.bank TO 'nodelocal://0/throwawayjob'`)
preBackupJobs := sqlDB.QueryStr(t, jobsQuery)
// Populate system.settings.
sqlDB.Exec(t, `SET CLUSTER SETTING kv.bulk_io_write.concurrent_addsstable_requests = 5`)
sqlDB.Exec(t, `INSERT INTO system.ui (key, value, "lastUpdated") VALUES ($1, $2, now())`, "some_key", "some_val")
// Populate system.comments.
sqlDB.Exec(t, `COMMENT ON TABLE data.bank IS 'table comment string'`)
sqlDB.Exec(t, `COMMENT ON DATABASE data IS 'database comment string'`)
sqlDB.Exec(t,
`INSERT INTO system.locations ("localityKey", "localityValue", latitude, longitude) VALUES ($1, $2, $3, $4)`,
"city", "New York City", 40.71427, -74.00597,
)
// Populate system.role_members.
sqlDB.Exec(t, `CREATE ROLE system_ops;`)
sqlDB.Exec(t, `GRANT system_ops TO maxroach1;`)
// Populate system.scheduled_jobs table with a first run in the future to prevent immediate adoption.
firstRun := timeutil.Now().Add(time.Hour).Format(timeutil.TimestampWithoutTZFormat)
sqlDB.Exec(t, `CREATE SCHEDULE FOR BACKUP data.bank INTO $1 RECURRING '@hourly' FULL BACKUP ALWAYS WITH SCHEDULE OPTIONS first_run = $2`, localFoo, firstRun)
sqlDB.Exec(t, `PAUSE SCHEDULES SELECT id FROM [SHOW SCHEDULES FOR BACKUP]`)
injectStats(t, sqlDB, "data.bank", "id")
sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
// Create a bunch of user tables on the restoring cluster that we're going
// to delete.
numTables := 50
if util.RaceEnabled {
numTables = 2
}
for i := 0; i < numTables; i++ {
sqlDBRestore.Exec(t, `CREATE DATABASE db_to_drop`)
sqlDBRestore.Exec(t, `CREATE TABLE db_to_drop.table_to_drop (a int)`)
sqlDBRestore.Exec(t, `ALTER TABLE db_to_drop.table_to_drop CONFIGURE ZONE USING gc.ttlseconds=1`)
sqlDBRestore.Exec(t, `DROP DATABASE db_to_drop`)
}
// Wait for the GC job to finish to ensure the descriptors no longer exist.
sqlDBRestore.CheckQueryResultsRetry(
t, "SELECT count(*) FROM [SHOW JOBS] WHERE job_type = 'SCHEMA CHANGE GC' AND status = 'running'",
[][]string{{"0"}},
)
doneRestore := make(chan struct{})
go func() {
sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
close(doneRestore)
}()
// Check that zones are restored during pre-restore.
t.Run("ensure zones are restored during pre-restore", func(t *testing.T) {
<-restoredZones
// Not specifying the schema makes the query search using defaultdb first.
// which ends up returning the error
// pq: database "defaultdb" is offline: restoring
checkZones := "SELECT * FROM system.public.zones"
sqlDBRestore.CheckQueryResults(t, checkZones, sqlDB.QueryStr(t, checkZones))
// Check that the user tables are still offline.
sqlDBRestore.ExpectErr(t, "database \"data\" is offline: restoring", "SELECT * FROM data.public.bank")
// Check there is no data in the span that we expect user data to be imported.
store := tcRestore.GetFirstStoreFromServer(t, 0)
startKey := keys.SystemSQLCodec.TablePrefix(bootstrap.TestingUserDescID(0))
endKey := keys.SystemSQLCodec.TablePrefix(uint32(maxBackupTableID)).PrefixEnd()
it := store.Engine().NewMVCCIterator(storage.MVCCKeyAndIntentsIterKind, storage.IterOptions{
UpperBound: endKey,
})
defer it.Close()
it.SeekGE(storage.MVCCKey{Key: startKey})
hasKey, err := it.Valid()
require.NoError(t, err)
require.False(t, hasKey)
})
// Allow the restore to make progress after we've checked the pre-restore
// stage.
close(allowProgressAfterPreRestore)
// Wait for the restore to finish before checking that it did the right thing.
<-doneRestore
t.Run("ensure all databases restored", func(t *testing.T) {
sqlDBRestore.CheckQueryResults(t,
`SELECT database_name, owner FROM [SHOW DATABASES]`,
[][]string{
{"data", security.RootUser},
{"data2", security.RootUser},
{"defaultdb", security.RootUser},
{"postgres", security.RootUser},
{"system", security.NodeUser},
})
})
t.Run("ensure all schemas are restored", func(t *testing.T) {
expectedSchemas := map[string][][]string{
"defaultdb": {{"crdb_internal"}, {"information_schema"}, {"pg_catalog"}, {"pg_extension"}, {"public"}, {"test_schema"}},
"data": {{"crdb_internal"}, {"information_schema"}, {"pg_catalog"}, {"pg_extension"}, {"public"}, {"test_data_schema"}},
"data2": {{"crdb_internal"}, {"empty_schema"}, {"information_schema"}, {"pg_catalog"}, {"pg_extension"}, {"public"}},
}
for dbName, expectedSchemas := range expectedSchemas {
sqlDBRestore.CheckQueryResults(t,
fmt.Sprintf(`USE %s; SELECT schema_name FROM [SHOW SCHEMAS] ORDER BY schema_name;`, dbName),
expectedSchemas)
}
})
t.Run("ensure system table data restored", func(t *testing.T) {
// Note the absence of the jobs table. Jobs are tested by another test as
// jobs are created during the RESTORE process.
systemTablesToVerify := []string{
systemschema.CommentsTable.GetName(),
systemschema.LocationsTable.GetName(),
systemschema.RoleMembersTable.GetName(),
systemschema.RoleOptionsTable.GetName(),
systemschema.SettingsTable.GetName(),
systemschema.TableStatisticsTable.GetName(),
systemschema.UITable.GetName(),
systemschema.UsersTable.GetName(),
systemschema.ScheduledJobsTable.GetName(),
}
verificationQueries := make([]string, len(systemTablesToVerify))
// Populate the list of tables we expect to be restored as well as queries
// that can be used to ensure that data in those tables is restored.
for i, table := range systemTablesToVerify {
switch table {
case systemschema.TableStatisticsTable.GetName():
// createdAt and statisticsID are re-generated on RESTORE.
query := `SELECT "tableID", name, "columnIDs", "rowCount" FROM system.table_statistics`
verificationQueries[i] = query
case systemschema.SettingsTable.GetName():
// We don't include the cluster version.
query := fmt.Sprintf("SELECT * FROM system.%s WHERE name <> 'version'", table)
verificationQueries[i] = query
default:
query := fmt.Sprintf("SELECT * FROM system.%s", table)
verificationQueries[i] = query
}
}
for _, read := range verificationQueries {
sqlDBRestore.CheckQueryResults(t, read, sqlDB.QueryStr(t, read))
}
})
t.Run("ensure table IDs have not changed", func(t *testing.T) {
// Check that all tables have been restored. DISTINCT is needed in order to
// deal with the inclusion of schemas in the system.namespace table.
tableIDCheck := "SELECT * FROM system.namespace ORDER BY id"
sqlDBRestore.CheckQueryResults(t, tableIDCheck, sqlDB.QueryStr(t, tableIDCheck))
})
t.Run("ensure user table data restored", func(t *testing.T) {
expectedUserTables := [][]string{
{"data", "bank"},
{"data2", "foo"},
{"defaultdb", "foo"},
}
for _, table := range expectedUserTables {
query := fmt.Sprintf("SELECT * FROM %s.%s", table[0], table[1])
sqlDBRestore.CheckQueryResults(t, query, sqlDB.QueryStr(t, query))
}
})
t.Run("ensure that grants are restored", func(t *testing.T) {
grantCheck := "use system; SHOW grants"
sqlDBRestore.CheckQueryResults(t, grantCheck, sqlDB.QueryStr(t, grantCheck))
grantCheck = "use data; SHOW grants"
sqlDBRestore.CheckQueryResults(t, grantCheck, sqlDB.QueryStr(t, grantCheck))
})
t.Run("ensure that jobs are restored", func(t *testing.T) {
// Ensure that the jobs in the RESTORE cluster is a superset of the jobs
// that were in the BACKUP cluster (before the full cluster BACKUP job was
// run). There may be more jobs now because the restore can run jobs of
// its own.
newJobsStr := sqlDBRestore.QueryStr(t, jobsQuery)
newJobs := make(map[string][]string)
for _, newJob := range newJobsStr {
// The first element of the slice is the job id.
newJobs[newJob[0]] = newJob
}
for _, oldJob := range preBackupJobs {
newJob, ok := newJobs[oldJob[0]]
if !ok {
t.Errorf("Expected to find job %+v in RESTORE cluster, but not found", oldJob)
}
require.Equal(t, oldJob, newJob)
}
})
t.Run("zone_configs", func(t *testing.T) {
// The restored zones should be a superset of the zones in the backed up
// cluster.
zoneIDsResult := sqlDB.QueryStr(t, `SELECT id FROM system.zones`)
var q strings.Builder
q.WriteString("SELECT * FROM system.zones WHERE id IN (")
for i, restoreZoneIDRow := range zoneIDsResult {
if i > 0 {
q.WriteString(", ")
}
q.WriteString(restoreZoneIDRow[0])
}
q.WriteString(")")
sqlDBRestore.CheckQueryResults(t, q.String(), sqlDB.QueryStr(t, q.String()))
})
t.Run("ensure that tables can be created at the excepted ID", func(t *testing.T) {
var maxID, dbID, tableID int
sqlDBRestore.QueryRow(t, "SELECT max(id) FROM system.namespace").Scan(&maxID)
dbName, tableName := "new_db", "new_table"
sqlDBRestore.Exec(t, fmt.Sprintf("CREATE DATABASE %s", dbName))
sqlDBRestore.Exec(t, fmt.Sprintf("CREATE TABLE %s.%s (a int)", dbName, tableName))
sqlDBRestore.QueryRow(t,
fmt.Sprintf("SELECT id FROM system.namespace WHERE name = '%s'", dbName)).Scan(&dbID)
require.True(t, dbID > maxID)
sqlDBRestore.QueryRow(t,
fmt.Sprintf("SELECT id FROM system.namespace WHERE name = '%s'", tableName)).Scan(&tableID)
require.True(t, tableID > maxID)
require.NotEqual(t, dbID, tableID)
})
}
// TestSingletonSpanConfigJobPostRestore ensures that there's a single span
// config reconciliation job running post restore.
func TestSingletonSpanConfigJobPostRestore(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
params := base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
Knobs: base.TestingKnobs{
JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(),
},
},
}
const numAccounts = 10
_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetupWithParams(t, singleNode, numAccounts, InitManualReplication, params)
_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, params)
defer cleanupFn()
defer cleanupEmptyCluster()
sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
const numRunningReconciliationJobQuery = `
SELECT count(*) FROM [SHOW AUTOMATIC JOBS]
WHERE job_type = 'AUTO SPAN CONFIG RECONCILIATION' AND status = 'running'
`
testutils.SucceedsSoon(t, func() error {
var numRunningJobs int
sqlDBRestore.QueryRow(t, numRunningReconciliationJobQuery).Scan(&numRunningJobs)
if numRunningJobs != 1 {
return errors.Newf("expected single running reconciliation job, found %d", numRunningJobs)
}
return nil
})
}
// TestIncrementalFullClusterBackup verifies that an incremental cluster
// backup captures a system-table change (a user created after the base full
// backup) and that restoring the chain surfaces it.
func TestIncrementalFullClusterBackup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const numAccounts = 10
	const incrementalBackupLocation = "nodelocal://0/inc-full-backup"
	_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupFn()
	defer cleanupEmptyCluster()
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	// The user is created only after the full backup, so it can only appear
	// in the restored cluster if the incremental layer is applied.
	sqlDB.Exec(t, "CREATE USER maxroach1")
	sqlDB.Exec(t, `BACKUP TO $1 INCREMENTAL FROM $2`, incrementalBackupLocation, localFoo)
	sqlDBRestore.Exec(t, `RESTORE FROM $1, $2`, localFoo, incrementalBackupLocation)
	checkQuery := "SELECT * FROM system.users"
	sqlDBRestore.CheckQueryResults(t, checkQuery, sqlDB.QueryStr(t, checkQuery))
}
// TestEmptyFullClusterRestore ensures that we can backup and restore a full
// cluster backup with only metadata (no user data). Regression test for #49573.
func TestEmptyFullClusterRestore(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	sqlDB, tempDir, cleanupFn := createEmptyCluster(t, singleNode)
	_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupFn()
	defer cleanupEmptyCluster()
	// The only user-visible state in the backup is these two users; no
	// tables or databases are created.
	sqlDB.Exec(t, `CREATE USER alice`)
	sqlDB.Exec(t, `CREATE USER bob`)
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
	checkQuery := "SELECT * FROM system.users"
	sqlDBRestore.CheckQueryResults(t, checkQuery, sqlDB.QueryStr(t, checkQuery))
}
// TestClusterRestoreEmptyDB checks that databases containing no tables
// round-trip through a full cluster backup/restore. Regression test for #50561.
func TestClusterRestoreEmptyDB(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const numAccounts = 10
	_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupFn()
	defer cleanupEmptyCluster()
	sqlDB.Exec(t, `CREATE DATABASE some_db`)
	sqlDB.Exec(t, `CREATE DATABASE some_db_2`)
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
	// The restored cluster must list exactly the same databases as the source.
	checkQuery := "SHOW DATABASES"
	sqlDBRestore.CheckQueryResults(t, checkQuery, sqlDB.QueryStr(t, checkQuery))
}
// TestDisallowFullClusterRestoreOnNonFreshCluster verifies that a full
// cluster restore is rejected when the target cluster already contains
// user-created descriptors.
func TestDisallowFullClusterRestoreOnNonFreshCluster(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const numAccounts = 10
	_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupFn()
	defer cleanupEmptyCluster()
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	// Creating a database on the destination makes it non-fresh; the restore
	// must then fail, naming the offending descriptors.
	sqlDBRestore.Exec(t, `CREATE DATABASE foo`)
	sqlDBRestore.ExpectErr(t,
		"pq: full cluster restore can only be run on a cluster with no tables or databases but found 2 descriptors: foo, public",
		`RESTORE FROM $1`, localFoo,
	)
}
// TestClusterRestoreSystemTableOrdering records the order in which system
// tables are processed during a full cluster restore and asserts that
// system.settings is restored last.
func TestClusterRestoreSystemTableOrdering(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const numAccounts = 10
	_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	tcRestore, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode,
		tempDir,
		InitManualReplication, base.TestClusterArgs{})
	defer cleanupFn()
	defer cleanupEmptyCluster()
	restoredSystemTables := make([]string, 0)
	// Hook the restore resumer on every server of the restoring cluster so
	// each system-table restoration appends its name to the slice above.
	for _, server := range tcRestore.Servers {
		registry := server.JobRegistry().(*jobs.Registry)
		registry.TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
			jobspb.TypeRestore: func(raw jobs.Resumer) jobs.Resumer {
				r := raw.(*restoreResumer)
				r.testingKnobs.duringSystemTableRestoration = func(systemTableName string) error {
					restoredSystemTables = append(restoredSystemTables, systemTableName)
					return nil
				}
				return r
			},
		}
	}
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
	// Check that the settings table is the last of the system tables to be
	// restored.
	require.Equal(t, restoredSystemTables[len(restoredSystemTables)-1],
		systemschema.SettingsTable.GetName())
}
// TestDisallowFullClusterRestoreOfNonFullBackup verifies that `RESTORE FROM`
// (full cluster restore) is rejected when pointed at a table-level backup.
func TestDisallowFullClusterRestoreOfNonFullBackup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const numAccounts = 10
	_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupFn()
	defer cleanupEmptyCluster()
	// Back up a single table only; this is not a full cluster backup.
	sqlDB.Exec(t, `BACKUP data.bank TO $1`, localFoo)
	sqlDBRestore.ExpectErr(
		t, "pq: full cluster RESTORE can only be used on full cluster BACKUP files",
		`RESTORE FROM $1`, localFoo,
	)
}
// TestAllowNonFullClusterRestoreOfFullBackup verifies that a single table
// can be restored out of a full cluster backup (here into a different
// database via into_db).
func TestAllowNonFullClusterRestoreOfFullBackup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const numAccounts = 10
	_, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	defer cleanupFn()
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	sqlDB.Exec(t, `CREATE DATABASE data2`)
	sqlDB.Exec(t, `RESTORE data.bank FROM $1 WITH into_db='data2'`, localFoo)
	checkResults := "SELECT * FROM data.bank"
	sqlDB.CheckQueryResults(t, checkResults, sqlDB.QueryStr(t, checkResults))
}
// TestRestoreFromFullClusterBackup exercises the non-cluster restore targets
// (database, single table, wildcard tables, system table via into_db) against
// a full cluster backup.
func TestRestoreFromFullClusterBackup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const numAccounts = 10
	_, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	defer cleanupFn()
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	// Drop the database so each subtest restores into a clean slate.
	sqlDB.Exec(t, `DROP DATABASE data`)
	t.Run("database", func(t *testing.T) {
		sqlDB.Exec(t, `RESTORE DATABASE data FROM $1`, localFoo)
		defer sqlDB.Exec(t, `DROP DATABASE data`)
		sqlDB.CheckQueryResults(t, "SELECT count(*) FROM data.bank", [][]string{{"10"}})
	})
	t.Run("table", func(t *testing.T) {
		sqlDB.Exec(t, `CREATE DATABASE data`)
		defer sqlDB.Exec(t, `DROP DATABASE data`)
		sqlDB.Exec(t, `RESTORE data.bank FROM $1`, localFoo)
		sqlDB.CheckQueryResults(t, "SELECT count(*) FROM data.bank", [][]string{{"10"}})
	})
	t.Run("tables", func(t *testing.T) {
		sqlDB.Exec(t, `CREATE DATABASE data`)
		defer sqlDB.Exec(t, `DROP DATABASE data`)
		sqlDB.Exec(t, `RESTORE data.* FROM $1`, localFoo)
		sqlDB.CheckQueryResults(t, "SELECT count(*) FROM data.bank", [][]string{{"10"}})
	})
	t.Run("system tables", func(t *testing.T) {
		// System tables cannot be restored in place; restore into a scratch
		// database and compare against the live system.users.
		sqlDB.Exec(t, `CREATE DATABASE temp_sys`)
		sqlDB.Exec(t, `RESTORE system.users FROM $1 WITH into_db='temp_sys'`, localFoo)
		sqlDB.CheckQueryResults(t, "SELECT * FROM temp_sys.users", sqlDB.QueryStr(t, "SELECT * FROM system.users"))
	})
}
// TestCreateDBAndTableIncrementalFullClusterBackup checks that an
// incremental cluster backup taken after creating a new database and table
// completes without error.
func TestCreateDBAndTableIncrementalFullClusterBackup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	_, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, 0, InitManualReplication)
	defer cleanupFn()
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	sqlDB.Exec(t, `CREATE DATABASE foo`)
	sqlDB.Exec(t, `CREATE TABLE foo.bar (a int)`)
	// Ensure that the new backup succeeds.
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
}
// TestClusterRestoreFailCleanup tests that a failed RESTORE is cleaned up:
// the temporary tables it imported are left in the DROP state, and a failed
// run can be retried successfully.
func TestClusterRestoreFailCleanup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	skip.UnderRace(t, "takes >1 min under race")
	params := base.TestServerArgs{}
	// Disable GC job so that the final check of crdb_internal.tables is
	// guaranteed to not be cleaned up. Although this was never observed by a
	// stress test, it is here for safety.
	blockCh := make(chan struct{})
	defer close(blockCh)
	params.Knobs.GCJob = &sql.GCJobTestingKnobs{
		RunBeforeResume: func(_ jobspb.JobID) error { <-blockCh; return nil },
	}
	const numAccounts = 1000
	_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	defer cleanupFn()
	// Set up the system tables so we can verify that they are copied to the
	// new cluster. Populate system.users.
	for i := 0; i < 1000; i++ {
		sqlDB.Exec(t, fmt.Sprintf("CREATE USER maxroach%d", i))
	}
	sqlDB.Exec(t, `BACKUP TO 'nodelocal://1/missing-ssts'`)
	// Bugger the backup by removing the SST files. (Note this messes up all of
	// the backups, but there is only one at this point.)
	if err := filepath.Walk(tempDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			t.Fatal(err)
		}
		// Keep manifests and non-SST files so the restore can start and then
		// fail once it reaches the missing data.
		if info.Name() == backupManifestName || !strings.HasSuffix(path, ".sst") {
			return nil
		}
		return os.Remove(path)
	}); err != nil {
		t.Fatal(err)
	}
	// Create a non-corrupted backup.
	// Populate system.jobs.
	// Note: this is not the backup under test, this just serves as a job which
	// should appear in the restore.
	// This job will eventually fail since it will run from a new cluster.
	sqlDB.Exec(t, `BACKUP data.bank TO 'nodelocal://0/throwawayjob'`)
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	t.Run("during restoration of data", func(t *testing.T) {
		_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
		defer cleanupEmptyCluster()
		sqlDBRestore.ExpectErr(t, "sst: no such file", `RESTORE FROM 'nodelocal://1/missing-ssts'`)
		// Verify the failed RESTORE added some DROP tables.
		// Note that the system tables here correspond to the temporary tables
		// imported, not the system tables themselves.
		sqlDBRestore.CheckQueryResults(t,
			`SELECT name FROM system.crdb_internal.tables WHERE state = 'DROP' ORDER BY name`,
			[][]string{
				{"bank"},
				{"comments"},
				{"database_role_settings"},
				{"jobs"},
				{"locations"},
				{"role_members"},
				{"role_options"},
				{"scheduled_jobs"},
				{"settings"},
				{"tenant_settings"},
				{"ui"},
				{"users"},
				{"zones"},
			},
		)
	})
	// This test retries the job (by injected a retry error) after restoring a
	// every system table that has a custom restore function. This tried to tease
	// out any errors that may occur if some of the system table restoration
	// functions are not idempotent.
	t.Run("retry-during-custom-system-table-restore", func(t *testing.T) {
		customRestoreSystemTables := make([]string, 0)
		for table, config := range systemTableBackupConfiguration {
			if config.customRestoreFunc != nil {
				customRestoreSystemTables = append(customRestoreSystemTables, table)
			}
		}
		// One subtest per custom-restore system table, each injecting a
		// one-shot retry error while that table is being restored.
		for _, customRestoreSystemTable := range customRestoreSystemTables {
			t.Run(customRestoreSystemTable, func(t *testing.T) {
				args := base.TestClusterArgs{ServerArgs: base.TestServerArgs{
					Knobs: base.TestingKnobs{JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals()},
				}}
				tcRestore, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, args)
				defer cleanupEmptyCluster()
				// Inject a retry error, that returns once.
				alreadyErrored := false
				for _, server := range tcRestore.Servers {
					registry := server.JobRegistry().(*jobs.Registry)
					registry.TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
						jobspb.TypeRestore: func(raw jobs.Resumer) jobs.Resumer {
							r := raw.(*restoreResumer)
							r.testingKnobs.duringSystemTableRestoration = func(systemTableName string) error {
								if !alreadyErrored && systemTableName == customRestoreSystemTable {
									alreadyErrored = true
									return jobs.MarkAsRetryJobError(errors.New("injected error"))
								}
								return nil
							}
							return r
						},
					}
				}
				// The initial restore will return an error, and restart.
				sqlDBRestore.ExpectErr(t, `running execution from '.*' to '.*' on \d+ failed: injected error`, `RESTORE FROM $1`, localFoo)
				// Reduce retry delays.
				sqlDBRestore.Exec(t, "SET CLUSTER SETTING jobs.registry.retry.initial_delay = '1ms'")
				// Expect the restore to succeed.
				sqlDBRestore.CheckQueryResultsRetry(t,
					`SELECT count(*) FROM [SHOW JOBS] WHERE job_type = 'RESTORE' AND status = 'succeeded'`,
					[][]string{{"1"}})
			})
		}
	})
	t.Run("during system table restoration", func(t *testing.T) {
		tcRestore, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
		defer cleanupEmptyCluster()
		// Bugger the backup by injecting a failure while restoring the system data.
		for _, server := range tcRestore.Servers {
			registry := server.JobRegistry().(*jobs.Registry)
			registry.TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
				jobspb.TypeRestore: func(raw jobs.Resumer) jobs.Resumer {
					r := raw.(*restoreResumer)
					r.testingKnobs.duringSystemTableRestoration = func(_ string) error {
						return errors.New("injected error")
					}
					return r
				},
			}
		}
		sqlDBRestore.ExpectErr(t, "injected error", `RESTORE FROM $1`, localFoo)
		// Verify the failed RESTORE added some DROP tables.
		// Note that the system tables here correspond to the temporary tables
		// imported, not the system tables themselves.
		sqlDBRestore.CheckQueryResults(t,
			`SELECT name FROM system.crdb_internal.tables WHERE state = 'DROP' ORDER BY name`,
			[][]string{
				{"bank"},
				{"comments"},
				{"database_role_settings"},
				{"jobs"},
				{"locations"},
				{"role_members"},
				{"role_options"},
				{"scheduled_jobs"},
				{"settings"},
				{"tenant_settings"},
				{"ui"},
				{"users"},
				{"zones"},
			},
		)
	})
	t.Run("after offline tables", func(t *testing.T) {
		tcRestore, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
		defer cleanupEmptyCluster()
		// Bugger the backup by injecting a failure while restoring the system data.
		for _, server := range tcRestore.Servers {
			registry := server.JobRegistry().(*jobs.Registry)
			registry.TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
				jobspb.TypeRestore: func(raw jobs.Resumer) jobs.Resumer {
					r := raw.(*restoreResumer)
					r.testingKnobs.afterOfflineTableCreation = func() error {
						return errors.New("injected error")
					}
					return r
				},
			}
		}
		sqlDBRestore.ExpectErr(t, "injected error", `RESTORE FROM $1`, localFoo)
	})
}
// A regression test where dropped descriptors would appear in the set of
// `Descriptors`.
func TestDropDatabaseRevisionHistory(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const numAccounts = 1
	_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	defer cleanupFn()
	sqlDB.Exec(t, `BACKUP TO $1 WITH revision_history`, localFoo)
	// Create, drop, and re-create a database between backups so the backup
	// chain contains a dropped descriptor revision for the same name.
	sqlDB.Exec(t, `CREATE DATABASE same_name_db;`)
	sqlDB.Exec(t, `DROP DATABASE same_name_db;`)
	sqlDB.Exec(t, `CREATE DATABASE same_name_db;`)
	sqlDB.Exec(t, `BACKUP TO $1 WITH revision_history`, localFoo)
	_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupEmptyCluster()
	sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
	// Only the live copy of same_name_db should have been restored.
	sqlDBRestore.ExpectErr(t, `database "same_name_db" already exists`, `CREATE DATABASE same_name_db`)
}
// TestClusterRevisionHistory tests that cluster backups can be taken with
// revision_history and correctly restore into various points in time.
func TestClusterRevisionHistory(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	// Each testCase pairs a captured cluster timestamp with a check asserting
	// which databases/tables exist when restoring AS OF that timestamp.
	type testCase struct {
		ts    string
		check func(t *testing.T, runner *sqlutils.SQLRunner)
	}
	testCases := make([]testCase, 0)
	ts := make([]string, 6)
	var tc testCase
	const numAccounts = 1
	_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	defer cleanupFn()
	// ts[0]: d1 exists, d2 does not.
	sqlDB.Exec(t, `CREATE DATABASE d1`)
	sqlDB.Exec(t, `CREATE TABLE d1.t (a INT)`)
	sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts[0])
	tc = testCase{
		ts: ts[0],
		check: func(t *testing.T, checkSQLDB *sqlutils.SQLRunner) {
			checkSQLDB.ExpectErr(t, `database "d1" already exists`, `CREATE DATABASE d1`)
			checkSQLDB.Exec(t, `CREATE DATABASE d2`)
		},
	}
	testCases = append(testCases, tc)
	// ts[1]: both d1 and d2 exist.
	sqlDB.Exec(t, `CREATE DATABASE d2`)
	sqlDB.Exec(t, `CREATE TABLE d2.t (a INT)`)
	sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts[1])
	tc = testCase{
		ts: ts[1],
		check: func(t *testing.T, checkSQLDB *sqlutils.SQLRunner) {
			// Expect both databases to exist at this point.
			checkSQLDB.ExpectErr(t, `database "d1" already exists`, `CREATE DATABASE d1`)
			checkSQLDB.ExpectErr(t, `database "d2" already exists`, `CREATE DATABASE d2`)
		},
	}
	testCases = append(testCases, tc)
	// ts[2]: d1 has been dropped; only d2 remains.
	sqlDB.Exec(t, `DROP DATABASE d1`)
	sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts[2])
	tc = testCase{
		ts: ts[2],
		check: func(t *testing.T, checkSQLDB *sqlutils.SQLRunner) {
			checkSQLDB.Exec(t, `CREATE DATABASE d1`)
			checkSQLDB.Exec(t, `CREATE TABLE d1.t (a INT)`)
			checkSQLDB.ExpectErr(t, `database "d2" already exists`, `CREATE DATABASE d2`)
			checkSQLDB.ExpectErr(t, `relation "d2.public.t" already exists`, `CREATE TABLE d2.t (a INT)`)
		},
	}
	testCases = append(testCases, tc)
	sqlDB.Exec(t, `BACKUP TO $1 WITH revision_history`, localFoo)
	// ts[3]: captured after the first backup but before d2 is dropped, so it
	// exercises restoring to a time covered by a later incremental layer.
	sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts[3])
	sqlDB.Exec(t, `DROP DATABASE d2;`)
	tc = testCase{
		ts: ts[3],
		check: func(t *testing.T, checkSQLDB *sqlutils.SQLRunner) {
			checkSQLDB.Exec(t, `CREATE DATABASE d1`)
			checkSQLDB.Exec(t, `CREATE TABLE d1.t (a INT)`)
			checkSQLDB.ExpectErr(t, `database "d2" already exists`, `CREATE DATABASE d2`)
			checkSQLDB.ExpectErr(t, `relation "d2.public.t" already exists`, `CREATE TABLE d2.t (a INT)`)
		},
	}
	testCases = append(testCases, tc)
	sqlDB.Exec(t, `BACKUP TO $1 WITH revision_history`, localFoo)
	// ts[4]: d1 was re-created after being dropped; d2 is gone.
	sqlDB.Exec(t, `CREATE DATABASE d1`)
	sqlDB.Exec(t, `CREATE TABLE d1.t (a INT)`)
	sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts[4])
	tc = testCase{
		ts: ts[4],
		check: func(t *testing.T, checkSQLDB *sqlutils.SQLRunner) {
			checkSQLDB.ExpectErr(t, `database "d1" already exists`, `CREATE DATABASE d1`)
			checkSQLDB.ExpectErr(t, `relation "d1.public.t" already exists`, `CREATE TABLE d1.t (a INT)`)
			checkSQLDB.Exec(t, `CREATE DATABASE d2`)
			checkSQLDB.Exec(t, `CREATE TABLE d2.t (a INT)`)
		},
	}
	testCases = append(testCases, tc)
	// ts[5]: neither database exists.
	sqlDB.Exec(t, `DROP DATABASE d1`)
	sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts[5])
	tc = testCase{
		ts: ts[5],
		check: func(t *testing.T, checkSQLDB *sqlutils.SQLRunner) {
			checkSQLDB.Exec(t, `CREATE DATABASE d1`)
			checkSQLDB.Exec(t, `CREATE TABLE d1.t (a INT)`)
			checkSQLDB.Exec(t, `CREATE DATABASE d2`)
			checkSQLDB.Exec(t, `CREATE TABLE d2.t (a INT)`)
		},
	}
	testCases = append(testCases, tc)
	sqlDB.Exec(t, `BACKUP TO $1 WITH revision_history`, localFoo)
	// Restore into a fresh cluster at each captured timestamp and run the
	// corresponding existence checks.
	for i, testCase := range testCases {
		t.Run(fmt.Sprintf("t%d", i), func(t *testing.T) {
			_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
			defer cleanupEmptyCluster()
			sqlDBRestore.Exec(t, `RESTORE FROM $1 AS OF SYSTEM TIME `+testCase.ts, localFoo)
			testCase.check(t, sqlDBRestore)
		})
	}
}
// TestReintroduceOfflineSpans is a regression test for #62564, which tracks a
// bug where AddSSTable requests to OFFLINE tables may be missed by cluster
// incremental backups since they can write at a timestamp older than the last
// backup.
func TestReintroduceOfflineSpans(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	skip.UnderRace(t, "likely slow under race")
	// Block restores on the source cluster.
	blockDBRestore := make(chan struct{})
	dbRestoreStarted := make(chan struct{})
	// The data is split such that there will be 10 span entries to process.
	// (Fixed spelling of the identifier: "Entires" -> "Entries".)
	restoreBlockEntriesThreshold := 4
	entriesCount := 0
	params := base.TestClusterArgs{}
	knobs := base.TestingKnobs{
		DistSQL: &execinfra.TestingKnobs{
			BackupRestoreTestingKnobs: &sql.BackupRestoreTestingKnobs{
				// Signal when the restore starts processing entries, and block
				// it partway through until the test has taken its mid-restore
				// incremental backup.
				RunAfterProcessingRestoreSpanEntry: func(_ context.Context) {
					if entriesCount == 0 {
						close(dbRestoreStarted)
					}
					if entriesCount == restoreBlockEntriesThreshold {
						<-blockDBRestore
					}
					entriesCount++
				},
			}},
	}
	params.ServerArgs.Knobs = knobs
	const numAccounts = 1000
	ctx := context.Background()
	_, srcDB, tempDir, cleanupSrc := backupRestoreTestSetupWithParams(t, singleNode, numAccounts, InitManualReplication, params)
	defer cleanupSrc()
	dbBackupLoc := "nodelocal://0/my_db_backup"
	clusterBackupLoc := "nodelocal://0/my_cluster_backup"
	// the small test-case will get entirely buffered/merged by small-file merging
	// and not report any progress in the meantime unless it is disabled.
	srcDB.Exec(t, `SET CLUSTER SETTING bulkio.backup.file_size = '1'`)
	// Test servers only have 128MB root memory monitors, reduce the buffer size
	// so we don't see memory errors.
	srcDB.Exec(t, `SET CLUSTER SETTING bulkio.backup.merge_file_buffer_size = '1MiB'`)
	// Take a backup that we'll use to create an OFFLINE descriptor.
	srcDB.Exec(t, `CREATE INDEX new_idx ON data.bank (balance)`)
	srcDB.Exec(t, `BACKUP DATABASE data TO $1 WITH revision_history`, dbBackupLoc)
	srcDB.Exec(t, `CREATE DATABASE restoredb;`)
	// Take a base full backup.
	srcDB.Exec(t, `BACKUP TO $1 WITH revision_history`, clusterBackupLoc)
	var g errgroup.Group
	g.Go(func() error {
		_, err := srcDB.DB.ExecContext(ctx, `RESTORE data.bank FROM $1 WITH into_db='restoredb'`, dbBackupLoc)
		return err
	})
	// Take an incremental backup after the database restore starts.
	<-dbRestoreStarted
	srcDB.Exec(t, `BACKUP TO $1 WITH revision_history`, clusterBackupLoc)
	var tsMidRestore string
	srcDB.QueryRow(t, "SELECT cluster_logical_timestamp()").Scan(&tsMidRestore)
	// Allow the restore to finish. This will issue AddSSTable requests at a
	// timestamp that is before the last incremental we just took.
	close(blockDBRestore)
	// Wait for the database restore to finish, and take another incremental
	// backup that will miss the AddSSTable writes.
	require.NoError(t, g.Wait())
	var tsBefore string
	srcDB.QueryRow(t, "SELECT cluster_logical_timestamp()").Scan(&tsBefore)
	// Drop an index on the restored table to ensure that the dropped index was
	// also re-included.
	srcDB.Exec(t, `DROP INDEX new_idx`)
	srcDB.Exec(t, `BACKUP TO $1 WITH revision_history`, clusterBackupLoc)
	t.Run("spans-reintroduced", func(t *testing.T) {
		_, destDB, cleanupDst := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
		defer cleanupDst()
		// Restore the incremental backup chain that has missing writes.
		destDB.Exec(t, `RESTORE FROM $1 AS OF SYSTEM TIME `+tsBefore, clusterBackupLoc)
		// Assert that the restored database has the same number of rows in both the
		// source and destination cluster.
		checkQuery := `SELECT count(*) FROM restoredb.bank AS OF SYSTEM TIME ` + tsBefore
		expectedCount := srcDB.QueryStr(t, checkQuery)
		destDB.CheckQueryResults(t, `SELECT count(*) FROM restoredb.bank`, expectedCount)
		checkQuery = `SELECT count(*) FROM restoredb.bank@new_idx AS OF SYSTEM TIME ` + tsBefore
		expectedCount = srcDB.QueryStr(t, checkQuery)
		destDB.CheckQueryResults(t, `SELECT count(*) FROM restoredb.bank@new_idx`, expectedCount)
	})
	t.Run("restore-canceled", func(t *testing.T) {
		args := base.TestClusterArgs{ServerArgs: base.TestServerArgs{
			Knobs: base.TestingKnobs{JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals()}},
		}
		_, destDB, cleanupDst := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, args)
		defer cleanupDst()
		destDB.Exec(t, `RESTORE FROM $1 AS OF SYSTEM TIME `+tsMidRestore, clusterBackupLoc)
		// Wait for the cluster restore job to finish, as well as the restored RESTORE TABLE
		// job to cancel.
		destDB.CheckQueryResultsRetry(t, `
		SELECT description, status FROM [SHOW JOBS]
		WHERE job_type = 'RESTORE' AND status NOT IN ('succeeded', 'canceled')`,
			[][]string{},
		)
		// The cluster restore should succeed, but the table restore should have failed.
		destDB.CheckQueryResults(t,
			`SELECT status, count(*) FROM [SHOW JOBS] WHERE job_type = 'RESTORE' GROUP BY status ORDER BY status`,
			[][]string{{"canceled", "1"}, {"succeeded", "1"}})
		destDB.ExpectErr(t, `relation "restoredb.bank" does not exist`, `SELECT count(*) FROM restoredb.bank`)
	})
}
// TestClusterRevisionDoesNotBackupOptOutSystemTables is a regression test for a
// bug that was introduced where we would include revisions for descriptors that
// are not supposed to be backed up egs: system tables that are opted out.
//
// The test would previously fail with an error that the descriptors table (an
// opt out system table) did not have a span covering the time between the
// `EndTime` of the first backup and second backup, since there are no revisions
// to it between those backups.
func TestClusterRevisionDoesNotBackupOptOutSystemTables(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	tc, _, _, cleanup := backupRestoreTestSetup(t, singleNode, 10, InitManualReplication)
	conn := tc.Conns[0]
	sqlDB := sqlutils.MakeSQLRunner(conn)
	defer cleanup()
	sqlDB.Exec(t, `CREATE DATABASE test;`)
	sqlDB.Exec(t, `USE test;`)
	sqlDB.Exec(t, `CREATE TABLE foo (id INT);`)
	// Two consecutive revision_history backups with no descriptor changes in
	// between exercise the empty-revision window described above.
	sqlDB.Exec(t, `BACKUP TO 'nodelocal://1/foo' WITH revision_history;`)
	sqlDB.Exec(t, `BACKUP TO 'nodelocal://1/foo' WITH revision_history;`)
	sqlDB.Exec(t, `CREATE TABLE bar (id INT);`)
	sqlDB.Exec(t, `BACKUP TO 'nodelocal://1/foo' WITH revision_history;`)
}
// TestRestoreWithRecreatedDefaultDB verifies that when the source cluster
// dropped and re-created defaultdb (giving it a new descriptor ID), a full
// cluster restore reproduces the namespace entry with that new ID.
func TestRestoreWithRecreatedDefaultDB(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	sqlDB, tempDir, cleanupFn := createEmptyCluster(t, singleNode)
	_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupFn()
	defer cleanupEmptyCluster()
	sqlDB.Exec(t, `
DROP DATABASE defaultdb;
CREATE DATABASE defaultdb;
`)
	// Capture the (re-created) defaultdb's descriptor ID from the source; the
	// restored cluster must use the same ID.
	row := sqlDB.QueryRow(t, `SELECT id FROM system.namespace WHERE name = 'defaultdb'`)
	var expectedDefaultDBID string
	row.Scan(&expectedDefaultDBID)
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
	sqlDBRestore.CheckQueryResults(t, `SELECT * FROM system.namespace WHERE name = 'defaultdb'`, [][]string{
		{"0", "0", "defaultdb", expectedDefaultDBID},
	})
}
// TestRestoreWithDroppedDefaultDB verifies that when the source cluster
// dropped defaultdb before the backup, a full cluster restore does not
// resurrect its namespace entry.
func TestRestoreWithDroppedDefaultDB(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	sqlDB, tempDir, cleanupFn := createEmptyCluster(t, singleNode)
	_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupFn()
	defer cleanupEmptyCluster()
	sqlDB.Exec(t, `
DROP DATABASE defaultdb;
`)
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
	// No defaultdb namespace row should exist on the restored cluster.
	sqlDBRestore.CheckQueryResults(t, `SELECT count(*) FROM system.namespace WHERE name = 'defaultdb'`, [][]string{
		{"0"},
	})
}
// TestRestoreToClusterWithDroppedDefaultDB verifies that a full cluster
// restore recreates the defaultdb namespace entry — with the IDs recorded in
// the backup — even when the destination cluster dropped its own defaultdb
// before the restore ran.
func TestRestoreToClusterWithDroppedDefaultDB(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	sqlDB, tempDir, cleanupFn := createEmptyCluster(t, singleNode)
	_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupFn()
	defer cleanupEmptyCluster()
	// Capture the source cluster's defaultdb namespace row; the restored
	// cluster should end up with an identical row.
	expectedRow := sqlDB.QueryRow(t, `SELECT * FROM system.namespace WHERE name = 'defaultdb'`)
	var parentID, parentSchemaID, id int
	var name string
	expectedRow.Scan(&parentID, &parentSchemaID, &name, &id)
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	// Drop defaultdb on the destination to exercise the case where the
	// restore must recreate it.
	sqlDBRestore.Exec(t, `
DROP DATABASE defaultdb;
`)
	sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
	sqlDBRestore.CheckQueryResults(t, `SELECT * FROM system.namespace WHERE name = 'defaultdb'`, [][]string{
		{fmt.Sprint(parentID), fmt.Sprint(parentSchemaID), name, fmt.Sprint(id)},
	})
}
| pkg/ccl/backupccl/full_cluster_backup_restore_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/f475361ee075d1746bcd148e6da004fa20dece51 | [
0.9978651404380798,
0.05955187603831291,
0.00016103382222354412,
0.00017730621038936079,
0.22828781604766846
] |
{
"id": 0,
"code_window": [
"\tdefer leaktest.AfterTest(t)()\n",
"\tdefer log.Scope(t).Close(t)\n",
"\n",
"\tparams := base.TestClusterArgs{\n",
"\t\tServerArgs: base.TestServerArgs{\n",
"\t\t\tDisableSpanConfigs: true, // TODO(irfansharif): #75060.\n",
"\t\t\tKnobs: base.TestingKnobs{\n",
"\t\t\t\tSpanConfig: &spanconfig.TestingKnobs{\n",
"\t\t\t\t\t// We compare job progress before and after a restore. Disable\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/ccl/backupccl/full_cluster_backup_restore_test.go",
"type": "replace",
"edit_start_line_idx": 52
} | <?xml version="1.0" encoding="UTF-8"?>
<svg width="16px" height="16px" viewBox="0 0 16 16" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<!-- Generator: Sketch 48.2 (47327) - http://www.bohemiancoding.com/sketch -->
<title>Expand icon</title>
<desc>Created with Sketch.</desc>
<defs></defs>
<g id="Expand-job-B" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd" transform="translate(-725.000000, -207.000000)">
<g id="Expand-icon" transform="translate(725.000000, 207.000000)">
<rect id="Rectangle-14" fill="#C8CBD4" x="0" y="0" width="16" height="16" rx="8"></rect>
<g id="select-83" transform="translate(5.714286, 3.428571)" fill="#FFFFFF" fill-rule="nonzero">
<polygon id="Shape" points="0 3.35860058 4.57142857 3.35860058 2.28571429 0"></polygon>
<polygon id="Shape" points="2.28571429 9.14285714 4.57142857 5.78425656 0 5.78425656"></polygon>
</g>
</g>
</g>
</svg> | pkg/ui/workspaces/db-console/assets/expand.svg | 0 | https://github.com/cockroachdb/cockroach/commit/f475361ee075d1746bcd148e6da004fa20dece51 | [
0.00018071506929118186,
0.00017399636271875352,
0.00016727765614632517,
0.00017399636271875352,
0.000006718706572428346
] |
{
"id": 0,
"code_window": [
"\tdefer leaktest.AfterTest(t)()\n",
"\tdefer log.Scope(t).Close(t)\n",
"\n",
"\tparams := base.TestClusterArgs{\n",
"\t\tServerArgs: base.TestServerArgs{\n",
"\t\t\tDisableSpanConfigs: true, // TODO(irfansharif): #75060.\n",
"\t\t\tKnobs: base.TestingKnobs{\n",
"\t\t\t\tSpanConfig: &spanconfig.TestingKnobs{\n",
"\t\t\t\t\t// We compare job progress before and after a restore. Disable\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/ccl/backupccl/full_cluster_backup_restore_test.go",
"type": "replace",
"edit_start_line_idx": 52
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colinfo_test
import (
"bytes"
"context"
"fmt"
"testing"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo/colinfotestutils"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
)
// fakeSource represents a fake column resolution environment for tests.
type fakeSource struct {
	// t is used for logging resolution outcomes during the test.
	t *testing.T
	// knownTables lists the tables this environment can resolve against.
	knownTables []knownTable
}
// knownTable is one resolvable table: its name and the names of its columns.
type knownTable struct {
	srcName tree.TableName
	columns []tree.Name
}
// colsRes is a table's column list, returned as the "source metadata" of a
// resolved table.
type colsRes tree.NameList
// ColumnSourceMeta is a marker method making colsRes usable as a
// colinfo.ColumnSourceMeta.
func (c colsRes) ColumnSourceMeta() {}
// FindSourceMatchingName is part of the ColumnItemResolver interface.
//
// It scans f.knownTables for entries whose object (table) name matches tn.
// When tn carries an explicit schema, the candidate's schema must match too;
// the catalog is only compared when both the schema and catalog are explicit
// in tn. Returns MoreThanOne with an error if two entries match, NoResults
// (with no error) if none do, and ExactlyOne with the matched prefix and its
// column list otherwise.
func (f *fakeSource) FindSourceMatchingName(
	_ context.Context, tn tree.TableName,
) (
	res colinfo.NumResolutionResults,
	prefix *tree.TableName,
	srcMeta colinfo.ColumnSourceMeta,
	err error,
) {
	// Log the outcome for test debugging; the deferred closure reads the
	// named results after they have been assigned below.
	defer func() {
		f.t.Logf("FindSourceMatchingName(%s) -> res %d prefix %s meta %v err %v",
			&tn, res, prefix, srcMeta, err)
	}()
	found := false
	var columns colsRes
	for i := range f.knownTables {
		t := &f.knownTables[i]
		if t.srcName.ObjectName != tn.ObjectName {
			continue
		}
		if tn.ExplicitSchema {
			if !t.srcName.ExplicitSchema || t.srcName.SchemaName != tn.SchemaName {
				continue
			}
			if tn.ExplicitCatalog {
				if !t.srcName.ExplicitCatalog || t.srcName.CatalogName != tn.CatalogName {
					continue
				}
			}
		}
		// A second match makes the name ambiguous.
		if found {
			return colinfo.MoreThanOne, nil, nil, fmt.Errorf("ambiguous source name: %q", &tn)
		}
		found = true
		prefix = &t.srcName
		columns = colsRes(t.columns)
	}
	if !found {
		return colinfo.NoResults, nil, nil, nil
	}
	return colinfo.ExactlyOne, prefix, columns, nil
}
// FindSourceProvidingColumn is part of the ColumnItemResolver interface.
//
// It searches all known tables for one providing a column named col. If
// exactly one table provides it, that table's name, column list, and the
// column's index within it are returned. If several tables provide it, an
// "ambiguous column" error is returned; if none do, a "does not exist" error.
func (f *fakeSource) FindSourceProvidingColumn(
	_ context.Context, col tree.Name,
) (prefix *tree.TableName, srcMeta colinfo.ColumnSourceMeta, colHint int, err error) {
	// Log the outcome for test debugging.
	defer func() {
		f.t.Logf("FindSourceProvidingColumn(%s) -> prefix %s meta %v hint %d err %v",
			col, prefix, srcMeta, colHint, err)
	}()
	found := false
	var columns colsRes
	for i := range f.knownTables {
		t := &f.knownTables[i]
		for c, cn := range t.columns {
			if cn != col {
				continue
			}
			// A match in a second table makes the column reference ambiguous.
			if found {
				return nil, nil, -1, f.ambiguousColumnErr(col)
			}
			found = true
			colHint = c
			columns = colsRes(t.columns)
			prefix = &t.srcName
			break
		}
	}
	if !found {
		return nil, nil, -1, fmt.Errorf("column %q does not exist", &col)
	}
	return prefix, columns, colHint, nil
}
func (f *fakeSource) ambiguousColumnErr(col tree.Name) error {
var candidates bytes.Buffer
sep := ""
for i := range f.knownTables {
t := &f.knownTables[i]
for _, cn := range t.columns {
if cn == col {
fmt.Fprintf(&candidates, "%s%s.%s", sep, tree.ErrString(&t.srcName), cn)
sep = ", "
}
}
}
return fmt.Errorf("column reference %q is ambiguous (candidates: %s)", &col, candidates.String())
}
type colRes string
func (c colRes) ColumnResolutionResult() {}
// Resolve is part of the ColumnItemResolver interface.
func (f *fakeSource) Resolve(
_ context.Context,
prefix *tree.TableName,
srcMeta colinfo.ColumnSourceMeta,
colHint int,
col tree.Name,
) (colinfo.ColumnResolutionResult, error) {
f.t.Logf("in Resolve: prefix %s meta %v colHint %d col %s",
prefix, srcMeta, colHint, col)
columns, ok := srcMeta.(colsRes)
if !ok {
return nil, fmt.Errorf("programming error: srcMeta invalid")
}
if colHint >= 0 {
// Resolution succeeded. Let's do some sanity checking.
if columns[colHint] != col {
return nil, fmt.Errorf("programming error: invalid colHint %d", colHint)
}
return colRes(fmt.Sprintf("%s.%s", prefix, col)), nil
}
for _, cn := range columns {
if col == cn {
// Resolution succeeded.
return colRes(fmt.Sprintf("%s.%s", prefix, col)), nil
}
}
return nil, fmt.Errorf("unknown column name: %s", &col)
}
var _ colinfotestutils.ColumnItemResolverTester = &fakeSource{}
// GetColumnItemResolver is part of the sqlutils.ColumnItemResolverTester
// interface.
func (f *fakeSource) GetColumnItemResolver() colinfo.ColumnItemResolver {
return f
}
// AddTable is part of the sqlutils.ColumnItemResolverTester interface.
func (f *fakeSource) AddTable(tabName tree.TableName, colNames []tree.Name) {
f.knownTables = append(f.knownTables, knownTable{srcName: tabName, columns: colNames})
}
// ResolveQualifiedStarTestResults is part of the
// sqlutils.ColumnItemResolverTester interface.
func (f *fakeSource) ResolveQualifiedStarTestResults(
srcName *tree.TableName, srcMeta colinfo.ColumnSourceMeta,
) (string, string, error) {
cs, ok := srcMeta.(colsRes)
if !ok {
return "", "", fmt.Errorf("fake resolver did not return colsRes, found %T instead", srcMeta)
}
nl := tree.NameList(cs)
return srcName.String(), nl.String(), nil
}
// ResolveColumnItemTestResults is part of the
// sqlutils.ColumnItemResolverTester interface.
func (f *fakeSource) ResolveColumnItemTestResults(
res colinfo.ColumnResolutionResult,
) (string, error) {
c, ok := res.(colRes)
if !ok {
return "", fmt.Errorf("fake resolver did not return colRes, found %T instead", res)
}
return string(c), nil
}
func TestResolveQualifiedStar(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
f := &fakeSource{t: t}
colinfotestutils.RunResolveQualifiedStarTest(t, f)
}
func TestResolveColumnItem(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
f := &fakeSource{t: t}
colinfotestutils.RunResolveColumnItemTest(t, f)
}
| pkg/sql/catalog/colinfo/column_item_resolver_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/f475361ee075d1746bcd148e6da004fa20dece51 | [
0.002183688571676612,
0.00042803335236385465,
0.00016465845692437142,
0.00017516346997581422,
0.0006312026525847614
] |
{
"id": 0,
"code_window": [
"\tdefer leaktest.AfterTest(t)()\n",
"\tdefer log.Scope(t).Close(t)\n",
"\n",
"\tparams := base.TestClusterArgs{\n",
"\t\tServerArgs: base.TestServerArgs{\n",
"\t\t\tDisableSpanConfigs: true, // TODO(irfansharif): #75060.\n",
"\t\t\tKnobs: base.TestingKnobs{\n",
"\t\t\t\tSpanConfig: &spanconfig.TestingKnobs{\n",
"\t\t\t\t\t// We compare job progress before and after a restore. Disable\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/ccl/backupccl/full_cluster_backup_restore_test.go",
"type": "replace",
"edit_start_line_idx": 52
} | - Feature Name: SQL Query Planning
- Status: in-progress
- Start Date: 2017-12-13
- Authors: Peter Mattis
- RFC PR: #19135
- Cockroach Issue: (one or more # from the issue tracker)
# Summary
This RFC sketches the outlines of the high-level modules of a SQL
query planning including a full-featured optimizer.
# Motivation
SQL query planning is concerned with transforming the AST of a SQL
query into a physical query plan for execution. Naive execution of a
SQL query can be prohibitively expensive, because SQL specifies the
desired results and not how to achieve them. A given SQL query can
have thousands of alternate query plans with vastly different
execution times. The techniques used to generate and select a good
query plan involve significant engineering challenges.
This RFC is intended to provide guidance for both short term and long
term work on the SQL optimizer, and highlight areas of the current
system that will need to evolve.
# Guide-level explanation
## Overview
SQL query planning is often described in terms of 8 modules:
1. [Stats](#stats)
2. [Prep](#prep)
3. [Rewrite](#rewrite)
4. [Memo](#memo)
5. [Cost Model](#cost-model)
6. [Search](#search-aka-enumeration)
7. [Properties](#properties)
8. [Transformations](#transformations)
Note that Stats, Cost Model, Memo, Properties and Transformations
could be considered modules, while Prep, Rewrite and Search could be
considered phases, though we'll refer to all 8 uniformly as modules in
this document. Memo is a technique for compactly representing the
forest of trees generated during Search. Stats, Properties, Cost Model
and Transformations are modules that power Prep, Rewrite and Search.
```
SQL query text
|
+-----v-----+
| Parse |
+-----+-----+
|
(ast)
|
+-------+ +-----v-----+ - constant folding, type checking, name resolution
| Stats +-----> Prep | - computes initial properties
+-------+ +-----+-----+ - retrieves and attaches stats
| - done once per PREPARE
(expr)
|
+-----v-----+ - capture placeholder values / timestamps
+--> Rewrite | - cost-agnostic transformations, eg. predicate push-down
+------------+ | +-----+-----+ - done once per EXECUTE
| Transforms +--+ |
+------------+ | (expr)
| |
+-->-----v-----+ - cost-based transformations
+------------+ | Search | - finds lowest cost physical plan
| Cost Model +----->-----+-----+ - includes DistSQL physical planning
+------------+ |
(physical plan)
|
+-----v-----+
| Execution |
+-----------+
```
CockroachDB already has implementations of portions of these modules
except for Stats and Memo. For example, CockroachDB performs name
resolution and type checking which is part of Prep, and performs
predicate push down through joins which traditionally happens during
Rewrite. CockroachDB utilizes a primitive Cost model during index
selection (a portion of Search) to choose which index to use based on
filters and desired ordering.
In addition to the 8 modules, another aspect of the optimizer that
needs discussion is [Testing](#testing) and test infrastructure.
Lastly, a strawman [Roadmap](#roadmap) is proposed for how to break up
this work over the next several releases.
## Glossary
The following terms are introduced/defined in this RFC:
- [**algebraic equivalence**](#properties)
- [**attributes** of expressions](#properties-vs-attributes)
- [**cardinality**](#stats)
- [**decorrelating**](#rewrite), syn. "unnesting"
- [**derived** vs **required** properties](#properties)
- [**enforcer** operator for properties](#properties)
- [**equivalence class**](#memo)
- [**exploration** vs **implementation** transformations](#search)
- [**expressions** in queries](#prep)
- [**functional dependencies**](#prep)
- [**logical** vs **physical** properties](#memo)
- [**logical** vs **physical** vs **scalar** operators](#prep)
- [**memo-expressions**](#memo)
- [**operator** in query expressions](#prep)
- [**pattern** in transformations](#memo)
- [**predicate push-down**](#rewrite)
- [**prep** phase](#prep)
- **properties** of expressions [1](#memo) [2](#properties)
- [**pruning** during search](#search)
- [**query text**](#modules)
- [**rewrite** phase](#rewrite)
- [**scalar** vs **relational** properties](#properties)
- [**search** phase](#search)
- [**selectivity**](#stats)
- [**tracked** vs **computed** properties](#properties)
- [**transformation** of expressions](#rewrite)
- [**unnesting**](#rewrite), syn. "decorrelating"
## Modules
The parse phase is not discussed in this RFC. It handles the
transformation of the *SQL query text* into an abstract syntax tree
(AST).
### Prep
*Prep* (short for "prepare") is the first phase of query optimization
where the AST is transformed into a form more suitable for
optimization and annotated with information that will be used by later
phases. Prep includes resolving table and column references (i.e. name
resolution) and type checking, both of which are already performed by
CockroachDB.
During Prep, the AST is transformed from the raw output of the parser
into an expression "tree".
```go
type operator int16
type expr struct {
op operator
children []*expr
relationalProps *relationalProps // See [relational properties](#tracked_properties)
scalarProps *scalarProps // See [scalar properties](#tracked_properties)
physicalProps *physicalProps // See [physical properties](#tracked_properties)
private interface{}
}
```
The term *"expression"* here is based on usage from literature, though
it is mildly confusing as the current SQL code uses "expression" to
refer to scalar expressions. In this document, "expression" refers to
either a relational or a scalar expression. Using a uniform node type
for expressions facilitates transforms used during the Rewrite and
Search phases of optimization.
Each expression has an *operator* and zero or more operands
(`expr.children`). Operators can be *relational* (e.g. `join`) or
*scalar* (e.g. `<`). Relational operators can be *logical* (only
specifies results) or *physical* (specifies both result and a
particular implementation).
During Prep all the columns are given a unique index (number). Column
numbering involves assigning every base column and non-trivial
projection in a query a unique query-specific index.
Giving each column a unique index allows the expression nodes
mentioned above to track input and output columns, or really any set
of columns during Prep and later phases, using a bitmap. The bitmap
representation allows fast determination of compatibility between
expression nodes and is utilized during rewrites and transformations
to determine the legality of such operations.
The Prep phase also computes *logical properties*, such as the input
and output columns of each (sub-)expression, equivalent columns,
not-null columns and functional dependencies.
The functional dependencies for an expression are constraints over one
or more sets of columns. Specific examples of functional dependencies
are the projections, where 1 or more input columns determine an output
column, and "keys" which are a set of columns where no two rows output
by the expression are equal after projection on to that set (e.g. a
unique index for a table where all of the columns are NOT
NULL). Conceptually, the functional dependencies form a graph, though
they are not represented as such in code.
### Rewrite
The second phase of query optimization is *rewrite*. The rewrite phase
performs *transformations* on the logical query tree which are always
beneficial (i.e. cost-agnostic).
A transformation transforms a (part of a) query into another. Note
that there is conceptual overlap with the Search phase which also
performs transformations on the query. Both phases employ
transformations, yet Search needs to track and cost the alternatives
while Rewrite does not. In the specific context of the rewrite phase,
transformations are commonly called *rewrites*.
During Rewrite, the previous version of an expression is
discarded. During Search, both the original and new expression are
preserved side-by-side as alternatives, see the [section
below](#search) for details.
Also note that some of the transformations performed by Rewrite need
not be performed again by Search (decorrelation is the prime
example). The vast majority of transforms performed by Search are not
used by Rewrite.
Rewrite is the phase where e.g. correlated subqueries are
*decorrelated* (synonym: *unnesting*), additional predicates are
inferred and *predicate push down* occurs, and various other
simplifications to the relational algebra tree (e.g. projection & join
elimination). As an example of predicate push down, consider the
query:
```sql
SELECT * FROM a, b USING (x) WHERE a.x < 10
```
The naive execution of this query retrieves all rows from `a` and `b`,
joins (i.e. filters) them on the variable `x`, and then filters them
again on `a.x < 10`. Predicate push down attempts to push down the
predicate `a.x < 10` below the join. This can obviously be done for
the scan from `a`:
```sql
SELECT * FROM (SELECT * FROM a WHERE a.x < 10), b USING (x)
```
Slightly more complicated, we can also generate a new predicate using
the functional dependence that `a.x = b.x` (due to the join
predicate):
```sql
SELECT * FROM
(SELECT * FROM a WHERE a.x < 10),
(SELECT * FROM b WHERE b.x < 10) USING (x)
```
Predicate push down is aided by predicate inference. Consider the query:
```sql
SELECT * FROM a, b USING (x)
```
Due to the join condition, we can infer the predicates `a.x IS NOT
NULL` and `b.x IS NOT NULL`:
```sql
SELECT * FROM a, b USING (x)
WHERE a.x IS NOT NULL AND b.x IS NOT NULL
```
And predicate push down can push these predicates through the join:
```sql
SELECT * FROM
(SELECT * FROM a WHERE a.x IS NOT NULL),
(SELECT * FROM b WHERE b.x IS NOT NULL) USING (x)
```
### Stats
Table statistics power both the cost model and the search of alternate
query plans. A simple example of where statistics guide the search of
alternate query plans is in join ordering:
```sql
SELECT * FROM a JOIN b
```
In the absence of other opportunities, this might be implemented as a
hash join. With a hash join, we want to load the smaller set of rows
(either from `a` or `b`) into the hash table and then query that table
while looping through the larger set of rows. How do we know whether
`a` or `b` is larger? We keep statistics about the *cardinality* of `a`
and `b`, i.e. the (approximate) number of different values.
Simple table cardinality is sufficient for the above query but fails
in other queries. Consider:
```sql
SELECT * FROM a JOIN b ON a.x = b.x WHERE a.y > 10
```
Table statistics might indicate that `a` contains 10x more data than
`b`, but the predicate `a.y > 10` is filtering a chunk of the
table. What we care about is whether the result of the scan of `a`
after filtering returns more rows than the scan of `b`. This can be
accomplished by making a determination of the *selectivity* of the
predicate `a.y > 10` (the % of rows it will filter) and then
multiplying that selectivity by the cardinality of `a`. The common
technique for estimating selectivity is to collect a histogram on
`a.y` (prior to running the query).
The collection of table statistics occurs prior to receiving the
query. As such, the statistics are necessarily out of date and may be
inaccurate. The system may bound the inaccuracy by recomputing the
stats based on how fast a table is being modified. Or the system may
notice when stat estimations are inaccurate during query execution.
[A separate RFC covers statistics collection in
CockroachDB.](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20170908_sql_optimizer_statistics.md)
### Memo
Memo is a data structure for efficiently storing a forest of query
plans. Conceptually, the memo is composed of a numbered set of
**equivalency classes** called **groups** where each group contains a
set of logically equivalent expressions. The different expressions in
a single group are called **memo-expressions** (memo-ized
expressions). While an expression node outside of the memo contains a
list of child expressions, a memo-expression contains a list of child
groups.
By definition, all the memo-expressions in a group share the same
*logical properties*, a concept explored more in depth in the [section
below](#properties). The memo-expression structure mirrors the
expression structure:
```go
type exprID int32
type groupID int32
type memoExpr struct {
op operator
children []groupID
physicalProps *physicalProps
private interface{}
}
type memoGroup struct {
exprs []memoExpr
relationalProps *relationalProps
scalarProps *scalarProps
}
```
Transformations are *not* performed directly on the memo because
transformations operate on trees while the memo models a forest of
trees. Instead, expression fragments are extracted from the memo,
transformed, and re-inserted into the memo. At first glance, this
seems onerous and inefficient, but it allows transformations to be
rewritten more naturally and the extraction of expression fragments
can be performed efficiently.
Extracting an expression fragment for transformation is performed via
a process called *binding*. Binding allows iterating over all of the
expressions matching a pattern that are rooted at a particular
memo-expression. A pattern is specified using the same expression
structure that is to be extracted, with the addition of "pattern-leaf"
and "pattern-tree" placeholders that act as wildcards:
* A **pattern leaf** matches any expression tree, with only the root
of the tree being retained in the bound expression. It is used when
the expression is used opaquely by the transformation. In other
words, the transformation doesn't care what's inside the subtree. It
is a "leaf" in the sense that it's a leaf in any binding matching a
pattern.
* A **pattern tree** matches any expression tree and indicates that
recursive extraction of the full subtree is required. It is
typically used for scalar expressions when some manipulation of that
expression is required by the transformation. Note that a pattern
tree results in all possible subtrees being enumerated, however
scalar expressions typically don't have many subtrees (if there are
no subqueries, there is only one subtree). [TODO(peter): what to do
about subqueries in a scalar context? Iterating over all of the
subquery expressions doesn't seem right. There is a TODO in `opttoy`
to cache scalar expressions in `memoGroup`. Need to investigate this
further.]
To better understand the structure of the memo, consider the query:
```sql
SELECT * FROM a, b WHERE a.x = b.x
```
Converted to the expression structure which models the extended
relational algebra the query looks like:
```
inner-join [columns: a.x a.y b.x b.z]
filters:
eq
inputs:
variable (a.x)
variable (b.x)
inputs:
scan [columns: a.x a.y]
scan [columns: b.x b.z]
```
Inserting the expression tree into the memo results in:
```
6: [inner-join [1 2 5]]
5: [eq [3 4]]
4: [variable b.x]
3: [variable a.x]
2: [scan b]
1: [scan a]
```
Memo groups are numbered by when they were created and the groups are
topologically sorted for display (this is an implementation detail and
not intended to be prescriptive). In the above example, each group
contains only a single memo-expression. After performing the join
commutativity transformation, the memo would expand:
```
6: [inner-join [1 2 5]] [inner-join [2 1 5]]
5: [eq [3 4]]
4: [variable b.x]
3: [variable a.x]
2: [scan b]
1: [scan a]
```
Memo groups contain logically equivalent expressions, but two
logically equivalent expression may not be placed in the same memo
group. This occurs because determining logical equivalency of two
relational expressions is complex to perform 100% correctly. A
correctness failure (i.e. considering two expressions logically
equivalent when they are not) results in invalid transformations and
invalid plans. Placing two logically equivalent expressions in
different groups has a much gentler failure mode: the memo and search
are less efficient.
Insertion of an expression into the memo is performed by recursively
inserting all of the sub-expressions into the memo and then computing
a **fingerprint** for the memo-expression. The fingerprint for a
memo-expression is simply the expression operator and the list of
child groups. For example, in the memo examples above, the fingerprint
for the first inner-join expression is `[inner-join [1 2 5]]`. The
memo maintains a map from expression fingerprint to memo group which
allows quick determination if an expression fragment already exists in
the memo. A small amount of operator-specific normalization is
performed when computing the group fingerprint for a
memo-expression. For example, the left and right inputs of an
inner-join are output in sorted order which results in the expressions
`[inner-join [1 2 5]]` and `[inner-join [2 1 5]]` having the same
group fingerprint. The operator-specific normalization is
conservative. The common case for placing logically equivalent
expressions in the same group is adherence to the invariant that
transformed expressions are logically equivalent to their input.
```go
type memo struct {
// Map from memo-expression "group" fingerprint to group ID.
groupMap map[string]groupID
groups []memoGroup
}
```
In addition to memo expressions, memo groups also contain a map from
desired physical properties to optimization state for the group for
those properties. This state is discussed more in
[Search](#search-aka-enumeration-or-transformation).
A **location** within the memo identifies a particular memo-expression
by its group and expression number. When an expression fragment is
extracted from the memo, each `expr` is tagged with the location it
originated from in the memo. This allows subsequent reinsertion of a
transformed expression into the memo to quickly determine which groups
the expression nodes should be added to.
```go
type memoLoc struct {
group groupID
expr exprID
}
...
type expr struct {
op operator
loc memoLoc
...
}
```
The above depictions of the memo structures are simplified for
explanatory purposes. The actual structures are similar, though
optimized to reduce allocations.
### Properties
Properties are meta-information that are maintained at each node in an
expression. Properties power transformations and optimization.
#### Properties vs attributes
The term "property" encompasses information that is well-defined over
any expression in its group: a given scalar property is well-defined
for all scalar operators; a relational property is well-defined for
all relational operators. For example, "nullability" is a property
that is properly defined (and says something meaningful for) any any
scalar expression.
In contrast, some bits of information are only relevant for specific
operators. For example, the "join algorithm" is only relevant for join
operators; the "index name" is only relevant for table scan operators,
etc. This operator-specific data is called an *attribute* and is
attached to a particular memo-expression.
#### Logical vs physical properties
Logical properties are maintained for both relational and scalar
operators. A logical property refers to logical information about the
expression such as column equivalencies or functional dependencies.
Physical properties are those that exist outside of the relational
algebra such as row order and data distribution. Physical property
requirements arise from both the query itself (the non-relational
`ORDER BY` operator) and by the selection of specific implementations
during optimization (e.g. a merge-join requires the inputs to be
sorted in a particular order).
By definition, two memo-expressions in the same group have the same
logical properties and the logical properties are attached to the memo
group. The physical properties for the memo-expressions in a group may
differ. For example, a memo group containing inner-join will also have
hash-join and merge-join implementations which produce the same set of
output rows but in different orders.
#### Relational vs scalar properties
The memo contains memo-expressions with either scalar (e.g. `+`, `<`,
etc.) or relational (e.g. `join`, `project`, etc.) operators,
distinguished as scalar expressions vs relational expressions.
Scalar and relational properties are maintained in separate data
structures, but note that both scalar and relational properties are
considered *logical*.
#### Derived vs required properties
Properties can be *required* or *derived*.
A required property is one specified by the SQL query text. For
example, a DISTINCT clause is a required property on the set of
columns of the corresponding projection -- that the tuple of columns
forms a key (unique values) in the results.
A derived property is one derived by the optimizer for an
expression based on the properties of the children nodes.
For example, in `SELECT k+1 FROM kv`, once the ordering of "k" is
known from kv's descriptor, the same ordering property can be derived
for `k+1`.
During optimization, for each node with required properties the
optimizer will look at the children node to check whether their actual
properties (which can be derived) match the requirement. If they don't
the optimizer must introduce an *enforcer* operator in the plan that
provides the required property.
For example, an `ORDER BY` clause creates a required ordering property
can cause the optimizer to add a sort node as an enforcer of that
property.
#### Tracked vs computed properties
A [tracked property](#tracked_properties) is one which is maintained
in a data structure (e.g. `relationalProps`, `scalarProps`,
`physicalProps`). A computed property is one which is computed from an
expression or an expression fragment as needed. For intermediate
nodes, all properties can be computed which makes tracked properties
akin to a cache. The decision for whether to track or compute a
property is pragmatic. Tracking a property requires overhead whether
the property is used or not, but makes accessing the property in a
transformation fast. Computing a property can be done only when the
property is used, but is not feasible if the computation requires an
entire sub-expression tree (as opposed to a fragment). [Computed
properties](#computed_properties) primarily occur for scalar properties
for which transformations often have the entire scalar expression.
#### Tracked properties
The determination of the properties to track is a key aspect of the
design of an optimizer. Track too many and adding new operators
becomes onerous and maintaining the properties through transformations
becomes expensive. Track too few and certain transformations become
difficult.
Relational properties:
* Output columns [`intset`]. The set of columns output by an
expression. Used to determine if a predicate is compatible with an
expression.
* Outer columns [`intset`]. The set of columns that are used by the
operator but not defined in the underlying expression tree (i.e. not
supplied by the inputs to the current expression). Synonym: *free
vars*.
* Not-NULL columns [`intset`]. Column nullability is associated with
keys which are a factor in many transformations such as join
elimination, group-by simplification
* Keys [`[]intset`]. A set of columns for which no two rows are equal
after projection onto that set. The simplest example of a key is the
primary key for a table. Note that a key requires all of the columns
in the key to be not-NULL.
* Weak keys [`[]intset`]. A set of columns where no two rows
containing non-NULL values are equal after projection onto that
set. A UNIQUE index on a table is a weak key and possibly a key if
all of the columns are not-NULL. Weak keys are tracked because they
can become keys at higher levels of a query due to null-intolerant
predicates.
* Foreign keys [`map[intset]intset`]. A set of columns that uniquely
identify a single row in another relation. In practice, this is a
map from one set of columns to another set of columns.
* Equivalency groups [`[]intset`]. A set of column groups (sets) where all columns
in a group are equal with each other.
* Constant columns [`intset`]. Columns for which we know we have a
single value.
Scalar properties:
* Input columns [`intset`]. The set of columns used by the scalar
expression. Used to determine if a scalar expression is compatible
with the output columns of a relational expression.
* Defined columns [`intset`]. The set of columns defined by the scalar
expression.
Physical properties:
* Column ordering. Specified by a top-level projection.
* Row ordering. Specified by an `ORDER BY` clause or required by a
physical operator (e.g. merge-join). Row ordering is enforced by the
**sort** operator.
* Rewindability. Required by multi-use CTEs. Every reference to the
CTE in the query needs to return the same results. A read-only query
has this property by default, though care must be taken with regards
to the [Halloween
Problem](https://en.wikipedia.org/wiki/Halloween_Problem) if the
read-only query exists in the context of a DML query. A CTE
containing a DML (such as `INSERT` or `UPDATE`) needs to have its
results materialized in temporary storage and thus provide
*rewindability*. This property is enforced using a **spool**
operator.
Note that this list of properties is not exhaustive. In particular,
there are a large number of scalar properties for which it isn't clear
if the property should be tracked or computed when necessary. For
example, null-tolerance (does a predicate ever return true for a NULL
value) can be computed from a scalar expression when needed. It is an
open question as to whether it is utilized frequently enough that it
should be tracked.
Tracking is a bit more than caching of computed properties: we can't
compute certain relational properties without the entire
sub-expression. Keys are an example: if you have a deeply nested join,
in order to compute the keys after performing a join associativity
transform, you would need to have the entire expression tree. By
tracking the keys property and maintaining it at each relational
expression, we only need the fragment of the expression needed by the
transform.
### Computed properties
Computed properties are used primarily in conjunction with scalar
expressions. The properties are computed rather than tracked because
we usually have the full scalar expression vs just a fragment for
relational expressions.
Computed scalar properties:
* Injectivity. An injective expression preserves distinctness: it
never maps distinct elements of its domain to the same element of
its codomain. `exp(x) = e^x` is injective.
* Monotonicity. An monotonic expression preserves ordering. The
preservation may be positive or negative (maintains order or inverts
order) and strict or weak (maintains uniqueness or invalidates it).
`floor(x)` is a positive-weak monotonic expression. `-x` is a
negative-strict monotonic expression.
* Null-intolerance. A null-intolerant expression is a predicate which
never returns `true` for a `NULL` input column. Null-intolerance is
used to infer nullability of columns. `x = y` (where `x` and `y` are
columns) is a null-intolerant expression.
* Contains-aggregate. Does the scalar expression contain any aggregate
functions?
* Contains-subquery. Does the scalar expression contain any
subqueries?
### Transformations
Transformations convert an input expression tree into zero or more
logically equivalent trees. Transformations utilize properties in
order to determine the validity of the transformation. Transforms are
configured with an expression pattern, a check method and an apply
method. The expression pattern is used to identify locations within
the full expression where the transform can be applied. The check
method performs additional checks to determine the validity of a
transformation. And the apply method applies the transformation,
generating zero or more logically equivalent expressions.
Transformations are categorized as *exploration* or
*implementation*. An exploration transformation creates a logical
expression from an existing logical expression. An implementation
transform creates a physical expression from a logical
expression. Note that both exploration and implementation transforms
take as input logical expressions.
Some examples of transformations:
* Join commutativity swaps the order of the inputs to an inner join:
`[join a b] -> [join b a]`.
* Join associativity reorders the children of a parent and child join:
`[join [join a b] c]` -> `[join [join a c] b]`
* Join elimination removes unnecessary joins based on projected
columns and foreign keys.
* Distinct/group-by elimination removes unnecessary distinct/group-by
operations based on keys.
* Decorrelation replaces correlated subqueries with semi-join,
anti-join and apply operators.
* Scan to index scan transforms the logical scan operator into one or
more index scans on covering indexes.
* Inner join to merge-join transforms a logical inner join operator
into a merge-join operator.
An example transformation is join commutativity. The pattern for join
commutativity is an inner-join:
```
inner-join
|
+-- pattern leaf // left input
|
+-- pattern leaf // right input
|
+-- pattern leaf // join condition
```
An inner-join always has 3 children: the left and right inputs and the
join condition. Join commutativity only needs to swap the left and
right inputs an this specifies pattern leaf for all 3 children.
The actual join commutativity transform is straightforward:
```go
// This is demonstration code, the real implementation will be mildly
// more complex in order to reduce heap allocations.
func (joinCommutativity) apply(e *expr) *expr {
return &expr{
op: innerJoinOp,
children: []*expr{
e.children[1],
e.children[0],
e.children[2],
}
props: e.props,
}
}
```
Note that join commutativity is the simplest transform. More
sophisticated transforms have to perform complex checks for whether
they can be applied to an expression and for generating the resulting
transformed expression. For a slightly more complex example, join
associativity sorts the join conditions between the upper and lower
joins and checks to see if it is creating an undesirable cross-join.
Implicit in the join commutativity example above is that
transformations are written in code. An alternative is to create a
domain specific language for expressing transformations. The benefit
of such a language is the potential for more compact and expressive
transformations. The downside is the need to write a compiler for the
DSL. The current decision is to eschew a DSL for transformations as
the work involved seems strictly greater than writing transformations
in Go. In particular, a DSL would require both the author and reviewer
to learn the DSL. And a DSL doesn't necessarily ease writing a
transformation. Complex transformations may require extensions to the
DSL and the DSL compiler and thus not simplify writing the
transformation at all. In the short and medium term, the set of
transformations is expected to remain small as energies go into
fleshing out other query planning modules. The decision about a DSL
for transformations should be revisited as the transformation set
grows or in the light of experimentation with a DSL that proves its
worth.
### Cost model
The cost model takes as input a physical query plan and computes an
estimated "cost" to execute the plan. The unit of "cost" can be
arbitrary, though it is desirable if it has some real world meaning
such as expected execution time. What is required is for the costs of
different query plans to be comparable. A SQL optimizer is seeking to
find the shortest expected execution time for a query and uses cost as
a proxy for execution time.
Cost is roughly calculated by estimating how much time each node in
the expression tree will use to process all results and modelling how
data flows through the expression tree. [Table statistics](#stats) are
used to power cardinality estimates of base relations which in turn
power cardinality estimates of intermediate relations. This is
accomplished by propagating histograms of column values from base
relations up through intermediate nodes (e.g. combining histograms
from the two join inputs into a single histogram). Operator-specific
computations model the network, disk and CPU costs. The cost model
should include data layout and the specific operating environment. For
example, network RTT in one cluster might be vastly different than
another.
The operator-specific computations model the work performed by the
operator. A hash-join needs to model if temporary disk will be needed
based on the estimated size of the inputs.
Because the cost for a query plan is an estimate, there is an
associated error. This error might be implicit in the cost, or could
be explicitly tracked. One advantage to explicitly tracking the
expected error is that it can allow selecting a higher cost but lower
expected error plan over a lower cost but higher expected error
plan. Where does the error come from? One source is the innate
inaccuracy of stats: selectivity estimation might be wildly off due to
an outlier value. Another source is the accumulated build up of
estimation errors the higher up in the query tree. Lastly, the cost
model is making an estimation for the execution time of an operation
such as a network RTT. This estimate can also be wildly inaccurate due
to bursts of activity.
Search finds the lowest cost plan using dynamic programming. That
imposes a restriction on the cost model: it must exhibit optimal
substructure. An optimal solution can be constructed from optimal
solutions of its subproblems.
### Search (a.k.a. Enumeration)
Search is the final phase of optimization where many alternative
logical and physical query plans are explored in order to find the
best physical query plan. The output of Search is a physical query
plan to execute. Note that in this context, a physical query plan
refers to a query plan for which the leaves of the tree are table
scans or index scans. In the long term, DistSQL planning will be
incorporated into Search, though in the short term it may be kept
separate.
In order to avoid a combinatorial explosion in the number of
expression trees, Search utilizes the Memo structure. Due to the large
number of possible plans for some queries, Search cannot explore all
of them and thus requires *pruning* heuristics. For example, Search
can cost query plans early and stop exploring a branch of plans if the
cost is greater than the current best cost so far.
Search begins with a Memo populated with the expression provided by
Rewrite. Search is modelled as a series of tasks that optimize an
expression. Conceptually, the tasks form a dependency tree very much
like the dependency tree formed by tools like make. Each task has a
count of its unfinished dependencies and a pointer to its parent
task. When a task is run it is passed its parent task and as part of
running it can add additional dependencies to its parent, thus making
the tree of dependencies dynamic. After a task is run, it decrements
its parent task's dependency count and schedules it for execution if it was the last
dependency. Note that new tasks are only created if new expressions
were added to the memo. Search will not terminate if we continually
created new expressions via transformations, but that would also
indicate that we have an unbounded growth in expressions. In practice,
Search will have some limits on the number of steps it performs or
time it can take.
The initial task for Search is to optimize the "root" group. The tasks
described are the standard Cascades-style search tasks:
1. `OptimizeGroup(reqProps)`. Implements the group (via
`ImplementGroup`) which generates implementations for the
expressions in the group, then selects the plan with the least
estimated cost. Enforcers (e.g. sort) are added as needed.
2. `ImplementGroup`. Explores the group (via `ExploreGroup`) which
generates more logical expressions in the group and in child
groups, then generates implementations for all of the logical
expressions (via `ImplementGroupExpr`). `ImplementGroup` itself
does not perform any transformations, but acts as a synchronization
point for dependent tasks.
3. `ImplementGroupExpr`. Implements all of the child groups (via
`ImplementGroup`), then applies any applicable implementation
transformations (via `Transform`) to the forest of expressions
rooted at the specified memo-expression. Example transformation:
inner-join to merge-join and hash-join.
4. `ExploreGroup`. Explores each expression in the group (via
`ExploreGroupExpr`). `ExploreGroup` itself does not perform any
transformations, but acts as a synchronization point for dependent
tasks.
5. `ExploreGroupExpr`. Explores all of the child groups (via
`ExploreGroup`), then applies any applicable exploration
transformations (via `Transform`) to the forest of expressions
rooted at the specified memo-expression. Example transformations:
join commutativity and join associativity.
6. `Transform`. Applies a transform to the forest of expressions
rooted at a particular memo-expression. There are two flavors of
transformation task: exploration and implementation. The primary
difference is the state transition after the task finishes. An
exploration transform task recursively schedules exploration of the
group it is associated with. An implementation transform task
schedules optimization of the group.
A search *stage* is configured by a set of exploration and
implementation transforms, and a *budget*. The budget is used to prune
branches of the search tree which appear undesirable. The initial
search stage has a limited set of exploration and implementation
transforms (perhaps 0 exploration transforms), an unlimited budget,
and aims to quickly find a workable, though possibly slow, plan. Each
subsequent stage uses the cost from the best plan of the previous
stage for pruning. [TODO(peter): my understanding of how this will
work is slightly fuzzy. My usage of the term budget might be
off. Perhaps better to describe it as "max cost".]
Full featured optimizers can contain hundreds of
transformations. Checking whether each transformation is applicable at
each node would be prohibitively expensive, so the transformations are
indexed by the root operator of their pattern. Transformations are
further categorized as exploration and implementation and divided
amongst the search stages based on generality and expected benefit.
Search is naturally parallelizable, yet exploiting that parallelism
involves synchronization overhead. Parallelization also can allow one
query to utilize more planning resources than other queries. Rather
than support parallelization of search, energy will instead be
directed at making search and transformations fast and memory
efficient.
### Testing
Historically, SQL databases have introduced subtle bugs that have
lasted for years through invalid transformations. Search should be
designed for testability. One example of this is to allow verification
that all of the alternate plans generated by Search actually produce
the same result.
In addition to testing the alternative query plans, there is utility
in generating a large number of valid SQL statements. The existing
Random Syntax Generator does one level of this by generating
syntactically valid SQL. An additional level would be to generate
semantically valid queries which might be more feasible by random
generation at the expression level.
The relational algebra expression trees should provide a textual
format to ease testing using infrastructure similar to the existing
logic tests where test files define queries and expected results.
Optimization is concerned with making queries faster and it is quite
disturbing to users when inadvertent regressions occur. A large test
suite needs to be developed over time which ensures that the addition
of new transformations or improvements to the various modules do not
cause regressions in the chosen plans.
Generating actual table data with various data distributions for
testing purposes would be both onerous and slow. Table statistics are
a key factor in the decisions performed by search. In order to
adequately test how the behavior of search changes with changing table
statistics, we need an easy mechanism for injecting fake statistics.
## Roadmap
The above outline sketches a large amount of work. How do we get there
from here? A strawman proposal divides the work into several
releases. The farther out the proposed work, the fuzzier the proposal
becomes.
### 2.0
* Stats. Stats are not dependent on other planning modules but are a
prerequisite to cost-based transformations. Stats are only generated
explicitly via `CREATE STATISTICS`.
* Prep. Introduce the expression tree. Construct the expression tree
from the existing AST output by the parser. Use the AST-based type
checking and name resolution. The existing AST-based planning code
will be left in place and a parallel world of expression-based
planning will be erected. The new planning code will not be used in
this release.
* Rewrite. Predicate inference and predicate push down.
* Memo. Introduce the memo structure.
* Testing. Use ugly hacks to hook up a hobbled version of something as
an alternate query planner. Perhaps a flag to pass queries through
the expression format and memo and then translate them back into the
AST in order to use the legacy planner.
### 2.1
* Stats. Automatically gather stats on PKs and index columns.
* Prep. Perform name resolution and type checking on the expression
tree. Support non-recursive CTEs. Fall-back to legacy planning code
for unsupported queries.
* Rewrite. Transform correlated subqueries into apply
variants. Transform common apply variants into joins.
* Execution. Nested-loop-join, semi-join, anti-join and apply
processors.
* Cost model. Basic cost model that is powered by stats.
* Search. Task-based single stage search. No pruning. Use existing
DistSQL planning. Facility for time-travel debugging of the search
process and inspecting the memo state (e.g. logical and physical
properties). Global and per-session disablement of individual
transforms.
* Transforms. Join elimination, distinct/group-by elimination, join
commutativity, join associativity, index selection, and scalar
normalization.
* Testing. Random generation of table data based on schema and query
to exercise corner conditions. Random sampling and execution of
alternate query plans to verify equivalence. Test suite for plan
selection using injected stats.
### 2.2
* Stats. Support more advanced statistics (e.g. filtered statistics).
* Prep. Support 100% of queries, enabling the deletion of the legacy
planning code.
* Cost model. Make the cost model more sophisticated by taking into
account measurements of network bandwidth and latency. Validate cost
model against actual queries.
* Search. Add multiple stages with pruning heuristics. Integrate
DistSQL planning.
* Transforms. Pull group-by above a join. Push group-by below a
join. Split group-by into local and global components. Simplify
outer joins.
* Execution. Stream-group-by.
## Unresolved questions
* Flesh out understanding of where physical properties such as
ordering can be imposed by the query itself. For example, a
top-level `ORDER BY` clause definitely imposes ordering. But so does
an `ORDER BY` clause that is the immediate sub-expression of
`LIMIT/OFFSET`, `DISTINCT ON`, `WITH ORDINALITY`,
`{INSERT,UPSERT,DELETE,UPDATE}` and `CREATE TABLE ... AS ...`. We
also need to pay attention to `ORDER BY INDEX` and `ORDER BY PRIMARY
KEY`, though those clauses likely degenerate into `ORDER
BY`. Are there other places we need to pay attention to physical
properties? Are there other physical properties to capture at
intermediate nodes?
* Which parts of query planning can be performed during PREPARE vs
EXECUTE? Most (all?) of the transformations that are part of Rewrite
can be performed during PREPARE. For example, predicate push-down
and decorrelation do not require placeholder values. And some parts
of Search, such as join enumeration, can be performed during
PREPARE. The part that is restricted to EXECUTE are certain parts of
index selection and thus costing of query plans.
* The performance of the query planner itself is important because
query planning occurs for every query executed. What sorts of fast
paths are possible for simple queries?
* Window functions.
* Describe max1row operator and why it is necessary.
## Appendix
### Expr/Memo examples
Consider the query:
```sql
SELECT v, k FROM kv WHERE k < 3
```
Building the expression tree results in:
```
project [out=(0,1)]
columns: kv.v:1 kv.k:0
projections:
variable (kv.v) [in=(1)]
variable (kv.k) [in=(0)]
inputs:
select [out=(0,1)]
columns: kv.k:0* kv.v:1
filters:
lt [in=(0)]
inputs:
variable (kv.k) [in=(0)]
const (3)
inputs:
scan [out=(0,1)]
columns: kv.k:0 kv.v:1
```
Some points to notice above. The relational operators (`project`,
`select` and `scan`) track their output column set as a bitmap
(i.e. `out=(0,1)`). Scalar expressions such as `variable` and `lt`
track their required input columns. Relational operators have a slice
of children where the interpretation of the children is operator
specific. The `project` operator has 2 children: a relational input
and a list of projections. Note that the order of projections is
important and are stored using an `ordered-list` operator in the
memo. The `select` operator also has 2 children: a relational input
and a list of filters.
Inserting the expression tree into the memo results in:
```
8: [project [5 7]]
7: [ordered-list [6 2]]
6: [variable kv.v]
5: [select [1 4]]
4: [lt [2 3]]
3: [const 3]
2: [variable kv.k]
1: [scan kv]
```
Here we can see more clearly the child structure of the various
relational operators. The `select` expression in group 5 has 2
children: groups 1 and 4. Group 1 is a `scan` and group 4 is the
filter.
As another example, consider the query:
```sql
SELECT k, v FROM (SELECT v, k FROM kv)
```
Inserting into the memo we get:
```
7: [project [5 6]]
6: [ordered-list [3 2]]
5: [project [1 4]]
4: [ordered-list [2 3]]
3: [variable kv.k]
2: [variable kv.v]
1: [scan kv]
```
Notice that the variables (`kv.k` and `kv.v`) are only present once in
the memo and their groups are shared by both projection lists.
| docs/RFCS/20171213_sql_query_planning.md | 0 | https://github.com/cockroachdb/cockroach/commit/f475361ee075d1746bcd148e6da004fa20dece51 | [
0.00025658190133981407,
0.00017261173343285918,
0.00015768114826641977,
0.0001723339519230649,
0.000008940596671891399
] |
{
"id": 2,
"code_window": [
"\t\t\t},\n",
"\t\t}\n",
"\t}\n",
"\n",
"\t// The claim_session_id field in jobs is a uuid and so needs to be excluded\n",
"\t// when comparing jobs pre/post restore.\n",
"\tconst jobsQuery = `\n",
"SELECT id, status, created, payload, progress, created_by_type, created_by_id, claim_instance_id\n",
"FROM system.jobs\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// when comparing jobs pre/post restore. The span config reconciliation job\n",
"\t// too is something we exclude; because it's a singleton job, when restored\n",
"\t// into another cluster it self-terminates.\n"
],
"file_path": "pkg/ccl/backupccl/full_cluster_backup_restore_test.go",
"type": "replace",
"edit_start_line_idx": 93
} | // Copyright 2020 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package backupccl
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
_ "github.com/cockroachdb/cockroach/pkg/ccl/partitionccl"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/spanconfig"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/bootstrap"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
)
// Large test to ensure that all of the system table data is being restored in
// the new cluster. Ensures that all the moving pieces are working together.
//
// The test takes a full-cluster BACKUP of a populated cluster, restores it
// into a fresh cluster, and then verifies — via subtests — that databases,
// schemas, system-table contents, user-table data, grants, jobs, zone
// configs, and descriptor IDs all survived the round trip.
func TestFullClusterBackup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	params := base.TestClusterArgs{
		ServerArgs: base.TestServerArgs{
			DisableSpanConfigs: true, // TODO(irfansharif): #75060.
			Knobs: base.TestingKnobs{
				SpanConfig: &spanconfig.TestingKnobs{
					// We compare job progress before and after a restore. Disable
					// the automatic jobs checkpointing which could possibly mutate
					// the progress data during the backup/restore process.
					JobDisablePersistingCheckpoints: true,
				},
				GCJob: &sql.GCJobTestingKnobs{
					DisableNewProtectedTimestampSubsystemCheck: true,
				},
			},
		}}
	const numAccounts = 10
	tcBackup, sqlDB, tempDir, cleanupFn := backupRestoreTestSetupWithParams(t, singleNode, numAccounts, InitManualReplication, params)
	tcRestore, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, params)
	defer cleanupFn()
	defer cleanupEmptyCluster()

	// Direct KV access to the backup cluster, used below to look up the
	// descriptor of the highest user table.
	backupKVDB := tcBackup.Server(0).DB()

	// Closed when the restore is allowed to progress with the rest of the backup.
	allowProgressAfterPreRestore := make(chan struct{})
	// Closed to signal that the zones have been restored.
	restoredZones := make(chan struct{})

	// Install a testing knob on every node of the restoring cluster so the
	// restore job pauses right after its pre-restore phase (when zones have
	// been restored) until this test signals it to continue.
	for _, server := range tcRestore.Servers {
		registry := server.JobRegistry().(*jobs.Registry)
		registry.TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
			jobspb.TypeRestore: func(raw jobs.Resumer) jobs.Resumer {
				r := raw.(*restoreResumer)
				r.testingKnobs.afterPreRestore = func() error {
					close(restoredZones)
					<-allowProgressAfterPreRestore
					return nil
				}
				return r
			},
		}
	}

	// The claim_session_id field in jobs is a uuid and so needs to be excluded
	// when comparing jobs pre/post restore.
	const jobsQuery = `
SELECT id, status, created, payload, progress, created_by_type, created_by_id, claim_instance_id
FROM system.jobs
	`
	// Pause SQL Stats compaction job to ensure the test is deterministic.
	sqlDB.Exec(t, `PAUSE SCHEDULES SELECT id FROM [SHOW SCHEDULES FOR SQL STATISTICS]`)

	// Disable automatic stats collection on the backup and restoring clusters to ensure
	// the test is deterministic.
	sqlDB.Exec(t, `SET CLUSTER SETTING sql.stats.automatic_collection.enabled=false`)
	sqlDBRestore.Exec(t, `SET CLUSTER SETTING sql.stats.automatic_collection.enabled=false`)

	// Create some other descriptors as well.
	sqlDB.Exec(t, `
USE data;
CREATE SCHEMA test_data_schema;
CREATE TABLE data.test_data_schema.test_table (a int);
INSERT INTO data.test_data_schema.test_table VALUES (1), (2);
USE defaultdb;
CREATE SCHEMA test_schema;
CREATE TABLE defaultdb.test_schema.test_table (a int);
INSERT INTO defaultdb.test_schema.test_table VALUES (1), (2);
CREATE TABLE defaultdb.foo (a int);
CREATE TYPE greeting AS ENUM ('hi');
CREATE TABLE welcomes (a greeting);
CREATE DATABASE data2;
USE data2;
CREATE SCHEMA empty_schema;
CREATE TABLE data2.foo (a int);
`)

	tableDesc := desctestutils.TestingGetPublicTableDescriptor(backupKVDB, keys.SystemSQLCodec, "data2", "foo")
	// Store the highest user-table ID for later assertions.
	maxBackupTableID := tableDesc.GetID()

	// Populate the system tables verified below to ensure their contents are
	// copied to the new cluster.
	// Populate system.users.
	numUsers := 1000
	if util.RaceEnabled {
		numUsers = 10
	}
	for i := 0; i < numUsers; i++ {
		sqlDB.Exec(t, fmt.Sprintf("CREATE USER maxroach%d", i))
		sqlDB.Exec(t, fmt.Sprintf("ALTER USER maxroach%d CREATEDB", i))
	}
	// Populate system.zones.
	sqlDB.Exec(t, `ALTER TABLE data.bank CONFIGURE ZONE USING gc.ttlseconds = 3600`)
	sqlDB.Exec(t, `ALTER TABLE defaultdb.foo CONFIGURE ZONE USING gc.ttlseconds = 45`)
	sqlDB.Exec(t, `ALTER DATABASE data2 CONFIGURE ZONE USING gc.ttlseconds = 900`)
	// Populate system.jobs.
	// Note: this is not the backup under test, this just serves as a job which
	// should appear in the restore.
	// This job will eventually fail since it will run from a new cluster.
	sqlDB.Exec(t, `BACKUP data.bank TO 'nodelocal://0/throwawayjob'`)
	preBackupJobs := sqlDB.QueryStr(t, jobsQuery)
	// Populate system.settings.
	sqlDB.Exec(t, `SET CLUSTER SETTING kv.bulk_io_write.concurrent_addsstable_requests = 5`)
	sqlDB.Exec(t, `INSERT INTO system.ui (key, value, "lastUpdated") VALUES ($1, $2, now())`, "some_key", "some_val")
	// Populate system.comments.
	sqlDB.Exec(t, `COMMENT ON TABLE data.bank IS 'table comment string'`)
	sqlDB.Exec(t, `COMMENT ON DATABASE data IS 'database comment string'`)
	// Populate system.locations.
	sqlDB.Exec(t,
		`INSERT INTO system.locations ("localityKey", "localityValue", latitude, longitude) VALUES ($1, $2, $3, $4)`,
		"city", "New York City", 40.71427, -74.00597,
	)
	// Populate system.role_members.
	sqlDB.Exec(t, `CREATE ROLE system_ops;`)
	sqlDB.Exec(t, `GRANT system_ops TO maxroach1;`)

	// Populate system.scheduled_jobs table with a first run in the future to prevent immediate adoption.
	firstRun := timeutil.Now().Add(time.Hour).Format(timeutil.TimestampWithoutTZFormat)
	sqlDB.Exec(t, `CREATE SCHEDULE FOR BACKUP data.bank INTO $1 RECURRING '@hourly' FULL BACKUP ALWAYS WITH SCHEDULE OPTIONS first_run = $2`, localFoo, firstRun)
	sqlDB.Exec(t, `PAUSE SCHEDULES SELECT id FROM [SHOW SCHEDULES FOR BACKUP]`)

	// Populate system.table_statistics (via injected stats) and take the
	// full-cluster backup under test.
	injectStats(t, sqlDB, "data.bank", "id")
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)

	// Create a bunch of user tables on the restoring cluster that we're going
	// to delete.
	numTables := 50
	if util.RaceEnabled {
		numTables = 2
	}
	for i := 0; i < numTables; i++ {
		sqlDBRestore.Exec(t, `CREATE DATABASE db_to_drop`)
		sqlDBRestore.Exec(t, `CREATE TABLE db_to_drop.table_to_drop (a int)`)
		sqlDBRestore.Exec(t, `ALTER TABLE db_to_drop.table_to_drop CONFIGURE ZONE USING gc.ttlseconds=1`)
		sqlDBRestore.Exec(t, `DROP DATABASE db_to_drop`)
	}
	// Wait for the GC job to finish to ensure the descriptors no longer exist.
	sqlDBRestore.CheckQueryResultsRetry(
		t, "SELECT count(*) FROM [SHOW JOBS] WHERE job_type = 'SCHEMA CHANGE GC' AND status = 'running'",
		[][]string{{"0"}},
	)

	// Kick off the restore asynchronously; it will block in afterPreRestore
	// (see the testing knob above) until allowProgressAfterPreRestore closes.
	doneRestore := make(chan struct{})
	go func() {
		sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
		close(doneRestore)
	}()

	// Check that zones are restored during pre-restore.
	t.Run("ensure zones are restored during pre-restore", func(t *testing.T) {
		<-restoredZones
		// Not specifying the schema makes the query search using defaultdb
		// first, which ends up returning the error
		// pq: database "defaultdb" is offline: restoring
		checkZones := "SELECT * FROM system.public.zones"
		sqlDBRestore.CheckQueryResults(t, checkZones, sqlDB.QueryStr(t, checkZones))

		// Check that the user tables are still offline.
		sqlDBRestore.ExpectErr(t, "database \"data\" is offline: restoring", "SELECT * FROM data.public.bank")

		// Check there is no data in the span that we expect user data to be imported.
		store := tcRestore.GetFirstStoreFromServer(t, 0)
		startKey := keys.SystemSQLCodec.TablePrefix(bootstrap.TestingUserDescID(0))
		endKey := keys.SystemSQLCodec.TablePrefix(uint32(maxBackupTableID)).PrefixEnd()
		it := store.Engine().NewMVCCIterator(storage.MVCCKeyAndIntentsIterKind, storage.IterOptions{
			UpperBound: endKey,
		})
		defer it.Close()
		it.SeekGE(storage.MVCCKey{Key: startKey})
		hasKey, err := it.Valid()
		require.NoError(t, err)
		require.False(t, hasKey)
	})

	// Allow the restore to make progress after we've checked the pre-restore
	// stage.
	close(allowProgressAfterPreRestore)

	// Wait for the restore to finish before checking that it did the right thing.
	<-doneRestore

	t.Run("ensure all databases restored", func(t *testing.T) {
		sqlDBRestore.CheckQueryResults(t,
			`SELECT database_name, owner FROM [SHOW DATABASES]`,
			[][]string{
				{"data", security.RootUser},
				{"data2", security.RootUser},
				{"defaultdb", security.RootUser},
				{"postgres", security.RootUser},
				{"system", security.NodeUser},
			})
	})

	t.Run("ensure all schemas are restored", func(t *testing.T) {
		expectedSchemas := map[string][][]string{
			"defaultdb": {{"crdb_internal"}, {"information_schema"}, {"pg_catalog"}, {"pg_extension"}, {"public"}, {"test_schema"}},
			"data":      {{"crdb_internal"}, {"information_schema"}, {"pg_catalog"}, {"pg_extension"}, {"public"}, {"test_data_schema"}},
			"data2":     {{"crdb_internal"}, {"empty_schema"}, {"information_schema"}, {"pg_catalog"}, {"pg_extension"}, {"public"}},
		}
		for dbName, expectedSchemas := range expectedSchemas {
			sqlDBRestore.CheckQueryResults(t,
				fmt.Sprintf(`USE %s; SELECT schema_name FROM [SHOW SCHEMAS] ORDER BY schema_name;`, dbName),
				expectedSchemas)
		}
	})

	t.Run("ensure system table data restored", func(t *testing.T) {
		// Note the absence of the jobs table. Jobs are tested by another test as
		// jobs are created during the RESTORE process.
		systemTablesToVerify := []string{
			systemschema.CommentsTable.GetName(),
			systemschema.LocationsTable.GetName(),
			systemschema.RoleMembersTable.GetName(),
			systemschema.RoleOptionsTable.GetName(),
			systemschema.SettingsTable.GetName(),
			systemschema.TableStatisticsTable.GetName(),
			systemschema.UITable.GetName(),
			systemschema.UsersTable.GetName(),
			systemschema.ScheduledJobsTable.GetName(),
		}

		verificationQueries := make([]string, len(systemTablesToVerify))
		// Populate the list of tables we expect to be restored as well as queries
		// that can be used to ensure that data in those tables is restored.
		for i, table := range systemTablesToVerify {
			switch table {
			case systemschema.TableStatisticsTable.GetName():
				// createdAt and statisticsID are re-generated on RESTORE.
				query := `SELECT "tableID", name, "columnIDs", "rowCount" FROM system.table_statistics`
				verificationQueries[i] = query
			case systemschema.SettingsTable.GetName():
				// We don't include the cluster version.
				query := fmt.Sprintf("SELECT * FROM system.%s WHERE name <> 'version'", table)
				verificationQueries[i] = query
			default:
				query := fmt.Sprintf("SELECT * FROM system.%s", table)
				verificationQueries[i] = query
			}
		}

		// Each query must return identical results on the backed-up and the
		// restored cluster.
		for _, read := range verificationQueries {
			sqlDBRestore.CheckQueryResults(t, read, sqlDB.QueryStr(t, read))
		}
	})

	t.Run("ensure table IDs have not changed", func(t *testing.T) {
		// Check that all tables have been restored. DISTINCT is needed in order to
		// deal with the inclusion of schemas in the system.namespace table.
		tableIDCheck := "SELECT * FROM system.namespace ORDER BY id"
		sqlDBRestore.CheckQueryResults(t, tableIDCheck, sqlDB.QueryStr(t, tableIDCheck))
	})

	t.Run("ensure user table data restored", func(t *testing.T) {
		expectedUserTables := [][]string{
			{"data", "bank"},
			{"data2", "foo"},
			{"defaultdb", "foo"},
		}

		for _, table := range expectedUserTables {
			query := fmt.Sprintf("SELECT * FROM %s.%s", table[0], table[1])
			sqlDBRestore.CheckQueryResults(t, query, sqlDB.QueryStr(t, query))
		}
	})

	t.Run("ensure that grants are restored", func(t *testing.T) {
		grantCheck := "use system; SHOW grants"
		sqlDBRestore.CheckQueryResults(t, grantCheck, sqlDB.QueryStr(t, grantCheck))
		grantCheck = "use data; SHOW grants"
		sqlDBRestore.CheckQueryResults(t, grantCheck, sqlDB.QueryStr(t, grantCheck))
	})

	t.Run("ensure that jobs are restored", func(t *testing.T) {
		// Ensure that the jobs in the RESTORE cluster is a superset of the jobs
		// that were in the BACKUP cluster (before the full cluster BACKUP job was
		// run). There may be more jobs now because the restore can run jobs of
		// its own.
		newJobsStr := sqlDBRestore.QueryStr(t, jobsQuery)
		newJobs := make(map[string][]string)

		for _, newJob := range newJobsStr {
			// The first element of the slice is the job id.
			newJobs[newJob[0]] = newJob
		}

		for _, oldJob := range preBackupJobs {
			newJob, ok := newJobs[oldJob[0]]
			if !ok {
				t.Errorf("Expected to find job %+v in RESTORE cluster, but not found", oldJob)
			}
			require.Equal(t, oldJob, newJob)
		}
	})

	t.Run("zone_configs", func(t *testing.T) {
		// The restored zones should be a superset of the zones in the backed up
		// cluster.
		zoneIDsResult := sqlDB.QueryStr(t, `SELECT id FROM system.zones`)
		var q strings.Builder
		q.WriteString("SELECT * FROM system.zones WHERE id IN (")
		for i, restoreZoneIDRow := range zoneIDsResult {
			if i > 0 {
				q.WriteString(", ")
			}
			q.WriteString(restoreZoneIDRow[0])
		}
		q.WriteString(")")
		sqlDBRestore.CheckQueryResults(t, q.String(), sqlDB.QueryStr(t, q.String()))
	})

	t.Run("ensure that tables can be created at the excepted ID", func(t *testing.T) {
		// New descriptors created post-restore must receive IDs strictly above
		// every restored descriptor's ID.
		var maxID, dbID, tableID int
		sqlDBRestore.QueryRow(t, "SELECT max(id) FROM system.namespace").Scan(&maxID)
		dbName, tableName := "new_db", "new_table"
		sqlDBRestore.Exec(t, fmt.Sprintf("CREATE DATABASE %s", dbName))
		sqlDBRestore.Exec(t, fmt.Sprintf("CREATE TABLE %s.%s (a int)", dbName, tableName))
		sqlDBRestore.QueryRow(t,
			fmt.Sprintf("SELECT id FROM system.namespace WHERE name = '%s'", dbName)).Scan(&dbID)
		require.True(t, dbID > maxID)
		sqlDBRestore.QueryRow(t,
			fmt.Sprintf("SELECT id FROM system.namespace WHERE name = '%s'", tableName)).Scan(&tableID)
		require.True(t, tableID > maxID)
		require.NotEqual(t, dbID, tableID)
	})
}
// TestSingletonSpanConfigJobPostRestore ensures that there's a single span
// config reconciliation job running post restore.
func TestSingletonSpanConfigJobPostRestore(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	// Short job-adoption intervals so the reconciliation job is (re)adopted
	// quickly after the restore completes.
	params := base.TestClusterArgs{
		ServerArgs: base.TestServerArgs{
			Knobs: base.TestingKnobs{
				JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(),
			},
		},
	}
	const numAccounts = 10
	_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetupWithParams(t, singleNode, numAccounts, InitManualReplication, params)
	_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, params)
	defer cleanupFn()
	defer cleanupEmptyCluster()
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
	const numRunningReconciliationJobQuery = `
SELECT count(*) FROM [SHOW AUTOMATIC JOBS]
WHERE job_type = 'AUTO SPAN CONFIG RECONCILIATION' AND status = 'running'
`
	// The job restarts asynchronously after the restore, so retry until
	// exactly one running reconciliation job is observed.
	testutils.SucceedsSoon(t, func() error {
		var numRunningJobs int
		sqlDBRestore.QueryRow(t, numRunningReconciliationJobQuery).Scan(&numRunningJobs)
		if numRunningJobs != 1 {
			return errors.Newf("expected single running reconciliation job, found %d", numRunningJobs)
		}
		return nil
	})
}
// TestIncrementalFullClusterBackup verifies that a change to a system table
// (a new row in system.users) made between the base full cluster backup and
// a subsequent incremental backup is restored from the incremental chain.
func TestIncrementalFullClusterBackup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const numAccounts = 10
	const incrementalBackupLocation = "nodelocal://0/inc-full-backup"
	_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupFn()
	defer cleanupEmptyCluster()
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	// The user is created after the full backup, so it can only appear in the
	// restored cluster if the incremental backup is correctly applied.
	sqlDB.Exec(t, "CREATE USER maxroach1")
	sqlDB.Exec(t, `BACKUP TO $1 INCREMENTAL FROM $2`, incrementalBackupLocation, localFoo)
	sqlDBRestore.Exec(t, `RESTORE FROM $1, $2`, localFoo, incrementalBackupLocation)
	checkQuery := "SELECT * FROM system.users"
	sqlDBRestore.CheckQueryResults(t, checkQuery, sqlDB.QueryStr(t, checkQuery))
}
// TestEmptyFullClusterRestore ensures that we can backup and restore a full
// cluster backup with only metadata (no user data). Regression test for #49573.
func TestEmptyFullClusterRestore(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	sqlDB, tempDir, cleanupFn := createEmptyCluster(t, singleNode)
	_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupFn()
	defer cleanupEmptyCluster()
	// Users are metadata-only state; no user table data is written, so this
	// exercises the metadata-only backup/restore path.
	sqlDB.Exec(t, `CREATE USER alice`)
	sqlDB.Exec(t, `CREATE USER bob`)
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
	checkQuery := "SELECT * FROM system.users"
	sqlDBRestore.CheckQueryResults(t, checkQuery, sqlDB.QueryStr(t, checkQuery))
}
// TestClusterRestoreEmptyDB is a regression test for #50561: databases that
// contain no tables must round-trip through a full cluster backup/restore.
func TestClusterRestoreEmptyDB(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const numAccounts = 10
	_, srcDB, dir, cleanupSrc := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	_, destDB, cleanupDest := backupRestoreTestSetupEmpty(t, singleNode, dir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupSrc()
	defer cleanupDest()
	// Create two empty databases on the source, back up the cluster, and
	// restore into a fresh cluster; the destination must then list exactly
	// the same databases as the source.
	srcDB.Exec(t, `CREATE DATABASE some_db`)
	srcDB.Exec(t, `CREATE DATABASE some_db_2`)
	srcDB.Exec(t, `BACKUP TO $1`, localFoo)
	destDB.Exec(t, `RESTORE FROM $1`, localFoo)
	const query = "SHOW DATABASES"
	destDB.CheckQueryResults(t, query, srcDB.QueryStr(t, query))
}
// TestDisallowFullClusterRestoreOnNonFreshCluster verifies that a full
// cluster restore is rejected when the destination cluster already contains
// user-created databases or tables.
func TestDisallowFullClusterRestoreOnNonFreshCluster(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const numAccounts = 10
	_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupFn()
	defer cleanupEmptyCluster()
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	// Creating a database on the destination makes it non-empty, so the full
	// cluster restore below must fail.
	sqlDBRestore.Exec(t, `CREATE DATABASE foo`)
	sqlDBRestore.ExpectErr(t,
		"pq: full cluster restore can only be run on a cluster with no tables or databases but found 2 descriptors: foo, public",
		`RESTORE FROM $1`, localFoo,
	)
}
// TestClusterRestoreSystemTableOrdering verifies the order in which system
// tables are restored during a full cluster restore; in particular,
// system.settings must be restored last.
func TestClusterRestoreSystemTableOrdering(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const numAccounts = 10
	_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	tcRestore, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode,
		tempDir,
		InitManualReplication, base.TestClusterArgs{})
	defer cleanupFn()
	defer cleanupEmptyCluster()
	restoredSystemTables := make([]string, 0)
	// Install a testing knob on every node that records the name of each
	// system table as the restore job processes it.
	for _, server := range tcRestore.Servers {
		registry := server.JobRegistry().(*jobs.Registry)
		registry.TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
			jobspb.TypeRestore: func(raw jobs.Resumer) jobs.Resumer {
				r := raw.(*restoreResumer)
				r.testingKnobs.duringSystemTableRestoration = func(systemTableName string) error {
					restoredSystemTables = append(restoredSystemTables, systemTableName)
					return nil
				}
				return r
			},
		}
	}
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
	// Check that the settings table is the last of the system tables to be
	// restored.
	require.Equal(t, restoredSystemTables[len(restoredSystemTables)-1],
		systemschema.SettingsTable.GetName())
}
// TestDisallowFullClusterRestoreOfNonFullBackup verifies that a full cluster
// restore (`RESTORE FROM`) is rejected when the backup was a table-level
// backup rather than a full cluster backup.
func TestDisallowFullClusterRestoreOfNonFullBackup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const numAccounts = 10
	_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupFn()
	defer cleanupEmptyCluster()
	// Only data.bank is backed up, so this is not a full cluster backup.
	sqlDB.Exec(t, `BACKUP data.bank TO $1`, localFoo)
	sqlDBRestore.ExpectErr(
		t, "pq: full cluster RESTORE can only be used on full cluster BACKUP files",
		`RESTORE FROM $1`, localFoo,
	)
}
// TestAllowNonFullClusterRestoreOfFullBackup verifies that a single table can
// be restored out of a full cluster backup, i.e. a full cluster backup does
// not force a full cluster restore.
func TestAllowNonFullClusterRestoreOfFullBackup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const numAccounts = 10
	_, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	defer cleanupFn()
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	sqlDB.Exec(t, `CREATE DATABASE data2`)
	sqlDB.Exec(t, `RESTORE data.bank FROM $1 WITH into_db='data2'`, localFoo)
	// Compare the restored copy (data2.bank) against the original table.
	// Previously this compared data.bank against itself, which passes
	// trivially and never checked the restored data.
	sqlDB.CheckQueryResults(t, "SELECT * FROM data2.bank", sqlDB.QueryStr(t, "SELECT * FROM data.bank"))
}
// TestRestoreFromFullClusterBackup verifies that individual databases and
// tables (including system tables) can be restored out of a full cluster
// backup.
func TestRestoreFromFullClusterBackup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const numAccounts = 10
	_, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	defer cleanupFn()
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	// Drop the database so each subtest can restore it in its own way.
	sqlDB.Exec(t, `DROP DATABASE data`)
	t.Run("database", func(t *testing.T) {
		sqlDB.Exec(t, `RESTORE DATABASE data FROM $1`, localFoo)
		defer sqlDB.Exec(t, `DROP DATABASE data`)
		sqlDB.CheckQueryResults(t, "SELECT count(*) FROM data.bank", [][]string{{"10"}})
	})
	t.Run("table", func(t *testing.T) {
		sqlDB.Exec(t, `CREATE DATABASE data`)
		defer sqlDB.Exec(t, `DROP DATABASE data`)
		sqlDB.Exec(t, `RESTORE data.bank FROM $1`, localFoo)
		sqlDB.CheckQueryResults(t, "SELECT count(*) FROM data.bank", [][]string{{"10"}})
	})
	t.Run("tables", func(t *testing.T) {
		sqlDB.Exec(t, `CREATE DATABASE data`)
		defer sqlDB.Exec(t, `DROP DATABASE data`)
		sqlDB.Exec(t, `RESTORE data.* FROM $1`, localFoo)
		sqlDB.CheckQueryResults(t, "SELECT count(*) FROM data.bank", [][]string{{"10"}})
	})
	t.Run("system tables", func(t *testing.T) {
		// System tables can only be restored into another database.
		sqlDB.Exec(t, `CREATE DATABASE temp_sys`)
		sqlDB.Exec(t, `RESTORE system.users FROM $1 WITH into_db='temp_sys'`, localFoo)
		sqlDB.CheckQueryResults(t, "SELECT * FROM temp_sys.users", sqlDB.QueryStr(t, "SELECT * FROM system.users"))
	})
}
// TestCreateDBAndTableIncrementalFullClusterBackup is a regression test
// ensuring that an incremental cluster backup (a second BACKUP to the same
// location) succeeds after a new database and table were created since the
// base backup.
func TestCreateDBAndTableIncrementalFullClusterBackup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	_, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, 0, InitManualReplication)
	defer cleanupFn()
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	sqlDB.Exec(t, `CREATE DATABASE foo`)
	sqlDB.Exec(t, `CREATE TABLE foo.bar (a int)`)
	// Ensure that the new backup succeeds.
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
}
// TestClusterRestoreFailCleanup tests that a failed RESTORE is cleaned up.
func TestClusterRestoreFailCleanup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	skip.UnderRace(t, "takes >1 min under race")
	params := base.TestServerArgs{}
	// Disable GC job so that the final check of crdb_internal.tables is
	// guaranteed to not be cleaned up. Although this was never observed by a
	// stress test, it is here for safety.
	blockCh := make(chan struct{})
	defer close(blockCh)
	params.Knobs.GCJob = &sql.GCJobTestingKnobs{
		RunBeforeResume: func(_ jobspb.JobID) error { <-blockCh; return nil },
	}
	const numAccounts = 1000
	_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	defer cleanupFn()
	// Populate system.users so that system table contents are part of the
	// backup under test.
	for i := 0; i < 1000; i++ {
		sqlDB.Exec(t, fmt.Sprintf("CREATE USER maxroach%d", i))
	}
	sqlDB.Exec(t, `BACKUP TO 'nodelocal://1/missing-ssts'`)
	// Bugger the backup by removing the SST files. (Note this messes up all of
	// the backups, but there is only one at this point.)
	if err := filepath.Walk(tempDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			t.Fatal(err)
		}
		if info.Name() == backupManifestName || !strings.HasSuffix(path, ".sst") {
			return nil
		}
		return os.Remove(path)
	}); err != nil {
		t.Fatal(err)
	}
	// Create a non-corrupted backup.
	// Populate system.jobs.
	// Note: this is not the backup under test, this just serves as a job which
	// should appear in the restore.
	// This job will eventually fail since it will run from a new cluster.
	sqlDB.Exec(t, `BACKUP data.bank TO 'nodelocal://0/throwawayjob'`)
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	t.Run("during restoration of data", func(t *testing.T) {
		_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
		defer cleanupEmptyCluster()
		sqlDBRestore.ExpectErr(t, "sst: no such file", `RESTORE FROM 'nodelocal://1/missing-ssts'`)
		// Verify the failed RESTORE added some DROP tables.
		// Note that the system tables here correspond to the temporary tables
		// imported, not the system tables themselves.
		sqlDBRestore.CheckQueryResults(t,
			`SELECT name FROM system.crdb_internal.tables WHERE state = 'DROP' ORDER BY name`,
			[][]string{
				{"bank"},
				{"comments"},
				{"database_role_settings"},
				{"jobs"},
				{"locations"},
				{"role_members"},
				{"role_options"},
				{"scheduled_jobs"},
				{"settings"},
				{"tenant_settings"},
				{"ui"},
				{"users"},
				{"zones"},
			},
		)
	})
	// This test retries the job (by injecting a retry error) after restoring
	// every system table that has a custom restore function. This tries to
	// tease out any errors that may occur if some of the system table
	// restoration functions are not idempotent.
	t.Run("retry-during-custom-system-table-restore", func(t *testing.T) {
		customRestoreSystemTables := make([]string, 0)
		for table, config := range systemTableBackupConfiguration {
			if config.customRestoreFunc != nil {
				customRestoreSystemTables = append(customRestoreSystemTables, table)
			}
		}
		for _, customRestoreSystemTable := range customRestoreSystemTables {
			t.Run(customRestoreSystemTable, func(t *testing.T) {
				args := base.TestClusterArgs{ServerArgs: base.TestServerArgs{
					Knobs: base.TestingKnobs{JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals()},
				}}
				tcRestore, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, args)
				defer cleanupEmptyCluster()
				// Inject a retry error, that returns once.
				alreadyErrored := false
				for _, server := range tcRestore.Servers {
					registry := server.JobRegistry().(*jobs.Registry)
					registry.TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
						jobspb.TypeRestore: func(raw jobs.Resumer) jobs.Resumer {
							r := raw.(*restoreResumer)
							r.testingKnobs.duringSystemTableRestoration = func(systemTableName string) error {
								if !alreadyErrored && systemTableName == customRestoreSystemTable {
									alreadyErrored = true
									return jobs.MarkAsRetryJobError(errors.New("injected error"))
								}
								return nil
							}
							return r
						},
					}
				}
				// The initial restore will return an error, and restart.
				sqlDBRestore.ExpectErr(t, `running execution from '.*' to '.*' on \d+ failed: injected error`, `RESTORE FROM $1`, localFoo)
				// Reduce retry delays.
				sqlDBRestore.Exec(t, "SET CLUSTER SETTING jobs.registry.retry.initial_delay = '1ms'")
				// Expect the restore to succeed.
				sqlDBRestore.CheckQueryResultsRetry(t,
					`SELECT count(*) FROM [SHOW JOBS] WHERE job_type = 'RESTORE' AND status = 'succeeded'`,
					[][]string{{"1"}})
			})
		}
	})
	t.Run("during system table restoration", func(t *testing.T) {
		tcRestore, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
		defer cleanupEmptyCluster()
		// Bugger the backup by injecting a failure while restoring the system data.
		for _, server := range tcRestore.Servers {
			registry := server.JobRegistry().(*jobs.Registry)
			registry.TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
				jobspb.TypeRestore: func(raw jobs.Resumer) jobs.Resumer {
					r := raw.(*restoreResumer)
					r.testingKnobs.duringSystemTableRestoration = func(_ string) error {
						return errors.New("injected error")
					}
					return r
				},
			}
		}
		sqlDBRestore.ExpectErr(t, "injected error", `RESTORE FROM $1`, localFoo)
		// Verify the failed RESTORE added some DROP tables.
		// Note that the system tables here correspond to the temporary tables
		// imported, not the system tables themselves.
		sqlDBRestore.CheckQueryResults(t,
			`SELECT name FROM system.crdb_internal.tables WHERE state = 'DROP' ORDER BY name`,
			[][]string{
				{"bank"},
				{"comments"},
				{"database_role_settings"},
				{"jobs"},
				{"locations"},
				{"role_members"},
				{"role_options"},
				{"scheduled_jobs"},
				{"settings"},
				{"tenant_settings"},
				{"ui"},
				{"users"},
				{"zones"},
			},
		)
	})
	t.Run("after offline tables", func(t *testing.T) {
		tcRestore, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
		defer cleanupEmptyCluster()
		// Bugger the backup by injecting a failure after the offline table
		// descriptors have been created.
		for _, server := range tcRestore.Servers {
			registry := server.JobRegistry().(*jobs.Registry)
			registry.TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
				jobspb.TypeRestore: func(raw jobs.Resumer) jobs.Resumer {
					r := raw.(*restoreResumer)
					r.testingKnobs.afterOfflineTableCreation = func() error {
						return errors.New("injected error")
					}
					return r
				},
			}
		}
		sqlDBRestore.ExpectErr(t, "injected error", `RESTORE FROM $1`, localFoo)
	})
}
// TestDropDatabaseRevisionHistory is a regression test for a bug where
// dropped descriptors would appear in the set of `Descriptors`.
func TestDropDatabaseRevisionHistory(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	const numAccounts = 1
	_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	defer cleanupFn()
	sqlDB.Exec(t, `BACKUP TO $1 WITH revision_history`, localFoo)
	// Create, drop, and re-create a database so the revision history contains
	// a dropped descriptor with the same name as a live one.
	sqlDB.Exec(t, `CREATE DATABASE same_name_db;`)
	sqlDB.Exec(t, `DROP DATABASE same_name_db;`)
	sqlDB.Exec(t, `CREATE DATABASE same_name_db;`)
	sqlDB.Exec(t, `BACKUP TO $1 WITH revision_history`, localFoo)
	_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupEmptyCluster()
	sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
	// Only the live copy of same_name_db should have been restored.
	sqlDBRestore.ExpectErr(t, `database "same_name_db" already exists`, `CREATE DATABASE same_name_db`)
}
// TestClusterRevisionHistory tests that cluster backups can be taken with
// revision_history and correctly restore into various points in time.
func TestClusterRevisionHistory(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	type testCase struct {
		ts    string
		check func(t *testing.T, runner *sqlutils.SQLRunner)
	}
	testCases := make([]testCase, 0)
	ts := make([]string, 6)
	var tc testCase
	const numAccounts = 1
	_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	defer cleanupFn()
	// ts[0]: d1 exists; d2 does not yet exist.
	sqlDB.Exec(t, `CREATE DATABASE d1`)
	sqlDB.Exec(t, `CREATE TABLE d1.t (a INT)`)
	sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts[0])
	tc = testCase{
		ts: ts[0],
		check: func(t *testing.T, checkSQLDB *sqlutils.SQLRunner) {
			checkSQLDB.ExpectErr(t, `database "d1" already exists`, `CREATE DATABASE d1`)
			checkSQLDB.Exec(t, `CREATE DATABASE d2`)
		},
	}
	testCases = append(testCases, tc)
	// ts[1]: both d1 and d2 exist.
	sqlDB.Exec(t, `CREATE DATABASE d2`)
	sqlDB.Exec(t, `CREATE TABLE d2.t (a INT)`)
	sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts[1])
	tc = testCase{
		ts: ts[1],
		check: func(t *testing.T, checkSQLDB *sqlutils.SQLRunner) {
			// Expect both databases to exist at this point.
			checkSQLDB.ExpectErr(t, `database "d1" already exists`, `CREATE DATABASE d1`)
			checkSQLDB.ExpectErr(t, `database "d2" already exists`, `CREATE DATABASE d2`)
		},
	}
	testCases = append(testCases, tc)
	// ts[2]: d1 has been dropped; only d2 remains.
	sqlDB.Exec(t, `DROP DATABASE d1`)
	sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts[2])
	tc = testCase{
		ts: ts[2],
		check: func(t *testing.T, checkSQLDB *sqlutils.SQLRunner) {
			checkSQLDB.Exec(t, `CREATE DATABASE d1`)
			checkSQLDB.Exec(t, `CREATE TABLE d1.t (a INT)`)
			checkSQLDB.ExpectErr(t, `database "d2" already exists`, `CREATE DATABASE d2`)
			checkSQLDB.ExpectErr(t, `relation "d2.public.t" already exists`, `CREATE TABLE d2.t (a INT)`)
		},
	}
	testCases = append(testCases, tc)
	sqlDB.Exec(t, `BACKUP TO $1 WITH revision_history`, localFoo)
	// ts[3]: after the first backup's EndTime, just before d2 is dropped.
	sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts[3])
	sqlDB.Exec(t, `DROP DATABASE d2;`)
	tc = testCase{
		ts: ts[3],
		check: func(t *testing.T, checkSQLDB *sqlutils.SQLRunner) {
			checkSQLDB.Exec(t, `CREATE DATABASE d1`)
			checkSQLDB.Exec(t, `CREATE TABLE d1.t (a INT)`)
			checkSQLDB.ExpectErr(t, `database "d2" already exists`, `CREATE DATABASE d2`)
			checkSQLDB.ExpectErr(t, `relation "d2.public.t" already exists`, `CREATE TABLE d2.t (a INT)`)
		},
	}
	testCases = append(testCases, tc)
	sqlDB.Exec(t, `BACKUP TO $1 WITH revision_history`, localFoo)
	// ts[4]: d1 has been re-created; d2 remains dropped.
	sqlDB.Exec(t, `CREATE DATABASE d1`)
	sqlDB.Exec(t, `CREATE TABLE d1.t (a INT)`)
	sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts[4])
	tc = testCase{
		ts: ts[4],
		check: func(t *testing.T, checkSQLDB *sqlutils.SQLRunner) {
			checkSQLDB.ExpectErr(t, `database "d1" already exists`, `CREATE DATABASE d1`)
			checkSQLDB.ExpectErr(t, `relation "d1.public.t" already exists`, `CREATE TABLE d1.t (a INT)`)
			checkSQLDB.Exec(t, `CREATE DATABASE d2`)
			checkSQLDB.Exec(t, `CREATE TABLE d2.t (a INT)`)
		},
	}
	testCases = append(testCases, tc)
	// ts[5]: both databases have been dropped.
	sqlDB.Exec(t, `DROP DATABASE d1`)
	sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts[5])
	tc = testCase{
		ts: ts[5],
		check: func(t *testing.T, checkSQLDB *sqlutils.SQLRunner) {
			checkSQLDB.Exec(t, `CREATE DATABASE d1`)
			checkSQLDB.Exec(t, `CREATE TABLE d1.t (a INT)`)
			checkSQLDB.Exec(t, `CREATE DATABASE d2`)
			checkSQLDB.Exec(t, `CREATE TABLE d2.t (a INT)`)
		},
	}
	testCases = append(testCases, tc)
	sqlDB.Exec(t, `BACKUP TO $1 WITH revision_history`, localFoo)
	// Restore each captured timestamp into a fresh cluster and run its checks.
	for i, testCase := range testCases {
		t.Run(fmt.Sprintf("t%d", i), func(t *testing.T) {
			_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
			defer cleanupEmptyCluster()
			sqlDBRestore.Exec(t, `RESTORE FROM $1 AS OF SYSTEM TIME `+testCase.ts, localFoo)
			testCase.check(t, sqlDBRestore)
		})
	}
}
// TestReintroduceOfflineSpans is a regression test for #62564, which tracks a
// bug where AddSSTable requests to OFFLINE tables may be missed by cluster
// incremental backups since they can write at a timestamp older than the last
// backup.
func TestReintroduceOfflineSpans(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	skip.UnderRace(t, "likely slow under race")
	// Block restores on the source cluster.
	blockDBRestore := make(chan struct{})
	dbRestoreStarted := make(chan struct{})
	// The data is split such that there will be 10 span entries to process.
	// Fixed identifier typo: was restoreBlockEntiresThreshold.
	restoreBlockEntriesThreshold := 4
	entriesCount := 0
	params := base.TestClusterArgs{}
	knobs := base.TestingKnobs{
		DistSQL: &execinfra.TestingKnobs{
			BackupRestoreTestingKnobs: &sql.BackupRestoreTestingKnobs{
				RunAfterProcessingRestoreSpanEntry: func(_ context.Context) {
					// Signal once the restore starts processing entries, and
					// block it partway through so backups can be taken while
					// the restored table is still OFFLINE.
					if entriesCount == 0 {
						close(dbRestoreStarted)
					}
					if entriesCount == restoreBlockEntriesThreshold {
						<-blockDBRestore
					}
					entriesCount++
				},
			}},
	}
	params.ServerArgs.Knobs = knobs
	const numAccounts = 1000
	ctx := context.Background()
	_, srcDB, tempDir, cleanupSrc := backupRestoreTestSetupWithParams(t, singleNode, numAccounts, InitManualReplication, params)
	defer cleanupSrc()
	dbBackupLoc := "nodelocal://0/my_db_backup"
	clusterBackupLoc := "nodelocal://0/my_cluster_backup"
	// The small test-case will get entirely buffered/merged by small-file
	// merging and not report any progress in the meantime unless it is
	// disabled.
	srcDB.Exec(t, `SET CLUSTER SETTING bulkio.backup.file_size = '1'`)
	// Test servers only have 128MB root memory monitors, reduce the buffer size
	// so we don't see memory errors.
	srcDB.Exec(t, `SET CLUSTER SETTING bulkio.backup.merge_file_buffer_size = '1MiB'`)
	// Take a backup that we'll use to create an OFFLINE descriptor.
	srcDB.Exec(t, `CREATE INDEX new_idx ON data.bank (balance)`)
	srcDB.Exec(t, `BACKUP DATABASE data TO $1 WITH revision_history`, dbBackupLoc)
	srcDB.Exec(t, `CREATE DATABASE restoredb;`)
	// Take a base full backup.
	srcDB.Exec(t, `BACKUP TO $1 WITH revision_history`, clusterBackupLoc)
	// Kick off the database restore in the background; the knob above will
	// block it partway through processing its span entries.
	var g errgroup.Group
	g.Go(func() error {
		_, err := srcDB.DB.ExecContext(ctx, `RESTORE data.bank FROM $1 WITH into_db='restoredb'`, dbBackupLoc)
		return err
	})
	// Take an incremental backup after the database restore starts.
	<-dbRestoreStarted
	srcDB.Exec(t, `BACKUP TO $1 WITH revision_history`, clusterBackupLoc)
	var tsMidRestore string
	srcDB.QueryRow(t, "SELECT cluster_logical_timestamp()").Scan(&tsMidRestore)
	// Allow the restore to finish. This will issue AddSSTable requests at a
	// timestamp that is before the last incremental we just took.
	close(blockDBRestore)
	// Wait for the database restore to finish, and take another incremental
	// backup that will miss the AddSSTable writes.
	require.NoError(t, g.Wait())
	var tsBefore string
	srcDB.QueryRow(t, "SELECT cluster_logical_timestamp()").Scan(&tsBefore)
	// Drop an index on the restored table to ensure that the dropped index was
	// also re-included.
	srcDB.Exec(t, `DROP INDEX new_idx`)
	srcDB.Exec(t, `BACKUP TO $1 WITH revision_history`, clusterBackupLoc)
	t.Run("spans-reintroduced", func(t *testing.T) {
		_, destDB, cleanupDst := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
		defer cleanupDst()
		// Restore the incremental backup chain that has missing writes.
		destDB.Exec(t, `RESTORE FROM $1 AS OF SYSTEM TIME `+tsBefore, clusterBackupLoc)
		// Assert that the restored database has the same number of rows in both the
		// source and destination cluster.
		checkQuery := `SELECT count(*) FROM restoredb.bank AS OF SYSTEM TIME ` + tsBefore
		expectedCount := srcDB.QueryStr(t, checkQuery)
		destDB.CheckQueryResults(t, `SELECT count(*) FROM restoredb.bank`, expectedCount)
		checkQuery = `SELECT count(*) FROM restoredb.bank@new_idx AS OF SYSTEM TIME ` + tsBefore
		expectedCount = srcDB.QueryStr(t, checkQuery)
		destDB.CheckQueryResults(t, `SELECT count(*) FROM restoredb.bank@new_idx`, expectedCount)
	})
	t.Run("restore-canceled", func(t *testing.T) {
		args := base.TestClusterArgs{ServerArgs: base.TestServerArgs{
			Knobs: base.TestingKnobs{JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals()}},
		}
		_, destDB, cleanupDst := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, args)
		defer cleanupDst()
		destDB.Exec(t, `RESTORE FROM $1 AS OF SYSTEM TIME `+tsMidRestore, clusterBackupLoc)
		// Wait for the cluster restore job to finish, as well as the restored RESTORE TABLE
		// job to cancel.
		destDB.CheckQueryResultsRetry(t, `
		SELECT description, status FROM [SHOW JOBS]
		WHERE job_type = 'RESTORE' AND status NOT IN ('succeeded', 'canceled')`,
			[][]string{},
		)
		// The cluster restore should succeed, but the table restore should have failed.
		destDB.CheckQueryResults(t,
			`SELECT status, count(*) FROM [SHOW JOBS] WHERE job_type = 'RESTORE' GROUP BY status ORDER BY status`,
			[][]string{{"canceled", "1"}, {"succeeded", "1"}})
		destDB.ExpectErr(t, `relation "restoredb.bank" does not exist`, `SELECT count(*) FROM restoredb.bank`)
	})
}
// TestClusterRevisionDoesNotBackupOptOutSystemTables is a regression test for a
// bug that was introduced where we would include revisions for descriptors that
// are not supposed to be backed up, e.g. system tables that are opted out.
//
// The test would previously fail with an error that the descriptors table (an
// opt out system table) did not have a span covering the time between the
// `EndTime` of the first backup and second backup, since there are no revisions
// to it between those backups.
func TestClusterRevisionDoesNotBackupOptOutSystemTables(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	tc, _, _, cleanup := backupRestoreTestSetup(t, singleNode, 10, InitManualReplication)
	conn := tc.Conns[0]
	sqlDB := sqlutils.MakeSQLRunner(conn)
	defer cleanup()
	sqlDB.Exec(t, `CREATE DATABASE test;`)
	sqlDB.Exec(t, `USE test;`)
	sqlDB.Exec(t, `CREATE TABLE foo (id INT);`)
	// Back-to-back revision_history backups with no intervening changes to any
	// opt-out system table; the second backup would previously fail.
	sqlDB.Exec(t, `BACKUP TO 'nodelocal://1/foo' WITH revision_history;`)
	sqlDB.Exec(t, `BACKUP TO 'nodelocal://1/foo' WITH revision_history;`)
	sqlDB.Exec(t, `CREATE TABLE bar (id INT);`)
	sqlDB.Exec(t, `BACKUP TO 'nodelocal://1/foo' WITH revision_history;`)
}
// TestRestoreWithRecreatedDefaultDB verifies that a defaultdb that was
// dropped and re-created (and therefore has a non-default descriptor ID)
// keeps that ID across a full cluster backup and restore.
func TestRestoreWithRecreatedDefaultDB(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	sqlDB, tempDir, cleanupFn := createEmptyCluster(t, singleNode)
	_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupFn()
	defer cleanupEmptyCluster()
	sqlDB.Exec(t, `
DROP DATABASE defaultdb;
CREATE DATABASE defaultdb;
`)
	// Record the re-created database's descriptor ID on the source cluster.
	row := sqlDB.QueryRow(t, `SELECT id FROM system.namespace WHERE name = 'defaultdb'`)
	var expectedDefaultDBID string
	row.Scan(&expectedDefaultDBID)
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
	// The restored namespace entry should carry the re-created database's ID.
	sqlDBRestore.CheckQueryResults(t, `SELECT * FROM system.namespace WHERE name = 'defaultdb'`, [][]string{
		{"0", "0", "defaultdb", expectedDefaultDBID},
	})
}
// TestRestoreWithDroppedDefaultDB verifies that restoring a full cluster
// backup taken after defaultdb was dropped does not resurrect defaultdb in
// the restored cluster.
func TestRestoreWithDroppedDefaultDB(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	sqlDB, tempDir, cleanupFn := createEmptyCluster(t, singleNode)
	_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupFn()
	defer cleanupEmptyCluster()
	sqlDB.Exec(t, `
DROP DATABASE defaultdb;
`)
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
	// defaultdb must not exist in the restored cluster's namespace.
	sqlDBRestore.CheckQueryResults(t, `SELECT count(*) FROM system.namespace WHERE name = 'defaultdb'`, [][]string{
		{"0"},
	})
}
// TestRestoreToClusterWithDroppedDefaultDB verifies that restoring a full
// cluster backup into a cluster whose defaultdb has been dropped brings back
// defaultdb with the same namespace entry as the source cluster.
func TestRestoreToClusterWithDroppedDefaultDB(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	sqlDB, tempDir, cleanupFn := createEmptyCluster(t, singleNode)
	_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupFn()
	defer cleanupEmptyCluster()
	// Record the source cluster's defaultdb namespace entry so it can be
	// compared against the restored cluster below.
	var (
		parentID, schemaID, dbID int
		dbName                   string
	)
	sqlDB.QueryRow(t, `SELECT * FROM system.namespace WHERE name = 'defaultdb'`).
		Scan(&parentID, &schemaID, &dbName, &dbID)
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	// Drop defaultdb on the destination before restoring; the restore should
	// re-create it with the recorded namespace entry.
	sqlDBRestore.Exec(t, `
DROP DATABASE defaultdb;
`)
	sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
	sqlDBRestore.CheckQueryResults(t, `SELECT * FROM system.namespace WHERE name = 'defaultdb'`, [][]string{
		{fmt.Sprint(parentID), fmt.Sprint(schemaID), dbName, fmt.Sprint(dbID)},
	})
}
| pkg/ccl/backupccl/full_cluster_backup_restore_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/f475361ee075d1746bcd148e6da004fa20dece51 | [
0.9987990856170654,
0.02584574557840824,
0.00016268380568362772,
0.00016887192032299936,
0.15777817368507385
] |
{
"id": 2,
"code_window": [
"\t\t\t},\n",
"\t\t}\n",
"\t}\n",
"\n",
"\t// The claim_session_id field in jobs is a uuid and so needs to be excluded\n",
"\t// when comparing jobs pre/post restore.\n",
"\tconst jobsQuery = `\n",
"SELECT id, status, created, payload, progress, created_by_type, created_by_id, claim_instance_id\n",
"FROM system.jobs\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// when comparing jobs pre/post restore. The span config reconciliation job\n",
"\t// too is something we exclude; because it's a singleton job, when restored\n",
"\t// into another cluster it self-terminates.\n"
],
"file_path": "pkg/ccl/backupccl/full_cluster_backup_restore_test.go",
"type": "replace",
"edit_start_line_idx": 93
} | // Code generated by "stringer"; DO NOT EDIT.
package encoding
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[Unknown-0]
_ = x[Null-1]
_ = x[NotNull-2]
_ = x[Int-3]
_ = x[Float-4]
_ = x[Decimal-5]
_ = x[Bytes-6]
_ = x[BytesDesc-7]
_ = x[Time-8]
_ = x[Duration-9]
_ = x[True-10]
_ = x[False-11]
_ = x[UUID-12]
_ = x[Array-13]
_ = x[IPAddr-14]
_ = x[JSON-15]
_ = x[Tuple-16]
_ = x[BitArray-17]
_ = x[BitArrayDesc-18]
_ = x[TimeTZ-19]
_ = x[Geo-20]
_ = x[GeoDesc-21]
_ = x[ArrayKeyAsc-22]
_ = x[ArrayKeyDesc-23]
_ = x[Box2D-24]
_ = x[Void-25]
}
const _Type_name = "UnknownNullNotNullIntFloatDecimalBytesBytesDescTimeDurationTrueFalseUUIDArrayIPAddrJSONTupleBitArrayBitArrayDescTimeTZGeoGeoDescArrayKeyAscArrayKeyDescBox2DVoid"
var _Type_index = [...]uint8{0, 7, 11, 18, 21, 26, 33, 38, 47, 51, 59, 63, 68, 72, 77, 83, 87, 92, 100, 112, 118, 121, 128, 139, 151, 156, 160}
func (i Type) String() string {
if i < 0 || i >= Type(len(_Type_index)-1) {
return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _Type_name[_Type_index[i]:_Type_index[i+1]]
}
| pkg/util/encoding/type_string.go | 0 | https://github.com/cockroachdb/cockroach/commit/f475361ee075d1746bcd148e6da004fa20dece51 | [
0.00017339493206236511,
0.00016797671560198069,
0.0001619138929527253,
0.00016807240899652243,
0.0000037698673622799106
] |
{
"id": 2,
"code_window": [
"\t\t\t},\n",
"\t\t}\n",
"\t}\n",
"\n",
"\t// The claim_session_id field in jobs is a uuid and so needs to be excluded\n",
"\t// when comparing jobs pre/post restore.\n",
"\tconst jobsQuery = `\n",
"SELECT id, status, created, payload, progress, created_by_type, created_by_id, claim_instance_id\n",
"FROM system.jobs\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// when comparing jobs pre/post restore. The span config reconciliation job\n",
"\t// too is something we exclude; because it's a singleton job, when restored\n",
"\t// into another cluster it self-terminates.\n"
],
"file_path": "pkg/ccl/backupccl/full_cluster_backup_restore_test.go",
"type": "replace",
"edit_start_line_idx": 93
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
import { expectSaga } from "redux-saga-test-plan";
import sinon from "sinon";
import Analytics from "analytics-node";
import { signUpEmailSubscription } from "./customAnalyticsSagas";
import { signUpForEmailSubscription } from "./customAnanlyticsActions";
const sandbox = sinon.createSandbox();
describe("customAnalyticsSagas", () => {
describe("signUpEmailSubscription generator", () => {
afterEach(() => {
sandbox.reset();
});
it("calls analytics#identify with user email in args ", () => {
const analyticsIdentifyFn = sandbox.stub(Analytics.prototype, "identify");
const clusterId = "cluster-1";
const email = "[email protected]";
const action = signUpForEmailSubscription(clusterId, email);
return expectSaga(signUpEmailSubscription, action)
.dispatch(action)
.run()
.then(() => {
const expectedAnalyticsMessage = {
userId: clusterId,
traits: {
email,
},
};
analyticsIdentifyFn.calledOnceWith(expectedAnalyticsMessage);
});
});
});
});
| pkg/ui/workspaces/db-console/src/redux/customAnalytics/customAnalyticsSagas.spec.ts | 0 | https://github.com/cockroachdb/cockroach/commit/f475361ee075d1746bcd148e6da004fa20dece51 | [
0.0001792254188330844,
0.0001727430208120495,
0.0001666011376073584,
0.00017212997772730887,
0.0000040810245991451666
] |
{
"id": 2,
"code_window": [
"\t\t\t},\n",
"\t\t}\n",
"\t}\n",
"\n",
"\t// The claim_session_id field in jobs is a uuid and so needs to be excluded\n",
"\t// when comparing jobs pre/post restore.\n",
"\tconst jobsQuery = `\n",
"SELECT id, status, created, payload, progress, created_by_type, created_by_id, claim_instance_id\n",
"FROM system.jobs\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// when comparing jobs pre/post restore. The span config reconciliation job\n",
"\t// too is something we exclude; because it's a singleton job, when restored\n",
"\t// into another cluster it self-terminates.\n"
],
"file_path": "pkg/ccl/backupccl/full_cluster_backup_restore_test.go",
"type": "replace",
"edit_start_line_idx": 93
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
import * as protos from "@cockroachlabs/crdb-protobuf-client";
import { stdDevLong, longToInt } from "src/util";
import { Duration, Bytes, Percentage } from "src/util/format";
import classNames from "classnames/bind";
import styles from "./barCharts.module.scss";
import { bar, formatTwoPlaces, approximify } from "./utils";
import { barChartFactory, BarChartOptions } from "./barChartFactory";
import { AggregateStatistics } from "src/statementsTable/statementsTable";
type StatementStatistics = protos.cockroach.server.serverpb.StatementsResponse.ICollectedStatementStatistics;
const cx = classNames.bind(styles);
const countBars = [
bar("count-first-try", (d: StatementStatistics) =>
longToInt(d.stats.first_attempt_count),
),
];
const rowsReadBars = [
bar("rows-read", (d: StatementStatistics) => d.stats.rows_read.mean),
];
const bytesReadBars = [
bar("bytes-read", (d: StatementStatistics) => d.stats.bytes_read.mean),
];
const rowsWrittenBars = [
bar("rows-written", (d: StatementStatistics) => d.stats.rows_written?.mean),
];
const latencyBars = [
bar("bar-chart__parse", (d: StatementStatistics) => d.stats.parse_lat.mean),
bar("bar-chart__plan", (d: StatementStatistics) => d.stats.plan_lat.mean),
bar("bar-chart__run", (d: StatementStatistics) => d.stats.run_lat.mean),
bar(
"bar-chart__overhead",
(d: StatementStatistics) => d.stats.overhead_lat.mean,
),
];
const contentionBars = [
bar(
"contention",
(d: StatementStatistics) => d.stats.exec_stats.contention_time?.mean,
),
];
const maxMemUsageBars = [
bar(
"max-mem-usage",
(d: StatementStatistics) => d.stats.exec_stats.max_mem_usage?.mean,
),
];
const networkBytesBars = [
bar(
"network-bytes",
(d: StatementStatistics) => d.stats.exec_stats.network_bytes?.mean,
),
];
const retryBars = [
bar(
"count-retry",
(d: StatementStatistics) =>
longToInt(d.stats.count) - longToInt(d.stats.first_attempt_count),
),
];
const rowsReadStdDev = bar(cx("rows-read-dev"), (d: StatementStatistics) =>
stdDevLong(d.stats.rows_read, d.stats.count),
);
const bytesReadStdDev = bar(cx("bytes-read-dev"), (d: StatementStatistics) =>
stdDevLong(d.stats.bytes_read, d.stats.count),
);
const rowsWrittenStdDev = bar(
cx("rows-written-dev"),
(d: StatementStatistics) => stdDevLong(d.stats.rows_written, d.stats.count),
);
const latencyStdDev = bar(
cx("bar-chart__overall-dev"),
(d: StatementStatistics) => stdDevLong(d.stats.service_lat, d.stats.count),
);
const contentionStdDev = bar(cx("contention-dev"), (d: StatementStatistics) =>
stdDevLong(d.stats.exec_stats.contention_time, d.stats.exec_stats.count),
);
const maxMemUsageStdDev = bar(
cx("max-mem-usage-dev"),
(d: StatementStatistics) =>
stdDevLong(d.stats.exec_stats.max_mem_usage, d.stats.exec_stats.count),
);
const networkBytesStdDev = bar(
cx("network-bytes-dev"),
(d: StatementStatistics) =>
stdDevLong(d.stats.exec_stats.network_bytes, d.stats.exec_stats.count),
);
export const countBarChart = barChartFactory("grey", countBars, approximify);
export const rowsReadBarChart = barChartFactory(
"grey",
rowsReadBars,
approximify,
rowsReadStdDev,
formatTwoPlaces,
);
export const bytesReadBarChart = barChartFactory(
"grey",
bytesReadBars,
Bytes,
bytesReadStdDev,
);
export const rowsWrittenBarChart = barChartFactory(
"grey",
rowsWrittenBars,
approximify,
rowsWrittenStdDev,
formatTwoPlaces,
);
export const latencyBarChart = barChartFactory(
"grey",
latencyBars,
v => Duration(v * 1e9),
latencyStdDev,
);
export const contentionBarChart = barChartFactory(
"grey",
contentionBars,
v => Duration(v * 1e9),
contentionStdDev,
);
export const maxMemUsageBarChart = barChartFactory(
"grey",
maxMemUsageBars,
Bytes,
maxMemUsageStdDev,
);
export const networkBytesBarChart = barChartFactory(
"grey",
networkBytesBars,
Bytes,
networkBytesStdDev,
);
export const retryBarChart = barChartFactory("red", retryBars, approximify);
export function workloadPctBarChart(
statements: AggregateStatistics[],
defaultBarChartOptions: BarChartOptions<any>,
totalWorkload: number,
) {
return barChartFactory(
"grey",
[
bar(
"pct-workload",
(d: StatementStatistics) =>
(d.stats.service_lat.mean * longToInt(d.stats.count)) / totalWorkload,
),
],
v => Percentage(v, 1, 1),
)(statements, defaultBarChartOptions);
}
| pkg/ui/workspaces/cluster-ui/src/barCharts/barCharts.tsx | 0 | https://github.com/cockroachdb/cockroach/commit/f475361ee075d1746bcd148e6da004fa20dece51 | [
0.0001791495451470837,
0.00017212597595062107,
0.00016501145728398114,
0.00017224863404408097,
0.0000037312072436179733
] |
{
"id": 3,
"code_window": [
"\tconst jobsQuery = `\n",
"SELECT id, status, created, payload, progress, created_by_type, created_by_id, claim_instance_id\n",
"FROM system.jobs\n",
"\t`\n",
"\t// Pause SQL Stats compaction job to ensure the test is deterministic.\n",
"\tsqlDB.Exec(t, `PAUSE SCHEDULES SELECT id FROM [SHOW SCHEDULES FOR SQL STATISTICS]`)\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"WHERE id NOT IN\n",
"(\n",
"\tSELECT job_id FROM [SHOW AUTOMATIC JOBS]\n",
" WHERE job_type = 'AUTO SPAN CONFIG RECONCILIATION'\n",
")\n"
],
"file_path": "pkg/ccl/backupccl/full_cluster_backup_restore_test.go",
"type": "add",
"edit_start_line_idx": 97
} | // Copyright 2020 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package backupccl
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
_ "github.com/cockroachdb/cockroach/pkg/ccl/partitionccl"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/spanconfig"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/bootstrap"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
)
// Large test to ensure that all of the system table data is being restored in
// the new cluster. Ensures that all the moving pieces are working together.
func TestFullClusterBackup(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
params := base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
DisableSpanConfigs: true, // TODO(irfansharif): #75060.
Knobs: base.TestingKnobs{
SpanConfig: &spanconfig.TestingKnobs{
// We compare job progress before and after a restore. Disable
// the automatic jobs checkpointing which could possibly mutate
// the progress data during the backup/restore process.
JobDisablePersistingCheckpoints: true,
},
GCJob: &sql.GCJobTestingKnobs{
DisableNewProtectedTimestampSubsystemCheck: true,
},
},
}}
const numAccounts = 10
tcBackup, sqlDB, tempDir, cleanupFn := backupRestoreTestSetupWithParams(t, singleNode, numAccounts, InitManualReplication, params)
tcRestore, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, params)
defer cleanupFn()
defer cleanupEmptyCluster()
backupKVDB := tcBackup.Server(0).DB()
// Closed when the restore is allowed to progress with the rest of the backup.
allowProgressAfterPreRestore := make(chan struct{})
// Closed to signal the the zones have been restored.
restoredZones := make(chan struct{})
for _, server := range tcRestore.Servers {
registry := server.JobRegistry().(*jobs.Registry)
registry.TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
jobspb.TypeRestore: func(raw jobs.Resumer) jobs.Resumer {
r := raw.(*restoreResumer)
r.testingKnobs.afterPreRestore = func() error {
close(restoredZones)
<-allowProgressAfterPreRestore
return nil
}
return r
},
}
}
// The claim_session_id field in jobs is a uuid and so needs to be excluded
// when comparing jobs pre/post restore.
const jobsQuery = `
SELECT id, status, created, payload, progress, created_by_type, created_by_id, claim_instance_id
FROM system.jobs
`
// Pause SQL Stats compaction job to ensure the test is deterministic.
sqlDB.Exec(t, `PAUSE SCHEDULES SELECT id FROM [SHOW SCHEDULES FOR SQL STATISTICS]`)
// Disable automatic stats collection on the backup and restoring clusters to ensure
// the test is deterministic.
sqlDB.Exec(t, `SET CLUSTER SETTING sql.stats.automatic_collection.enabled=false`)
sqlDBRestore.Exec(t, `SET CLUSTER SETTING sql.stats.automatic_collection.enabled=false`)
// Create some other descriptors as well.
sqlDB.Exec(t, `
USE data;
CREATE SCHEMA test_data_schema;
CREATE TABLE data.test_data_schema.test_table (a int);
INSERT INTO data.test_data_schema.test_table VALUES (1), (2);
USE defaultdb;
CREATE SCHEMA test_schema;
CREATE TABLE defaultdb.test_schema.test_table (a int);
INSERT INTO defaultdb.test_schema.test_table VALUES (1), (2);
CREATE TABLE defaultdb.foo (a int);
CREATE TYPE greeting AS ENUM ('hi');
CREATE TABLE welcomes (a greeting);
CREATE DATABASE data2;
USE data2;
CREATE SCHEMA empty_schema;
CREATE TABLE data2.foo (a int);
`)
tableDesc := desctestutils.TestingGetPublicTableDescriptor(backupKVDB, keys.SystemSQLCodec, "data2", "foo")
// Store the highest user-table ID for later assertions.
maxBackupTableID := tableDesc.GetID()
// Setup the system systemTablesToVerify to ensure that they are copied to the new cluster.
// Populate system.users.
numUsers := 1000
if util.RaceEnabled {
numUsers = 10
}
for i := 0; i < numUsers; i++ {
sqlDB.Exec(t, fmt.Sprintf("CREATE USER maxroach%d", i))
sqlDB.Exec(t, fmt.Sprintf("ALTER USER maxroach%d CREATEDB", i))
}
// Populate system.zones.
sqlDB.Exec(t, `ALTER TABLE data.bank CONFIGURE ZONE USING gc.ttlseconds = 3600`)
sqlDB.Exec(t, `ALTER TABLE defaultdb.foo CONFIGURE ZONE USING gc.ttlseconds = 45`)
sqlDB.Exec(t, `ALTER DATABASE data2 CONFIGURE ZONE USING gc.ttlseconds = 900`)
// Populate system.jobs.
// Note: this is not the backup under test, this just serves as a job which
// should appear in the restore.
// This job will eventually fail since it will run from a new cluster.
sqlDB.Exec(t, `BACKUP data.bank TO 'nodelocal://0/throwawayjob'`)
preBackupJobs := sqlDB.QueryStr(t, jobsQuery)
// Populate system.settings.
sqlDB.Exec(t, `SET CLUSTER SETTING kv.bulk_io_write.concurrent_addsstable_requests = 5`)
sqlDB.Exec(t, `INSERT INTO system.ui (key, value, "lastUpdated") VALUES ($1, $2, now())`, "some_key", "some_val")
// Populate system.comments.
sqlDB.Exec(t, `COMMENT ON TABLE data.bank IS 'table comment string'`)
sqlDB.Exec(t, `COMMENT ON DATABASE data IS 'database comment string'`)
sqlDB.Exec(t,
`INSERT INTO system.locations ("localityKey", "localityValue", latitude, longitude) VALUES ($1, $2, $3, $4)`,
"city", "New York City", 40.71427, -74.00597,
)
// Populate system.role_members.
sqlDB.Exec(t, `CREATE ROLE system_ops;`)
sqlDB.Exec(t, `GRANT system_ops TO maxroach1;`)
// Populate system.scheduled_jobs table with a first run in the future to prevent immediate adoption.
firstRun := timeutil.Now().Add(time.Hour).Format(timeutil.TimestampWithoutTZFormat)
sqlDB.Exec(t, `CREATE SCHEDULE FOR BACKUP data.bank INTO $1 RECURRING '@hourly' FULL BACKUP ALWAYS WITH SCHEDULE OPTIONS first_run = $2`, localFoo, firstRun)
sqlDB.Exec(t, `PAUSE SCHEDULES SELECT id FROM [SHOW SCHEDULES FOR BACKUP]`)
injectStats(t, sqlDB, "data.bank", "id")
sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
// Create a bunch of user tables on the restoring cluster that we're going
// to delete.
numTables := 50
if util.RaceEnabled {
numTables = 2
}
for i := 0; i < numTables; i++ {
sqlDBRestore.Exec(t, `CREATE DATABASE db_to_drop`)
sqlDBRestore.Exec(t, `CREATE TABLE db_to_drop.table_to_drop (a int)`)
sqlDBRestore.Exec(t, `ALTER TABLE db_to_drop.table_to_drop CONFIGURE ZONE USING gc.ttlseconds=1`)
sqlDBRestore.Exec(t, `DROP DATABASE db_to_drop`)
}
// Wait for the GC job to finish to ensure the descriptors no longer exist.
sqlDBRestore.CheckQueryResultsRetry(
t, "SELECT count(*) FROM [SHOW JOBS] WHERE job_type = 'SCHEMA CHANGE GC' AND status = 'running'",
[][]string{{"0"}},
)
doneRestore := make(chan struct{})
go func() {
sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
close(doneRestore)
}()
// Check that zones are restored during pre-restore.
t.Run("ensure zones are restored during pre-restore", func(t *testing.T) {
<-restoredZones
// Not specifying the schema makes the query search using defaultdb first.
// which ends up returning the error
// pq: database "defaultdb" is offline: restoring
checkZones := "SELECT * FROM system.public.zones"
sqlDBRestore.CheckQueryResults(t, checkZones, sqlDB.QueryStr(t, checkZones))
// Check that the user tables are still offline.
sqlDBRestore.ExpectErr(t, "database \"data\" is offline: restoring", "SELECT * FROM data.public.bank")
// Check there is no data in the span that we expect user data to be imported.
store := tcRestore.GetFirstStoreFromServer(t, 0)
startKey := keys.SystemSQLCodec.TablePrefix(bootstrap.TestingUserDescID(0))
endKey := keys.SystemSQLCodec.TablePrefix(uint32(maxBackupTableID)).PrefixEnd()
it := store.Engine().NewMVCCIterator(storage.MVCCKeyAndIntentsIterKind, storage.IterOptions{
UpperBound: endKey,
})
defer it.Close()
it.SeekGE(storage.MVCCKey{Key: startKey})
hasKey, err := it.Valid()
require.NoError(t, err)
require.False(t, hasKey)
})
// Allow the restore to make progress after we've checked the pre-restore
// stage.
close(allowProgressAfterPreRestore)
// Wait for the restore to finish before checking that it did the right thing.
<-doneRestore
t.Run("ensure all databases restored", func(t *testing.T) {
sqlDBRestore.CheckQueryResults(t,
`SELECT database_name, owner FROM [SHOW DATABASES]`,
[][]string{
{"data", security.RootUser},
{"data2", security.RootUser},
{"defaultdb", security.RootUser},
{"postgres", security.RootUser},
{"system", security.NodeUser},
})
})
t.Run("ensure all schemas are restored", func(t *testing.T) {
expectedSchemas := map[string][][]string{
"defaultdb": {{"crdb_internal"}, {"information_schema"}, {"pg_catalog"}, {"pg_extension"}, {"public"}, {"test_schema"}},
"data": {{"crdb_internal"}, {"information_schema"}, {"pg_catalog"}, {"pg_extension"}, {"public"}, {"test_data_schema"}},
"data2": {{"crdb_internal"}, {"empty_schema"}, {"information_schema"}, {"pg_catalog"}, {"pg_extension"}, {"public"}},
}
for dbName, expectedSchemas := range expectedSchemas {
sqlDBRestore.CheckQueryResults(t,
fmt.Sprintf(`USE %s; SELECT schema_name FROM [SHOW SCHEMAS] ORDER BY schema_name;`, dbName),
expectedSchemas)
}
})
t.Run("ensure system table data restored", func(t *testing.T) {
// Note the absence of the jobs table. Jobs are tested by another test as
// jobs are created during the RESTORE process.
systemTablesToVerify := []string{
systemschema.CommentsTable.GetName(),
systemschema.LocationsTable.GetName(),
systemschema.RoleMembersTable.GetName(),
systemschema.RoleOptionsTable.GetName(),
systemschema.SettingsTable.GetName(),
systemschema.TableStatisticsTable.GetName(),
systemschema.UITable.GetName(),
systemschema.UsersTable.GetName(),
systemschema.ScheduledJobsTable.GetName(),
}
verificationQueries := make([]string, len(systemTablesToVerify))
// Populate the list of tables we expect to be restored as well as queries
// that can be used to ensure that data in those tables is restored.
for i, table := range systemTablesToVerify {
switch table {
case systemschema.TableStatisticsTable.GetName():
// createdAt and statisticsID are re-generated on RESTORE.
query := `SELECT "tableID", name, "columnIDs", "rowCount" FROM system.table_statistics`
verificationQueries[i] = query
case systemschema.SettingsTable.GetName():
// We don't include the cluster version.
query := fmt.Sprintf("SELECT * FROM system.%s WHERE name <> 'version'", table)
verificationQueries[i] = query
default:
query := fmt.Sprintf("SELECT * FROM system.%s", table)
verificationQueries[i] = query
}
}
for _, read := range verificationQueries {
sqlDBRestore.CheckQueryResults(t, read, sqlDB.QueryStr(t, read))
}
})
t.Run("ensure table IDs have not changed", func(t *testing.T) {
// Check that all tables have been restored. DISTINCT is needed in order to
// deal with the inclusion of schemas in the system.namespace table.
tableIDCheck := "SELECT * FROM system.namespace ORDER BY id"
sqlDBRestore.CheckQueryResults(t, tableIDCheck, sqlDB.QueryStr(t, tableIDCheck))
})
t.Run("ensure user table data restored", func(t *testing.T) {
expectedUserTables := [][]string{
{"data", "bank"},
{"data2", "foo"},
{"defaultdb", "foo"},
}
for _, table := range expectedUserTables {
query := fmt.Sprintf("SELECT * FROM %s.%s", table[0], table[1])
sqlDBRestore.CheckQueryResults(t, query, sqlDB.QueryStr(t, query))
}
})
t.Run("ensure that grants are restored", func(t *testing.T) {
grantCheck := "use system; SHOW grants"
sqlDBRestore.CheckQueryResults(t, grantCheck, sqlDB.QueryStr(t, grantCheck))
grantCheck = "use data; SHOW grants"
sqlDBRestore.CheckQueryResults(t, grantCheck, sqlDB.QueryStr(t, grantCheck))
})
t.Run("ensure that jobs are restored", func(t *testing.T) {
// Ensure that the jobs in the RESTORE cluster is a superset of the jobs
// that were in the BACKUP cluster (before the full cluster BACKUP job was
// run). There may be more jobs now because the restore can run jobs of
// its own.
newJobsStr := sqlDBRestore.QueryStr(t, jobsQuery)
newJobs := make(map[string][]string)
for _, newJob := range newJobsStr {
// The first element of the slice is the job id.
newJobs[newJob[0]] = newJob
}
for _, oldJob := range preBackupJobs {
newJob, ok := newJobs[oldJob[0]]
if !ok {
t.Errorf("Expected to find job %+v in RESTORE cluster, but not found", oldJob)
}
require.Equal(t, oldJob, newJob)
}
})
t.Run("zone_configs", func(t *testing.T) {
// The restored zones should be a superset of the zones in the backed up
// cluster.
zoneIDsResult := sqlDB.QueryStr(t, `SELECT id FROM system.zones`)
var q strings.Builder
q.WriteString("SELECT * FROM system.zones WHERE id IN (")
for i, restoreZoneIDRow := range zoneIDsResult {
if i > 0 {
q.WriteString(", ")
}
q.WriteString(restoreZoneIDRow[0])
}
q.WriteString(")")
sqlDBRestore.CheckQueryResults(t, q.String(), sqlDB.QueryStr(t, q.String()))
})
t.Run("ensure that tables can be created at the excepted ID", func(t *testing.T) {
var maxID, dbID, tableID int
sqlDBRestore.QueryRow(t, "SELECT max(id) FROM system.namespace").Scan(&maxID)
dbName, tableName := "new_db", "new_table"
sqlDBRestore.Exec(t, fmt.Sprintf("CREATE DATABASE %s", dbName))
sqlDBRestore.Exec(t, fmt.Sprintf("CREATE TABLE %s.%s (a int)", dbName, tableName))
sqlDBRestore.QueryRow(t,
fmt.Sprintf("SELECT id FROM system.namespace WHERE name = '%s'", dbName)).Scan(&dbID)
require.True(t, dbID > maxID)
sqlDBRestore.QueryRow(t,
fmt.Sprintf("SELECT id FROM system.namespace WHERE name = '%s'", tableName)).Scan(&tableID)
require.True(t, tableID > maxID)
require.NotEqual(t, dbID, tableID)
})
}
// TestSingletonSpanConfigJobPostRestore ensures that there's a single span
// config reconciliation job running post restore.
func TestSingletonSpanConfigJobPostRestore(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
params := base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
Knobs: base.TestingKnobs{
JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(),
},
},
}
const numAccounts = 10
_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetupWithParams(t, singleNode, numAccounts, InitManualReplication, params)
_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, params)
defer cleanupFn()
defer cleanupEmptyCluster()
sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
const numRunningReconciliationJobQuery = `
SELECT count(*) FROM [SHOW AUTOMATIC JOBS]
WHERE job_type = 'AUTO SPAN CONFIG RECONCILIATION' AND status = 'running'
`
testutils.SucceedsSoon(t, func() error {
var numRunningJobs int
sqlDBRestore.QueryRow(t, numRunningReconciliationJobQuery).Scan(&numRunningJobs)
if numRunningJobs != 1 {
return errors.Newf("expected single running reconciliation job, found %d", numRunningJobs)
}
return nil
})
}
func TestIncrementalFullClusterBackup(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const numAccounts = 10
const incrementalBackupLocation = "nodelocal://0/inc-full-backup"
_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
defer cleanupFn()
defer cleanupEmptyCluster()
sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
sqlDB.Exec(t, "CREATE USER maxroach1")
sqlDB.Exec(t, `BACKUP TO $1 INCREMENTAL FROM $2`, incrementalBackupLocation, localFoo)
sqlDBRestore.Exec(t, `RESTORE FROM $1, $2`, localFoo, incrementalBackupLocation)
checkQuery := "SELECT * FROM system.users"
sqlDBRestore.CheckQueryResults(t, checkQuery, sqlDB.QueryStr(t, checkQuery))
}
// TestEmptyFullClusterResotre ensures that we can backup and restore a full
// cluster backup with only metadata (no user data). Regression test for #49573.
func TestEmptyFullClusterRestore(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
sqlDB, tempDir, cleanupFn := createEmptyCluster(t, singleNode)
_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
defer cleanupFn()
defer cleanupEmptyCluster()
sqlDB.Exec(t, `CREATE USER alice`)
sqlDB.Exec(t, `CREATE USER bob`)
sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
checkQuery := "SELECT * FROM system.users"
sqlDBRestore.CheckQueryResults(t, checkQuery, sqlDB.QueryStr(t, checkQuery))
}
// Regression test for #50561.
func TestClusterRestoreEmptyDB(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const numAccounts = 10
_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
defer cleanupFn()
defer cleanupEmptyCluster()
sqlDB.Exec(t, `CREATE DATABASE some_db`)
sqlDB.Exec(t, `CREATE DATABASE some_db_2`)
sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
checkQuery := "SHOW DATABASES"
sqlDBRestore.CheckQueryResults(t, checkQuery, sqlDB.QueryStr(t, checkQuery))
}
func TestDisallowFullClusterRestoreOnNonFreshCluster(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const numAccounts = 10
_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
defer cleanupFn()
defer cleanupEmptyCluster()
sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
sqlDBRestore.Exec(t, `CREATE DATABASE foo`)
sqlDBRestore.ExpectErr(t,
"pq: full cluster restore can only be run on a cluster with no tables or databases but found 2 descriptors: foo, public",
`RESTORE FROM $1`, localFoo,
)
}
func TestClusterRestoreSystemTableOrdering(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const numAccounts = 10
_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
tcRestore, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode,
tempDir,
InitManualReplication, base.TestClusterArgs{})
defer cleanupFn()
defer cleanupEmptyCluster()
restoredSystemTables := make([]string, 0)
for _, server := range tcRestore.Servers {
registry := server.JobRegistry().(*jobs.Registry)
registry.TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
jobspb.TypeRestore: func(raw jobs.Resumer) jobs.Resumer {
r := raw.(*restoreResumer)
r.testingKnobs.duringSystemTableRestoration = func(systemTableName string) error {
restoredSystemTables = append(restoredSystemTables, systemTableName)
return nil
}
return r
},
}
}
sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
// Check that the settings table is the last of the system tables to be
// restored.
require.Equal(t, restoredSystemTables[len(restoredSystemTables)-1],
systemschema.SettingsTable.GetName())
}
func TestDisallowFullClusterRestoreOfNonFullBackup(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const numAccounts = 10
_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
defer cleanupFn()
defer cleanupEmptyCluster()
sqlDB.Exec(t, `BACKUP data.bank TO $1`, localFoo)
sqlDBRestore.ExpectErr(
t, "pq: full cluster RESTORE can only be used on full cluster BACKUP files",
`RESTORE FROM $1`, localFoo,
)
}
func TestAllowNonFullClusterRestoreOfFullBackup(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const numAccounts = 10
_, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
defer cleanupFn()
sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
sqlDB.Exec(t, `CREATE DATABASE data2`)
sqlDB.Exec(t, `RESTORE data.bank FROM $1 WITH into_db='data2'`, localFoo)
checkResults := "SELECT * FROM data.bank"
sqlDB.CheckQueryResults(t, checkResults, sqlDB.QueryStr(t, checkResults))
}
func TestRestoreFromFullClusterBackup(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const numAccounts = 10
_, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
defer cleanupFn()
sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
sqlDB.Exec(t, `DROP DATABASE data`)
t.Run("database", func(t *testing.T) {
sqlDB.Exec(t, `RESTORE DATABASE data FROM $1`, localFoo)
defer sqlDB.Exec(t, `DROP DATABASE data`)
sqlDB.CheckQueryResults(t, "SELECT count(*) FROM data.bank", [][]string{{"10"}})
})
t.Run("table", func(t *testing.T) {
sqlDB.Exec(t, `CREATE DATABASE data`)
defer sqlDB.Exec(t, `DROP DATABASE data`)
sqlDB.Exec(t, `RESTORE data.bank FROM $1`, localFoo)
sqlDB.CheckQueryResults(t, "SELECT count(*) FROM data.bank", [][]string{{"10"}})
})
t.Run("tables", func(t *testing.T) {
sqlDB.Exec(t, `CREATE DATABASE data`)
defer sqlDB.Exec(t, `DROP DATABASE data`)
sqlDB.Exec(t, `RESTORE data.* FROM $1`, localFoo)
sqlDB.CheckQueryResults(t, "SELECT count(*) FROM data.bank", [][]string{{"10"}})
})
t.Run("system tables", func(t *testing.T) {
sqlDB.Exec(t, `CREATE DATABASE temp_sys`)
sqlDB.Exec(t, `RESTORE system.users FROM $1 WITH into_db='temp_sys'`, localFoo)
sqlDB.CheckQueryResults(t, "SELECT * FROM temp_sys.users", sqlDB.QueryStr(t, "SELECT * FROM system.users"))
})
}
func TestCreateDBAndTableIncrementalFullClusterBackup(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
_, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, 0, InitManualReplication)
defer cleanupFn()
sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
sqlDB.Exec(t, `CREATE DATABASE foo`)
sqlDB.Exec(t, `CREATE TABLE foo.bar (a int)`)
// Ensure that the new backup succeeds.
sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
}
// TestClusterRestoreFailCleanup tests that a failed RESTORE is cleaned up.
func TestClusterRestoreFailCleanup(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderRace(t, "takes >1 min under race")
params := base.TestServerArgs{}
// Disable GC job so that the final check of crdb_internal.tables is
// guaranteed to not be cleaned up. Although this was never observed by a
// stress test, it is here for safety.
blockCh := make(chan struct{})
defer close(blockCh)
params.Knobs.GCJob = &sql.GCJobTestingKnobs{
RunBeforeResume: func(_ jobspb.JobID) error { <-blockCh; return nil },
}
const numAccounts = 1000
_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
defer cleanupFn()
// Setup the system systemTablesToVerify to ensure that they are copied to the new cluster.
// Populate system.users.
for i := 0; i < 1000; i++ {
sqlDB.Exec(t, fmt.Sprintf("CREATE USER maxroach%d", i))
}
sqlDB.Exec(t, `BACKUP TO 'nodelocal://1/missing-ssts'`)
// Bugger the backup by removing the SST files. (Note this messes up all of
// the backups, but there is only one at this point.)
if err := filepath.Walk(tempDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
t.Fatal(err)
}
if info.Name() == backupManifestName || !strings.HasSuffix(path, ".sst") {
return nil
}
return os.Remove(path)
}); err != nil {
t.Fatal(err)
}
// Create a non-corrupted backup.
// Populate system.jobs.
// Note: this is not the backup under test, this just serves as a job which
// should appear in the restore.
// This job will eventually fail since it will run from a new cluster.
sqlDB.Exec(t, `BACKUP data.bank TO 'nodelocal://0/throwawayjob'`)
sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
t.Run("during restoration of data", func(t *testing.T) {
_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
defer cleanupEmptyCluster()
sqlDBRestore.ExpectErr(t, "sst: no such file", `RESTORE FROM 'nodelocal://1/missing-ssts'`)
// Verify the failed RESTORE added some DROP tables.
// Note that the system tables here correspond to the temporary tables
// imported, not the system tables themselves.
sqlDBRestore.CheckQueryResults(t,
`SELECT name FROM system.crdb_internal.tables WHERE state = 'DROP' ORDER BY name`,
[][]string{
{"bank"},
{"comments"},
{"database_role_settings"},
{"jobs"},
{"locations"},
{"role_members"},
{"role_options"},
{"scheduled_jobs"},
{"settings"},
{"tenant_settings"},
{"ui"},
{"users"},
{"zones"},
},
)
})
// This test retries the job (by injected a retry error) after restoring a
// every system table that has a custom restore function. This tried to tease
// out any errors that may occur if some of the system table restoration
// functions are not idempotent.
t.Run("retry-during-custom-system-table-restore", func(t *testing.T) {
customRestoreSystemTables := make([]string, 0)
for table, config := range systemTableBackupConfiguration {
if config.customRestoreFunc != nil {
customRestoreSystemTables = append(customRestoreSystemTables, table)
}
}
for _, customRestoreSystemTable := range customRestoreSystemTables {
t.Run(customRestoreSystemTable, func(t *testing.T) {
args := base.TestClusterArgs{ServerArgs: base.TestServerArgs{
Knobs: base.TestingKnobs{JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals()},
}}
tcRestore, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, args)
defer cleanupEmptyCluster()
// Inject a retry error, that returns once.
alreadyErrored := false
for _, server := range tcRestore.Servers {
registry := server.JobRegistry().(*jobs.Registry)
registry.TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
jobspb.TypeRestore: func(raw jobs.Resumer) jobs.Resumer {
r := raw.(*restoreResumer)
r.testingKnobs.duringSystemTableRestoration = func(systemTableName string) error {
if !alreadyErrored && systemTableName == customRestoreSystemTable {
alreadyErrored = true
return jobs.MarkAsRetryJobError(errors.New("injected error"))
}
return nil
}
return r
},
}
}
// The initial restore will return an error, and restart.
sqlDBRestore.ExpectErr(t, `running execution from '.*' to '.*' on \d+ failed: injected error`, `RESTORE FROM $1`, localFoo)
// Reduce retry delays.
sqlDBRestore.Exec(t, "SET CLUSTER SETTING jobs.registry.retry.initial_delay = '1ms'")
// Expect the restore to succeed.
sqlDBRestore.CheckQueryResultsRetry(t,
`SELECT count(*) FROM [SHOW JOBS] WHERE job_type = 'RESTORE' AND status = 'succeeded'`,
[][]string{{"1"}})
})
}
})
t.Run("during system table restoration", func(t *testing.T) {
tcRestore, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
defer cleanupEmptyCluster()
// Bugger the backup by injecting a failure while restoring the system data.
for _, server := range tcRestore.Servers {
registry := server.JobRegistry().(*jobs.Registry)
registry.TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
jobspb.TypeRestore: func(raw jobs.Resumer) jobs.Resumer {
r := raw.(*restoreResumer)
r.testingKnobs.duringSystemTableRestoration = func(_ string) error {
return errors.New("injected error")
}
return r
},
}
}
sqlDBRestore.ExpectErr(t, "injected error", `RESTORE FROM $1`, localFoo)
// Verify the failed RESTORE added some DROP tables.
// Note that the system tables here correspond to the temporary tables
// imported, not the system tables themselves.
sqlDBRestore.CheckQueryResults(t,
`SELECT name FROM system.crdb_internal.tables WHERE state = 'DROP' ORDER BY name`,
[][]string{
{"bank"},
{"comments"},
{"database_role_settings"},
{"jobs"},
{"locations"},
{"role_members"},
{"role_options"},
{"scheduled_jobs"},
{"settings"},
{"tenant_settings"},
{"ui"},
{"users"},
{"zones"},
},
)
})
t.Run("after offline tables", func(t *testing.T) {
tcRestore, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
defer cleanupEmptyCluster()
// Bugger the backup by injecting a failure while restoring the system data.
for _, server := range tcRestore.Servers {
registry := server.JobRegistry().(*jobs.Registry)
registry.TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
jobspb.TypeRestore: func(raw jobs.Resumer) jobs.Resumer {
r := raw.(*restoreResumer)
r.testingKnobs.afterOfflineTableCreation = func() error {
return errors.New("injected error")
}
return r
},
}
}
sqlDBRestore.ExpectErr(t, "injected error", `RESTORE FROM $1`, localFoo)
})
}
// A regression test where dropped descriptors would appear in the set of
// `Descriptors`.
func TestDropDatabaseRevisionHistory(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
const numAccounts = 1
_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
defer cleanupFn()
sqlDB.Exec(t, `BACKUP TO $1 WITH revision_history`, localFoo)
sqlDB.Exec(t, `CREATE DATABASE same_name_db;`)
sqlDB.Exec(t, `DROP DATABASE same_name_db;`)
sqlDB.Exec(t, `CREATE DATABASE same_name_db;`)
sqlDB.Exec(t, `BACKUP TO $1 WITH revision_history`, localFoo)
_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
defer cleanupEmptyCluster()
sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
sqlDBRestore.ExpectErr(t, `database "same_name_db" already exists`, `CREATE DATABASE same_name_db`)
}
// TestClusterRevisionHistory tests that cluster backups can be taken with
// revision_history and correctly restore into various points in time.
func TestClusterRevisionHistory(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
type testCase struct {
ts string
check func(t *testing.T, runner *sqlutils.SQLRunner)
}
testCases := make([]testCase, 0)
ts := make([]string, 6)
var tc testCase
const numAccounts = 1
_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
defer cleanupFn()
sqlDB.Exec(t, `CREATE DATABASE d1`)
sqlDB.Exec(t, `CREATE TABLE d1.t (a INT)`)
sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts[0])
tc = testCase{
ts: ts[0],
check: func(t *testing.T, checkSQLDB *sqlutils.SQLRunner) {
checkSQLDB.ExpectErr(t, `database "d1" already exists`, `CREATE DATABASE d1`)
checkSQLDB.Exec(t, `CREATE DATABASE d2`)
},
}
testCases = append(testCases, tc)
sqlDB.Exec(t, `CREATE DATABASE d2`)
sqlDB.Exec(t, `CREATE TABLE d2.t (a INT)`)
sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts[1])
tc = testCase{
ts: ts[1],
check: func(t *testing.T, checkSQLDB *sqlutils.SQLRunner) {
// Expect both databases to exist at this point.
checkSQLDB.ExpectErr(t, `database "d1" already exists`, `CREATE DATABASE d1`)
checkSQLDB.ExpectErr(t, `database "d2" already exists`, `CREATE DATABASE d2`)
},
}
testCases = append(testCases, tc)
sqlDB.Exec(t, `DROP DATABASE d1`)
sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts[2])
tc = testCase{
ts: ts[2],
check: func(t *testing.T, checkSQLDB *sqlutils.SQLRunner) {
checkSQLDB.Exec(t, `CREATE DATABASE d1`)
checkSQLDB.Exec(t, `CREATE TABLE d1.t (a INT)`)
checkSQLDB.ExpectErr(t, `database "d2" already exists`, `CREATE DATABASE d2`)
checkSQLDB.ExpectErr(t, `relation "d2.public.t" already exists`, `CREATE TABLE d2.t (a INT)`)
},
}
testCases = append(testCases, tc)
sqlDB.Exec(t, `BACKUP TO $1 WITH revision_history`, localFoo)
sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts[3])
sqlDB.Exec(t, `DROP DATABASE d2;`)
tc = testCase{
ts: ts[3],
check: func(t *testing.T, checkSQLDB *sqlutils.SQLRunner) {
checkSQLDB.Exec(t, `CREATE DATABASE d1`)
checkSQLDB.Exec(t, `CREATE TABLE d1.t (a INT)`)
checkSQLDB.ExpectErr(t, `database "d2" already exists`, `CREATE DATABASE d2`)
checkSQLDB.ExpectErr(t, `relation "d2.public.t" already exists`, `CREATE TABLE d2.t (a INT)`)
},
}
testCases = append(testCases, tc)
sqlDB.Exec(t, `BACKUP TO $1 WITH revision_history`, localFoo)
sqlDB.Exec(t, `CREATE DATABASE d1`)
sqlDB.Exec(t, `CREATE TABLE d1.t (a INT)`)
sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts[4])
tc = testCase{
ts: ts[4],
check: func(t *testing.T, checkSQLDB *sqlutils.SQLRunner) {
checkSQLDB.ExpectErr(t, `database "d1" already exists`, `CREATE DATABASE d1`)
checkSQLDB.ExpectErr(t, `relation "d1.public.t" already exists`, `CREATE TABLE d1.t (a INT)`)
checkSQLDB.Exec(t, `CREATE DATABASE d2`)
checkSQLDB.Exec(t, `CREATE TABLE d2.t (a INT)`)
},
}
testCases = append(testCases, tc)
sqlDB.Exec(t, `DROP DATABASE d1`)
sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts[5])
tc = testCase{
ts: ts[5],
check: func(t *testing.T, checkSQLDB *sqlutils.SQLRunner) {
checkSQLDB.Exec(t, `CREATE DATABASE d1`)
checkSQLDB.Exec(t, `CREATE TABLE d1.t (a INT)`)
checkSQLDB.Exec(t, `CREATE DATABASE d2`)
checkSQLDB.Exec(t, `CREATE TABLE d2.t (a INT)`)
},
}
testCases = append(testCases, tc)
sqlDB.Exec(t, `BACKUP TO $1 WITH revision_history`, localFoo)
for i, testCase := range testCases {
t.Run(fmt.Sprintf("t%d", i), func(t *testing.T) {
_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
defer cleanupEmptyCluster()
sqlDBRestore.Exec(t, `RESTORE FROM $1 AS OF SYSTEM TIME `+testCase.ts, localFoo)
testCase.check(t, sqlDBRestore)
})
}
}
// TestReintroduceOfflineSpans is a regression test for #62564, which tracks a
// bug where AddSSTable requests to OFFLINE tables may be missed by cluster
// incremental backups since they can write at a timestamp older than the last
// backup.
func TestReintroduceOfflineSpans(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderRace(t, "likely slow under race")
// Block restores on the source cluster.
blockDBRestore := make(chan struct{})
dbRestoreStarted := make(chan struct{})
// The data is split such that there will be 10 span entries to process.
restoreBlockEntiresThreshold := 4
entriesCount := 0
params := base.TestClusterArgs{}
knobs := base.TestingKnobs{
DistSQL: &execinfra.TestingKnobs{
BackupRestoreTestingKnobs: &sql.BackupRestoreTestingKnobs{
RunAfterProcessingRestoreSpanEntry: func(_ context.Context) {
if entriesCount == 0 {
close(dbRestoreStarted)
}
if entriesCount == restoreBlockEntiresThreshold {
<-blockDBRestore
}
entriesCount++
},
}},
}
params.ServerArgs.Knobs = knobs
const numAccounts = 1000
ctx := context.Background()
_, srcDB, tempDir, cleanupSrc := backupRestoreTestSetupWithParams(t, singleNode, numAccounts, InitManualReplication, params)
defer cleanupSrc()
dbBackupLoc := "nodelocal://0/my_db_backup"
clusterBackupLoc := "nodelocal://0/my_cluster_backup"
// the small test-case will get entirely buffered/merged by small-file merging
// and not report any progress in the meantime unless it is disabled.
srcDB.Exec(t, `SET CLUSTER SETTING bulkio.backup.file_size = '1'`)
// Test servers only have 128MB root memory monitors, reduce the buffer size
// so we don't see memory errors.
srcDB.Exec(t, `SET CLUSTER SETTING bulkio.backup.merge_file_buffer_size = '1MiB'`)
// Take a backup that we'll use to create an OFFLINE descriptor.
srcDB.Exec(t, `CREATE INDEX new_idx ON data.bank (balance)`)
srcDB.Exec(t, `BACKUP DATABASE data TO $1 WITH revision_history`, dbBackupLoc)
srcDB.Exec(t, `CREATE DATABASE restoredb;`)
// Take a base full backup.
srcDB.Exec(t, `BACKUP TO $1 WITH revision_history`, clusterBackupLoc)
var g errgroup.Group
g.Go(func() error {
_, err := srcDB.DB.ExecContext(ctx, `RESTORE data.bank FROM $1 WITH into_db='restoredb'`, dbBackupLoc)
return err
})
// Take an incremental backup after the database restore starts.
<-dbRestoreStarted
srcDB.Exec(t, `BACKUP TO $1 WITH revision_history`, clusterBackupLoc)
var tsMidRestore string
srcDB.QueryRow(t, "SELECT cluster_logical_timestamp()").Scan(&tsMidRestore)
// Allow the restore to finish. This will issue AddSSTable requests at a
// timestamp that is before the last incremental we just took.
close(blockDBRestore)
// Wait for the database restore to finish, and take another incremental
// backup that will miss the AddSSTable writes.
require.NoError(t, g.Wait())
var tsBefore string
srcDB.QueryRow(t, "SELECT cluster_logical_timestamp()").Scan(&tsBefore)
// Drop an index on the restored table to ensure that the dropped index was
// also re-included.
srcDB.Exec(t, `DROP INDEX new_idx`)
srcDB.Exec(t, `BACKUP TO $1 WITH revision_history`, clusterBackupLoc)
t.Run("spans-reintroduced", func(t *testing.T) {
_, destDB, cleanupDst := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
defer cleanupDst()
// Restore the incremental backup chain that has missing writes.
destDB.Exec(t, `RESTORE FROM $1 AS OF SYSTEM TIME `+tsBefore, clusterBackupLoc)
// Assert that the restored database has the same number of rows in both the
// source and destination cluster.
checkQuery := `SELECT count(*) FROM restoredb.bank AS OF SYSTEM TIME ` + tsBefore
expectedCount := srcDB.QueryStr(t, checkQuery)
destDB.CheckQueryResults(t, `SELECT count(*) FROM restoredb.bank`, expectedCount)
checkQuery = `SELECT count(*) FROM restoredb.bank@new_idx AS OF SYSTEM TIME ` + tsBefore
expectedCount = srcDB.QueryStr(t, checkQuery)
destDB.CheckQueryResults(t, `SELECT count(*) FROM restoredb.bank@new_idx`, expectedCount)
})
t.Run("restore-canceled", func(t *testing.T) {
args := base.TestClusterArgs{ServerArgs: base.TestServerArgs{
Knobs: base.TestingKnobs{JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals()}},
}
_, destDB, cleanupDst := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, args)
defer cleanupDst()
destDB.Exec(t, `RESTORE FROM $1 AS OF SYSTEM TIME `+tsMidRestore, clusterBackupLoc)
// Wait for the cluster restore job to finish, as well as the restored RESTORE TABLE
// job to cancel.
destDB.CheckQueryResultsRetry(t, `
SELECT description, status FROM [SHOW JOBS]
WHERE job_type = 'RESTORE' AND status NOT IN ('succeeded', 'canceled')`,
[][]string{},
)
// The cluster restore should succeed, but the table restore should have failed.
destDB.CheckQueryResults(t,
`SELECT status, count(*) FROM [SHOW JOBS] WHERE job_type = 'RESTORE' GROUP BY status ORDER BY status`,
[][]string{{"canceled", "1"}, {"succeeded", "1"}})
destDB.ExpectErr(t, `relation "restoredb.bank" does not exist`, `SELECT count(*) FROM restoredb.bank`)
})
}
// TestClusterRevisionDoesNotBackupOptOutSystemTables is a regression test for a
// bug that was introduced where we would include revisions for descriptors that
// are not supposed to be backed up egs: system tables that are opted out.
//
// The test would previously fail with an error that the descriptors table (an
// opt out system table) did not have a span covering the time between the
// `EndTime` of the first backup and second backup, since there are no revisions
// to it between those backups.
func TestClusterRevisionDoesNotBackupOptOutSystemTables(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
tc, _, _, cleanup := backupRestoreTestSetup(t, singleNode, 10, InitManualReplication)
conn := tc.Conns[0]
sqlDB := sqlutils.MakeSQLRunner(conn)
defer cleanup()
sqlDB.Exec(t, `CREATE DATABASE test;`)
sqlDB.Exec(t, `USE test;`)
sqlDB.Exec(t, `CREATE TABLE foo (id INT);`)
sqlDB.Exec(t, `BACKUP TO 'nodelocal://1/foo' WITH revision_history;`)
sqlDB.Exec(t, `BACKUP TO 'nodelocal://1/foo' WITH revision_history;`)
sqlDB.Exec(t, `CREATE TABLE bar (id INT);`)
sqlDB.Exec(t, `BACKUP TO 'nodelocal://1/foo' WITH revision_history;`)
}
func TestRestoreWithRecreatedDefaultDB(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
sqlDB, tempDir, cleanupFn := createEmptyCluster(t, singleNode)
_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
defer cleanupFn()
defer cleanupEmptyCluster()
sqlDB.Exec(t, `
DROP DATABASE defaultdb;
CREATE DATABASE defaultdb;
`)
row := sqlDB.QueryRow(t, `SELECT id FROM system.namespace WHERE name = 'defaultdb'`)
var expectedDefaultDBID string
row.Scan(&expectedDefaultDBID)
sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
sqlDBRestore.CheckQueryResults(t, `SELECT * FROM system.namespace WHERE name = 'defaultdb'`, [][]string{
{"0", "0", "defaultdb", expectedDefaultDBID},
})
}
func TestRestoreWithDroppedDefaultDB(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
sqlDB, tempDir, cleanupFn := createEmptyCluster(t, singleNode)
_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
defer cleanupFn()
defer cleanupEmptyCluster()
sqlDB.Exec(t, `
DROP DATABASE defaultdb;
`)
sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
sqlDBRestore.CheckQueryResults(t, `SELECT count(*) FROM system.namespace WHERE name = 'defaultdb'`, [][]string{
{"0"},
})
}
func TestRestoreToClusterWithDroppedDefaultDB(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
sqlDB, tempDir, cleanupFn := createEmptyCluster(t, singleNode)
_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
defer cleanupFn()
defer cleanupEmptyCluster()
expectedRow := sqlDB.QueryRow(t, `SELECT * FROM system.namespace WHERE name = 'defaultdb'`)
var parentID, parentSchemaID, ID int
var name string
expectedRow.Scan(&parentID, &parentSchemaID, &name, &ID)
sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
sqlDBRestore.Exec(t, `
DROP DATABASE defaultdb;
`)
sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
sqlDBRestore.CheckQueryResults(t, `SELECT * FROM system.namespace WHERE name = 'defaultdb'`, [][]string{
{fmt.Sprint(parentID), fmt.Sprint(parentSchemaID), name, fmt.Sprint(ID)},
})
}
| pkg/ccl/backupccl/full_cluster_backup_restore_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/f475361ee075d1746bcd148e6da004fa20dece51 | [
0.9981619715690613,
0.02749834582209587,
0.00016648379096295685,
0.00032256869599223137,
0.15749472379684448
] |
{
"id": 3,
"code_window": [
"\tconst jobsQuery = `\n",
"SELECT id, status, created, payload, progress, created_by_type, created_by_id, claim_instance_id\n",
"FROM system.jobs\n",
"\t`\n",
"\t// Pause SQL Stats compaction job to ensure the test is deterministic.\n",
"\tsqlDB.Exec(t, `PAUSE SCHEDULES SELECT id FROM [SHOW SCHEDULES FOR SQL STATISTICS]`)\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"WHERE id NOT IN\n",
"(\n",
"\tSELECT job_id FROM [SHOW AUTOMATIC JOBS]\n",
" WHERE job_type = 'AUTO SPAN CONFIG RECONCILIATION'\n",
")\n"
],
"file_path": "pkg/ccl/backupccl/full_cluster_backup_restore_test.go",
"type": "add",
"edit_start_line_idx": 97
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package quotapool
import (
"context"
"time"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/redact"
)
// Option is used to configure a quotapool.
type Option interface {
apply(*config)
}
// AcquisitionFunc is used to configure a quotapool to call a function after
// an acquisition has occurred.
type AcquisitionFunc func(
ctx context.Context, poolName string, r Request, start time.Time,
)
// OnAcquisition creates an Option to configure a callback upon acquisition.
// It is often useful for recording metrics.
func OnAcquisition(f AcquisitionFunc) Option {
return optionFunc(func(cfg *config) {
cfg.onAcquisition = f
})
}
// OnWaitStartFunc is the prototype for functions called to notify the start or
// finish of a waiting period when a request is blocked.
type OnWaitStartFunc func(
ctx context.Context, poolName string, r Request,
)
// OnWaitStart creates an Option to configure a callback which is called when a
// request blocks and has to wait for quota.
func OnWaitStart(onStart OnWaitStartFunc) Option {
return optionFunc(func(cfg *config) {
cfg.onWaitStart = onStart
})
}
// OnWaitFinish creates an Option to configure a callback which is called when a
// previously blocked request acquires resources.
func OnWaitFinish(onFinish AcquisitionFunc) Option {
return optionFunc(func(cfg *config) {
cfg.onWaitFinish = onFinish
})
}
// OnSlowAcquisition creates an Option to configure a callback upon slow
// acquisitions. Only one OnSlowAcquisition may be used. If multiple are
// specified only the last will be used.
func OnSlowAcquisition(threshold time.Duration, f SlowAcquisitionFunc) Option {
return optionFunc(func(cfg *config) {
cfg.slowAcquisitionThreshold = threshold
cfg.onSlowAcquisition = f
})
}
// LogSlowAcquisition is a SlowAcquisitionFunc.
func LogSlowAcquisition(ctx context.Context, poolName string, r Request, start time.Time) func() {
log.Warningf(ctx, "have been waiting %s attempting to acquire %s quota",
timeutil.Since(start), redact.Safe(poolName))
return func() {
log.Infof(ctx, "acquired %s quota after %s",
redact.Safe(poolName), timeutil.Since(start))
}
}
// SlowAcquisitionFunc is used to configure a quotapool to call a function when
// quota acquisition is slow. The returned callback is called when the
// acquisition occurs.
type SlowAcquisitionFunc func(
ctx context.Context, poolName string, r Request, start time.Time,
) (onAcquire func())
type optionFunc func(cfg *config)
func (f optionFunc) apply(cfg *config) { f(cfg) }
// WithTimeSource is used to configure a quotapool to use the provided
// TimeSource.
func WithTimeSource(ts timeutil.TimeSource) Option {
return optionFunc(func(cfg *config) {
cfg.timeSource = ts
})
}
// WithCloser allows the client to provide a channel which will lead to the
// AbstractPool being closed.
func WithCloser(closer <-chan struct{}) Option {
return optionFunc(func(cfg *config) {
cfg.closer = closer
})
}
// WithMinimumWait is used with the RateLimiter to control the minimum duration
// which a goroutine will sleep waiting for quota to accumulate. This
// can help avoid expensive spinning when the workload consists of many
// small acquisitions. If used with a regular (not rate limiting) quotapool,
// this option has no effect.
func WithMinimumWait(duration time.Duration) Option {
return optionFunc(func(cfg *config) {
cfg.minimumWait = duration
})
}
type config struct {
onAcquisition AcquisitionFunc
onSlowAcquisition SlowAcquisitionFunc
onWaitStart OnWaitStartFunc
onWaitFinish AcquisitionFunc
slowAcquisitionThreshold time.Duration
timeSource timeutil.TimeSource
closer <-chan struct{}
minimumWait time.Duration
}
var defaultConfig = config{
timeSource: timeutil.DefaultTimeSource{},
}
func initializeConfig(cfg *config, options ...Option) {
*cfg = defaultConfig
for _, opt := range options {
opt.apply(cfg)
}
}
| pkg/util/quotapool/config.go | 0 | https://github.com/cockroachdb/cockroach/commit/f475361ee075d1746bcd148e6da004fa20dece51 | [
0.00029782496858388186,
0.00019059976330026984,
0.00016564376710448414,
0.0001708874333417043,
0.000039414328057318926
] |
{
"id": 3,
"code_window": [
"\tconst jobsQuery = `\n",
"SELECT id, status, created, payload, progress, created_by_type, created_by_id, claim_instance_id\n",
"FROM system.jobs\n",
"\t`\n",
"\t// Pause SQL Stats compaction job to ensure the test is deterministic.\n",
"\tsqlDB.Exec(t, `PAUSE SCHEDULES SELECT id FROM [SHOW SCHEDULES FOR SQL STATISTICS]`)\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"WHERE id NOT IN\n",
"(\n",
"\tSELECT job_id FROM [SHOW AUTOMATIC JOBS]\n",
" WHERE job_type = 'AUTO SPAN CONFIG RECONCILIATION'\n",
")\n"
],
"file_path": "pkg/ccl/backupccl/full_cluster_backup_restore_test.go",
"type": "add",
"edit_start_line_idx": 97
} | // Copyright 2014 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package batcheval
import (
"context"
"time"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/spanset"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/uncertainty"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/storage/enginepb"
)
// DefaultDeclareKeys is the default implementation of Command.DeclareKeys.
func DefaultDeclareKeys(
_ ImmutableRangeState,
header *roachpb.Header,
req roachpb.Request,
latchSpans, _ *spanset.SpanSet,
_ time.Duration,
) {
access := spanset.SpanReadWrite
if roachpb.IsReadOnly(req) && !roachpb.IsLocking(req) {
access = spanset.SpanReadOnly
}
latchSpans.AddMVCC(access, req.Header().Span(), header.Timestamp)
}
// DefaultDeclareIsolatedKeys is similar to DefaultDeclareKeys, but it declares
// both lock spans in addition to latch spans. When used, commands will wait on
// locks and wait-queues owned by other transactions before evaluating. This
// ensures that the commands are fully isolated from conflicting transactions
// when it evaluated.
func DefaultDeclareIsolatedKeys(
_ ImmutableRangeState,
header *roachpb.Header,
req roachpb.Request,
latchSpans, lockSpans *spanset.SpanSet,
maxOffset time.Duration,
) {
access := spanset.SpanReadWrite
timestamp := header.Timestamp
if roachpb.IsReadOnly(req) && !roachpb.IsLocking(req) {
access = spanset.SpanReadOnly
// For non-locking reads, acquire read latches all the way up to the
// request's worst-case (i.e. global) uncertainty limit, because reads may
// observe writes all the way up to this timestamp.
//
// It is critical that reads declare latches up through their uncertainty
// interval so that they are properly synchronized with earlier writes that
// may have a happened-before relationship with the read. These writes could
// not have completed and returned to the client until they were durable in
// the Range's Raft log. However, they may not have been applied to the
// replica's state machine by the time the write was acknowledged, because
// Raft entry application occurs asynchronously with respect to the writer
// (see AckCommittedEntriesBeforeApplication). Latching is the only
// mechanism that ensures that any observers of the write wait for the write
// apply before reading.
//
// NOTE: we pass an empty lease status here, which means that observed
// timestamps collected by transactions will not be used. The actual
// uncertainty interval used by the request may be smaller (i.e. contain a
// local limit), but we can't determine that until after we have declared
// keys, acquired latches, and consulted the replica's lease.
in := uncertainty.ComputeInterval(header, kvserverpb.LeaseStatus{}, maxOffset)
timestamp.Forward(in.GlobalLimit)
}
latchSpans.AddMVCC(access, req.Header().Span(), timestamp)
lockSpans.AddNonMVCC(access, req.Header().Span())
}
// DeclareKeysForBatch adds all keys that the batch with the provided header
// touches to the given SpanSet. This does not include keys touched during the
// processing of the batch's individual commands.
func DeclareKeysForBatch(
rs ImmutableRangeState, header *roachpb.Header, latchSpans *spanset.SpanSet,
) {
if header.Txn != nil {
header.Txn.AssertInitialized(context.TODO())
latchSpans.AddNonMVCC(spanset.SpanReadOnly, roachpb.Span{
Key: keys.AbortSpanKey(rs.GetRangeID(), header.Txn.ID),
})
}
}
// declareAllKeys declares a non-MVCC write over every addressable key. This
// guarantees that the caller conflicts with any other command because every
// command must declare at least one addressable key, which is tested against
// in TestRequestsSerializeWithAllKeys.
func declareAllKeys(latchSpans *spanset.SpanSet) {
// NOTE: we don't actually know what the end key of the Range will
// be at the time of request evaluation (see ImmutableRangeState),
// so we simply declare a latch over the entire keyspace. This may
// extend beyond the Range, but this is ok for the purpose of
// acquiring latches.
latchSpans.AddNonMVCC(spanset.SpanReadWrite, roachpb.Span{Key: keys.LocalPrefix, EndKey: keys.LocalMax})
latchSpans.AddNonMVCC(spanset.SpanReadWrite, roachpb.Span{Key: keys.MinKey, EndKey: keys.MaxKey})
}
// CommandArgs contains all the arguments to a command.
// TODO(bdarnell): consider merging with kvserverbase.FilterArgs (which
// would probably require removing the EvalCtx field due to import order
// constraints).
type CommandArgs struct {
EvalCtx EvalContext
Header roachpb.Header
Args roachpb.Request
// *Stats should be mutated to reflect any writes made by the command.
Stats *enginepb.MVCCStats
Uncertainty uncertainty.Interval
}
| pkg/kv/kvserver/batcheval/declare.go | 0 | https://github.com/cockroachdb/cockroach/commit/f475361ee075d1746bcd148e6da004fa20dece51 | [
0.00032164223375730217,
0.0001909041020553559,
0.00016618022345937788,
0.00016928430704865605,
0.000044957745558349416
] |
{
"id": 3,
"code_window": [
"\tconst jobsQuery = `\n",
"SELECT id, status, created, payload, progress, created_by_type, created_by_id, claim_instance_id\n",
"FROM system.jobs\n",
"\t`\n",
"\t// Pause SQL Stats compaction job to ensure the test is deterministic.\n",
"\tsqlDB.Exec(t, `PAUSE SCHEDULES SELECT id FROM [SHOW SCHEDULES FOR SQL STATISTICS]`)\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"WHERE id NOT IN\n",
"(\n",
"\tSELECT job_id FROM [SHOW AUTOMATIC JOBS]\n",
" WHERE job_type = 'AUTO SPAN CONFIG RECONCILIATION'\n",
")\n"
],
"file_path": "pkg/ccl/backupccl/full_cluster_backup_restore_test.go",
"type": "add",
"edit_start_line_idx": 97
} | // Code generated by generate-staticcheck; DO NOT EDIT.
//go:build bazel
// +build bazel
package sa2001
import (
util "github.com/cockroachdb/cockroach/pkg/testutils/lint/passes/staticcheck"
"golang.org/x/tools/go/analysis"
"honnef.co/go/tools/staticcheck"
)
var Analyzer *analysis.Analyzer
func init() {
for _, analyzer := range staticcheck.Analyzers {
if analyzer.Analyzer.Name == "SA2001" {
Analyzer = analyzer.Analyzer
break
}
}
util.MungeAnalyzer(Analyzer)
}
| build/bazelutil/staticcheckanalyzers/sa2001/analyzer.go | 0 | https://github.com/cockroachdb/cockroach/commit/f475361ee075d1746bcd148e6da004fa20dece51 | [
0.00017242335889022797,
0.0001709070784272626,
0.00016968007548712194,
0.00017061783000826836,
0.0000011384649951651227
] |
{
"id": 4,
"code_window": [
"\t\tif execCfg.Codec.ForSystemTenant() &&\n",
"\t\t\tdeprecatedIsProtected(ctx, ptsCache, droppedAtTime, sp) {\n",
"\t\t\treturn true, nil\n",
"\t\t}\n",
"\n",
"\t\t// Skip checking the new protected timestamp subsystem if the testing knob\n",
"\t\t// says as such.\n",
"\t\tif execCfg.GCJobTestingKnobs.DisableNewProtectedTimestampSubsystemCheck {\n",
"\t\t\treturn false, nil\n",
"\t\t}\n",
"\n",
"\t\tspanConfigRecords, err := kvAccessor.GetSpanConfigRecords(ctx, spanconfig.Targets{\n",
"\t\t\tspanconfig.MakeTargetFromSpan(sp),\n",
"\t\t})\n",
"\t\tif err != nil {\n",
"\t\t\treturn false, err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/gcjob/refresh_statuses.go",
"type": "replace",
"edit_start_line_idx": 316
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package gcjob
import (
"context"
"math"
"time"
"github.com/cockroachdb/cockroach/pkg/config/zonepb"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/spanconfig"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/errors"
)
var maxDeadline = timeutil.Unix(0, math.MaxInt64)
// refreshTables updates the status of tables/indexes that are waiting to be
// GC'd.
// It returns whether or not any index/table has expired and the duration until
// the next index/table expires.
func refreshTables(
ctx context.Context,
execCfg *sql.ExecutorConfig,
tableIDs []descpb.ID,
tableDropTimes map[descpb.ID]int64,
indexDropTimes map[descpb.IndexID]int64,
jobID jobspb.JobID,
progress *jobspb.SchemaChangeGCProgress,
) (expired bool, earliestDeadline time.Time) {
earliestDeadline = maxDeadline
var haveAnyMissing bool
for _, tableID := range tableIDs {
tableHasExpiredElem, tableIsMissing, deadline := updateStatusForGCElements(
ctx,
execCfg,
jobID,
tableID,
tableDropTimes, indexDropTimes,
progress,
)
expired = expired || tableHasExpiredElem
haveAnyMissing = haveAnyMissing || tableIsMissing
if deadline.Before(earliestDeadline) {
earliestDeadline = deadline
}
}
if expired || haveAnyMissing {
persistProgress(ctx, execCfg, jobID, progress, sql.RunningStatusWaitingGC)
}
return expired, earliestDeadline
}
// updateStatusForGCElements updates the status for indexes on this table if any
// are waiting for GC. If the table is waiting for GC then the status of the table
// will be updated.
// It returns whether any indexes or the table have expired as well as the time
// until the next index expires if there are any more to drop. It also returns
// whether the table descriptor is missing indicating that it was gc'd by
// another job, in which case the progress will have been updated.
func updateStatusForGCElements(
ctx context.Context,
execCfg *sql.ExecutorConfig,
jobID jobspb.JobID,
tableID descpb.ID,
tableDropTimes map[descpb.ID]int64,
indexDropTimes map[descpb.IndexID]int64,
progress *jobspb.SchemaChangeGCProgress,
) (expired, missing bool, timeToNextTrigger time.Time) {
defTTL := execCfg.DefaultZoneConfig.GC.TTLSeconds
cfg := execCfg.SystemConfig.GetSystemConfig()
protectedtsCache := execCfg.ProtectedTimestampProvider
earliestDeadline := timeutil.Unix(0, int64(math.MaxInt64))
if err := sql.DescsTxn(ctx, execCfg, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error {
table, err := col.Direct().MustGetTableDescByID(ctx, txn, tableID)
if err != nil {
return err
}
zoneCfg, err := cfg.GetZoneConfigForObject(execCfg.Codec, uint32(tableID))
if err != nil {
log.Errorf(ctx, "zone config for desc: %d, err = %+v", tableID, err)
return nil
}
tableTTL := getTableTTL(defTTL, zoneCfg)
// Update the status of the table if the table was dropped.
if table.Dropped() {
deadline := updateTableStatus(ctx, execCfg, jobID, int64(tableTTL), table, tableDropTimes, progress)
if timeutil.Until(deadline) < 0 {
expired = true
} else if deadline.Before(earliestDeadline) {
earliestDeadline = deadline
}
}
// Update the status of any indexes waiting for GC.
indexesExpired, deadline := updateIndexesStatus(
ctx, execCfg, jobID, tableTTL, table, protectedtsCache, zoneCfg, indexDropTimes, progress,
)
if indexesExpired {
expired = true
}
if deadline.Before(earliestDeadline) {
earliestDeadline = deadline
}
return nil
}); err != nil {
if errors.Is(err, catalog.ErrDescriptorNotFound) {
log.Warningf(ctx, "table %d not found, marking as GC'd", tableID)
markTableGCed(ctx, tableID, progress)
return false, true, maxDeadline
}
log.Warningf(ctx, "error while calculating GC time for table %d, err: %+v", tableID, err)
return false, false, maxDeadline
}
return expired, false, earliestDeadline
}
// updateTableStatus sets the status the table to DELETING if the GC TTL has
// expired.
func updateTableStatus(
ctx context.Context,
execCfg *sql.ExecutorConfig,
jobID jobspb.JobID,
ttlSeconds int64,
table catalog.TableDescriptor,
tableDropTimes map[descpb.ID]int64,
progress *jobspb.SchemaChangeGCProgress,
) time.Time {
deadline := timeutil.Unix(0, int64(math.MaxInt64))
sp := table.TableSpan(execCfg.Codec)
for i, t := range progress.Tables {
droppedTable := &progress.Tables[i]
if droppedTable.ID != table.GetID() || droppedTable.Status == jobspb.SchemaChangeGCProgress_DELETED {
continue
}
deadlineNanos := tableDropTimes[t.ID] + ttlSeconds*time.Second.Nanoseconds()
deadline = timeutil.Unix(0, deadlineNanos)
isProtected, err := isProtected(
ctx,
jobID,
tableDropTimes[t.ID],
execCfg,
execCfg.SpanConfigKVAccessor,
execCfg.ProtectedTimestampProvider,
sp,
)
if err != nil {
log.Errorf(ctx, "error checking protection status %v", err)
// We don't want to make GC decisions if we can't validate the protection
// status of a table. We don't change the status of the table to DELETING
// and simply return a high deadline value; The GC job will be retried
// automatically up the stack.
return maxDeadline
}
if isProtected {
log.Infof(ctx, "a timestamp protection delayed GC of table %d", t.ID)
return maxDeadline
}
lifetime := timeutil.Until(deadline)
if lifetime < 0 {
if log.V(2) {
log.Infof(ctx, "detected expired table %d", t.ID)
}
droppedTable.Status = jobspb.SchemaChangeGCProgress_DELETING
} else {
if log.V(2) {
log.Infof(ctx, "table %d still has %+v until GC", t.ID, lifetime)
}
}
break
}
return deadline
}
// updateIndexesStatus updates the status on every index that is waiting for GC
// TTL in this table.
// It returns whether any indexes have expired and the timestamp of when another
// index should be GC'd, if any, otherwise MaxInt.
func updateIndexesStatus(
ctx context.Context,
execCfg *sql.ExecutorConfig,
jobID jobspb.JobID,
tableTTL int32,
table catalog.TableDescriptor,
protectedtsCache protectedts.Cache,
zoneCfg *zonepb.ZoneConfig,
indexDropTimes map[descpb.IndexID]int64,
progress *jobspb.SchemaChangeGCProgress,
) (expired bool, soonestDeadline time.Time) {
// Update the deadline for indexes that are being dropped, if any.
soonestDeadline = timeutil.Unix(0, int64(math.MaxInt64))
for i := 0; i < len(progress.Indexes); i++ {
idxProgress := &progress.Indexes[i]
if idxProgress.Status == jobspb.SchemaChangeGCProgress_DELETED {
continue
}
sp := table.IndexSpan(execCfg.Codec, idxProgress.IndexID)
ttlSeconds := getIndexTTL(tableTTL, zoneCfg, idxProgress.IndexID)
deadlineNanos := indexDropTimes[idxProgress.IndexID] + int64(ttlSeconds)*time.Second.Nanoseconds()
deadline := timeutil.Unix(0, deadlineNanos)
isProtected, err := isProtected(
ctx,
jobID,
indexDropTimes[idxProgress.IndexID],
execCfg,
execCfg.SpanConfigKVAccessor,
protectedtsCache,
sp,
)
if err != nil {
log.Errorf(ctx, "error checking protection status %v", err)
continue
}
if isProtected {
log.Infof(ctx, "a timestamp protection delayed GC of index %d from table %d", idxProgress.IndexID, table.GetID())
continue
}
lifetime := time.Until(deadline)
if lifetime > 0 {
if log.V(2) {
log.Infof(ctx, "index %d from table %d still has %+v until GC", idxProgress.IndexID, table.GetID(), lifetime)
}
}
if lifetime < 0 {
expired = true
if log.V(2) {
log.Infof(ctx, "detected expired index %d from table %d", idxProgress.IndexID, table.GetID())
}
idxProgress.Status = jobspb.SchemaChangeGCProgress_DELETING
} else if deadline.Before(soonestDeadline) {
soonestDeadline = deadline
}
}
return expired, soonestDeadline
}
// Helpers.
func getIndexTTL(tableTTL int32, placeholder *zonepb.ZoneConfig, indexID descpb.IndexID) int32 {
ttlSeconds := tableTTL
if placeholder != nil {
if subzone := placeholder.GetSubzone(
uint32(indexID), ""); subzone != nil && subzone.Config.GC != nil {
ttlSeconds = subzone.Config.GC.TTLSeconds
}
}
return ttlSeconds
}
func getTableTTL(defTTL int32, zoneCfg *zonepb.ZoneConfig) int32 {
ttlSeconds := defTTL
if zoneCfg != nil {
ttlSeconds = zoneCfg.GC.TTLSeconds
}
return ttlSeconds
}
// isProtected returns true if the supplied span is considered protected, and
// thus exempt from GC-ing, given the wall time at which it was dropped.
//
// This function is intended for table/index spans -- for spans that cover a
// secondary tenant's keyspace, checkout `isTenantProtected` instead.
func isProtected(
ctx context.Context,
jobID jobspb.JobID,
droppedAtTime int64,
execCfg *sql.ExecutorConfig,
kvAccessor spanconfig.KVAccessor,
ptsCache protectedts.Cache,
sp roachpb.Span,
) (bool, error) {
// Wrap this in a closure sp we can pass the protection status to the testing
// knob.
isProtected, err := func() (bool, error) {
// We check the old protected timestamp subsystem for protected timestamps
// if this is the GC job of the system tenant.
if execCfg.Codec.ForSystemTenant() &&
deprecatedIsProtected(ctx, ptsCache, droppedAtTime, sp) {
return true, nil
}
// Skip checking the new protected timestamp subsystem if the testing knob
// says as such.
if execCfg.GCJobTestingKnobs.DisableNewProtectedTimestampSubsystemCheck {
return false, nil
}
spanConfigRecords, err := kvAccessor.GetSpanConfigRecords(ctx, spanconfig.Targets{
spanconfig.MakeTargetFromSpan(sp),
})
if err != nil {
return false, err
}
_, tenID, err := keys.DecodeTenantPrefix(execCfg.Codec.TenantPrefix())
if err != nil {
return false, err
}
systemSpanConfigs, err := kvAccessor.GetAllSystemSpanConfigsThatApply(ctx, tenID)
if err != nil {
return false, err
}
// Collect all protected timestamps that apply to the given span; both by
// virtue of span configs and system span configs.
var protectedTimestamps []hlc.Timestamp
collectProtectedTimestamps := func(configs ...roachpb.SpanConfig) {
for _, config := range configs {
for _, protectionPolicy := range config.GCPolicy.ProtectionPolicies {
// We don't consider protected timestamps written by backups if the span
// is indicated as "excluded from backup". Checkout the field
// descriptions for more details about this coupling.
if config.ExcludeDataFromBackup && protectionPolicy.IgnoreIfExcludedFromBackup {
continue
}
protectedTimestamps = append(protectedTimestamps, protectionPolicy.ProtectedTimestamp)
}
}
}
for _, record := range spanConfigRecords {
collectProtectedTimestamps(record.GetConfig())
}
collectProtectedTimestamps(systemSpanConfigs...)
for _, protectedTimestamp := range protectedTimestamps {
if protectedTimestamp.WallTime < droppedAtTime {
return true, nil
}
}
return false, nil
}()
if err != nil {
return false, err
}
if fn := execCfg.GCJobTestingKnobs.RunAfterIsProtectedCheck; fn != nil {
fn(jobID, isProtected)
}
return isProtected, nil
}
// Returns whether or not a key in the given spans is protected.
// TODO(pbardea): If the TTL for this index/table expired and we're only blocked
// on a protected timestamp, this may be useful information to surface to the
// user.
func deprecatedIsProtected(
ctx context.Context, protectedtsCache protectedts.Cache, atTime int64, sp roachpb.Span,
) bool {
protected := false
protectedtsCache.Iterate(ctx,
sp.Key, sp.EndKey,
func(r *ptpb.Record) (wantMore bool) {
// If we encounter any protected timestamp records in this span, we
// can't GC.
if r.Timestamp.WallTime < atTime {
protected = true
return false
}
return true
})
return protected
}
// isTenantProtected returns true if there exist any protected timestamp records
// written by the system tenant, that targets the tenant with tenantID.
func isTenantProtected(
ctx context.Context, atTime hlc.Timestamp, tenantID roachpb.TenantID, execCfg *sql.ExecutorConfig,
) (bool, error) {
if !execCfg.Codec.ForSystemTenant() {
return false, errors.AssertionFailedf("isTenantProtected incorrectly invoked by secondary tenant")
}
isProtected := false
ptsProvider := execCfg.ProtectedTimestampProvider
if err := execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
ptsState, err := ptsProvider.GetState(ctx, txn)
if err != nil {
return errors.Wrap(err, "failed to get protectedts State")
}
ptsStateReader := spanconfig.NewProtectedTimestampStateReader(ctx, ptsState)
// First check if the system tenant has any cluster level protections that protect
// all secondary tenants.
clusterProtections := ptsStateReader.GetProtectionPoliciesForCluster()
for _, p := range clusterProtections {
if p.ProtectedTimestamp.Less(atTime) {
isProtected = true
return nil
}
}
// Now check if the system tenant has any protections that target the
// tenantID's keyspace.
protectionsOnTenant := ptsStateReader.GetProtectionsForTenant(tenantID)
for _, p := range protectionsOnTenant {
if p.ProtectedTimestamp.Less(atTime) {
isProtected = true
return nil
}
}
return nil
}); err != nil {
return false, err
}
return isProtected, nil
}
// refreshTenant updates the status of tenant that is waiting to be GC'd. It
// returns whether or the tenant has expired or the duration until it expires.
func refreshTenant(
ctx context.Context,
execCfg *sql.ExecutorConfig,
dropTime int64,
details *jobspb.SchemaChangeGCDetails,
progress *jobspb.SchemaChangeGCProgress,
) (expired bool, _ time.Time, _ error) {
if progress.Tenant.Status != jobspb.SchemaChangeGCProgress_WAITING_FOR_GC {
return true, time.Time{}, nil
}
// Read the tenant's GC TTL to check if the tenant's data has expired.
tenID := details.Tenant.ID
cfg := execCfg.SystemConfig.GetSystemConfig()
tenantTTLSeconds := execCfg.DefaultZoneConfig.GC.TTLSeconds
zoneCfg, err := cfg.GetZoneConfigForObject(keys.MakeSQLCodec(roachpb.MakeTenantID(tenID)), 0)
if err == nil {
tenantTTLSeconds = zoneCfg.GC.TTLSeconds
} else {
log.Errorf(ctx, "zone config for tenants range: err = %+v", err)
}
deadlineNanos := dropTime + int64(tenantTTLSeconds)*time.Second.Nanoseconds()
deadlineUnix := timeutil.Unix(0, deadlineNanos)
if timeutil.Now().UnixNano() >= deadlineNanos {
// If the tenant's GC TTL has elapsed, check if there are any protected timestamp records
// that apply to the tenant keyspace.
atTime := hlc.Timestamp{WallTime: dropTime}
isProtected, err := isTenantProtected(ctx, atTime, roachpb.MakeTenantID(tenID), execCfg)
if err != nil {
return false, time.Time{}, err
}
if isProtected {
log.Infof(ctx, "GC TTL for dropped tenant %d has expired, but protected timestamp "+
"record(s) on the tenant keyspace are preventing GC", tenID)
return false, deadlineUnix, nil
}
// At this point, the tenant's keyspace is ready for GC.
progress.Tenant.Status = jobspb.SchemaChangeGCProgress_DELETING
return true, deadlineUnix, nil
}
return false, deadlineUnix, nil
}
| pkg/sql/gcjob/refresh_statuses.go | 1 | https://github.com/cockroachdb/cockroach/commit/f475361ee075d1746bcd148e6da004fa20dece51 | [
0.9984184503555298,
0.061073604971170425,
0.00015821716806385666,
0.0004944858374074101,
0.23612338304519653
] |
{
"id": 4,
"code_window": [
"\t\tif execCfg.Codec.ForSystemTenant() &&\n",
"\t\t\tdeprecatedIsProtected(ctx, ptsCache, droppedAtTime, sp) {\n",
"\t\t\treturn true, nil\n",
"\t\t}\n",
"\n",
"\t\t// Skip checking the new protected timestamp subsystem if the testing knob\n",
"\t\t// says as such.\n",
"\t\tif execCfg.GCJobTestingKnobs.DisableNewProtectedTimestampSubsystemCheck {\n",
"\t\t\treturn false, nil\n",
"\t\t}\n",
"\n",
"\t\tspanConfigRecords, err := kvAccessor.GetSpanConfigRecords(ctx, spanconfig.Targets{\n",
"\t\t\tspanconfig.MakeTargetFromSpan(sp),\n",
"\t\t})\n",
"\t\tif err != nil {\n",
"\t\t\treturn false, err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/gcjob/refresh_statuses.go",
"type": "replace",
"edit_start_line_idx": 316
} | # This requires the postgres and cockroach servers be already loaded
# with identical TPCH data.
smither = "postgres"
seed = -1
stmttimeoutsecs = 120
sql = [
"""
SELECT
l_returnflag,
l_linestatus,
sum(l_quantity) AS sum_qty,
sum(l_extendedprice) AS sum_base_price,
sum(l_extendedprice * (1 - l_discount)) AS sum_disc_price,
sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) AS sum_charge,
avg(l_quantity) AS avg_qty,
avg(l_extendedprice) AS avg_price,
avg(l_discount) AS avg_disc,
count(*) AS count_order
FROM
lineitem
WHERE
l_shipdate <= $1::DATE - $2::INTERVAL
GROUP BY
l_returnflag,
l_linestatus
ORDER BY
l_returnflag,
l_linestatus;
""",
"""
SELECT
s_acctbal,
s_name,
n_name,
p_partkey,
p_mfgr,
s_address,
s_phone,
s_comment
FROM
part,
supplier,
partsupp,
nation,
region
WHERE
p_partkey = ps_partkey
AND s_suppkey = ps_suppkey
AND p_size = $1
AND p_type LIKE '%BRASS'
AND s_nationkey = n_nationkey
AND n_regionkey = r_regionkey
AND r_name = 'EUROPE'
AND ps_supplycost = (
SELECT
min(ps_supplycost)
FROM
partsupp,
supplier,
nation,
region
WHERE
p_partkey = ps_partkey
AND s_suppkey = ps_suppkey
AND s_nationkey = n_nationkey
AND n_regionkey = r_regionkey
AND r_name = 'EUROPE'
)
ORDER BY
s_acctbal DESC,
n_name,
s_name,
p_partkey
LIMIT 100;
""",
"""
SELECT
l_orderkey,
sum(l_extendedprice * (1 - l_discount)) AS revenue,
o_orderdate,
o_shippriority
FROM
customer,
orders,
lineitem
WHERE
c_mktsegment = 'BUILDING'
AND c_custkey = o_custkey
AND l_orderkey = o_orderkey
AND o_orderDATE < $1::DATE
AND l_shipdate > $2::DATE
GROUP BY
l_orderkey,
o_orderdate,
o_shippriority
ORDER BY
revenue DESC,
o_orderdate
LIMIT 10;
""",
"""
SELECT
o_orderpriority,
count(*) AS order_count
FROM
orders
WHERE
o_orderdate >= $1::DATE
AND o_orderdate < $2::DATE + $3::INTERVAL
AND EXISTS (
SELECT
*
FROM
lineitem
WHERE
l_orderkey = o_orderkey
AND l_commitDATE < l_receiptdate
)
GROUP BY
o_orderpriority
ORDER BY
o_orderpriority;
""",
"""
SELECT
n_name,
sum(l_extendedprice * (1 - l_discount)) AS revenue
FROM
customer,
orders,
lineitem,
supplier,
nation,
region
WHERE
c_custkey = o_custkey
AND l_orderkey = o_orderkey
AND l_suppkey = s_suppkey
AND c_nationkey = s_nationkey
AND s_nationkey = n_nationkey
AND n_regionkey = r_regionkey
AND r_name = 'ASIA'
AND o_orderDATE >= $1::DATE
AND o_orderDATE < $2::DATE + $3::INTERVAL
GROUP BY
n_name
ORDER BY
revenue DESC;
""",
"""
SELECT
sum(l_extendedprice * l_discount) AS revenue
FROM
lineitem
WHERE
l_shipdate >= $1::DATE
AND l_shipdate < $2::DATE + $3::INTERVAL
AND l_discount BETWEEN $4::FLOAT8 - $5::FLOAT8 AND $6::FLOAT8 + $7::FLOAT8
AND l_quantity < $8::FLOAT8;
""",
"""
SELECT
supp_nation,
cust_nation,
l_year,
sum(volume) AS revenue
FROM
(
SELECT
n1.n_name AS supp_nation,
n2.n_name AS cust_nation,
EXTRACT(year FROM l_shipdate) AS l_year,
l_extendedprice * (1 - l_discount) AS volume
FROM
supplier,
lineitem,
orders,
customer,
nation n1,
nation n2
WHERE
s_suppkey = l_suppkey
AND o_orderkey = l_orderkey
AND c_custkey = o_custkey
AND s_nationkey = n1.n_nationkey
AND c_nationkey = n2.n_nationkey
AND (
(n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY')
or (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE')
)
AND l_shipdate BETWEEN $1::DATE AND $2::DATE
) AS shipping
GROUP BY
supp_nation,
cust_nation,
l_year
ORDER BY
supp_nation,
cust_nation,
l_year;
""",
"""
SELECT
o_year,
sum(CASE
WHEN nation = 'BRAZIL' THEN volume
ELSE 0
END) / sum(volume) AS mkt_share
FROM
(
SELECT
EXTRACT(year FROM o_orderdate) AS o_year,
l_extendedprice * (1 - l_discount) AS volume,
n2.n_name AS nation
FROM
part,
supplier,
lineitem,
orders,
customer,
nation n1,
nation n2,
region
WHERE
p_partkey = l_partkey
AND s_suppkey = l_suppkey
AND l_orderkey = o_orderkey
AND o_custkey = c_custkey
AND c_nationkey = n1.n_nationkey
AND n1.n_regionkey = r_regionkey
AND r_name = 'AMERICA'
AND s_nationkey = n2.n_nationkey
AND o_orderdate BETWEEN $1::DATE AND $2::DATE
AND p_type = 'ECONOMY ANODIZED STEEL'
) AS all_nations
GROUP BY
o_year
ORDER BY
o_year;
""",
"""
SELECT
c_custkey,
c_name,
sum(l_extendedprice * (1 - l_discount)) AS revenue,
c_acctbal,
n_name,
c_address,
c_phone,
c_comment
FROM
customer,
orders,
lineitem,
nation
WHERE
c_custkey = o_custkey
AND l_orderkey = o_orderkey
AND o_orderDATE >= $1::DATE
AND o_orderDATE < $2::DATE + $3::INTERVAL
AND l_returnflag = 'R'
AND c_nationkey = n_nationkey
GROUP BY
c_custkey,
c_name,
c_acctbal,
c_phone,
n_name,
c_address,
c_comment
ORDER BY
revenue DESC
LIMIT 20;
""",
"""
SELECT
ps_partkey,
sum(ps_supplycost * ps_availqty::float) AS value
FROM
partsupp,
supplier,
nation
WHERE
ps_suppkey = s_suppkey
AND s_nationkey = n_nationkey
AND n_name = 'GERMANY'
GROUP BY
ps_partkey HAVING
sum(ps_supplycost * ps_availqty::float) > (
SELECT
sum(ps_supplycost * ps_availqty::float) * $1::FLOAT8
FROM
partsupp,
supplier,
nation
WHERE
ps_suppkey = s_suppkey
AND s_nationkey = n_nationkey
AND n_name = 'GERMANY'
)
ORDER BY
value DESC, ps_partkey;
""",
"""
SELECT
l_shipmode,
sum(CASE
WHEN o_orderpriority = '1-URGENT'
or o_orderpriority = '2-HIGH'
THEN 1
ELSE 0
END) AS high_line_count,
sum(CASE
WHEN o_orderpriority <> '1-URGENT'
AND o_orderpriority <> '2-HIGH'
THEN 1
ELSE 0
END) AS low_line_count
FROM
orders,
lineitem
WHERE
o_orderkey = l_orderkey
AND l_shipmode IN ('MAIL', 'SHIP')
AND l_commitdate < l_receiptdate
AND l_shipdate < l_commitdate
AND l_receiptdate >= $1::DATE
AND l_receiptdate < $2::DATE + $3::INTERVAL
GROUP BY
l_shipmode
ORDER BY
l_shipmode;
""",
"""
SELECT
100.00 * sum(CASE
WHEN p_type LIKE 'PROMO%'
THEN l_extendedprice * (1 - l_discount)
ELSE 0
END) / sum(l_extendedprice * (1 - l_discount)) AS promo_revenue
FROM
lineitem,
part
WHERE
l_partkey = p_partkey
AND l_shipdate >= $1::DATE
AND l_shipdate < $2::DATE + $3::INTERVAL;
""",
"""
SELECT
c_name,
c_custkey,
o_orderkey,
o_orderdate,
o_totalprice,
sum(l_quantity)
FROM
customer,
orders,
lineitem
WHERE
o_orderkey IN (
SELECT
l_orderkey
FROM
lineitem
GROUP BY
l_orderkey HAVING
sum(l_quantity) > $1::INT8
)
AND c_custkey = o_custkey
AND o_orderkey = l_orderkey
GROUP BY
c_name,
c_custkey,
o_orderkey,
o_orderdate,
o_totalprice
ORDER BY
o_totalprice DESC,
o_orderdate
LIMIT 100;
""",
"""
SELECT
sum(l_extendedprice* (1 - l_discount)) AS revenue
FROM
lineitem,
part
WHERE
(
p_partkey = l_partkey
AND p_brand = 'Brand#12'
AND p_container IN ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG')
AND l_quantity >= $1::INT8 AND l_quantity <= $2::INT8 + $3::INT8
AND p_size BETWEEN $4::INT8 AND $5::INT8
AND l_shipmode IN ('AIR', 'AIR REG')
AND l_shipinstruct = 'DELIVER IN PERSON'
)
OR
(
p_partkey = l_partkey
AND p_brand = 'Brand#23'
AND p_container IN ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK')
AND l_quantity >= $6::INT8 AND l_quantity <= $7::INT8 + $8::INT8
AND p_size BETWEEN $9::INT8 AND $10::INT8
AND l_shipmode IN ('AIR', 'AIR REG')
AND l_shipinstruct = 'DELIVER IN PERSON'
)
OR
(
p_partkey = l_partkey
AND p_brand = 'Brand#34'
AND p_container IN ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG')
AND l_quantity >= $11::INT8 AND l_quantity <= $12::INT8 + $13::INT8
AND p_size BETWEEN $14::INT8 AND $15::INT8
AND l_shipmode IN ('AIR', 'AIR REG')
AND l_shipinstruct = 'DELIVER IN PERSON'
);
""",
"""
SELECT
s_name,
s_address
FROM
supplier,
nation
WHERE
s_suppkey IN (
SELECT
ps_suppkey
FROM
partsupp
WHERE
ps_partkey IN (
SELECT
p_partkey
FROM
part
WHERE
p_name LIKE 'forest%'
)
AND ps_availqty > (
SELECT
$1::FLOAT8 * sum(l_quantity)
FROM
lineitem
WHERE
l_partkey = ps_partkey
AND l_suppkey = ps_suppkey
AND l_shipdate >= $2::DATE
AND l_shipdate < $3::DATE + $4::INTERVAL
)
)
AND s_nationkey = n_nationkey
AND n_name = 'CANADA'
ORDER BY
s_name;
""",
"""
SELECT
cntrycode,
count(*) AS numcust,
sum(c_acctbal) AS totacctbal
FROM
(
SELECT
substring(c_phone FROM $1::INT4 FOR $2::INT4) AS cntrycode,
c_acctbal
FROM
customer
WHERE
substring(c_phone FROM $3::INT4 FOR $4::INT4) in
('13', '31', '23', '29', '30', '18', '17')
AND c_acctbal > (
SELECT
avg(c_acctbal)
FROM
customer
WHERE
c_acctbal > $5::FLOAT8
AND substring(c_phone FROM $6::INT4 FOR $7::INT4) in
('13', '31', '23', '29', '30', '18', '17')
)
AND NOT EXISTS (
SELECT
*
FROM
orders
WHERE
o_custkey = c_custkey
)
) AS custsale
GROUP BY
cntrycode
ORDER BY
cntrycode;
""",
]
# Missing: 9, 13, 15, 16, 17, 21
# These are missing either because 1) they use a CREATE VIEW, or 2)
# they don't have any parameters that make sense to randomize, and we'd
# thus be executing the same query each time. Queries that don't change
# should be tested in other places; smithcmp is for random testing.
[databases.vec-off]
addr = "postgresql://root@localhost:26257/tpch?sslmode=disable"
allowmutations = true
initsql = """
set vectorize=off;
"""
[databases.vec-on]
addr = "postgresql://root@localhost:26257/tpch?sslmode=disable"
allowmutations = true
initsql = """
set vectorize=on;
"""
[databases.postgres]
addr = "postgresql://postgres@localhost:5432/tpch?sslmode=disable"
| pkg/cmd/smithcmp/tpch.toml | 0 | https://github.com/cockroachdb/cockroach/commit/f475361ee075d1746bcd148e6da004fa20dece51 | [
0.0001789073139661923,
0.0001737452403176576,
0.00016655126819387078,
0.0001738429127726704,
0.000002046713689196622
] |
{
"id": 4,
"code_window": [
"\t\tif execCfg.Codec.ForSystemTenant() &&\n",
"\t\t\tdeprecatedIsProtected(ctx, ptsCache, droppedAtTime, sp) {\n",
"\t\t\treturn true, nil\n",
"\t\t}\n",
"\n",
"\t\t// Skip checking the new protected timestamp subsystem if the testing knob\n",
"\t\t// says as such.\n",
"\t\tif execCfg.GCJobTestingKnobs.DisableNewProtectedTimestampSubsystemCheck {\n",
"\t\t\treturn false, nil\n",
"\t\t}\n",
"\n",
"\t\tspanConfigRecords, err := kvAccessor.GetSpanConfigRecords(ctx, spanconfig.Targets{\n",
"\t\t\tspanconfig.MakeTargetFromSpan(sp),\n",
"\t\t})\n",
"\t\tif err != nil {\n",
"\t\t\treturn false, err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/gcjob/refresh_statuses.go",
"type": "replace",
"edit_start_line_idx": 316
} | // Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"context"
"reflect"
"testing"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree/treebin"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
)
func TestTypeAsString(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
p := planner{alloc: &tree.DatumAlloc{}}
testData := []struct {
expr tree.Expr
expected string
expectedErr bool
}{
{expr: tree.NewDString("foo"), expected: "foo"},
{
expr: &tree.BinaryExpr{
Operator: treebin.MakeBinaryOperator(treebin.Concat), Left: tree.NewDString("foo"), Right: tree.NewDString("bar")},
expected: "foobar",
},
{expr: tree.NewDInt(3), expectedErr: true},
}
t.Run("TypeAsString", func(t *testing.T) {
for _, td := range testData {
fn, err := p.TypeAsString(ctx, td.expr, "test")
if err != nil {
if !td.expectedErr {
t.Fatalf("expected no error; got %v", err)
}
continue
} else if td.expectedErr {
t.Fatal("expected error; got none")
}
s, err := fn()
if err != nil {
t.Fatal(err)
}
if s != td.expected {
t.Fatalf("expected %s; got %s", td.expected, s)
}
}
})
t.Run("TypeAsStringArray", func(t *testing.T) {
for _, td := range testData {
fn, err := p.TypeAsStringArray(ctx, []tree.Expr{td.expr, td.expr}, "test")
if err != nil {
if !td.expectedErr {
t.Fatalf("expected no error; got %v", err)
}
continue
} else if td.expectedErr {
t.Fatal("expected error; got none")
}
a, err := fn()
if err != nil {
t.Fatal(err)
}
expected := []string{td.expected, td.expected}
if !reflect.DeepEqual(a, expected) {
t.Fatalf("expected %s; got %s", expected, a)
}
}
})
}
| pkg/sql/planner_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/f475361ee075d1746bcd148e6da004fa20dece51 | [
0.00018019252456724644,
0.00016756434342823923,
0.00015810829063411802,
0.00016490953566972166,
0.00000789178375271149
] |
{
"id": 4,
"code_window": [
"\t\tif execCfg.Codec.ForSystemTenant() &&\n",
"\t\t\tdeprecatedIsProtected(ctx, ptsCache, droppedAtTime, sp) {\n",
"\t\t\treturn true, nil\n",
"\t\t}\n",
"\n",
"\t\t// Skip checking the new protected timestamp subsystem if the testing knob\n",
"\t\t// says as such.\n",
"\t\tif execCfg.GCJobTestingKnobs.DisableNewProtectedTimestampSubsystemCheck {\n",
"\t\t\treturn false, nil\n",
"\t\t}\n",
"\n",
"\t\tspanConfigRecords, err := kvAccessor.GetSpanConfigRecords(ctx, spanconfig.Targets{\n",
"\t\t\tspanconfig.MakeTargetFromSpan(sp),\n",
"\t\t})\n",
"\t\tif err != nil {\n",
"\t\t\treturn false, err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/gcjob/refresh_statuses.go",
"type": "replace",
"edit_start_line_idx": 316
} | - Feature Name: Virtual pg-like schemas
- Status: completed
- Start Date: 2018-01-15
- Authors: knz, Jordan, Peter
- RFC PR: #21456
- Cockroach Issue: #22371, #22753
# Summary
## Short summary
Question: "What are some example clients that are currently broken
specifically because of our incomplete catalog/schema semantics?"
Answer (from Jordan): *"it's all of the GUI tools. like, all of them."*
This RFC aims to address this specifically.
## Longer summary
This RFC proposes to introduce the notion of “schema” in the *namespace*
rules used by CockroachDB so that tools that use virtual tables to
introspect the schema find a similar layout as in PostgreSQL, and
enables them to use the same name structure to construct queries as
they could otherwise with pg.
This makes it possible to:
1. make database and table names appear in the right positions of the
introspection tables in `pg_catalog` and `information_schema`.
2. support queries like `select * from mydb.public.tbl`, i.e. support
the standard pg notation for fully qualified tables, needed for
clients that construct SQL queries by using introspection.
The change does *not* include changing the hierarchical structure of
stored database/table descriptors in CockroachDB. In particular it
does not enable the use of multiple distinct physical schemas
side-by-side inside a single database, i.e. the ability to have two
stored (physical) tables with the same name in the same database (in
different schemas): having both `mydb.foo.tbl1` and `mydb.bar.tbl1`
side-by-side will still not be supported.
(Although it is still possible, like previously, to store a physical
table in the `public` schema that has the same name as a virtual table
in one of the virtual schemas.)
To achieve this, the RFC proposes to tweak the name resolution rules
and how the database introspection virtual tables (`pg_catalog.*`,
`information_schema.*`) are generated.
# Motivation
- The changes proposed here will unblock proper user experience of
CockroachDB for users of (graphical or non-graphical) DB inspection
tools.
- The changes proposed here will enable the alignment of the
terminology used in CockroachDB with that used with PostgreSQL's
documentation, so that pg's documentation becomes more readily
applicable to CockroachDB.
We aim for both goals with the general purpose to further drive
developer adoption, especially first-time developers who are not yet
sufficiently savvy to understand the current subtle distinctions
between CockroachDB and pg.
# Guide-level explanation
## Concepts and vocabulary
With this change we must be careful of the terminology. This needs
adjustments in docs, explanations, etc., to better align with Postgres
concepts.
| Word, before | What is being designated | Word, after | Visible to users? |
|----------------|------------------------------------------------------------|------------------------|-------------------|
| Database | The name for a stored database descriptor | DB descriptor name | Mostly not |
| Database | Namespace container from the perspective of SQL clients | Catalog or Database | Yes |
| Schema | The conceptual set of all db/table/view/seq descriptors | Physical schema | Mostly not |
| Schema | A namespace container for virtual tables | Logical schema | Yes |
| (didn't exist) | Namespace container for all tables in a catalog | Logical schema | Yes |
| Table | The name for a stored table/view/sequence descriptor | Object descriptor name | Mostly not |
| Table | The name for a table where a SQL client can store stuff | Table or Relation | Yes |
## How do we teach this?
### Teaching to new roachers
- a CockroachDB cluster contains multiple *catalogs*, or
"databases". Every cluster starts with at least the `system`
catalog. More catalogs can be created with `CREATE DATABASE`.
- each catalog contains one *physical schema* called `public`,
and some additional *virtual schemas*, currently including `pg_catalog`, `information_schema` and `crdb_internal`.
- a future version of CockroachDB may support multiple logical schemas per catalog besides `public`.
- each schema contains zero or more tables, views, sequences, etc.
- the `public` schema of different catalogs can contain the same table name, but they will designate different tables.
For example, two applications can use separate catalogs `myapp1` and `myapp2` and define their own `customers` table,
and the same name "`customers`" will refer to different tables.
- the virtual schemas exist in every catalog. They contain the same
tables in every catalog, but their (automatically generated)
contents will differ across catalogs.
- for example `db1.pg_catalog.pg_tables` only contains tables for `db1`,
`db2.pg_catalog.pg_tables` only tables for `db2`, etc.
- the session variable `database` designates the current
catalog, which is used in queries to resolve
(table/view/sequence/schema) names when no catalog is further
specified.
- the `USE` statement, provided as convenience for developers and inspired from MySQL,
adjusts the `database` session variable.
- the session variable `search_path` contains a list of schema names
inside the current catalog where to search for functions and tables named in
queries.
For example, with a `search_path` set to `public, pg_catalog`, a
`database` set to `myapp2` and given a query `select * from
kv`, CockroachDB will search for table `kv` first in the `public`
schema of catalog `myapp2`, then in the `pg_catalog` schema for
catalog `myapp2`.
- As a specific CockroachDB extension, a SQL client can specify
a table name as `dbname.tblname` in some conditions to
provide compatibility with previous CockroachDB versions.
### Teaching to existing roachers
- We'll adopt the word "catalog" as a synonym for "database" to
designate the visible portion of the storage container for tables. The
word "schema" should be used more sparingly, as it has a specific
meaning in PostgreSQL which CockroachDB does not yet support.
- Except for what was called "virtual schema" in CockroachDB; these
were already properly named after the equivalent PostgreSQL
concept and do not change.
- The virtual tables in `information_schema`, `pg_catalog` now list
the catalog in the "Catalog" column, instead of the "Schema" column
as previously. The previous filler string "`def`" disappears. The
string "`public`" is now used as filler for the "Schema" column for
rows that point to actual table data.
- The virtual schemas are still listed as previously in the "Schema"
column. They appear (are repeated) for every catalog.
- When talking to users, be mindful that "every catalog has multiple
schemas, including one physical schema called `public` that contains
that catalog's physical tables", instead of saying "catalogs contain
tables".
- `search_path` now refers to schemas, not catalogs, resolved relative
to the current value of `database`.
# Reference-level explanation
There are 4 relevant separate algorithms for name resolution, depending
on where in the SQL syntax the name resolution occurs:
- Algorithm A1: resolving the name of an *existing* persistent object
(table/view/sequence or function, later types)
- `SELECT ... FROM <here>`
- `INSERT INTO <here> (...) ...`
- `ALTER TABLE <here> ...`
- `DROP TABLE <here>`
- `SELECT <here>(x,y,z)` (function application)
- `SELECT lastval('<here>')` (sequence name in string)
- `SELECT '<here>'::REGPROC` (function name to OID conversion)
- `SELECT '<here>'::REGCLASS` (table name to to OID conversion)
- **NOT:** `CREATE TABLE ...` (see below)
- **NOT:** `SELECT ... FROM ...@<here>` (see below)
- Algorithm A2: resolving the name for a *new* persistent object
(table/view/sequence, we don't support custom functions or types yet
but if we did they would be included here)
- `CREATE TABLE <here>` (ditto view, sequence)
- `ALTER TABLE ... RENAME TO <here>` (ditto view, sequence)
- **NOT:** `CREATE DATABASE ...` (see below)
- Algorithm B: resolving the name for a column name
- `SELECT <here> FROM ...` (i.e. names in scalar expressions that don't fall into the patterns above)
- Algorithm C: resolving a *pattern* for persistent object(s)
- `GRANT ... TO ... ON <here>`
The name resolution for database and index names uses separate
algorithms and that remains unchanged in this RFC.
## Outline of the implementation
The generic, reusable algorithms are implemented in
`pkg/sql/sem/tree/name_resolution.go`.
- `(*TableName).ResolveExisting()`: algorithm A1
- `(*TableName).ResolveTarget()`: algorithm A2
- `(*ColumnItem).Resolve()`: algorithm B
- `(*TableNamePrefix).Resolve()`: algorithm C
## Changes to algorithm A1
Common case: accessing an existing object.
Input: some (potentially partially qualified) name N.
Output: fully qualified name FQN + optionally, object descriptor
Currently:
```
1. if the name already has two parts (D.T), then go to step 4 directly.
2. otherwise (name only has one part T), if `database` is non-empty and the object T exists in the
current database, then set D := current value of `database` and go to step 4 directly.
3. otherwise (name only has one part T), try for every value D in `search_path`:
3.1 if the object D.T exists, then keep D and go to step 4
3.2 if no value in `search_path` makes D.T exist, fail with a name resolution error.
4. FQN := D.T; resolve the descriptor using db D and object name T.
```
After this change:
```
1. if the name already has 3 parts (C.S.T) then go to step 4 directly.
2. otherwise, if the name already has 2 parts (S.T) then:
2.1. if the object S.T already exists in the current database (including if the current database is the empty string,
see below for details), then set C := current value of `database` and go to step 4 directly.
2.2. if the object S.public.T already exists, then set C := S, set S := 'public' and go to step 4.
2.3. otherwise, fail with a name resolution error.
3. otherwise (name only has one part T), try for every value N in `search_path`:
3.1. make C := current value of `database`, S := N
3.2. if the object C.S.T exists, then keep C and S and go to step 4.
3.3. if no value N in `search_path` makes N.T / C.N.T exist as per the rule above, then fail with a name resolution error.
(note: search_path cannot be empty, see "other changes" below)
4. FQN := C.S.T; resolve the descriptor using db C and object name T.
```
The rule 2.2 is a CockroachDB extension (not present in PostgreSQL)
which provides compatibility with previous CockroachDB versions.
For example, given a table `kv` in database `foo`, and `search_path` set to its default `public, pg_catalog`:
- `SELECT x FROM kv` with `database = foo`
- rule 1 fails
- rule 2 fails
- rule 3 applies
- rule 3.1 applies with C=foo, N=public
- rule 3.2 applies (`foo.public.kv` exists), FQN becomes `foo.public.kv`
- `SELECT x FROM blah` with `database = foo`
- rule 1 fails
- rule 2 fails
- rule 3 applies
- rule 3.1 applies with C=foo, N=public
- rule 3.2 fails (`foo.public.blah` doesn't exist)
- rule 3.1 applies with C=foo, N=pg_catalog
- rule 3.2 fails (`foo.pg_catalog.blah` doesn't exist)
- name resolution error
- `SELECT x FROM pg_tables` with `database = foo`
- rule 1 fails
- rule 2 fails
- rule 3 applies
- rule 3.1 applies with C=foo, N=public
- rule 3.2 fails (`foo.public.pg_tables` doesn't exist)
- rule 3.1 applies with C=foo,N=pg_catalog
- rule 3.2 applies (`foo.pg_catalog.pg_tables` is valid), FQN becomes `foo.pg_catalog.pg_tables`
- `SELECT x FROM kv` with empty `database`
- rule 1 fails
- rule 2 fails
- rule 3 applies
- rule 3.1 applies with C="", N=public
- rule 3.2 fails (`"".public.kv` doesn't exist)
- rule 3.1 applies with C="", N=pg_catalog
- rule 3.2 fails (`"".pg_catalog.kv` doesn't exist)
- name resolution error
- `SELECT x FROM pg_tables` with empty `database` (CockroachDB extension)
- rule 1 fails
- rule 2 fails
- rule 3 applies
- rule 3.1 applies with C="", N=public
- rule 3.2 fails (`"".public.pg_tables` doesn't exist)
- rule 3.1 applies with C="",N=pg_catalog
- rule 3.2 applies (`"".pg_catalog.pg_tables` is valid), FQN becomes `"".pg_catalog.pg_tables`
CockroachDB extensions for compatibility with previous CockroachDB versions:
- `SELECT x FROM foo.kv` with `database = foo`
- rule 1 fails
- rule 2 applies
- rule 2.1 fails (`foo.foo.kv` doesn't exist)
- rule 2.2 applies (`foo.public.kv` exists), FQN becomes `foo.public.kv`
- `SELECT x FROM blah.kv` with `database = foo`
- rule 1 fails
- rule 2 applies
- rule 2.1 fails (`foo.blah.kv` doesn't exist)
- rule 2.2 fails (`blah.public.kv` doesn't exist)
- name resolution error
- `SELECT x FROM foo.kv` with empty `database`
- rule 1 fails
- rule 2 applies
- rule 2.1 fails (`"".foo.kv` doesn't exist)
- rule 2.2 applies (`foo.public.kv` exists), FQN becomes `foo.public.kv`
- `SELECT x FROM blah.kv` with empty `database`
- rule 1 fails
- rule 2 applies
- rule 2.1 fails (`"".blah.kv` doesn't exist)
- rule 2.2 fails (`blah.public.kv` doesn't exists)
- name resolution error
- `SELECT x FROM pg_catalog.pg_tables` with `database = foo`
- rule 2 applies
- rule 2.1 applies (`foo.pg_catalog.pg_tables` exists), FQN becomes `foo.pg_catalog.pg_tables`
- `SELECT x FROM pg_catalog.pg_tables` with empty `database` (CockroachDB extension)
- rule 2 applies
- rule 2.1 applies (`"".pg_catalog.pg_tables` exists), FQN becomes `"".pg_catalog.pg_tables`
## Changes to algorithm A2
Case: creating a new object or renaming an object to a new name.
Input: some (potentially partially qualified) name N.
Output: fully qualified name FQN (valid to create a new object / rename target)
Currently:
```
1. if the name already has two parts (D.T), then go to step 4 directly.
2. otherwise (name only has one part T) if `database` is set then set D := current value of `database` and go to step 4 directly.
3. otherwise (name only has one part T, `database` not set), fail with an "invalid name" error
4. FQN := D.T. Check D is a valid database; if it is not fail with an "invalid target database" error
```
After this change:
```
1. if the name already has 3 parts (C.S.T) then go to step 4 directly.
2. otherwise, if the name already has 2 parts (S.T) then:
2.1. set C := current value of `database`; then
if C.S is a valid target schema, go to step 4 directly.
2.2. otherwise (<current database>.S is not a valid target schema):
set C := S, S := 'public' and go to step 4 directly.
3. otherwise (name only has one part T):
3.1. C := current value of `database`, S := first value specified in search_path
3.2. if the target schema C.S exists, then keep C and S and go to step 4
3.3. otherwise, fail with "no schema has been selected"
4. FQN := C.S.T. Check C.S is a valid target schema name; if it is not fail with an "invalid target schema" error
```
The rule 2.2 is a CockroachDB extension (not present in PostgreSQL)
which provides compatibility with previous CockroachDB versions.
For example, given a database `foo` and `search_path` set to its default `public, pg_catalog`
- `CREATE TABLE kv` with `database = foo`
- rule 1 fails
- rule 2 fails
- rule 3 applies
- rule 3.1 applies
- rule 3.1.1 applies, FQN := `foo.public.kv`
- rule 4 checks: `foo.public` is a valid target schema.
- `CREATE TABLE kv` with `database = blah`
- rule 1 fails
- rule 2 fails
- rule 3 applies
- rule 3.1 applies
- rule 3.1.1 applies, FQN := `blah.public.kv`
- rule 4 checks: `blah.public` is a valid target schema, error "invalid target schema"
- `CREATE TABLE kv` with empty `database`
- rule 1 fails
- rule 2 fails
- rule 3 applies
- rule 3.1 applies
- rule 3.1.1. applies, FQN := `"".public.kv`
- rule 4 checks `"".public` is not a valid target schema, error "invalid target schema"
- `CREATE TABLE foo.kv` with `database = foo`
- rule 1 fails
- rule 2 applies
- rule 2.1 fails (C.S = `foo.foo`, not a valid target schema)
- rule 2.2 applies, FQN := `foo.public.kv`
- rule 4 checks `foo.public` is valid
- `CREATE TABLE foo.kv` with empty `database`
- rule 1 fails
- rule 2 applies
- rule 2.1 fails (`database` not set)
- rule 2.2 applies, FQN := `foo.public.kv`
- rule 4 checks `foo.public` is valid
## Changes to algorithm B
(Used for column names)
Input: some (potentially partially qualified) name N
Output: fully qualified column name FQN + column ID
Currently:
```
1. if the name already has 3 parts (D.T.X), then
1.2. if there's a data source with name D.T already, then go to step 4 directly
1.2. otherwise, fail with "unknown column X"
2. if the name already has 2 parts (T.X), then
2.1. try to find a data source with name T in the current context.
2.2. if none is found, fail with "unknown table T"
2.2. if more than one is found, fail with "ambiguous table name T"
2.3. otherwise (exactly one found), extract the db name D from the data source metadata, then go to step 4.
3. otherwise (name only has one part X), try for every data source in the current context:
3.1. try to find an anonymous data source that provides column X in the current context.
3.2. if more than one is found, fail with "ambiguous column name"
3.3. if exactly one is found, extract the name D.T from the data source metadata, then go to step 4.
3.4. otherwise, try to find a named data source that provides column X in the current context.
3.5. if more than one is found, fail with "ambiguous column name"
3.6. if none is found, fail with "no data source matches prefix"
3.7. otherwise (exactly one found), extract the name D.T from the data source metadata, then go to step 4
4. FQN := D.T.X, column ID looked up from data source descriptor
```
After this change:
```
1. if the name already has 4 parts (C.S.T.X), then
1.1. if there's a data source with name C.S.T already, then go to step 5 directly
1.2. otherwise, fail with "unknown column X"
2. if the name already has 3 parts (S.T.X), then
2.1. try to find a data source with suffix S.T in the current context.
2.2. if more than one is found, fail with "ambiguous column name"
2.3. if exactly one is found, extract the db name C from the data source metadata, then go to step 5.
2.4. if none is found, then
2.4.1. if there's a data source with name S.public.T already, then use C:=S, S:='public' and go to step 5 directly
2.4.2. otherwise, fail with "unknown column X"
3. same rule as rule 2 above
4. same rule as rule 3 above
5. FQN := C.S.T.X, column ID looked up from data source descriptor
```
The rule 2.4.1 is a new CockroachDB extension (not present in PostgreSQL)
which provides compatibility with previous CockroachDB versions.
For example, given a table `kv` in database `foo`
- `SELECT x FROM foo.public.kv`
- rule 1, 2, 3 don't apply
- rule 4 applies, FQN := `foo.public.kv.x`
- `SELECT kv.x FROM foo.public.kv`
- rule 1, 2 don't apply
- rule 3 applies, FQN := `foo.public.kv.x`
- `SELECT foo.public.kv.x FROM foo.public.kv`
- rule 1 applies, FQN = given name
- `SELECT foo.kv.x FROM foo.public.kv`
- rule 1 doesn't apply
- rule 2 applies
- rule 2.1 determines no source with suffix `foo.kv` in current context
- rules 2.2, 2.3 fail
- rule 2.4 applies
- rule 2.4.1 applies, FQN := `foo.public.kv.x`
- `SELECT bar.kv.x FROM foo.public.kv`
- rule 1 doesn't apply
- rule 2 applies
- rule 2.1 determines no source with suffix `foo.kv` in current context
- rules 2.2, 2.3 fail
- rule 2.4 applies
- rule 2.4.1 fails
- rule 2.4.2 applies: unknown column `bar.kv.x`
## Changes to algorithm C
Case: GRANT ON TABLE (table patterns)
Input: some table pattern
Output: fully qualified table pattern FQP
Currently:
```
1. if the name already has two parts with no star or a table star (D.T, D.*), then use that as FQP
(note: we don't support the syntax *.T in table patterns)
2. if the name only has one part and is not a star (T), then
2.1 if `database` is set, set D := current value of `database` and use D.T as FQP
2.2 otherwise, fail with "invalid name"
```
After this change:
```
1. if the name already has 3 parts with no star or a table star (D.S.T, D.S.*), then use that as FQP
2. if the name already has 2 parts with no star or a table star (S.T, S.*), then
2.1. if `database` is set, set C:= current value of `database`; if C.S is a valid schema, use that as FQP
2.2. otherwise (`database` not set or C.S not a valid schema), set C := S, S := `public`, use that as FQP
3. if the pattern is an unqualified star for tables, then search for all tables
in the first schema specified in `search_path`.
```
The rule 2.2 is a new CockroachDB extension.
## Other changes
- same rules / compatibility for zone specifiers
- the vtable generator functions in
`sql/pg_catalog.go`. `sql/information_schema.go` and
`sql/crdb_internal.go` are modified to list the database descriptor
name in the "Catalog" column instead of "Schema". The virtual
schemas remain in the "Schema" column but are repeated for every
database descriptor (logical catalog).
- These generator functions already accept a "db prefix" parameter
to constraint the visibility they have over the physical
schema. This is to be filled with the current value of `database`.
Note: already stored views need no special handling due to the compatibility rules.
# Detailed design
This section has two parts: a [background section](#background)
reminds the reader of what is expected.
A ["problems with CockroachDB"
section](#current-problems-with-cockroachdb) spells out what are the
current shortcomings.
A last [detailed solution section](#detailed-solution) maps the
proposed solution, outlined in the reference-level guide above, to the
detailed problem statement. Two alternatives are proposed.
## Background
This section provides an introduction to standard naming rules in SQL
and what are the differences between the Postgres and MySQL
dialects.
If you are already intimately knowledgeable with these rules, the
following high-level summary should be a sufficient refresher:
- we must pay attention to the 3 separate features "name resolution",
"database introspection" and "meta-introspection". A common pitfall
when reasoning about SQL naming is to only think about the
first. The latter two features, once all is said and done, more or
less mandate a 3-level logical namespace with the components
catalog, schema, relation, and restricts the spectrum of what can be
done about the first feature.
- there are three separate rules (algorithms) for name resolution: one
for persistent objects (including tables and functions), one for
column references, and one for sub-parts of complex values.
Feel free to skip to the next section (["problems with
CockroachDB"](#current-problems-with-cockroachdb)) if you already know
these details. However, that will refer to some details presented here.
### Terminology and high-level features
The terminology for object names in standard SQL, and pg's dialect in
particular, uses the words "catalog", "schema" and "relation". These
define a *namespacing scheme*: relation names are scoped to a schema
namespace; schema names are scoped to a catalog namespace.
"Scoping" means the same as it does in e.g. C or Go: it makes it
possible to reuse the same name for different things. For example,
this standard naming structure allows the same name `tbl1` to
designate two different tables, e.g. `mydb.schema1.tbl1` and
`mydb.schema2.tbl1`.
Within this context, any SQL engine must provide the following 3 features:
1. name resolution for database objects.
2. introspection of database objects via `information_schema` (and, for pg compatibility, `pg_catalog` too).
3. introspection of `information_schema` via `information_schema`.
Each of these three items deserves attention because it provides
boundary restrictions on the work being done here.
### Name resolution
Any SQL engine must provide a translation from language-level,
catalog/schema/relation *semantic* names to physical,
in-memory/on-disk data structures. The question that needs to be
mechanically answered is:
*Which table ID / descriptor does this particular name refer to?*
With a variant when accessing individual columns in a table/view:
*Which table ID / descriptor and which column ID inside that does this particular name refer to?*
In CockroachDB, the mechanical transformation of a name to a table ID
/ descriptor is done as follows:
- the *schema* part of the name is used to look up a database ID
(`select id from system.namespace where name = <schemaname> and "parendID" = 0`)
- the *relation* part of the name is used to look up a table ID,
within all IDs that have the database ID as `ParentID`:
(`select id from system.namespace where name = <relname> and "parentID" = <dbID>`)
- then the descriptor for that table ID is loaded if needed; if column ID
resolution is also needed, that will use just that table/view descriptor.
### Introspection of database objects
SQL engines also provide introspection tables in `information_schema`
(also `pg_catalog` for pg). These must answer the question:
*For each object in the database, what is the canonical name to address it in SQL queries?*
For example, `information_schema.tables` has 3 columns
`table_catalog`, `table_schema`, `table_name` that contain the
canonical name decomposition for tables.
It is possible for a SQL engine to not support the catalog part of
logical names. For example, this seems to be true of MySQL. In this
case, the `catalog` column is irrelevant; then the following rules
hold:
- if `information_schema.tables` contains a row with values `unused`, `a`,
`b` for the aforementioned columns, then a query of the form
`select * from a.b` must work.
- if `information_schema.schemata` contains a row with values
`unused`, `a` for the catalog and schema name columns, then a
statement of the form `create table a.b (...)` must work.
However, if the engine claims to support the catalog part, *which is
necessary for compatibility with pg's SQL dialect*, then the following
assertions must hold for `information_schema` to be properly
constructed:
- if `information_schema.tables` contains a row with values `a`, `b`,
`c` for the aforementioned columns, then a query of the form
`select * from a.b.c` must work.
- if `information_schema.schemata` contains a row with values `a`,
`b` for the catalog and schema name columns, then a statement of the
form `create table a.b.c (...)` must work.
Regardless of which of the two variants is supported, these
observations teach us the following: the structure of
`information_schema` does not give us freedom to design fancy naming
schemes where the path to access a table can be too short or
arbitrarily long.
Really, the SQL community has settled on the catalog/schema/relation
structure for names, crystallized in the structure of the
`information_schema` tables: there's a catalog part, there's a schema
part, there's a table name part. This does not leave us the freedom to
make up our own naming scheme while hoping that existing tools using
db introspection will cope.
### Introspection of `information_schema` (meta-introspection)
`information_schema` (and, for pg, `pg_catalog` too) are very
specifically defined to be *schema names*. Also they are very much
defined to designate *virtual schemas* that *must exist in every
catalog*.
The working intuition is that the virtual tables in the virtual schemas
only contain rows pertaining to the catalog in which they are
(virtually) contained:
- `db1.information_schema.tables` only contains information about tables in `db1`.
- `db2.information_schema.tables` only contains information about tables in `db2`.
- etc.
Meanwhile, they are schemas, so they must appear in the introspection
tables in the right position. For example, the word
"`information_schema`" must occur in the column `schema_name` of
`information_schema.tables`, with a repeated row for every database
(because `information_schema` exists virtually in every catalog/database):
| Catalog | Schema | Table |
|------------|--------------------|-----------|
| test | information_schema | tables |
| test | information_schema | columns |
| test | information_schema | ... |
| myapp | information_schema | tables |
| myapp | information_schema | columns |
| myapp | information_schema | ... |
### Separate resolution rules for persistent objects, columns and sub-parts of complex values
Four separate rules (set of algorithms) apply to the different
syntactic constructs for names in SQL:
1. **resolution of persistent objects:** (algorithms A1 & A2 in the [reference-level explanation](#reference-level-explanation))
- the naming of tables / views in FROM clauses.
- the naming of *functions* in both scalar contexts and FROM clauses.
- the expansion of table patterns in GRANT.
- the naming of in-db objects in CREATE, DROP, ALTER RENAME, etc.
2. **resolution of column references:** (algorithm B in the reference-level explanation)
- a column reference is always composed of an optional persistent object name as prefix, followed by
a mandatory column identifier.
3. **resolution of sub-parts inside a complex value**, when the engine supports
sub-parts (e.g. arrays and/or compound types):
- the naming of columns in the INSERT/UPSERT target column list, or the LHS of UPDATE SET statements.
- scalar expressions of the form `<expr>[123][456]` or `(<expr>).path[1].to.field[2][3]`
4. **resolution of patterns** in e.g. GRANT (algorithm C in the reference-level explanation)
For example, in the following queries:
INSERT INTO a.b.c (d.e.f) VALUES (1)
^^^^ ^^^^- this uses the resolution rule 3 of sub-parts inside column 'd'
|
\--------- this uses the resolution rule 1 of a persistent object (alg A1 & B)
SELECT (a.b.c.d).e.f FROM a.b.c
^^^^^^^ ^^^^ ^^^^^ this uses the resolution rule 1 of a persistent object (alg A1)
| |
| \------------ this uses the resolution rule 3 of sub-parts inside column 'd'
|
\-------------------- this uses the resolution rule 2 for a column (alg B).
SELECT a.b.c(123) -- this is a SQL function application
^^^^^- this uses the resolution rule 1 of a persistent object (alg A1).
CREATE TABLE a.b.c ( ... )
^^^^^- this uses the resolution rule 1 of a persistent object (alg A2).
GRANT SELECT TO admin ON TABLE a.b.c
^^^^^- this uses resolution rule 4 for patterns
The choice of which of the two rules 1 or 2 to apply in a scalar
context is made unambiguously by the presence (rule 1) or absence
(rule 2) of a function call argument list starting with '(' after the
name.
The resolution rules are very well specified across all SQL engines:
1. when resolving a name for a persistent object, the last part of the name is always the name of the object.
The part before that, if present, is the logical schema name. The
part before that, if present, is the logical catalog name.
This implies that a fully qualified persistent object name has at
most 3 components.
2. when resolving a name for a column, the last part of the name is
always the name of a *column*.
The part before that, if present, is the name of the relation (one
defined from a FROM clause). The part before that, if present, is
the logical schema name. The part before that, if present, is the
logical catalog name.
This implies that a column reference has at most 4 components.
3. when resolving a name for a sub-part of a complex value:
- array subscripts are used by appending `[offset]` to some scalar
expression.
- to access a field in a compound type (if those are supported),
grouping parentheses *must* be used if the thing containing the
field is a column reference.
For example, `SELECT (a.b.c.d).e.f` in the query above.
In contrast, `SELECT a.b.c.d.e.f` would not be allowed, because
it is ambiguous: it could refer either to `.b.c.d.e.f` in column
`a` of some implicit table, or `.c.d.e.f` in column `b` of table
`a`, or `.d.e.f` in column `c` of table `b` in schema `a`, etc.
In contrast to the resolution of persistent objects above, the path to a
sub-part of a compound value can be arbitrarily long.
Currently CockroachDB does not support compound types, so the logic
for rule 3 is not yet fully implemented -- we only support arrays. The
support for compound types and field access is not in-scope for this
RFC and not considered further. The code in PR #21753 has ensured that
there is adequate space in the grammar to add this support later, with
concrete suggestions on how to achieve this compatibility.
### Partially qualified names
In all contexts where a fully qualified name (FQN) is accepted, a
*partially qualified* name is also accepted. A partially qualified
name is recognizable because it has fewer components than the number
of components expected for a FQN in that position. (As described
above, the number of components expected for a FQN is unambiguously
defined for each syntactic position.)
A partially qualified name is transformed into a FQN *before* name
resolution, as defined in the previous sections, occurs.
The rules are defined separately for each SQL engine.
- In MySQL, for example, the logical catalog part is always inferred
to be "`def`", and the logical schema part, if absent, is taken from
the latest USE statement.
- In PostgreSQL, for example:
- the logical catalog part, if absent, is inferred from the database
name specified in the connection string, incidentally also
available via the built-in function `current_catalog`.
- the logical schema part, if absent, is inferred by searching each
schema named in the `search_path` session variable:
- taking the first item in `search_path`, also designated by
the built-in function `current_schema()`, for operations that create a new object;
- iterating through `search_path` to find a schema that contains
the persistent object named by the last component, for
operations that require the object to exist already.
This search across schemas is made using schemas of the current
catalog only.
The PostgreSQL rules are not to be taken lightly, because they interact
very specifically with the data in `information_schema` and `pg_catalog`.
If, say, `information_schema.tables` mentions two schemas `a` and `b`,
and two separate tables, both called `tbl`, in each of these two schemas,
then a client will expect to be able to set either
search_path = ['a']
or
search_path = ['b']
and expect queries of the form
SELECT * FROM tbl
to resolve `tbl` in one or the other of the two schemas.
This is of particular interest when a client needs to overload a name
or a table that otherwise already exists in `pg_catalog`:
-- client connects to `curdb`,
-- client sets search_path = ['public', 'pg_catalog']
SELECT * FROM pg_tables; -- initially resolves curdb.pg_catalog.pg_tables
CREATE TABLE pg_tables (x int); -- creates curdb.public.pg_tables
SELECT x FROM pg_tables; -- now resolves curdb.public.pg_tables
It would be an error to let the client access `pg_catalog.pg_tables`
in the latter query (and give it an error because `x` doesn't exist
there) after they have been able to run `CREATE TABLE pg_tables`
successfully.
## Current problems with CockroachDB
CockroachDB currently has several problems that this RFC aims to address:
- the name resolution algorithms are different than pg's. This means
that some queries valid in pg's SQL dialect are not valid in
CockroachDB, and vice-versa.
The specific phrasing of the problem is the following:
- CockroachDB currently uses the logical schema part of a qualified
name as a key to look up a database ID.
- CockroachDB fails to recognize FQN relation, column and function names.
Example failing queries, that should really work:
- `select * from mydb.public.foo` (invalid use of schema part)
- `select mydb.public.kv.v from kv` (insufficient FQN support)
- `select mydb.pg_catalog.pg_typeof(1)` (insufficient FQN support)
- the introspection tables are insufficiently populated for admin users.
A client that connects (via pg connection string) to a database
`curdb` but as admin user expects all the databases to be listed
alongside each other as separate "catalog" entries in
`information_schema` tables. Currently, CockroachDB will
only show them the tables for `curdb`, not other databases.
- the introspection tables plainly violate their contract.
1. A client will see a row (`def`, `curdb`, `tbl`) in there but
the query `select * from def.curdb.tbl` is invalid.
2. A client knowing that they are connected to database `curdb`
(from their connection URL) cannot find the string
"`curdb`" in the catalog column of the `information_schema` tables.
- meta-introspection (introspection of `information_schema` itself) is
wrong when connected as "root": the virtual schemas must exist for
every database, and currently they are only listed once.
These various problems compound and cause CockroachDB to confuse
most DB inspection tools.
## Detailed solution
The proposed change addresses the problem above as follows:
- the deviation in name resolution algorithms is resolved by changing
the name resolution algorithms to match pg's.
Backward compatibility with previous CockroachDB versions is ensured by a "catch"
rule that uses the logical schema name as database name if the pg rules would otherwise
determine the name was invalid.
- the limitations with introspection tables are addressed by
populating the database descriptor name in the "catalog" column. The
names of the virtual schemas are repeated for each database
descriptor.
### Handling of view queries
(needs some convincing argument that the proposed algorithm addresses previously stored views adequately)
# Drawbacks
Why should we *not* do this? Will need some adjustment by existing
CockroachDB users.
Mitigating factors: the name resolution rules may be able to recognize
invalid schema names as catalog names for compatibility.
Consequences on other areas of CockroachDB: internal queries
run by CockroachDB against itself should use the new naming rules.
# Rationale and Alternatives
- Why is this design the best in the space of possible designs?
See the PG compatibility doc by Andy Woods.
- What other designs have been considered and what is the rationale for not choosing them?
- See my previous RFC from last year, which proposes to introduce
fully-fledged schemas (to support a 3-level hierarchy for table
descriptors). This would provide even more PG compatibility but is
left out of scope in this RFC to make the change more incremental.
- A wild idea by Peter: make FQNs variable length. ("The SQL to KV
mapping could be extended without too much difficulty to support
an arbitrary number of levels") - this does not fit the
restriction on name length forced on us by `information_schema`.
- What is the impact of not doing this?
Broken compatibility with GUI database inspection tools.
# Unresolved questions
Handling of view queries (currently under investigation).
| docs/RFCS/20180219_pg_virtual_namespacing.md | 0 | https://github.com/cockroachdb/cockroach/commit/f475361ee075d1746bcd148e6da004fa20dece51 | [
0.00017752900021150708,
0.00016975794278550893,
0.00015821294800844043,
0.00017090780602302402,
0.000004215472927171504
] |
{
"id": 5,
"code_window": [
"\tRunBeforePerformGC func(jobID jobspb.JobID) error\n",
"\t// RunAfterIsProtectedCheck is called after a successfully checking the\n",
"\t// protected timestamp status of a table or an index. The protection status is\n",
"\t// passed in along with the jobID.\n",
"\tRunAfterIsProtectedCheck func(jobID jobspb.JobID, isProtected bool)\n",
"\t// DisableNewProtectedTimestampSubsystemCheck disables checking the new\n",
"\t// protected timestamp subsystem when checking the protection status of a\n",
"\t// table or an index. This is useful for tests that disable the span\n",
"\t// configuration infrastructure, as the new protected timestamp subsystem is\n",
"\t// built on top of it.\n",
"\t// TODO(arul): Once we've fully migrated all tests to use span configurations\n",
"\t// we should be able to get rid of this testing knob as well.\n",
"\tDisableNewProtectedTimestampSubsystemCheck bool\n",
"}\n",
"\n",
"// ModuleTestingKnobs is part of the base.ModuleTestingKnobs interface.\n",
"func (*GCJobTestingKnobs) ModuleTestingKnobs() {}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/schema_changer.go",
"type": "replace",
"edit_start_line_idx": 1000
} | // Copyright 2020 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package backupccl
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
_ "github.com/cockroachdb/cockroach/pkg/ccl/partitionccl"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/spanconfig"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/bootstrap"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
)
// TestFullClusterBackup is a large test to ensure that all of the system table
// data is being restored in the new cluster. Ensures that all the moving
// pieces are working together.
func TestFullClusterBackup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	params := base.TestClusterArgs{
		ServerArgs: base.TestServerArgs{
			DisableSpanConfigs: true, // TODO(irfansharif): #75060.
			Knobs: base.TestingKnobs{
				SpanConfig: &spanconfig.TestingKnobs{
					// We compare job progress before and after a restore. Disable
					// the automatic jobs checkpointing which could possibly mutate
					// the progress data during the backup/restore process.
					JobDisablePersistingCheckpoints: true,
				},
				GCJob: &sql.GCJobTestingKnobs{
					DisableNewProtectedTimestampSubsystemCheck: true,
				},
			},
		}}
	const numAccounts = 10
	tcBackup, sqlDB, tempDir, cleanupFn := backupRestoreTestSetupWithParams(t, singleNode, numAccounts, InitManualReplication, params)
	tcRestore, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, params)
	defer cleanupFn()
	defer cleanupEmptyCluster()

	backupKVDB := tcBackup.Server(0).DB()

	// Closed when the restore is allowed to progress with the rest of the backup.
	allowProgressAfterPreRestore := make(chan struct{})
	// Closed to signal that the zones have been restored.
	restoredZones := make(chan struct{})
	for _, server := range tcRestore.Servers {
		registry := server.JobRegistry().(*jobs.Registry)
		registry.TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
			jobspb.TypeRestore: func(raw jobs.Resumer) jobs.Resumer {
				r := raw.(*restoreResumer)
				r.testingKnobs.afterPreRestore = func() error {
					close(restoredZones)
					<-allowProgressAfterPreRestore
					return nil
				}
				return r
			},
		}
	}

	// The claim_session_id field in jobs is a uuid and so needs to be excluded
	// when comparing jobs pre/post restore.
	const jobsQuery = `
SELECT id, status, created, payload, progress, created_by_type, created_by_id, claim_instance_id
FROM system.jobs
`
	// Pause SQL Stats compaction job to ensure the test is deterministic.
	sqlDB.Exec(t, `PAUSE SCHEDULES SELECT id FROM [SHOW SCHEDULES FOR SQL STATISTICS]`)

	// Disable automatic stats collection on the backup and restoring clusters to ensure
	// the test is deterministic.
	sqlDB.Exec(t, `SET CLUSTER SETTING sql.stats.automatic_collection.enabled=false`)
	sqlDBRestore.Exec(t, `SET CLUSTER SETTING sql.stats.automatic_collection.enabled=false`)

	// Create some other descriptors as well.
	sqlDB.Exec(t, `
USE data;
CREATE SCHEMA test_data_schema;
CREATE TABLE data.test_data_schema.test_table (a int);
INSERT INTO data.test_data_schema.test_table VALUES (1), (2);
USE defaultdb;
CREATE SCHEMA test_schema;
CREATE TABLE defaultdb.test_schema.test_table (a int);
INSERT INTO defaultdb.test_schema.test_table VALUES (1), (2);
CREATE TABLE defaultdb.foo (a int);
CREATE TYPE greeting AS ENUM ('hi');
CREATE TABLE welcomes (a greeting);
CREATE DATABASE data2;
USE data2;
CREATE SCHEMA empty_schema;
CREATE TABLE data2.foo (a int);
`)

	tableDesc := desctestutils.TestingGetPublicTableDescriptor(backupKVDB, keys.SystemSQLCodec, "data2", "foo")
	// Store the highest user-table ID for later assertions.
	maxBackupTableID := tableDesc.GetID()

	// Set up the system tables below to ensure that they are copied to the new
	// cluster.
	// Populate system.users.
	numUsers := 1000
	if util.RaceEnabled {
		numUsers = 10
	}
	for i := 0; i < numUsers; i++ {
		sqlDB.Exec(t, fmt.Sprintf("CREATE USER maxroach%d", i))
		sqlDB.Exec(t, fmt.Sprintf("ALTER USER maxroach%d CREATEDB", i))
	}
	// Populate system.zones.
	sqlDB.Exec(t, `ALTER TABLE data.bank CONFIGURE ZONE USING gc.ttlseconds = 3600`)
	sqlDB.Exec(t, `ALTER TABLE defaultdb.foo CONFIGURE ZONE USING gc.ttlseconds = 45`)
	sqlDB.Exec(t, `ALTER DATABASE data2 CONFIGURE ZONE USING gc.ttlseconds = 900`)
	// Populate system.jobs.
	// Note: this is not the backup under test, this just serves as a job which
	// should appear in the restore.
	// This job will eventually fail since it will run from a new cluster.
	sqlDB.Exec(t, `BACKUP data.bank TO 'nodelocal://0/throwawayjob'`)
	preBackupJobs := sqlDB.QueryStr(t, jobsQuery)
	// Populate system.settings.
	sqlDB.Exec(t, `SET CLUSTER SETTING kv.bulk_io_write.concurrent_addsstable_requests = 5`)
	sqlDB.Exec(t, `INSERT INTO system.ui (key, value, "lastUpdated") VALUES ($1, $2, now())`, "some_key", "some_val")
	// Populate system.comments.
	sqlDB.Exec(t, `COMMENT ON TABLE data.bank IS 'table comment string'`)
	sqlDB.Exec(t, `COMMENT ON DATABASE data IS 'database comment string'`)
	// Populate system.locations.
	sqlDB.Exec(t,
		`INSERT INTO system.locations ("localityKey", "localityValue", latitude, longitude) VALUES ($1, $2, $3, $4)`,
		"city", "New York City", 40.71427, -74.00597,
	)
	// Populate system.role_members.
	sqlDB.Exec(t, `CREATE ROLE system_ops;`)
	sqlDB.Exec(t, `GRANT system_ops TO maxroach1;`)

	// Populate system.scheduled_jobs table with a first run in the future to prevent immediate adoption.
	firstRun := timeutil.Now().Add(time.Hour).Format(timeutil.TimestampWithoutTZFormat)
	sqlDB.Exec(t, `CREATE SCHEDULE FOR BACKUP data.bank INTO $1 RECURRING '@hourly' FULL BACKUP ALWAYS WITH SCHEDULE OPTIONS first_run = $2`, localFoo, firstRun)
	sqlDB.Exec(t, `PAUSE SCHEDULES SELECT id FROM [SHOW SCHEDULES FOR BACKUP]`)

	injectStats(t, sqlDB, "data.bank", "id")
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)

	// Create a bunch of user tables on the restoring cluster that we're going
	// to delete.
	numTables := 50
	if util.RaceEnabled {
		numTables = 2
	}
	for i := 0; i < numTables; i++ {
		sqlDBRestore.Exec(t, `CREATE DATABASE db_to_drop`)
		sqlDBRestore.Exec(t, `CREATE TABLE db_to_drop.table_to_drop (a int)`)
		sqlDBRestore.Exec(t, `ALTER TABLE db_to_drop.table_to_drop CONFIGURE ZONE USING gc.ttlseconds=1`)
		sqlDBRestore.Exec(t, `DROP DATABASE db_to_drop`)
	}
	// Wait for the GC job to finish to ensure the descriptors no longer exist.
	sqlDBRestore.CheckQueryResultsRetry(
		t, "SELECT count(*) FROM [SHOW JOBS] WHERE job_type = 'SCHEMA CHANGE GC' AND status = 'running'",
		[][]string{{"0"}},
	)

	doneRestore := make(chan struct{})
	go func() {
		sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)
		close(doneRestore)
	}()

	// Check that zones are restored during pre-restore.
	t.Run("ensure zones are restored during pre-restore", func(t *testing.T) {
		<-restoredZones
		// Not specifying the schema makes the query search using defaultdb first.
		// which ends up returning the error
		// pq: database "defaultdb" is offline: restoring
		checkZones := "SELECT * FROM system.public.zones"
		sqlDBRestore.CheckQueryResults(t, checkZones, sqlDB.QueryStr(t, checkZones))

		// Check that the user tables are still offline.
		sqlDBRestore.ExpectErr(t, "database \"data\" is offline: restoring", "SELECT * FROM data.public.bank")

		// Check there is no data in the span that we expect user data to be imported.
		store := tcRestore.GetFirstStoreFromServer(t, 0)
		startKey := keys.SystemSQLCodec.TablePrefix(bootstrap.TestingUserDescID(0))
		endKey := keys.SystemSQLCodec.TablePrefix(uint32(maxBackupTableID)).PrefixEnd()
		it := store.Engine().NewMVCCIterator(storage.MVCCKeyAndIntentsIterKind, storage.IterOptions{
			UpperBound: endKey,
		})
		defer it.Close()
		it.SeekGE(storage.MVCCKey{Key: startKey})
		hasKey, err := it.Valid()
		require.NoError(t, err)
		require.False(t, hasKey)
	})

	// Allow the restore to make progress after we've checked the pre-restore
	// stage.
	close(allowProgressAfterPreRestore)

	// Wait for the restore to finish before checking that it did the right thing.
	<-doneRestore

	t.Run("ensure all databases restored", func(t *testing.T) {
		sqlDBRestore.CheckQueryResults(t,
			`SELECT database_name, owner FROM [SHOW DATABASES]`,
			[][]string{
				{"data", security.RootUser},
				{"data2", security.RootUser},
				{"defaultdb", security.RootUser},
				{"postgres", security.RootUser},
				{"system", security.NodeUser},
			})
	})

	t.Run("ensure all schemas are restored", func(t *testing.T) {
		expectedSchemas := map[string][][]string{
			"defaultdb": {{"crdb_internal"}, {"information_schema"}, {"pg_catalog"}, {"pg_extension"}, {"public"}, {"test_schema"}},
			"data":      {{"crdb_internal"}, {"information_schema"}, {"pg_catalog"}, {"pg_extension"}, {"public"}, {"test_data_schema"}},
			"data2":     {{"crdb_internal"}, {"empty_schema"}, {"information_schema"}, {"pg_catalog"}, {"pg_extension"}, {"public"}},
		}
		for dbName, expectedSchemas := range expectedSchemas {
			sqlDBRestore.CheckQueryResults(t,
				fmt.Sprintf(`USE %s; SELECT schema_name FROM [SHOW SCHEMAS] ORDER BY schema_name;`, dbName),
				expectedSchemas)
		}
	})

	t.Run("ensure system table data restored", func(t *testing.T) {
		// Note the absence of the jobs table. Jobs are tested by another test as
		// jobs are created during the RESTORE process.
		systemTablesToVerify := []string{
			systemschema.CommentsTable.GetName(),
			systemschema.LocationsTable.GetName(),
			systemschema.RoleMembersTable.GetName(),
			systemschema.RoleOptionsTable.GetName(),
			systemschema.SettingsTable.GetName(),
			systemschema.TableStatisticsTable.GetName(),
			systemschema.UITable.GetName(),
			systemschema.UsersTable.GetName(),
			systemschema.ScheduledJobsTable.GetName(),
		}

		verificationQueries := make([]string, len(systemTablesToVerify))
		// Populate the list of tables we expect to be restored as well as queries
		// that can be used to ensure that data in those tables is restored.
		for i, table := range systemTablesToVerify {
			switch table {
			case systemschema.TableStatisticsTable.GetName():
				// createdAt and statisticsID are re-generated on RESTORE.
				query := `SELECT "tableID", name, "columnIDs", "rowCount" FROM system.table_statistics`
				verificationQueries[i] = query
			case systemschema.SettingsTable.GetName():
				// We don't include the cluster version.
				query := fmt.Sprintf("SELECT * FROM system.%s WHERE name <> 'version'", table)
				verificationQueries[i] = query
			default:
				query := fmt.Sprintf("SELECT * FROM system.%s", table)
				verificationQueries[i] = query
			}
		}

		for _, read := range verificationQueries {
			sqlDBRestore.CheckQueryResults(t, read, sqlDB.QueryStr(t, read))
		}
	})

	t.Run("ensure table IDs have not changed", func(t *testing.T) {
		// Check that all tables have been restored. DISTINCT is needed in order to
		// deal with the inclusion of schemas in the system.namespace table.
		tableIDCheck := "SELECT * FROM system.namespace ORDER BY id"
		sqlDBRestore.CheckQueryResults(t, tableIDCheck, sqlDB.QueryStr(t, tableIDCheck))
	})

	t.Run("ensure user table data restored", func(t *testing.T) {
		expectedUserTables := [][]string{
			{"data", "bank"},
			{"data2", "foo"},
			{"defaultdb", "foo"},
		}

		for _, table := range expectedUserTables {
			query := fmt.Sprintf("SELECT * FROM %s.%s", table[0], table[1])
			sqlDBRestore.CheckQueryResults(t, query, sqlDB.QueryStr(t, query))
		}
	})

	t.Run("ensure that grants are restored", func(t *testing.T) {
		grantCheck := "use system; SHOW grants"
		sqlDBRestore.CheckQueryResults(t, grantCheck, sqlDB.QueryStr(t, grantCheck))
		grantCheck = "use data; SHOW grants"
		sqlDBRestore.CheckQueryResults(t, grantCheck, sqlDB.QueryStr(t, grantCheck))
	})

	t.Run("ensure that jobs are restored", func(t *testing.T) {
		// Ensure that the jobs in the RESTORE cluster is a superset of the jobs
		// that were in the BACKUP cluster (before the full cluster BACKUP job was
		// run). There may be more jobs now because the restore can run jobs of
		// its own.
		newJobsStr := sqlDBRestore.QueryStr(t, jobsQuery)
		newJobs := make(map[string][]string)

		for _, newJob := range newJobsStr {
			// The first element of the slice is the job id.
			newJobs[newJob[0]] = newJob
		}

		for _, oldJob := range preBackupJobs {
			newJob, ok := newJobs[oldJob[0]]
			if !ok {
				t.Errorf("Expected to find job %+v in RESTORE cluster, but not found", oldJob)
			}
			require.Equal(t, oldJob, newJob)
		}
	})

	t.Run("zone_configs", func(t *testing.T) {
		// The restored zones should be a superset of the zones in the backed up
		// cluster.
		zoneIDsResult := sqlDB.QueryStr(t, `SELECT id FROM system.zones`)
		var q strings.Builder
		q.WriteString("SELECT * FROM system.zones WHERE id IN (")
		for i, restoreZoneIDRow := range zoneIDsResult {
			if i > 0 {
				q.WriteString(", ")
			}
			q.WriteString(restoreZoneIDRow[0])
		}
		q.WriteString(")")
		sqlDBRestore.CheckQueryResults(t, q.String(), sqlDB.QueryStr(t, q.String()))
	})

	// NB: subtest name fixed from "excepted ID" to "expected ID".
	t.Run("ensure that tables can be created at the expected ID", func(t *testing.T) {
		var maxID, dbID, tableID int
		sqlDBRestore.QueryRow(t, "SELECT max(id) FROM system.namespace").Scan(&maxID)
		dbName, tableName := "new_db", "new_table"
		sqlDBRestore.Exec(t, fmt.Sprintf("CREATE DATABASE %s", dbName))
		sqlDBRestore.Exec(t, fmt.Sprintf("CREATE TABLE %s.%s (a int)", dbName, tableName))
		sqlDBRestore.QueryRow(t,
			fmt.Sprintf("SELECT id FROM system.namespace WHERE name = '%s'", dbName)).Scan(&dbID)
		require.True(t, dbID > maxID)
		sqlDBRestore.QueryRow(t,
			fmt.Sprintf("SELECT id FROM system.namespace WHERE name = '%s'", tableName)).Scan(&tableID)
		require.True(t, tableID > maxID)
		require.NotEqual(t, dbID, tableID)
	})
}
// TestSingletonSpanConfigJobPostRestore ensures that there's a single span
// config reconciliation job running post restore.
func TestSingletonSpanConfigJobPostRestore(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	clusterArgs := base.TestClusterArgs{
		ServerArgs: base.TestServerArgs{
			Knobs: base.TestingKnobs{
				JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(),
			},
		},
	}
	const numAccounts = 10
	_, srcDB, tempDir, cleanupSrc := backupRestoreTestSetupWithParams(t, singleNode, numAccounts, InitManualReplication, clusterArgs)
	_, destDB, cleanupDest := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, clusterArgs)
	defer cleanupSrc()
	defer cleanupDest()

	// Round-trip a full cluster backup into the empty destination cluster.
	srcDB.Exec(t, `BACKUP TO $1`, localFoo)
	destDB.Exec(t, `RESTORE FROM $1`, localFoo)

	const numRunningReconciliationJobQuery = `
SELECT count(*) FROM [SHOW AUTOMATIC JOBS]
WHERE job_type = 'AUTO SPAN CONFIG RECONCILIATION' AND status = 'running'
`
	// Eventually the restored cluster should be running exactly one
	// reconciliation job -- no more, no fewer.
	testutils.SucceedsSoon(t, func() error {
		var running int
		destDB.QueryRow(t, numRunningReconciliationJobQuery).Scan(&running)
		if running != 1 {
			return errors.Newf("expected single running reconciliation job, found %d", running)
		}
		return nil
	})
}
// TestIncrementalFullClusterBackup exercises taking an incremental backup on
// top of a full cluster backup and restoring the whole chain into an empty
// cluster.
func TestIncrementalFullClusterBackup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	const numAccounts = 10
	const incLocation = "nodelocal://0/inc-full-backup"
	_, origDB, tempDir, cleanupOrig := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	_, restoredDB, cleanupRestored := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupOrig()
	defer cleanupRestored()

	// Take a full backup, mutate system.users, then layer an incremental on top.
	origDB.Exec(t, `BACKUP TO $1`, localFoo)
	origDB.Exec(t, "CREATE USER maxroach1")
	origDB.Exec(t, `BACKUP TO $1 INCREMENTAL FROM $2`, incLocation, localFoo)

	// Restore the full+incremental chain and verify the new user made it across.
	restoredDB.Exec(t, `RESTORE FROM $1, $2`, localFoo, incLocation)
	usersQuery := "SELECT * FROM system.users"
	restoredDB.CheckQueryResults(t, usersQuery, origDB.QueryStr(t, usersQuery))
}
// TestEmptyFullClusterRestore ensures that we can backup and restore a full
// cluster backup with only metadata (no user data). Regression test for #49573.
func TestEmptyFullClusterRestore(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	sqlDB, tempDir, cleanupFn := createEmptyCluster(t, singleNode)
	_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupFn()
	defer cleanupEmptyCluster()

	// Create metadata-only state (two users) in the otherwise empty cluster,
	// then take a full cluster backup of it.
	sqlDB.Exec(t, `CREATE USER alice`)
	sqlDB.Exec(t, `CREATE USER bob`)
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)

	sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)

	// The restored cluster should contain the same users as the backed-up one.
	checkQuery := "SELECT * FROM system.users"
	sqlDBRestore.CheckQueryResults(t, checkQuery, sqlDB.QueryStr(t, checkQuery))
}
// TestClusterRestoreEmptyDB checks that databases containing no tables survive
// a full cluster backup/restore round trip. Regression test for #50561.
func TestClusterRestoreEmptyDB(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	const numAccounts = 10
	_, origDB, tempDir, cleanupOrig := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	_, restoredDB, cleanupRestored := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupOrig()
	defer cleanupRestored()

	// Create two table-less databases and back up the full cluster.
	origDB.Exec(t, `CREATE DATABASE some_db`)
	origDB.Exec(t, `CREATE DATABASE some_db_2`)
	origDB.Exec(t, `BACKUP TO $1`, localFoo)

	restoredDB.Exec(t, `RESTORE FROM $1`, localFoo)

	// The database lists of the two clusters should match exactly.
	dbListQuery := "SHOW DATABASES"
	restoredDB.CheckQueryResults(t, dbListQuery, origDB.QueryStr(t, dbListQuery))
}
// TestDisallowFullClusterRestoreOnNonFreshCluster verifies that a full cluster
// restore refuses to run once user-created descriptors exist in the target
// cluster.
func TestDisallowFullClusterRestoreOnNonFreshCluster(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	const numAccounts = 10
	_, origDB, tempDir, cleanupOrig := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	_, targetDB, cleanupTarget := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupOrig()
	defer cleanupTarget()

	origDB.Exec(t, `BACKUP TO $1`, localFoo)

	// Dirty the target cluster; the full cluster restore must now be rejected.
	targetDB.Exec(t, `CREATE DATABASE foo`)
	targetDB.ExpectErr(t,
		"pq: full cluster restore can only be run on a cluster with no tables or databases but found 2 descriptors: foo, public",
		`RESTORE FROM $1`, localFoo,
	)
}
// TestClusterRestoreSystemTableOrdering ensures that during a full cluster
// restore the system.settings table is the last system table to be restored.
func TestClusterRestoreSystemTableOrdering(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	const numAccounts = 10
	_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	tcRestore, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode,
		tempDir,
		InitManualReplication, base.TestClusterArgs{})
	defer cleanupFn()
	defer cleanupEmptyCluster()

	// Record the order in which system tables are restored by hooking into the
	// restore job's per-system-table restoration callback.
	restoredSystemTables := make([]string, 0)
	for _, server := range tcRestore.Servers {
		registry := server.JobRegistry().(*jobs.Registry)
		registry.TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
			jobspb.TypeRestore: func(raw jobs.Resumer) jobs.Resumer {
				r := raw.(*restoreResumer)
				r.testingKnobs.duringSystemTableRestoration = func(systemTableName string) error {
					restoredSystemTables = append(restoredSystemTables, systemTableName)
					return nil
				}
				return r
			},
		}
	}

	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	sqlDBRestore.Exec(t, `RESTORE FROM $1`, localFoo)

	// Check that the settings table is the last of the system tables to be
	// restored. Guard against an empty slice first so that a broken testing
	// knob fails with a clear assertion rather than an index-out-of-range
	// panic on restoredSystemTables[-1].
	require.NotEmpty(t, restoredSystemTables)
	require.Equal(t, systemschema.SettingsTable.GetName(),
		restoredSystemTables[len(restoredSystemTables)-1])
}
// TestDisallowFullClusterRestoreOfNonFullBackup verifies that a full cluster
// restore (RESTORE FROM) rejects backups that only cover individual tables.
func TestDisallowFullClusterRestoreOfNonFullBackup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	const numAccounts = 10
	_, origDB, tempDir, cleanupOrig := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	_, targetDB, cleanupTarget := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupOrig()
	defer cleanupTarget()

	// A table-level backup must not be accepted by a full cluster restore.
	origDB.Exec(t, `BACKUP data.bank TO $1`, localFoo)
	targetDB.ExpectErr(
		t, "pq: full cluster RESTORE can only be used on full cluster BACKUP files",
		`RESTORE FROM $1`, localFoo,
	)
}
// TestAllowNonFullClusterRestoreOfFullBackup checks that a single table can be
// restored out of a full cluster backup, here into a separate database.
func TestAllowNonFullClusterRestoreOfFullBackup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	const numAccounts = 10
	_, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	defer cleanupFn()

	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	sqlDB.Exec(t, `CREATE DATABASE data2`)
	sqlDB.Exec(t, `RESTORE data.bank FROM $1 WITH into_db='data2'`, localFoo)

	// The table was restored into data2, so compare its contents against the
	// original data.bank. (The previous check compared data.bank against
	// itself, which passed vacuously without exercising the restore.)
	sqlDB.CheckQueryResults(t,
		"SELECT * FROM data2.bank",
		sqlDB.QueryStr(t, "SELECT * FROM data.bank"))
}
// TestRestoreFromFullClusterBackup checks that individual databases, tables,
// and system tables can each be restored out of a backup that was taken of
// the full cluster.
func TestRestoreFromFullClusterBackup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	const numAccounts = 10
	_, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	defer cleanupFn()

	// Take a full cluster backup and drop the user database so each subtest
	// starts from a cluster that no longer has `data`.
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)
	sqlDB.Exec(t, `DROP DATABASE data`)

	// Restore a single database out of the full cluster backup.
	t.Run("database", func(t *testing.T) {
		sqlDB.Exec(t, `RESTORE DATABASE data FROM $1`, localFoo)
		defer sqlDB.Exec(t, `DROP DATABASE data`)
		sqlDB.CheckQueryResults(t, "SELECT count(*) FROM data.bank", [][]string{{"10"}})
	})

	// Restore a single table into a freshly recreated (empty) database.
	t.Run("table", func(t *testing.T) {
		sqlDB.Exec(t, `CREATE DATABASE data`)
		defer sqlDB.Exec(t, `DROP DATABASE data`)
		sqlDB.Exec(t, `RESTORE data.bank FROM $1`, localFoo)
		sqlDB.CheckQueryResults(t, "SELECT count(*) FROM data.bank", [][]string{{"10"}})
	})

	// Restore every table of a database via the `data.*` table pattern.
	t.Run("tables", func(t *testing.T) {
		sqlDB.Exec(t, `CREATE DATABASE data`)
		defer sqlDB.Exec(t, `DROP DATABASE data`)
		sqlDB.Exec(t, `RESTORE data.* FROM $1`, localFoo)
		sqlDB.CheckQueryResults(t, "SELECT count(*) FROM data.bank", [][]string{{"10"}})
	})

	// Restore a system table into a scratch database and compare it against
	// the live system table.
	t.Run("system tables", func(t *testing.T) {
		sqlDB.Exec(t, `CREATE DATABASE temp_sys`)
		sqlDB.Exec(t, `RESTORE system.users FROM $1 WITH into_db='temp_sys'`, localFoo)
		sqlDB.CheckQueryResults(t, "SELECT * FROM temp_sys.users", sqlDB.QueryStr(t, "SELECT * FROM system.users"))
	})
}
// TestCreateDBAndTableIncrementalFullClusterBackup verifies that creating a
// new database and table after a full-cluster backup does not break a
// subsequent incremental cluster backup to the same destination.
func TestCreateDBAndTableIncrementalFullClusterBackup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	_, db, _, cleanupFn := backupRestoreTestSetup(t, singleNode, 0, InitManualReplication)
	defer cleanupFn()
	db.Exec(t, `BACKUP TO $1`, localFoo)

	// Introduce brand-new descriptors between the full and incremental backup.
	db.Exec(t, `CREATE DATABASE foo`)
	db.Exec(t, `CREATE TABLE foo.bar (a int)`)

	// Ensure that the new backup succeeds.
	db.Exec(t, `BACKUP TO $1`, localFoo)
}
// TestClusterRestoreFailCleanup tests that a failed RESTORE is cleaned up.
func TestClusterRestoreFailCleanup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	skip.UnderRace(t, "takes >1 min under race")

	params := base.TestServerArgs{}
	// Disable GC job so that the final check of crdb_internal.tables is
	// guaranteed to not be cleaned up. Although this was never observed by a
	// stress test, it is here for safety.
	blockCh := make(chan struct{})
	defer close(blockCh)
	params.Knobs.GCJob = &sql.GCJobTestingKnobs{
		// Any GC job that starts will park here until blockCh is closed at
		// test exit, keeping DROP-state tables visible for the assertions.
		RunBeforeResume: func(_ jobspb.JobID) error { <-blockCh; return nil },
	}

	const numAccounts = 1000
	_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	defer cleanupFn()

	// Setup the system systemTablesToVerify to ensure that they are copied to the new cluster.
	// Populate system.users.
	for i := 0; i < 1000; i++ {
		sqlDB.Exec(t, fmt.Sprintf("CREATE USER maxroach%d", i))
	}

	// This backup is deliberately corrupted below so restores from it fail
	// mid-way through data ingestion.
	sqlDB.Exec(t, `BACKUP TO 'nodelocal://1/missing-ssts'`)

	// Bugger the backup by removing the SST files. (Note this messes up all of
	// the backups, but there is only one at this point.)
	if err := filepath.Walk(tempDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			t.Fatal(err)
		}
		// Keep the manifest (so the restore can start) and any non-SST files;
		// delete only the data SSTs.
		if info.Name() == backupManifestName || !strings.HasSuffix(path, ".sst") {
			return nil
		}
		return os.Remove(path)
	}); err != nil {
		t.Fatal(err)
	}

	// Create a non-corrupted backup.
	// Populate system.jobs.
	// Note: this is not the backup under test, this just serves as a job which
	// should appear in the restore.
	// This job will eventually fail since it will run from a new cluster.
	sqlDB.Exec(t, `BACKUP data.bank TO 'nodelocal://0/throwawayjob'`)
	sqlDB.Exec(t, `BACKUP TO $1`, localFoo)

	t.Run("during restoration of data", func(t *testing.T) {
		_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
		defer cleanupEmptyCluster()
		// The corrupted backup fails while ingesting data SSTs.
		sqlDBRestore.ExpectErr(t, "sst: no such file", `RESTORE FROM 'nodelocal://1/missing-ssts'`)
		// Verify the failed RESTORE added some DROP tables.
		// Note that the system tables here correspond to the temporary tables
		// imported, not the system tables themselves.
		sqlDBRestore.CheckQueryResults(t,
			`SELECT name FROM system.crdb_internal.tables WHERE state = 'DROP' ORDER BY name`,
			[][]string{
				{"bank"},
				{"comments"},
				{"database_role_settings"},
				{"jobs"},
				{"locations"},
				{"role_members"},
				{"role_options"},
				{"scheduled_jobs"},
				{"settings"},
				{"tenant_settings"},
				{"ui"},
				{"users"},
				{"zones"},
			},
		)
	})

	// This test retries the job (by injected a retry error) after restoring a
	// every system table that has a custom restore function. This tried to tease
	// out any errors that may occur if some of the system table restoration
	// functions are not idempotent.
	t.Run("retry-during-custom-system-table-restore", func(t *testing.T) {
		// Collect the names of all system tables that use a custom restore
		// function; each gets its own subtest below.
		customRestoreSystemTables := make([]string, 0)
		for table, config := range systemTableBackupConfiguration {
			if config.customRestoreFunc != nil {
				customRestoreSystemTables = append(customRestoreSystemTables, table)
			}
		}
		for _, customRestoreSystemTable := range customRestoreSystemTables {
			t.Run(customRestoreSystemTable, func(t *testing.T) {
				args := base.TestClusterArgs{ServerArgs: base.TestServerArgs{
					Knobs: base.TestingKnobs{JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals()},
				}}
				tcRestore, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, args)
				defer cleanupEmptyCluster()

				// Inject a retry error, that returns once.
				alreadyErrored := false
				for _, server := range tcRestore.Servers {
					registry := server.JobRegistry().(*jobs.Registry)
					registry.TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
						jobspb.TypeRestore: func(raw jobs.Resumer) jobs.Resumer {
							r := raw.(*restoreResumer)
							// Fail exactly once, when the table under test is
							// being restored, with a retryable error.
							r.testingKnobs.duringSystemTableRestoration = func(systemTableName string) error {
								if !alreadyErrored && systemTableName == customRestoreSystemTable {
									alreadyErrored = true
									return jobs.MarkAsRetryJobError(errors.New("injected error"))
								}
								return nil
							}
							return r
						},
					}
				}
				// The initial restore will return an error, and restart.
				sqlDBRestore.ExpectErr(t, `running execution from '.*' to '.*' on \d+ failed: injected error`, `RESTORE FROM $1`, localFoo)
				// Reduce retry delays.
				sqlDBRestore.Exec(t, "SET CLUSTER SETTING jobs.registry.retry.initial_delay = '1ms'")
				// Expect the restore to succeed.
				sqlDBRestore.CheckQueryResultsRetry(t,
					`SELECT count(*) FROM [SHOW JOBS] WHERE job_type = 'RESTORE' AND status = 'succeeded'`,
					[][]string{{"1"}})
			})
		}
	})

	t.Run("during system table restoration", func(t *testing.T) {
		tcRestore, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
		defer cleanupEmptyCluster()

		// Bugger the backup by injecting a failure while restoring the system data.
		for _, server := range tcRestore.Servers {
			registry := server.JobRegistry().(*jobs.Registry)
			registry.TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
				jobspb.TypeRestore: func(raw jobs.Resumer) jobs.Resumer {
					r := raw.(*restoreResumer)
					// Fail unconditionally on the first system-table restore.
					r.testingKnobs.duringSystemTableRestoration = func(_ string) error {
						return errors.New("injected error")
					}
					return r
				},
			}
		}

		sqlDBRestore.ExpectErr(t, "injected error", `RESTORE FROM $1`, localFoo)
		// Verify the failed RESTORE added some DROP tables.
		// Note that the system tables here correspond to the temporary tables
		// imported, not the system tables themselves.
		sqlDBRestore.CheckQueryResults(t,
			`SELECT name FROM system.crdb_internal.tables WHERE state = 'DROP' ORDER BY name`,
			[][]string{
				{"bank"},
				{"comments"},
				{"database_role_settings"},
				{"jobs"},
				{"locations"},
				{"role_members"},
				{"role_options"},
				{"scheduled_jobs"},
				{"settings"},
				{"tenant_settings"},
				{"ui"},
				{"users"},
				{"zones"},
			},
		)
	})

	t.Run("after offline tables", func(t *testing.T) {
		tcRestore, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
		defer cleanupEmptyCluster()

		// Bugger the backup by injecting a failure while restoring the system data.
		for _, server := range tcRestore.Servers {
			registry := server.JobRegistry().(*jobs.Registry)
			registry.TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
				jobspb.TypeRestore: func(raw jobs.Resumer) jobs.Resumer {
					r := raw.(*restoreResumer)
					// Fail right after the offline table descriptors are written.
					r.testingKnobs.afterOfflineTableCreation = func() error {
						return errors.New("injected error")
					}
					return r
				},
			}
		}

		sqlDBRestore.ExpectErr(t, "injected error", `RESTORE FROM $1`, localFoo)
	})
}
// A regression test where dropped descriptors would appear in the set of
// `Descriptors`.
func TestDropDatabaseRevisionHistory(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	const numAccounts = 1
	_, srcDB, tempDir, cleanupSrc := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	defer cleanupSrc()

	srcDB.Exec(t, `BACKUP TO $1 WITH revision_history`, localFoo)
	// Churn a database through create/drop/create so the revision history in
	// the second backup contains a dropped descriptor with the same name as a
	// live one.
	srcDB.Exec(t, `CREATE DATABASE same_name_db;`)
	srcDB.Exec(t, `DROP DATABASE same_name_db;`)
	srcDB.Exec(t, `CREATE DATABASE same_name_db;`)
	srcDB.Exec(t, `BACKUP TO $1 WITH revision_history`, localFoo)

	_, destDB, cleanupDst := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupDst()
	// Only the live same_name_db should come back from the restore.
	destDB.Exec(t, `RESTORE FROM $1`, localFoo)
	destDB.ExpectErr(t, `database "same_name_db" already exists`, `CREATE DATABASE same_name_db`)
}
// TestClusterRevisionHistory tests that cluster backups can be taken with
// revision_history and correctly restore into various points in time.
func TestClusterRevisionHistory(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	// Each testCase pairs a captured cluster timestamp with a check that
	// verifies the expected databases/tables exist (or not) after a
	// `RESTORE ... AS OF SYSTEM TIME ts` into an empty cluster.
	type testCase struct {
		ts    string
		check func(t *testing.T, runner *sqlutils.SQLRunner)
	}
	testCases := make([]testCase, 0)
	ts := make([]string, 6)
	var tc testCase

	const numAccounts = 1
	_, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	defer cleanupFn()

	// ts[0]: only d1 exists.
	sqlDB.Exec(t, `CREATE DATABASE d1`)
	sqlDB.Exec(t, `CREATE TABLE d1.t (a INT)`)
	sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts[0])
	tc = testCase{
		ts: ts[0],
		check: func(t *testing.T, checkSQLDB *sqlutils.SQLRunner) {
			checkSQLDB.ExpectErr(t, `database "d1" already exists`, `CREATE DATABASE d1`)
			checkSQLDB.Exec(t, `CREATE DATABASE d2`)
		},
	}
	testCases = append(testCases, tc)

	// ts[1]: both d1 and d2 exist.
	sqlDB.Exec(t, `CREATE DATABASE d2`)
	sqlDB.Exec(t, `CREATE TABLE d2.t (a INT)`)
	sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts[1])
	tc = testCase{
		ts: ts[1],
		check: func(t *testing.T, checkSQLDB *sqlutils.SQLRunner) {
			// Expect both databases to exist at this point.
			checkSQLDB.ExpectErr(t, `database "d1" already exists`, `CREATE DATABASE d1`)
			checkSQLDB.ExpectErr(t, `database "d2" already exists`, `CREATE DATABASE d2`)
		},
	}
	testCases = append(testCases, tc)

	// ts[2]: d1 dropped; only d2 (with d2.t) remains.
	sqlDB.Exec(t, `DROP DATABASE d1`)
	sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts[2])
	tc = testCase{
		ts: ts[2],
		check: func(t *testing.T, checkSQLDB *sqlutils.SQLRunner) {
			checkSQLDB.Exec(t, `CREATE DATABASE d1`)
			checkSQLDB.Exec(t, `CREATE TABLE d1.t (a INT)`)
			checkSQLDB.ExpectErr(t, `database "d2" already exists`, `CREATE DATABASE d2`)
			checkSQLDB.ExpectErr(t, `relation "d2.public.t" already exists`, `CREATE TABLE d2.t (a INT)`)
		},
	}
	testCases = append(testCases, tc)
	// First backup: its revision history covers ts[0]..ts[2].
	sqlDB.Exec(t, `BACKUP TO $1 WITH revision_history`, localFoo)

	// ts[3]: captured before d2 is dropped, so d2 still exists here.
	sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts[3])
	sqlDB.Exec(t, `DROP DATABASE d2;`)
	tc = testCase{
		ts: ts[3],
		check: func(t *testing.T, checkSQLDB *sqlutils.SQLRunner) {
			checkSQLDB.Exec(t, `CREATE DATABASE d1`)
			checkSQLDB.Exec(t, `CREATE TABLE d1.t (a INT)`)
			checkSQLDB.ExpectErr(t, `database "d2" already exists`, `CREATE DATABASE d2`)
			checkSQLDB.ExpectErr(t, `relation "d2.public.t" already exists`, `CREATE TABLE d2.t (a INT)`)
		},
	}
	testCases = append(testCases, tc)
	// Second (incremental) backup covers the drop of d2.
	sqlDB.Exec(t, `BACKUP TO $1 WITH revision_history`, localFoo)

	// ts[4]: d1 recreated; d2 gone.
	sqlDB.Exec(t, `CREATE DATABASE d1`)
	sqlDB.Exec(t, `CREATE TABLE d1.t (a INT)`)
	sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts[4])
	tc = testCase{
		ts: ts[4],
		check: func(t *testing.T, checkSQLDB *sqlutils.SQLRunner) {
			checkSQLDB.ExpectErr(t, `database "d1" already exists`, `CREATE DATABASE d1`)
			checkSQLDB.ExpectErr(t, `relation "d1.public.t" already exists`, `CREATE TABLE d1.t (a INT)`)
			checkSQLDB.Exec(t, `CREATE DATABASE d2`)
			checkSQLDB.Exec(t, `CREATE TABLE d2.t (a INT)`)
		},
	}
	testCases = append(testCases, tc)

	// ts[5]: both databases dropped; an empty cluster state.
	sqlDB.Exec(t, `DROP DATABASE d1`)
	sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&ts[5])
	tc = testCase{
		ts: ts[5],
		check: func(t *testing.T, checkSQLDB *sqlutils.SQLRunner) {
			checkSQLDB.Exec(t, `CREATE DATABASE d1`)
			checkSQLDB.Exec(t, `CREATE TABLE d1.t (a INT)`)
			checkSQLDB.Exec(t, `CREATE DATABASE d2`)
			checkSQLDB.Exec(t, `CREATE TABLE d2.t (a INT)`)
		},
	}
	testCases = append(testCases, tc)
	// Third backup covers ts[3]..ts[5].
	sqlDB.Exec(t, `BACKUP TO $1 WITH revision_history`, localFoo)

	// Restore into a fresh empty cluster at each captured timestamp and run
	// the corresponding state check.
	for i, testCase := range testCases {
		t.Run(fmt.Sprintf("t%d", i), func(t *testing.T) {
			_, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
			defer cleanupEmptyCluster()

			sqlDBRestore.Exec(t, `RESTORE FROM $1 AS OF SYSTEM TIME `+testCase.ts, localFoo)
			testCase.check(t, sqlDBRestore)
		})
	}
}
// TestReintroduceOfflineSpans is a regression test for #62564, which tracks a
// bug where AddSSTable requests to OFFLINE tables may be missed by cluster
// incremental backups since they can write at a timestamp older than the last
// backup.
func TestReintroduceOfflineSpans(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	skip.UnderRace(t, "likely slow under race")

	// Block restores on the source cluster.
	blockDBRestore := make(chan struct{})
	dbRestoreStarted := make(chan struct{})
	// The data is split such that there will be 10 span entries to process.
	// NOTE(review): "Entires" below looks like a typo for "Entries"; the name
	// is local to this test so renaming would be cosmetic only.
	restoreBlockEntiresThreshold := 4
	entriesCount := 0
	params := base.TestClusterArgs{}
	knobs := base.TestingKnobs{
		DistSQL: &execinfra.TestingKnobs{
			BackupRestoreTestingKnobs: &sql.BackupRestoreTestingKnobs{
				// Signal after the first span entry is processed, then park
				// the restore once the threshold entry is reached so the
				// table stays OFFLINE while incremental backups are taken.
				RunAfterProcessingRestoreSpanEntry: func(_ context.Context) {
					if entriesCount == 0 {
						close(dbRestoreStarted)
					}
					if entriesCount == restoreBlockEntiresThreshold {
						<-blockDBRestore
					}
					entriesCount++
				},
			}},
	}
	params.ServerArgs.Knobs = knobs

	const numAccounts = 1000
	ctx := context.Background()
	_, srcDB, tempDir, cleanupSrc := backupRestoreTestSetupWithParams(t, singleNode, numAccounts, InitManualReplication, params)
	defer cleanupSrc()

	dbBackupLoc := "nodelocal://0/my_db_backup"
	clusterBackupLoc := "nodelocal://0/my_cluster_backup"

	// the small test-case will get entirely buffered/merged by small-file merging
	// and not report any progress in the meantime unless it is disabled.
	srcDB.Exec(t, `SET CLUSTER SETTING bulkio.backup.file_size = '1'`)

	// Test servers only have 128MB root memory monitors, reduce the buffer size
	// so we don't see memory errors.
	srcDB.Exec(t, `SET CLUSTER SETTING bulkio.backup.merge_file_buffer_size = '1MiB'`)

	// Take a backup that we'll use to create an OFFLINE descriptor.
	srcDB.Exec(t, `CREATE INDEX new_idx ON data.bank (balance)`)
	srcDB.Exec(t, `BACKUP DATABASE data TO $1 WITH revision_history`, dbBackupLoc)

	srcDB.Exec(t, `CREATE DATABASE restoredb;`)

	// Take a base full backup.
	srcDB.Exec(t, `BACKUP TO $1 WITH revision_history`, clusterBackupLoc)

	// Kick off a database restore in the background; the testing knob above
	// will block it part-way through, leaving restoredb.bank OFFLINE.
	var g errgroup.Group
	g.Go(func() error {
		_, err := srcDB.DB.ExecContext(ctx, `RESTORE data.bank FROM $1 WITH into_db='restoredb'`, dbBackupLoc)
		return err
	})

	// Take an incremental backup after the database restore starts.
	<-dbRestoreStarted
	srcDB.Exec(t, `BACKUP TO $1 WITH revision_history`, clusterBackupLoc)

	// Capture a timestamp while the table restore is still in progress; used
	// by the "restore-canceled" subtest below.
	var tsMidRestore string
	srcDB.QueryRow(t, "SELECT cluster_logical_timestamp()").Scan(&tsMidRestore)

	// Allow the restore to finish. This will issue AddSSTable requests at a
	// timestamp that is before the last incremental we just took.
	close(blockDBRestore)

	// Wait for the database restore to finish, and take another incremental
	// backup that will miss the AddSSTable writes.
	require.NoError(t, g.Wait())

	var tsBefore string
	srcDB.QueryRow(t, "SELECT cluster_logical_timestamp()").Scan(&tsBefore)

	// Drop an index on the restored table to ensure that the dropped index was
	// also re-included.
	srcDB.Exec(t, `DROP INDEX new_idx`)

	srcDB.Exec(t, `BACKUP TO $1 WITH revision_history`, clusterBackupLoc)

	t.Run("spans-reintroduced", func(t *testing.T) {
		_, destDB, cleanupDst := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
		defer cleanupDst()

		// Restore the incremental backup chain that has missing writes.
		destDB.Exec(t, `RESTORE FROM $1 AS OF SYSTEM TIME `+tsBefore, clusterBackupLoc)

		// Assert that the restored database has the same number of rows in both the
		// source and destination cluster.
		checkQuery := `SELECT count(*) FROM restoredb.bank AS OF SYSTEM TIME ` + tsBefore
		expectedCount := srcDB.QueryStr(t, checkQuery)
		destDB.CheckQueryResults(t, `SELECT count(*) FROM restoredb.bank`, expectedCount)

		// The secondary index must have been re-included too.
		checkQuery = `SELECT count(*) FROM restoredb.bank@new_idx AS OF SYSTEM TIME ` + tsBefore
		expectedCount = srcDB.QueryStr(t, checkQuery)
		destDB.CheckQueryResults(t, `SELECT count(*) FROM restoredb.bank@new_idx`, expectedCount)
	})

	t.Run("restore-canceled", func(t *testing.T) {
		args := base.TestClusterArgs{ServerArgs: base.TestServerArgs{
			Knobs: base.TestingKnobs{JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals()}},
		}
		_, destDB, cleanupDst := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, args)
		defer cleanupDst()

		// Restoring at the mid-restore timestamp brings back the in-progress
		// table restore job, which cannot resume on the new cluster.
		destDB.Exec(t, `RESTORE FROM $1 AS OF SYSTEM TIME `+tsMidRestore, clusterBackupLoc)

		// Wait for the cluster restore job to finish, as well as the restored RESTORE TABLE
		// job to cancel.
		destDB.CheckQueryResultsRetry(t, `
SELECT description, status FROM [SHOW JOBS]
WHERE job_type = 'RESTORE' AND status NOT IN ('succeeded', 'canceled')`,
			[][]string{},
		)
		// The cluster restore should succeed, but the table restore should have failed.
		destDB.CheckQueryResults(t,
			`SELECT status, count(*) FROM [SHOW JOBS] WHERE job_type = 'RESTORE' GROUP BY status ORDER BY status`,
			[][]string{{"canceled", "1"}, {"succeeded", "1"}})

		destDB.ExpectErr(t, `relation "restoredb.bank" does not exist`, `SELECT count(*) FROM restoredb.bank`)
	})
}
// TestClusterRevisionDoesNotBackupOptOutSystemTables is a regression test for a
// bug that was introduced where we would include revisions for descriptors that
// are not supposed to be backed up egs: system tables that are opted out.
//
// The test would previously fail with an error that the descriptors table (an
// opt out system table) did not have a span covering the time between the
// `EndTime` of the first backup and second backup, since there are no revisions
// to it between those backups.
func TestClusterRevisionDoesNotBackupOptOutSystemTables(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	tc, _, _, cleanupFn := backupRestoreTestSetup(t, singleNode, 10, InitManualReplication)
	runner := sqlutils.MakeSQLRunner(tc.Conns[0])
	defer cleanupFn()

	// Two consecutive revision-history backups with no intervening schema
	// change exercise the "no revisions for opt-out tables" window; a third
	// backup after a schema change confirms normal incrementals still work.
	runner.Exec(t, `CREATE DATABASE test;`)
	runner.Exec(t, `USE test;`)
	runner.Exec(t, `CREATE TABLE foo (id INT);`)
	runner.Exec(t, `BACKUP TO 'nodelocal://1/foo' WITH revision_history;`)
	runner.Exec(t, `BACKUP TO 'nodelocal://1/foo' WITH revision_history;`)
	runner.Exec(t, `CREATE TABLE bar (id INT);`)
	runner.Exec(t, `BACKUP TO 'nodelocal://1/foo' WITH revision_history;`)
}
// TestRestoreWithRecreatedDefaultDB verifies that a cluster restore reproduces
// a defaultdb that was dropped and recreated (and therefore has a
// non-default descriptor ID) with the same namespace entry.
func TestRestoreWithRecreatedDefaultDB(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	srcDB, tempDir, cleanupSrc := createEmptyCluster(t, singleNode)
	_, destDB, cleanupDst := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupSrc()
	defer cleanupDst()

	// Recreate defaultdb so it is assigned a fresh descriptor ID.
	srcDB.Exec(t, `
DROP DATABASE defaultdb;
CREATE DATABASE defaultdb;
`)
	// Remember the recreated database's ID; the restored cluster must carry
	// the identical namespace row.
	var expectedDefaultDBID string
	srcDB.QueryRow(t, `SELECT id FROM system.namespace WHERE name = 'defaultdb'`).Scan(&expectedDefaultDBID)

	srcDB.Exec(t, `BACKUP TO $1`, localFoo)
	destDB.Exec(t, `RESTORE FROM $1`, localFoo)
	destDB.CheckQueryResults(t, `SELECT * FROM system.namespace WHERE name = 'defaultdb'`, [][]string{
		{"0", "0", "defaultdb", expectedDefaultDBID},
	})
}
// TestRestoreWithDroppedDefaultDB verifies that a cluster restore of a backup
// taken after defaultdb was dropped does not resurrect the database.
func TestRestoreWithDroppedDefaultDB(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	srcDB, tempDir, cleanupSrc := createEmptyCluster(t, singleNode)
	_, destDB, cleanupDst := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupSrc()
	defer cleanupDst()

	// Drop defaultdb on the source before backing up.
	srcDB.Exec(t, `
DROP DATABASE defaultdb;
`)
	srcDB.Exec(t, `BACKUP TO $1`, localFoo)

	// After the restore there must be no namespace entry for defaultdb.
	destDB.Exec(t, `RESTORE FROM $1`, localFoo)
	destDB.CheckQueryResults(t, `SELECT count(*) FROM system.namespace WHERE name = 'defaultdb'`, [][]string{
		{"0"},
	})
}
// TestRestoreToClusterWithDroppedDefaultDB verifies that restoring into a
// cluster whose defaultdb was dropped recreates the database with the same
// namespace entry as the source cluster.
func TestRestoreToClusterWithDroppedDefaultDB(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	srcDB, tempDir, cleanupSrc := createEmptyCluster(t, singleNode)
	_, destDB, cleanupDst := backupRestoreTestSetupEmpty(t, singleNode, tempDir, InitManualReplication, base.TestClusterArgs{})
	defer cleanupSrc()
	defer cleanupDst()

	// Capture the source cluster's namespace row for defaultdb so we can
	// assert the restore recreates it verbatim.
	var parentID, parentSchemaID, id int
	var name string
	srcDB.QueryRow(t, `SELECT * FROM system.namespace WHERE name = 'defaultdb'`).
		Scan(&parentID, &parentSchemaID, &name, &id)

	srcDB.Exec(t, `BACKUP TO $1`, localFoo)

	// Drop defaultdb on the destination before restoring.
	destDB.Exec(t, `
DROP DATABASE defaultdb;
`)
	destDB.Exec(t, `RESTORE FROM $1`, localFoo)
	destDB.CheckQueryResults(t, `SELECT * FROM system.namespace WHERE name = 'defaultdb'`, [][]string{
		{fmt.Sprint(parentID), fmt.Sprint(parentSchemaID), name, fmt.Sprint(id)},
	})
}
| pkg/ccl/backupccl/full_cluster_backup_restore_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/f475361ee075d1746bcd148e6da004fa20dece51 | [
0.08140309900045395,
0.0009578669560141861,
0.00015523054753430188,
0.00017081413534469903,
0.007504478096961975
] |
{
"id": 5,
"code_window": [
"\tRunBeforePerformGC func(jobID jobspb.JobID) error\n",
"\t// RunAfterIsProtectedCheck is called after a successfully checking the\n",
"\t// protected timestamp status of a table or an index. The protection status is\n",
"\t// passed in along with the jobID.\n",
"\tRunAfterIsProtectedCheck func(jobID jobspb.JobID, isProtected bool)\n",
"\t// DisableNewProtectedTimestampSubsystemCheck disables checking the new\n",
"\t// protected timestamp subsystem when checking the protection status of a\n",
"\t// table or an index. This is useful for tests that disable the span\n",
"\t// configuration infrastructure, as the new protected timestamp subsystem is\n",
"\t// built on top of it.\n",
"\t// TODO(arul): Once we've fully migrated all tests to use span configurations\n",
"\t// we should be able to get rid of this testing knob as well.\n",
"\tDisableNewProtectedTimestampSubsystemCheck bool\n",
"}\n",
"\n",
"// ModuleTestingKnobs is part of the base.ModuleTestingKnobs interface.\n",
"func (*GCJobTestingKnobs) ModuleTestingKnobs() {}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/schema_changer.go",
"type": "replace",
"edit_start_line_idx": 1000
} | # OpenSSL CA configuration file
# Copied from https://www.cockroachlabs.com/docs/stable/create-security-certificates-openssl.html#step-1-create-the-ca-key-and-certificate-pair
# Added authorityInfoAccess line.
[ ca ]
default_ca = CA_default
[ CA_default ]
default_days = 365
database = index.txt
serial = serial.txt
default_md = sha256
copy_extensions = copy
unique_subject = no
# Used to create the CA certificate.
[ req ]
prompt=no
distinguished_name = distinguished_name
x509_extensions = extensions
[ distinguished_name ]
organizationName = Cockroach
commonName = Cockroach CA
[ extensions ]
keyUsage = critical,digitalSignature,nonRepudiation,keyEncipherment,keyCertSign
basicConstraints = critical,CA:true,pathlen:1
# Common policy for nodes and users.
[ signing_policy ]
organizationName = supplied
commonName = optional
# Used to sign node certificates.
[ signing_node_req ]
keyUsage = critical,digitalSignature,keyEncipherment
extendedKeyUsage = serverAuth,clientAuth
# Used to sign client certificates.
[ signing_client_req ]
keyUsage = critical,digitalSignature,keyEncipherment
extendedKeyUsage = clientAuth
authorityInfoAccess = OCSP;URI:http://127.0.0.1:1234
| pkg/cli/interactive_tests/ocsp_ca.cnf | 0 | https://github.com/cockroachdb/cockroach/commit/f475361ee075d1746bcd148e6da004fa20dece51 | [
0.00017385170212946832,
0.0001724959147395566,
0.00017028544971253723,
0.00017355024465359747,
0.0000014964890624469263
] |
{
"id": 5,
"code_window": [
"\tRunBeforePerformGC func(jobID jobspb.JobID) error\n",
"\t// RunAfterIsProtectedCheck is called after a successfully checking the\n",
"\t// protected timestamp status of a table or an index. The protection status is\n",
"\t// passed in along with the jobID.\n",
"\tRunAfterIsProtectedCheck func(jobID jobspb.JobID, isProtected bool)\n",
"\t// DisableNewProtectedTimestampSubsystemCheck disables checking the new\n",
"\t// protected timestamp subsystem when checking the protection status of a\n",
"\t// table or an index. This is useful for tests that disable the span\n",
"\t// configuration infrastructure, as the new protected timestamp subsystem is\n",
"\t// built on top of it.\n",
"\t// TODO(arul): Once we've fully migrated all tests to use span configurations\n",
"\t// we should be able to get rid of this testing knob as well.\n",
"\tDisableNewProtectedTimestampSubsystemCheck bool\n",
"}\n",
"\n",
"// ModuleTestingKnobs is part of the base.ModuleTestingKnobs interface.\n",
"func (*GCJobTestingKnobs) ModuleTestingKnobs() {}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/schema_changer.go",
"type": "replace",
"edit_start_line_idx": 1000
} | // Copyright 2018 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
import React from "react";
import * as PathMath from "src/views/clusterviz/util/pathmath";
import {
BACKGROUND_BLUE,
DARK_BLUE,
LIGHT_TEXT_BLUE,
MAIN_BLUE,
} from "src/views/shared/colors";
import { Bytes } from "src/util/format";
import {
NodeArcPercentageTooltip,
NodeArcUsedCapacityTooltip,
NodeArcTotalCapacityTooltip,
} from "src/views/clusterviz/components/nodeOrLocality/tooltips";
const ARC_INNER_RADIUS = 56;
const ARC_WIDTH = 6;
const ARC_OUTER_RADIUS = ARC_INNER_RADIUS + ARC_WIDTH;
interface CapacityArcProps {
usedCapacity: number;
usableCapacity: number;
nodeLabel?: string;
localityLabel?: string;
}
export class CapacityArc extends React.Component<CapacityArcProps> {
render() {
// Compute used percentage.
const usedCapacity = this.props.usedCapacity;
const capacity = this.props.usableCapacity;
const capacityUsedPct = capacity ? (usedCapacity / capacity) * 100 : 0;
return (
<g>
<g transform="translate(90 115)">
{/* background arc */}
<path
fill={BACKGROUND_BLUE}
strokeLinecap="round"
d={PathMath.createArcPath(
ARC_INNER_RADIUS,
ARC_OUTER_RADIUS,
PathMath.arcAngleFromPct(0),
PathMath.arcAngleFromPct(1),
ARC_WIDTH,
)}
/>
{/* current value arc */}
<path
fill={MAIN_BLUE}
strokeLinecap="round"
d={PathMath.createArcPath(
ARC_INNER_RADIUS,
ARC_OUTER_RADIUS,
PathMath.arcAngleFromPct(0),
PathMath.arcAngleFromPct(capacityUsedPct / 100),
ARC_WIDTH,
)}
/>
</g>
{/* text inside arc */}
<text
fill={MAIN_BLUE}
fontFamily="Lato-Bold, Lato"
fontSize="34"
fontWeight="bold"
textAnchor="middle"
x="90"
y="110"
>
{Math.round(capacityUsedPct)}%
</text>
<NodeArcPercentageTooltip {...this.props}>
<text
fill={DARK_BLUE}
fontFamily="Lato-Bold, Lato"
fontSize="12"
fontWeight="bold"
letterSpacing="1.333"
textAnchor="middle"
x="90"
y="132"
>
CAPACITY
</text>
</NodeArcPercentageTooltip>
{/* labels at ends of arc */}
<NodeArcUsedCapacityTooltip {...this.props}>
<text fill={MAIN_BLUE} x="17" y="156" textAnchor="center">
{Bytes(usedCapacity)}
</text>
</NodeArcUsedCapacityTooltip>
<NodeArcTotalCapacityTooltip {...this.props}>
<text fill={LIGHT_TEXT_BLUE} x="118" y="156" textAnchor="center">
{Bytes(capacity)}
</text>
</NodeArcTotalCapacityTooltip>
</g>
);
}
}
| pkg/ui/workspaces/db-console/ccl/src/views/clusterviz/components/nodeOrLocality/capacityArc.tsx | 0 | https://github.com/cockroachdb/cockroach/commit/f475361ee075d1746bcd148e6da004fa20dece51 | [
0.0001774565753294155,
0.00017515294894110411,
0.00016658476670272648,
0.00017612992087379098,
0.0000027873793442267925
] |
{
"id": 5,
"code_window": [
"\tRunBeforePerformGC func(jobID jobspb.JobID) error\n",
"\t// RunAfterIsProtectedCheck is called after a successfully checking the\n",
"\t// protected timestamp status of a table or an index. The protection status is\n",
"\t// passed in along with the jobID.\n",
"\tRunAfterIsProtectedCheck func(jobID jobspb.JobID, isProtected bool)\n",
"\t// DisableNewProtectedTimestampSubsystemCheck disables checking the new\n",
"\t// protected timestamp subsystem when checking the protection status of a\n",
"\t// table or an index. This is useful for tests that disable the span\n",
"\t// configuration infrastructure, as the new protected timestamp subsystem is\n",
"\t// built on top of it.\n",
"\t// TODO(arul): Once we've fully migrated all tests to use span configurations\n",
"\t// we should be able to get rid of this testing knob as well.\n",
"\tDisableNewProtectedTimestampSubsystemCheck bool\n",
"}\n",
"\n",
"// ModuleTestingKnobs is part of the base.ModuleTestingKnobs interface.\n",
"func (*GCJobTestingKnobs) ModuleTestingKnobs() {}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/schema_changer.go",
"type": "replace",
"edit_start_line_idx": 1000
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package testmodel
import (
"fmt"
"math"
"github.com/cockroachdb/cockroach/pkg/ts/tspb"
)
type aggFunc func(DataSeries) float64
type fillFunc func(DataSeries, DataSeries, int64) DataSeries
// AggregateSum returns the sum value of all points in the provided data series.
func AggregateSum(data DataSeries) float64 {
total := 0.0
for _, dp := range data {
total += dp.Value
}
return total
}
// AggregateAverage returns the average value of the points in the provided data
// series.
func AggregateAverage(data DataSeries) float64 {
if len(data) == 0 {
return 0.0
}
return AggregateSum(data) / float64(len(data))
}
// AggregateMax returns the maximum value of any point in the provided data
// series.
func AggregateMax(data DataSeries) float64 {
max := -math.MaxFloat64
for _, dp := range data {
if dp.Value > max {
max = dp.Value
}
}
return max
}
// AggregateMin returns the minimum value of any point in the provided data
// series.
func AggregateMin(data DataSeries) float64 {
min := math.MaxFloat64
for _, dp := range data {
if dp.Value < min {
min = dp.Value
}
}
return min
}
// AggregateFirst returns the first value in the provided data series.
func AggregateFirst(data DataSeries) float64 {
return data[0].Value
}
// AggregateLast returns the last value in the provided data series.
func AggregateLast(data DataSeries) float64 {
return data[len(data)-1].Value
}
// AggregateVariance returns the variance of the provided data series. The returned
// variance is the sample variance, not the population variance.
func AggregateVariance(data DataSeries) float64 {
mean := 0.0
meanSquaredDist := 0.0
if len(data) < 2 {
return 0
}
for i, dp := range data {
// Welford's algorithm for computing variance.
delta := dp.Value - mean
mean += delta / float64(i+1)
delta2 := dp.Value - mean
meanSquaredDist += delta * delta2
}
return meanSquaredDist / float64(len(data))
}
// getAggFunction is a convenience method used to process an aggregator option
// from our time series query protobuffer format.
func getAggFunction(agg tspb.TimeSeriesQueryAggregator) aggFunc {
switch agg {
case tspb.TimeSeriesQueryAggregator_AVG:
return AggregateAverage
case tspb.TimeSeriesQueryAggregator_SUM:
return AggregateSum
case tspb.TimeSeriesQueryAggregator_MAX:
return AggregateMax
case tspb.TimeSeriesQueryAggregator_MIN:
return AggregateMin
case tspb.TimeSeriesQueryAggregator_FIRST:
return AggregateFirst
case tspb.TimeSeriesQueryAggregator_LAST:
return AggregateLast
case tspb.TimeSeriesQueryAggregator_VARIANCE:
return AggregateVariance
}
// The model should not be called with an invalid aggregator option.
panic(fmt.Sprintf("unknown aggregator option specified: %v", agg))
}
func fillFuncLinearInterpolate(before DataSeries, after DataSeries, resolution int64) DataSeries {
start := before[len(before)-1]
end := after[0]
// compute interpolation step
step := (end.Value - start.Value) / float64(end.TimestampNanos-start.TimestampNanos)
result := make(DataSeries, (end.TimestampNanos-start.TimestampNanos)/resolution-1)
for i := range result {
result[i] = dp(
start.TimestampNanos+(resolution*int64(i+1)),
start.Value+(step*float64(i+1)*float64(resolution)),
)
}
return result
}
| pkg/ts/testmodel/functions.go | 0 | https://github.com/cockroachdb/cockroach/commit/f475361ee075d1746bcd148e6da004fa20dece51 | [
0.00017841618682723492,
0.0001702054141787812,
0.00016359412984456867,
0.00017020429368130863,
0.00000384990880775149
] |
{
"id": 0,
"code_window": [
"import (\n",
"\t\"fmt\"\n",
"\t\"testing\"\n",
"\n",
"\t\"github.com/hashicorp/terraform/helper/resource\"\n",
"\t\"github.com/hashicorp/terraform/terraform\"\n",
")\n",
"\n",
"func TestAccAzureDnsServerBasic(t *testing.T) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/Azure/azure-sdk-for-go/management\"\n"
],
"file_path": "builtin/providers/azure/resource_azure_dns_server_test.go",
"type": "add",
"edit_start_line_idx": 6
} | package azure
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccAzureDnsServerBasic(t *testing.T) {
name := "azure_dns_server.foo"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAzureDnsServerDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAzureDnsServerBasic,
Check: resource.ComposeTestCheckFunc(
testAccCheckAzureDnsServerExists(name),
resource.TestCheckResourceAttr(name, "name", "terraform-dns-server"),
resource.TestCheckResourceAttr(name, "dns_address", "8.8.8.8"),
),
},
},
})
}
func TestAccAzureDnsServerUpdate(t *testing.T) {
name := "azure_dns_server.foo"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAzureDnsServerDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAzureDnsServerBasic,
Check: resource.ComposeTestCheckFunc(
testAccCheckAzureDnsServerExists(name),
resource.TestCheckResourceAttr(name, "name", "terraform-dns-server"),
resource.TestCheckResourceAttr(name, "dns_address", "8.8.8.8"),
),
},
resource.TestStep{
Config: testAccAzureDnsServerUpdate,
Check: resource.ComposeTestCheckFunc(
testAccCheckAzureDnsServerExists(name),
resource.TestCheckResourceAttr(name, "name", "terraform-dns-server"),
resource.TestCheckResourceAttr(name, "dns_address", "8.8.4.4"),
),
},
},
})
}
func testAccCheckAzureDnsServerExists(name string) resource.TestCheckFunc {
return func(s *terraform.State) error {
resource, ok := s.RootModule().Resources[name]
if !ok {
return fmt.Errorf("Resource not found: %s", name)
}
if resource.Primary.ID == "" {
return fmt.Errorf("No DNS Server ID set.")
}
vnetClient := testAccProvider.Meta().(*Client).vnetClient
netConf, err := vnetClient.GetVirtualNetworkConfiguration()
if err != nil {
return fmt.Errorf("Failed fetching networking configuration: %s", err)
}
for _, dns := range netConf.Configuration.DNS.DNSServers {
if dns.Name == resource.Primary.ID {
return nil
}
}
return fmt.Errorf("Azure DNS Server not found.")
}
}
func testAccCheckAzureDnsServerDestroy(s *terraform.State) error {
vnetClient := testAccProvider.Meta().(*Client).vnetClient
for _, resource := range s.RootModule().Resources {
if resource.Type != "azure_dns_server" {
continue
}
if resource.Primary.ID == "" {
return fmt.Errorf("No DNS Server ID is set.")
}
netConf, err := vnetClient.GetVirtualNetworkConfiguration()
if err != nil {
return fmt.Errorf("Error retrieving networking configuration from Azure: %s", err)
}
for _, dns := range netConf.Configuration.DNS.DNSServers {
if dns.Name == resource.Primary.ID {
return fmt.Errorf("Azure DNS Server still exists.")
}
}
}
return nil
}
const testAccAzureDnsServerBasic = `
resource "azure_dns_server" "foo" {
name = "terraform-dns-server"
dns_address = "8.8.8.8"
}
`
const testAccAzureDnsServerUpdate = `
resource "azure_dns_server" "foo" {
name = "terraform-dns-server"
dns_address = "8.8.4.4"
}
`
| builtin/providers/azure/resource_azure_dns_server_test.go | 1 | https://github.com/hashicorp/terraform/commit/42a3800ec2586326012ae83cb957adaa751adc9d | [
0.9990864992141724,
0.47699424624443054,
0.0001586021826369688,
0.22417229413986206,
0.4741254448890686
] |
{
"id": 0,
"code_window": [
"import (\n",
"\t\"fmt\"\n",
"\t\"testing\"\n",
"\n",
"\t\"github.com/hashicorp/terraform/helper/resource\"\n",
"\t\"github.com/hashicorp/terraform/terraform\"\n",
")\n",
"\n",
"func TestAccAzureDnsServerBasic(t *testing.T) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/Azure/azure-sdk-for-go/management\"\n"
],
"file_path": "builtin/providers/azure/resource_azure_dns_server_test.go",
"type": "add",
"edit_start_line_idx": 6
} | package config
import (
"reflect"
"testing"
"github.com/hashicorp/terraform/config/lang"
)
func TestNewInterpolatedVariable(t *testing.T) {
cases := []struct {
Input string
Result InterpolatedVariable
Error bool
}{
{
"var.foo",
&UserVariable{
Name: "foo",
key: "var.foo",
},
false,
},
{
"module.foo.bar",
&ModuleVariable{
Name: "foo",
Field: "bar",
key: "module.foo.bar",
},
false,
},
{
"count.index",
&CountVariable{
Type: CountValueIndex,
key: "count.index",
},
false,
},
{
"count.nope",
&CountVariable{
Type: CountValueInvalid,
key: "count.nope",
},
false,
},
{
"path.module",
&PathVariable{
Type: PathValueModule,
key: "path.module",
},
false,
},
{
"self.address",
&SelfVariable{
Field: "address",
key: "self.address",
},
false,
},
}
for i, tc := range cases {
actual, err := NewInterpolatedVariable(tc.Input)
if err != nil != tc.Error {
t.Fatalf("%d. Error: %s", i, err)
}
if !reflect.DeepEqual(actual, tc.Result) {
t.Fatalf("%d bad: %#v", i, actual)
}
}
}
func TestNewResourceVariable(t *testing.T) {
v, err := NewResourceVariable("foo.bar.baz")
if err != nil {
t.Fatalf("err: %s", err)
}
if v.Type != "foo" {
t.Fatalf("bad: %#v", v)
}
if v.Name != "bar" {
t.Fatalf("bad: %#v", v)
}
if v.Field != "baz" {
t.Fatalf("bad: %#v", v)
}
if v.Multi {
t.Fatal("should not be multi")
}
if v.FullKey() != "foo.bar.baz" {
t.Fatalf("bad: %#v", v)
}
}
func TestNewUserVariable(t *testing.T) {
v, err := NewUserVariable("var.bar")
if err != nil {
t.Fatalf("err: %s", err)
}
if v.Name != "bar" {
t.Fatalf("bad: %#v", v.Name)
}
if v.FullKey() != "var.bar" {
t.Fatalf("bad: %#v", v)
}
}
func TestNewUserVariable_map(t *testing.T) {
v, err := NewUserVariable("var.bar.baz")
if err != nil {
t.Fatalf("err: %s", err)
}
if v.Name != "bar" {
t.Fatalf("bad: %#v", v.Name)
}
if v.Elem != "baz" {
t.Fatalf("bad: %#v", v.Elem)
}
if v.FullKey() != "var.bar.baz" {
t.Fatalf("bad: %#v", v)
}
}
func TestResourceVariable_impl(t *testing.T) {
var _ InterpolatedVariable = new(ResourceVariable)
}
func TestResourceVariable_Multi(t *testing.T) {
v, err := NewResourceVariable("foo.bar.*.baz")
if err != nil {
t.Fatalf("err: %s", err)
}
if v.Type != "foo" {
t.Fatalf("bad: %#v", v)
}
if v.Name != "bar" {
t.Fatalf("bad: %#v", v)
}
if v.Field != "baz" {
t.Fatalf("bad: %#v", v)
}
if !v.Multi {
t.Fatal("should be multi")
}
}
func TestResourceVariable_MultiIndex(t *testing.T) {
cases := []struct {
Input string
Index int
Field string
}{
{"foo.bar.*.baz", -1, "baz"},
{"foo.bar.0.baz", 0, "baz"},
{"foo.bar.5.baz", 5, "baz"},
}
for _, tc := range cases {
v, err := NewResourceVariable(tc.Input)
if err != nil {
t.Fatalf("err: %s", err)
}
if !v.Multi {
t.Fatalf("should be multi: %s", tc.Input)
}
if v.Index != tc.Index {
t.Fatalf("bad: %d\n\n%s", v.Index, tc.Input)
}
if v.Field != tc.Field {
t.Fatalf("bad: %s\n\n%s", v.Field, tc.Input)
}
}
}
func TestUserVariable_impl(t *testing.T) {
var _ InterpolatedVariable = new(UserVariable)
}
func TestDetectVariables(t *testing.T) {
cases := []struct {
Input string
Result []InterpolatedVariable
}{
{
"foo $${var.foo}",
nil,
},
{
"foo ${var.foo}",
[]InterpolatedVariable{
&UserVariable{
Name: "foo",
key: "var.foo",
},
},
},
{
"foo ${var.foo} ${var.bar}",
[]InterpolatedVariable{
&UserVariable{
Name: "foo",
key: "var.foo",
},
&UserVariable{
Name: "bar",
key: "var.bar",
},
},
},
}
for _, tc := range cases {
ast, err := lang.Parse(tc.Input)
if err != nil {
t.Fatalf("%s\n\nInput: %s", err, tc.Input)
}
actual, err := DetectVariables(ast)
if err != nil {
t.Fatalf("err: %s", err)
}
if !reflect.DeepEqual(actual, tc.Result) {
t.Fatalf("bad: %#v\n\nInput: %s", actual, tc.Input)
}
}
}
| config/interpolate_test.go | 0 | https://github.com/hashicorp/terraform/commit/42a3800ec2586326012ae83cb957adaa751adc9d | [
0.0033747386187314987,
0.0003169616684317589,
0.00016400989261455834,
0.00017712879343889654,
0.0006380646373145282
] |
{
"id": 0,
"code_window": [
"import (\n",
"\t\"fmt\"\n",
"\t\"testing\"\n",
"\n",
"\t\"github.com/hashicorp/terraform/helper/resource\"\n",
"\t\"github.com/hashicorp/terraform/terraform\"\n",
")\n",
"\n",
"func TestAccAzureDnsServerBasic(t *testing.T) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/Azure/azure-sdk-for-go/management\"\n"
],
"file_path": "builtin/providers/azure/resource_azure_dns_server_test.go",
"type": "add",
"edit_start_line_idx": 6
} | ---
layout: "google"
page_title: "Google: google_pubsub_topic"
sidebar_current: "docs-google-pubsub-topic"
description: |-
Creates a topic in Google's pubsub queueing system
---
# google\_pubsub\_topic
Creates a topic in Google's pubsub queueing system. For more information see
[the official documentation](https://cloud.google.com/pubsub/docs) and
[API](https://cloud.google.com/pubsub/reference/rest/v1/projects.topics).
## Example Usage
```
resource "google_pubsub_topic" "default" {
name = "default-topic"
}
```
## Argument Reference
The following arguments are supported:
* `name` - (Required) A unique name for the resource, required by pubsub.
Changing this forces a new resource to be created.
## Attributes Reference
The following attributes are exported:
* `name` - The name of the resource.
| website/source/docs/providers/google/r/pubsub_topic.html.markdown | 0 | https://github.com/hashicorp/terraform/commit/42a3800ec2586326012ae83cb957adaa751adc9d | [
0.00017548937466926873,
0.00016908184625208378,
0.00016249807958956808,
0.00016916997265070677,
0.000004999028988095233
] |
{
"id": 0,
"code_window": [
"import (\n",
"\t\"fmt\"\n",
"\t\"testing\"\n",
"\n",
"\t\"github.com/hashicorp/terraform/helper/resource\"\n",
"\t\"github.com/hashicorp/terraform/terraform\"\n",
")\n",
"\n",
"func TestAccAzureDnsServerBasic(t *testing.T) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/Azure/azure-sdk-for-go/management\"\n"
],
"file_path": "builtin/providers/azure/resource_azure_dns_server_test.go",
"type": "add",
"edit_start_line_idx": 6
} | resource "aws_instance" "foo" {}
resource "aws_instance" "web" {
count = "${aws_instance.foo.bar}"
}
| config/test-fixtures/validate-count-resource-var/main.tf | 0 | https://github.com/hashicorp/terraform/commit/42a3800ec2586326012ae83cb957adaa751adc9d | [
0.00017619911523070186,
0.00017619911523070186,
0.00017619911523070186,
0.00017619911523070186,
0
] |
{
"id": 1,
"code_window": [
"\n",
"\t\tnetConf, err := vnetClient.GetVirtualNetworkConfiguration()\n",
"\t\tif err != nil {\n",
"\t\t\treturn fmt.Errorf(\"Error retrieving networking configuration from Azure: %s\", err)\n",
"\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t// This is desirable - if there is no network config there can't be any DNS Servers\n",
"\t\t\tif management.IsResourceNotFoundError(err) {\n",
"\t\t\t\tcontinue\n",
"\t\t\t}\n"
],
"file_path": "builtin/providers/azure/resource_azure_dns_server_test.go",
"type": "add",
"edit_start_line_idx": 100
} | package azure
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccAzureLocalNetworkConnectionBasic(t *testing.T) {
name := "azure_local_network_connection.foo"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccAzureLocalNetworkConnectionDestroyed,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAzureLocalNetworkConnectionBasic,
Check: resource.ComposeTestCheckFunc(
testAccAzureLocalNetworkConnectionExists(name),
resource.TestCheckResourceAttr(name, "name", "terraform-local-network-connection"),
resource.TestCheckResourceAttr(name, "vpn_gateway_address", "10.11.12.13"),
resource.TestCheckResourceAttr(name, "address_space_prefixes.0", "10.10.10.0/31"),
resource.TestCheckResourceAttr(name, "address_space_prefixes.1", "10.10.10.1/31"),
),
},
},
})
}
func TestAccAzureLocalNetworkConnectionUpdate(t *testing.T) {
name := "azure_local_network_connection.foo"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccAzureLocalNetworkConnectionDestroyed,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAzureLocalNetworkConnectionBasic,
Check: resource.ComposeTestCheckFunc(
testAccAzureLocalNetworkConnectionExists(name),
resource.TestCheckResourceAttr(name, "name", "terraform-local-network-connection"),
resource.TestCheckResourceAttr(name, "vpn_gateway_address", "10.11.12.13"),
resource.TestCheckResourceAttr(name, "address_space_prefixes.0", "10.10.10.0/31"),
resource.TestCheckResourceAttr(name, "address_space_prefixes.1", "10.10.10.1/31"),
),
},
resource.TestStep{
Config: testAccAzureLocalNetworkConnectionUpdate,
Check: resource.ComposeTestCheckFunc(
testAccAzureLocalNetworkConnectionExists(name),
resource.TestCheckResourceAttr(name, "name", "terraform-local-network-connection"),
resource.TestCheckResourceAttr(name, "vpn_gateway_address", "10.11.12.14"),
resource.TestCheckResourceAttr(name, "address_space_prefixes.0", "10.10.10.2/30"),
resource.TestCheckResourceAttr(name, "address_space_prefixes.1", "10.10.10.3/30"),
),
},
},
})
}
// testAccAzureLocalNetworkConnectionExists checks whether the given local network
// connection exists on Azure.
func testAccAzureLocalNetworkConnectionExists(name string) resource.TestCheckFunc {
return func(s *terraform.State) error {
resource, ok := s.RootModule().Resources[name]
if !ok {
return fmt.Errorf("Azure Local Network Connection not found: %s", name)
}
if resource.Primary.ID == "" {
return fmt.Errorf("Azure Local Network Connection ID not set.")
}
vnetClient := testAccProvider.Meta().(*Client).vnetClient
netConf, err := vnetClient.GetVirtualNetworkConfiguration()
if err != nil {
return err
}
for _, lnet := range netConf.Configuration.LocalNetworkSites {
if lnet.Name == resource.Primary.ID {
return nil
}
break
}
return fmt.Errorf("Local Network Connection not found: %s", name)
}
}
// testAccAzureLocalNetworkConnectionDestroyed checks whether the local network
// connection has been destroyed on Azure or not.
func testAccAzureLocalNetworkConnectionDestroyed(s *terraform.State) error {
vnetClient := testAccProvider.Meta().(*Client).vnetClient
for _, resource := range s.RootModule().Resources {
if resource.Type != "azure_local_network_connection" {
continue
}
if resource.Primary.ID == "" {
return fmt.Errorf("Azure Local Network Connection ID not set.")
}
netConf, err := vnetClient.GetVirtualNetworkConfiguration()
if err != nil {
return err
}
for _, lnet := range netConf.Configuration.LocalNetworkSites {
if lnet.Name == resource.Primary.ID {
return fmt.Errorf("Azure Local Network Connection still exists.")
}
}
}
return nil
}
const testAccAzureLocalNetworkConnectionBasic = `
resource "azure_local_network_connection" "foo" {
name = "terraform-local-network-connection"
vpn_gateway_address = "10.11.12.13"
address_space_prefixes = ["10.10.10.0/31", "10.10.10.1/31"]
}
`
const testAccAzureLocalNetworkConnectionUpdate = `
resource "azure_local_network_connection" "foo" {
name = "terraform-local-network-connection"
vpn_gateway_address = "10.11.12.14"
address_space_prefixes = ["10.10.10.2/30", "10.10.10.3/30"]
}
`
| builtin/providers/azure/resource_azure_local_network_test.go | 1 | https://github.com/hashicorp/terraform/commit/42a3800ec2586326012ae83cb957adaa751adc9d | [
0.9988338351249695,
0.28638067841529846,
0.00016260409029200673,
0.001108671072870493,
0.4499514102935791
] |
{
"id": 1,
"code_window": [
"\n",
"\t\tnetConf, err := vnetClient.GetVirtualNetworkConfiguration()\n",
"\t\tif err != nil {\n",
"\t\t\treturn fmt.Errorf(\"Error retrieving networking configuration from Azure: %s\", err)\n",
"\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t// This is desirable - if there is no network config there can't be any DNS Servers\n",
"\t\t\tif management.IsResourceNotFoundError(err) {\n",
"\t\t\t\tcontinue\n",
"\t\t\t}\n"
],
"file_path": "builtin/providers/azure/resource_azure_dns_server_test.go",
"type": "add",
"edit_start_line_idx": 100
} | package rundeck
import (
"github.com/hashicorp/terraform/helper/schema"
"github.com/apparentlymart/go-rundeck-api/rundeck"
)
func resourceRundeckPublicKey() *schema.Resource {
return &schema.Resource{
Create: CreatePublicKey,
Update: UpdatePublicKey,
Delete: DeletePublicKey,
Exists: PublicKeyExists,
Read: ReadPublicKey,
Schema: map[string]*schema.Schema{
"path": &schema.Schema{
Type: schema.TypeString,
Required: true,
Description: "Path to the key within the key store",
ForceNew: true,
},
"key_material": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "The public key data to store, in the usual OpenSSH public key file format",
},
"url": &schema.Schema{
Type: schema.TypeString,
Computed: true,
Description: "URL at which the key content can be retrieved",
},
"delete": &schema.Schema{
Type: schema.TypeBool,
Computed: true,
Description: "True if the key should be deleted when the resource is deleted. Defaults to true if key_material is provided in the configuration.",
},
},
}
}
func CreatePublicKey(d *schema.ResourceData, meta interface{}) error {
client := meta.(*rundeck.Client)
path := d.Get("path").(string)
keyMaterial := d.Get("key_material").(string)
if keyMaterial != "" {
err := client.CreatePublicKey(path, keyMaterial)
if err != nil {
return err
}
d.Set("delete", true)
}
d.SetId(path)
return ReadPublicKey(d, meta)
}
func UpdatePublicKey(d *schema.ResourceData, meta interface{}) error {
client := meta.(*rundeck.Client)
if d.HasChange("key_material") {
path := d.Get("path").(string)
keyMaterial := d.Get("key_material").(string)
err := client.ReplacePublicKey(path, keyMaterial)
if err != nil {
return err
}
}
return ReadPublicKey(d, meta)
}
func DeletePublicKey(d *schema.ResourceData, meta interface{}) error {
client := meta.(*rundeck.Client)
path := d.Id()
// Since this resource can be used both to create and to read existing
// public keys, we'll only actually delete the key if we remember that
// we created the key in the first place, or if the user explicitly
// opted in to have an existing key deleted.
if d.Get("delete").(bool) {
// The only "delete" call we have is oblivious to key type, but
// that's okay since our Exists implementation makes sure that we
// won't try to delete a key of the wrong type since we'll pretend
// that it's already been deleted.
err := client.DeleteKey(path)
if err != nil {
return err
}
}
d.SetId("")
return nil
}
func ReadPublicKey(d *schema.ResourceData, meta interface{}) error {
client := meta.(*rundeck.Client)
path := d.Id()
key, err := client.GetKeyMeta(path)
if err != nil {
return err
}
keyMaterial, err := client.GetKeyContent(path)
if err != nil {
return err
}
d.Set("key_material", keyMaterial)
d.Set("url", key.URL)
return nil
}
func PublicKeyExists(d *schema.ResourceData, meta interface{}) (bool, error) {
client := meta.(*rundeck.Client)
path := d.Id()
key, err := client.GetKeyMeta(path)
if err != nil {
if _, ok := err.(rundeck.NotFoundError); ok {
err = nil
}
return false, err
}
if key.KeyType != "public" {
// If the key type isn't public then as far as this resource is
// concerned it doesn't exist. (We'll fail properly when we try to
// create a key where one already exists.)
return false, nil
}
return true, nil
}
| builtin/providers/rundeck/resource_public_key.go | 0 | https://github.com/hashicorp/terraform/commit/42a3800ec2586326012ae83cb957adaa751adc9d | [
0.00017237734573427588,
0.00016983035311568528,
0.00016745575703680515,
0.0001698338455753401,
0.0000014021212564330199
] |
{
"id": 1,
"code_window": [
"\n",
"\t\tnetConf, err := vnetClient.GetVirtualNetworkConfiguration()\n",
"\t\tif err != nil {\n",
"\t\t\treturn fmt.Errorf(\"Error retrieving networking configuration from Azure: %s\", err)\n",
"\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t// This is desirable - if there is no network config there can't be any DNS Servers\n",
"\t\t\tif management.IsResourceNotFoundError(err) {\n",
"\t\t\t\tcontinue\n",
"\t\t\t}\n"
],
"file_path": "builtin/providers/azure/resource_azure_dns_server_test.go",
"type": "add",
"edit_start_line_idx": 100
} | package consul
import (
"log"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
"github.com/mitchellh/mapstructure"
)
// Provider returns a terraform.ResourceProvider.
func Provider() terraform.ResourceProvider {
return &schema.Provider{
Schema: map[string]*schema.Schema{
"datacenter": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"address": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"scheme": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
},
ResourcesMap: map[string]*schema.Resource{
"consul_keys": resourceConsulKeys(),
},
ConfigureFunc: providerConfigure,
}
}
func providerConfigure(d *schema.ResourceData) (interface{}, error) {
var config Config
configRaw := d.Get("").(map[string]interface{})
if err := mapstructure.Decode(configRaw, &config); err != nil {
return nil, err
}
log.Printf("[INFO] Initializing Consul client")
return config.Client()
}
| builtin/providers/consul/resource_provider.go | 0 | https://github.com/hashicorp/terraform/commit/42a3800ec2586326012ae83cb957adaa751adc9d | [
0.00017485933494754136,
0.00017031510651577264,
0.00016765577311161906,
0.0001691658835625276,
0.000002759461722234846
] |
{
"id": 1,
"code_window": [
"\n",
"\t\tnetConf, err := vnetClient.GetVirtualNetworkConfiguration()\n",
"\t\tif err != nil {\n",
"\t\t\treturn fmt.Errorf(\"Error retrieving networking configuration from Azure: %s\", err)\n",
"\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t// This is desirable - if there is no network config there can't be any DNS Servers\n",
"\t\t\tif management.IsResourceNotFoundError(err) {\n",
"\t\t\t\tcontinue\n",
"\t\t\t}\n"
],
"file_path": "builtin/providers/azure/resource_azure_dns_server_test.go",
"type": "add",
"edit_start_line_idx": 100
} | variable "things" {}
resource "aws_instance" "bar" {
baz = "baz"
count = 2
}
resource "aws_instance" "foo" {
foo = "${join(",",aws_instance.bar.*.baz)}"
}
| terraform/test-fixtures/plan-module-multi-var/child/main.tf | 0 | https://github.com/hashicorp/terraform/commit/42a3800ec2586326012ae83cb957adaa751adc9d | [
0.00017867001588456333,
0.0001753442920744419,
0.00017201855371240526,
0.0001753442920744419,
0.0000033257310860790312
] |
{
"id": 2,
"code_window": [
"import (\n",
"\t\"fmt\"\n",
"\t\"testing\"\n",
"\n",
"\t\"github.com/hashicorp/terraform/helper/resource\"\n",
"\t\"github.com/hashicorp/terraform/terraform\"\n",
")\n",
"\n",
"func TestAccAzureLocalNetworkConnectionBasic(t *testing.T) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/Azure/azure-sdk-for-go/management\"\n"
],
"file_path": "builtin/providers/azure/resource_azure_local_network_test.go",
"type": "add",
"edit_start_line_idx": 6
} | package azure
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccAzureLocalNetworkConnectionBasic(t *testing.T) {
name := "azure_local_network_connection.foo"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccAzureLocalNetworkConnectionDestroyed,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAzureLocalNetworkConnectionBasic,
Check: resource.ComposeTestCheckFunc(
testAccAzureLocalNetworkConnectionExists(name),
resource.TestCheckResourceAttr(name, "name", "terraform-local-network-connection"),
resource.TestCheckResourceAttr(name, "vpn_gateway_address", "10.11.12.13"),
resource.TestCheckResourceAttr(name, "address_space_prefixes.0", "10.10.10.0/31"),
resource.TestCheckResourceAttr(name, "address_space_prefixes.1", "10.10.10.1/31"),
),
},
},
})
}
func TestAccAzureLocalNetworkConnectionUpdate(t *testing.T) {
name := "azure_local_network_connection.foo"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccAzureLocalNetworkConnectionDestroyed,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAzureLocalNetworkConnectionBasic,
Check: resource.ComposeTestCheckFunc(
testAccAzureLocalNetworkConnectionExists(name),
resource.TestCheckResourceAttr(name, "name", "terraform-local-network-connection"),
resource.TestCheckResourceAttr(name, "vpn_gateway_address", "10.11.12.13"),
resource.TestCheckResourceAttr(name, "address_space_prefixes.0", "10.10.10.0/31"),
resource.TestCheckResourceAttr(name, "address_space_prefixes.1", "10.10.10.1/31"),
),
},
resource.TestStep{
Config: testAccAzureLocalNetworkConnectionUpdate,
Check: resource.ComposeTestCheckFunc(
testAccAzureLocalNetworkConnectionExists(name),
resource.TestCheckResourceAttr(name, "name", "terraform-local-network-connection"),
resource.TestCheckResourceAttr(name, "vpn_gateway_address", "10.11.12.14"),
resource.TestCheckResourceAttr(name, "address_space_prefixes.0", "10.10.10.2/30"),
resource.TestCheckResourceAttr(name, "address_space_prefixes.1", "10.10.10.3/30"),
),
},
},
})
}
// testAccAzureLocalNetworkConnectionExists checks whether the given local network
// connection exists on Azure.
func testAccAzureLocalNetworkConnectionExists(name string) resource.TestCheckFunc {
return func(s *terraform.State) error {
resource, ok := s.RootModule().Resources[name]
if !ok {
return fmt.Errorf("Azure Local Network Connection not found: %s", name)
}
if resource.Primary.ID == "" {
return fmt.Errorf("Azure Local Network Connection ID not set.")
}
vnetClient := testAccProvider.Meta().(*Client).vnetClient
netConf, err := vnetClient.GetVirtualNetworkConfiguration()
if err != nil {
return err
}
for _, lnet := range netConf.Configuration.LocalNetworkSites {
if lnet.Name == resource.Primary.ID {
return nil
}
break
}
return fmt.Errorf("Local Network Connection not found: %s", name)
}
}
// testAccAzureLocalNetworkConnectionDestroyed checks whether the local network
// connection has been destroyed on Azure or not.
func testAccAzureLocalNetworkConnectionDestroyed(s *terraform.State) error {
vnetClient := testAccProvider.Meta().(*Client).vnetClient
for _, resource := range s.RootModule().Resources {
if resource.Type != "azure_local_network_connection" {
continue
}
if resource.Primary.ID == "" {
return fmt.Errorf("Azure Local Network Connection ID not set.")
}
netConf, err := vnetClient.GetVirtualNetworkConfiguration()
if err != nil {
return err
}
for _, lnet := range netConf.Configuration.LocalNetworkSites {
if lnet.Name == resource.Primary.ID {
return fmt.Errorf("Azure Local Network Connection still exists.")
}
}
}
return nil
}
const testAccAzureLocalNetworkConnectionBasic = `
resource "azure_local_network_connection" "foo" {
name = "terraform-local-network-connection"
vpn_gateway_address = "10.11.12.13"
address_space_prefixes = ["10.10.10.0/31", "10.10.10.1/31"]
}
`
const testAccAzureLocalNetworkConnectionUpdate = `
resource "azure_local_network_connection" "foo" {
name = "terraform-local-network-connection"
vpn_gateway_address = "10.11.12.14"
address_space_prefixes = ["10.10.10.2/30", "10.10.10.3/30"]
}
`
| builtin/providers/azure/resource_azure_local_network_test.go | 1 | https://github.com/hashicorp/terraform/commit/42a3800ec2586326012ae83cb957adaa751adc9d | [
0.9992656111717224,
0.4266607463359833,
0.00018925435142591596,
0.25136590003967285,
0.44140395522117615
] |
{
"id": 2,
"code_window": [
"import (\n",
"\t\"fmt\"\n",
"\t\"testing\"\n",
"\n",
"\t\"github.com/hashicorp/terraform/helper/resource\"\n",
"\t\"github.com/hashicorp/terraform/terraform\"\n",
")\n",
"\n",
"func TestAccAzureLocalNetworkConnectionBasic(t *testing.T) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/Azure/azure-sdk-for-go/management\"\n"
],
"file_path": "builtin/providers/azure/resource_azure_local_network_test.go",
"type": "add",
"edit_start_line_idx": 6
} | package azure
import (
"fmt"
"log"
"github.com/Azure/azure-sdk-for-go/management"
"github.com/Azure/azure-sdk-for-go/management/virtualnetwork"
"github.com/hashicorp/terraform/helper/schema"
)
// resourceAzureDnsServer returns the *schema.Resource associated
// to an Azure hosted service.
func resourceAzureDnsServer() *schema.Resource {
return &schema.Resource{
Create: resourceAzureDnsServerCreate,
Read: resourceAzureDnsServerRead,
Update: resourceAzureDnsServerUpdate,
Exists: resourceAzureDnsServerExists,
Delete: resourceAzureDnsServerDelete,
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
ForceNew: true,
Required: true,
Description: parameterDescriptions["name"],
},
"dns_address": &schema.Schema{
Type: schema.TypeString,
Required: true,
Description: parameterDescriptions["dns_address"],
},
},
}
}
// resourceAzureDnsServerCreate does all the necessary API calls
// to create a new DNS server definition on Azure.
func resourceAzureDnsServerCreate(d *schema.ResourceData, meta interface{}) error {
azureClient := meta.(*Client)
mgmtClient := azureClient.mgmtClient
vnetClient := azureClient.vnetClient
log.Println("[INFO] Fetching current network configuration from Azure.")
azureClient.vnetMutex.Lock()
defer azureClient.vnetMutex.Unlock()
netConf, err := vnetClient.GetVirtualNetworkConfiguration()
if err != nil {
if management.IsResourceNotFoundError(err) {
// if no network configuration exists yet; create one now:
netConf = virtualnetwork.NetworkConfiguration{}
} else {
return fmt.Errorf("Failed to get the current network configuration from Azure: %s", err)
}
}
log.Println("[DEBUG] Adding new DNS server definition to Azure.")
name := d.Get("name").(string)
address := d.Get("dns_address").(string)
netConf.Configuration.DNS.DNSServers = append(
netConf.Configuration.DNS.DNSServers,
virtualnetwork.DNSServer{
Name: name,
IPAddress: address,
})
// send the configuration back to Azure:
log.Println("[INFO] Sending updated network configuration back to Azure.")
reqID, err := vnetClient.SetVirtualNetworkConfiguration(netConf)
if err != nil {
return fmt.Errorf("Failed issuing update to network configuration: %s", err)
}
err = mgmtClient.WaitForOperation(reqID, nil)
if err != nil {
return fmt.Errorf("Error setting network configuration: %s", err)
}
d.SetId(name)
return nil
}
// resourceAzureDnsServerRead does all the necessary API calls to read
// the state of the DNS server off Azure.
func resourceAzureDnsServerRead(d *schema.ResourceData, meta interface{}) error {
vnetClient := meta.(*Client).vnetClient
log.Println("[INFO] Fetching current network configuration from Azure.")
netConf, err := vnetClient.GetVirtualNetworkConfiguration()
if err != nil {
return fmt.Errorf("Failed to get the current network configuration from Azure: %s", err)
}
var found bool
name := d.Get("name").(string)
// search for our DNS and update it if the IP has been changed:
for _, dns := range netConf.Configuration.DNS.DNSServers {
if dns.Name == name {
found = true
d.Set("dns_address", dns.IPAddress)
break
}
}
// remove the resource from the state if it has been deleted in the meantime:
if !found {
d.SetId("")
}
return nil
}
// resourceAzureDnsServerUpdate does all the necessary API calls
// to update the DNS definition on Azure.
func resourceAzureDnsServerUpdate(d *schema.ResourceData, meta interface{}) error {
azureClient := meta.(*Client)
mgmtClient := azureClient.mgmtClient
vnetClient := azureClient.vnetClient
var found bool
name := d.Get("name").(string)
if d.HasChange("dns_address") {
log.Println("[DEBUG] DNS server address has changes; updating it on Azure.")
log.Println("[INFO] Fetching current network configuration from Azure.")
azureClient.vnetMutex.Lock()
defer azureClient.vnetMutex.Unlock()
netConf, err := vnetClient.GetVirtualNetworkConfiguration()
if err != nil {
return fmt.Errorf("Failed to get the current network configuration from Azure: %s", err)
}
// search for our DNS and update its address value:
for i, dns := range netConf.Configuration.DNS.DNSServers {
if dns.Name == name {
found = true
netConf.Configuration.DNS.DNSServers[i].IPAddress = d.Get("dns_address").(string)
break
}
}
// if the config has changes, send the configuration back to Azure:
if found {
log.Println("[INFO] Sending updated network configuration back to Azure.")
reqID, err := vnetClient.SetVirtualNetworkConfiguration(netConf)
if err != nil {
return fmt.Errorf("Failed issuing update to network configuration: %s", err)
}
err = mgmtClient.WaitForOperation(reqID, nil)
if err != nil {
return fmt.Errorf("Error setting network configuration: %s", err)
}
return nil
}
}
// remove the resource from the state if it has been deleted in the meantime:
if !found {
d.SetId("")
}
return nil
}
// resourceAzureDnsServerExists does all the necessary API calls to
// check if the DNS server definition already exists on Azure.
func resourceAzureDnsServerExists(d *schema.ResourceData, meta interface{}) (bool, error) {
azureClient := meta.(*Client)
vnetClient := azureClient.vnetClient
log.Println("[INFO] Fetching current network configuration from Azure.")
netConf, err := vnetClient.GetVirtualNetworkConfiguration()
if err != nil {
return false, fmt.Errorf("Failed to get the current network configuration from Azure: %s", err)
}
name := d.Get("name").(string)
// search for the DNS server's definition:
for _, dns := range netConf.Configuration.DNS.DNSServers {
if dns.Name == name {
return true, nil
}
}
// if we reached this point; the resource must have been deleted; and we must untrack it:
d.SetId("")
return false, nil
}
// resourceAzureDnsServerDelete does all the necessary API calls
// to delete the DNS server definition from Azure.
func resourceAzureDnsServerDelete(d *schema.ResourceData, meta interface{}) error {
azureClient := meta.(*Client)
mgmtClient := azureClient.mgmtClient
vnetClient := azureClient.vnetClient
log.Println("[INFO] Fetching current network configuration from Azure.")
azureClient.vnetMutex.Lock()
defer azureClient.vnetMutex.Unlock()
netConf, err := vnetClient.GetVirtualNetworkConfiguration()
if err != nil {
return fmt.Errorf("Failed to get the current network configuration from Azure: %s", err)
}
name := d.Get("name").(string)
// search for the DNS server's definition and remove it:
var found bool
for i, dns := range netConf.Configuration.DNS.DNSServers {
if dns.Name == name {
found = true
netConf.Configuration.DNS.DNSServers = append(
netConf.Configuration.DNS.DNSServers[:i],
netConf.Configuration.DNS.DNSServers[i+1:]...,
)
break
}
}
// if not found; don't bother re-sending the natwork config:
if !found {
return nil
}
// send the configuration back to Azure:
log.Println("[INFO] Sending updated network configuration back to Azure.")
reqID, err := vnetClient.SetVirtualNetworkConfiguration(netConf)
if err != nil {
return fmt.Errorf("Failed issuing update to network configuration: %s", err)
}
err = mgmtClient.WaitForOperation(reqID, nil)
if err != nil {
return fmt.Errorf("Error setting network configuration: %s", err)
}
return nil
}
| builtin/providers/azure/resource_azure_dns_server.go | 0 | https://github.com/hashicorp/terraform/commit/42a3800ec2586326012ae83cb957adaa751adc9d | [
0.0034872647374868393,
0.0006165470113046467,
0.00016349743236787617,
0.00016930740093812346,
0.0009589038090780377
] |
{
"id": 2,
"code_window": [
"import (\n",
"\t\"fmt\"\n",
"\t\"testing\"\n",
"\n",
"\t\"github.com/hashicorp/terraform/helper/resource\"\n",
"\t\"github.com/hashicorp/terraform/terraform\"\n",
")\n",
"\n",
"func TestAccAzureLocalNetworkConnectionBasic(t *testing.T) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/Azure/azure-sdk-for-go/management\"\n"
],
"file_path": "builtin/providers/azure/resource_azure_local_network_test.go",
"type": "add",
"edit_start_line_idx": 6
} | // The plugin package exposes functions and helpers for communicating to
// Terraform plugins which are implemented as standalone binary applications.
//
// plugin.Client fully manages the lifecycle of executing the application,
// connecting to it, and returning the RPC client and service names for
// connecting to it using the terraform/rpc package.
//
// plugin.Serve fully manages listeners to expose an RPC server from a binary
// that plugin.Client can connect to.
package plugin
| plugin/plugin.go | 0 | https://github.com/hashicorp/terraform/commit/42a3800ec2586326012ae83cb957adaa751adc9d | [
0.00017684948397800326,
0.00017168822523672134,
0.0001665269664954394,
0.00017168822523672134,
0.000005161258741281927
] |
{
"id": 2,
"code_window": [
"import (\n",
"\t\"fmt\"\n",
"\t\"testing\"\n",
"\n",
"\t\"github.com/hashicorp/terraform/helper/resource\"\n",
"\t\"github.com/hashicorp/terraform/terraform\"\n",
")\n",
"\n",
"func TestAccAzureLocalNetworkConnectionBasic(t *testing.T) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/Azure/azure-sdk-for-go/management\"\n"
],
"file_path": "builtin/providers/azure/resource_azure_local_network_test.go",
"type": "add",
"edit_start_line_idx": 6
} | package cloudstack
import (
"fmt"
"log"
"strings"
"time"
"github.com/hashicorp/terraform/helper/schema"
"github.com/xanzy/go-cloudstack/cloudstack"
)
func resourceCloudStackTemplate() *schema.Resource {
return &schema.Resource{
Create: resourceCloudStackTemplateCreate,
Read: resourceCloudStackTemplateRead,
Update: resourceCloudStackTemplateUpdate,
Delete: resourceCloudStackTemplateDelete,
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"display_text": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"format": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"hypervisor": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"os_type": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"url": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"project": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"zone": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"is_dynamically_scalable": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Computed: true,
},
"is_extractable": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Computed: true,
ForceNew: true,
},
"is_featured": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Computed: true,
ForceNew: true,
},
"is_public": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Computed: true,
},
"password_enabled": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Computed: true,
},
"is_ready": &schema.Schema{
Type: schema.TypeBool,
Computed: true,
},
"is_ready_timeout": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
Default: 300,
},
},
}
}
func resourceCloudStackTemplateCreate(d *schema.ResourceData, meta interface{}) error {
cs := meta.(*cloudstack.CloudStackClient)
if err := verifyTemplateParams(d); err != nil {
return err
}
name := d.Get("name").(string)
// Compute/set the display text
displaytext := d.Get("display_text").(string)
if displaytext == "" {
displaytext = name
}
// Retrieve the os_type ID
ostypeid, e := retrieveID(cs, "os_type", d.Get("os_type").(string))
if e != nil {
return e.Error()
}
// Retrieve the zone ID
zoneid, e := retrieveID(cs, "zone", d.Get("zone").(string))
if e != nil {
return e.Error()
}
// Create a new parameter struct
p := cs.Template.NewRegisterTemplateParams(
displaytext,
d.Get("format").(string),
d.Get("hypervisor").(string),
name,
ostypeid,
d.Get("url").(string),
zoneid)
// Set optional parameters
if v, ok := d.GetOk("is_dynamically_scalable"); ok {
p.SetIsdynamicallyscalable(v.(bool))
}
if v, ok := d.GetOk("is_extractable"); ok {
p.SetIsextractable(v.(bool))
}
if v, ok := d.GetOk("is_featured"); ok {
p.SetIsfeatured(v.(bool))
}
if v, ok := d.GetOk("is_public"); ok {
p.SetIspublic(v.(bool))
}
if v, ok := d.GetOk("password_enabled"); ok {
p.SetPasswordenabled(v.(bool))
}
// If there is a project supplied, we retrieve and set the project id
if project, ok := d.GetOk("project"); ok {
// Retrieve the project ID
projectid, e := retrieveID(cs, "project", project.(string))
if e != nil {
return e.Error()
}
// Set the default project ID
p.SetProjectid(projectid)
}
// Create the new template
r, err := cs.Template.RegisterTemplate(p)
if err != nil {
return fmt.Errorf("Error creating template %s: %s", name, err)
}
d.SetId(r.RegisterTemplate[0].Id)
// Wait until the template is ready to use, or timeout with an error...
currentTime := time.Now().Unix()
timeout := int64(d.Get("is_ready_timeout").(int))
for {
// Start with the sleep so the register action has a few seconds
// to process the registration correctly. Without this wait
time.Sleep(10 * time.Second)
err := resourceCloudStackTemplateRead(d, meta)
if err != nil {
return err
}
if d.Get("is_ready").(bool) {
return nil
}
if time.Now().Unix()-currentTime > timeout {
return fmt.Errorf("Timeout while waiting for template to become ready")
}
}
}
func resourceCloudStackTemplateRead(d *schema.ResourceData, meta interface{}) error {
cs := meta.(*cloudstack.CloudStackClient)
// Get the template details
t, count, err := cs.Template.GetTemplateByID(d.Id(), "executable")
if err != nil {
if count == 0 {
log.Printf(
"[DEBUG] Template %s no longer exists", d.Get("name").(string))
d.SetId("")
return nil
}
return err
}
d.Set("name", t.Name)
d.Set("display_text", t.Displaytext)
d.Set("format", t.Format)
d.Set("hypervisor", t.Hypervisor)
d.Set("is_dynamically_scalable", t.Isdynamicallyscalable)
d.Set("is_extractable", t.Isextractable)
d.Set("is_featured", t.Isfeatured)
d.Set("is_public", t.Ispublic)
d.Set("password_enabled", t.Passwordenabled)
d.Set("is_ready", t.Isready)
setValueOrID(d, "os_type", t.Ostypename, t.Ostypeid)
setValueOrID(d, "project", t.Project, t.Projectid)
setValueOrID(d, "zone", t.Zonename, t.Zoneid)
return nil
}
func resourceCloudStackTemplateUpdate(d *schema.ResourceData, meta interface{}) error {
cs := meta.(*cloudstack.CloudStackClient)
name := d.Get("name").(string)
// Create a new parameter struct
p := cs.Template.NewUpdateTemplateParams(d.Id())
if d.HasChange("name") {
p.SetName(name)
}
if d.HasChange("display_text") {
p.SetDisplaytext(d.Get("display_text").(string))
}
if d.HasChange("format") {
p.SetFormat(d.Get("format").(string))
}
if d.HasChange("is_dynamically_scalable") {
p.SetIsdynamicallyscalable(d.Get("is_dynamically_scalable").(bool))
}
if d.HasChange("os_type") {
ostypeid, e := retrieveID(cs, "os_type", d.Get("os_type").(string))
if e != nil {
return e.Error()
}
p.SetOstypeid(ostypeid)
}
if d.HasChange("password_enabled") {
p.SetPasswordenabled(d.Get("password_enabled").(bool))
}
_, err := cs.Template.UpdateTemplate(p)
if err != nil {
return fmt.Errorf("Error updating template %s: %s", name, err)
}
return resourceCloudStackTemplateRead(d, meta)
}
func resourceCloudStackTemplateDelete(d *schema.ResourceData, meta interface{}) error {
cs := meta.(*cloudstack.CloudStackClient)
// Create a new parameter struct
p := cs.Template.NewDeleteTemplateParams(d.Id())
// Delete the template
log.Printf("[INFO] Deleting template: %s", d.Get("name").(string))
_, err := cs.Template.DeleteTemplate(p)
if err != nil {
// This is a very poor way to be told the ID does no longer exist :(
if strings.Contains(err.Error(), fmt.Sprintf(
"Invalid parameter id value=%s due to incorrect long value format, "+
"or entity does not exist", d.Id())) {
return nil
}
return fmt.Errorf("Error deleting template %s: %s", d.Get("name").(string), err)
}
return nil
}
func verifyTemplateParams(d *schema.ResourceData) error {
format := d.Get("format").(string)
if format != "OVA" && format != "QCOW2" && format != "RAW" && format != "VHD" && format != "VMDK" {
return fmt.Errorf(
"%s is not a valid format. Valid options are 'OVA','QCOW2', 'RAW', 'VHD' and 'VMDK'", format)
}
return nil
}
| builtin/providers/cloudstack/resource_cloudstack_template.go | 0 | https://github.com/hashicorp/terraform/commit/42a3800ec2586326012ae83cb957adaa751adc9d | [
0.0013772177044302225,
0.00022111560974735767,
0.00016394921112805605,
0.00017467173165641725,
0.0002146993501810357
] |
{
"id": 3,
"code_window": [
"\t\t}\n",
"\n",
"\t\tnetConf, err := vnetClient.GetVirtualNetworkConfiguration()\n",
"\t\tif err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t// This is desirable - if there is no network config there can be no gateways\n",
"\t\t\tif management.IsResourceNotFoundError(err) {\n",
"\t\t\t\tcontinue\n",
"\t\t\t}\n"
],
"file_path": "builtin/providers/azure/resource_azure_local_network_test.go",
"type": "add",
"edit_start_line_idx": 111
} | package azure
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccAzureLocalNetworkConnectionBasic(t *testing.T) {
name := "azure_local_network_connection.foo"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccAzureLocalNetworkConnectionDestroyed,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAzureLocalNetworkConnectionBasic,
Check: resource.ComposeTestCheckFunc(
testAccAzureLocalNetworkConnectionExists(name),
resource.TestCheckResourceAttr(name, "name", "terraform-local-network-connection"),
resource.TestCheckResourceAttr(name, "vpn_gateway_address", "10.11.12.13"),
resource.TestCheckResourceAttr(name, "address_space_prefixes.0", "10.10.10.0/31"),
resource.TestCheckResourceAttr(name, "address_space_prefixes.1", "10.10.10.1/31"),
),
},
},
})
}
func TestAccAzureLocalNetworkConnectionUpdate(t *testing.T) {
name := "azure_local_network_connection.foo"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccAzureLocalNetworkConnectionDestroyed,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAzureLocalNetworkConnectionBasic,
Check: resource.ComposeTestCheckFunc(
testAccAzureLocalNetworkConnectionExists(name),
resource.TestCheckResourceAttr(name, "name", "terraform-local-network-connection"),
resource.TestCheckResourceAttr(name, "vpn_gateway_address", "10.11.12.13"),
resource.TestCheckResourceAttr(name, "address_space_prefixes.0", "10.10.10.0/31"),
resource.TestCheckResourceAttr(name, "address_space_prefixes.1", "10.10.10.1/31"),
),
},
resource.TestStep{
Config: testAccAzureLocalNetworkConnectionUpdate,
Check: resource.ComposeTestCheckFunc(
testAccAzureLocalNetworkConnectionExists(name),
resource.TestCheckResourceAttr(name, "name", "terraform-local-network-connection"),
resource.TestCheckResourceAttr(name, "vpn_gateway_address", "10.11.12.14"),
resource.TestCheckResourceAttr(name, "address_space_prefixes.0", "10.10.10.2/30"),
resource.TestCheckResourceAttr(name, "address_space_prefixes.1", "10.10.10.3/30"),
),
},
},
})
}
// testAccAzureLocalNetworkConnectionExists checks whether the given local network
// connection exists on Azure.
func testAccAzureLocalNetworkConnectionExists(name string) resource.TestCheckFunc {
return func(s *terraform.State) error {
resource, ok := s.RootModule().Resources[name]
if !ok {
return fmt.Errorf("Azure Local Network Connection not found: %s", name)
}
if resource.Primary.ID == "" {
return fmt.Errorf("Azure Local Network Connection ID not set.")
}
vnetClient := testAccProvider.Meta().(*Client).vnetClient
netConf, err := vnetClient.GetVirtualNetworkConfiguration()
if err != nil {
return err
}
for _, lnet := range netConf.Configuration.LocalNetworkSites {
if lnet.Name == resource.Primary.ID {
return nil
}
break
}
return fmt.Errorf("Local Network Connection not found: %s", name)
}
}
// testAccAzureLocalNetworkConnectionDestroyed checks whether the local network
// connection has been destroyed on Azure or not.
func testAccAzureLocalNetworkConnectionDestroyed(s *terraform.State) error {
vnetClient := testAccProvider.Meta().(*Client).vnetClient
for _, resource := range s.RootModule().Resources {
if resource.Type != "azure_local_network_connection" {
continue
}
if resource.Primary.ID == "" {
return fmt.Errorf("Azure Local Network Connection ID not set.")
}
netConf, err := vnetClient.GetVirtualNetworkConfiguration()
if err != nil {
return err
}
for _, lnet := range netConf.Configuration.LocalNetworkSites {
if lnet.Name == resource.Primary.ID {
return fmt.Errorf("Azure Local Network Connection still exists.")
}
}
}
return nil
}
const testAccAzureLocalNetworkConnectionBasic = `
resource "azure_local_network_connection" "foo" {
name = "terraform-local-network-connection"
vpn_gateway_address = "10.11.12.13"
address_space_prefixes = ["10.10.10.0/31", "10.10.10.1/31"]
}
`
const testAccAzureLocalNetworkConnectionUpdate = `
resource "azure_local_network_connection" "foo" {
name = "terraform-local-network-connection"
vpn_gateway_address = "10.11.12.14"
address_space_prefixes = ["10.10.10.2/30", "10.10.10.3/30"]
}
`
| builtin/providers/azure/resource_azure_local_network_test.go | 1 | https://github.com/hashicorp/terraform/commit/42a3800ec2586326012ae83cb957adaa751adc9d | [
0.9990962743759155,
0.28554630279541016,
0.00016347717610187829,
0.00041079119546338916,
0.45060062408447266
] |
{
"id": 3,
"code_window": [
"\t\t}\n",
"\n",
"\t\tnetConf, err := vnetClient.GetVirtualNetworkConfiguration()\n",
"\t\tif err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t// This is desirable - if there is no network config there can be no gateways\n",
"\t\t\tif management.IsResourceNotFoundError(err) {\n",
"\t\t\t\tcontinue\n",
"\t\t\t}\n"
],
"file_path": "builtin/providers/azure/resource_azure_local_network_test.go",
"type": "add",
"edit_start_line_idx": 111
} | ---
layout: "dyn"
page_title: "Provider: Dyn"
sidebar_current: "docs-dyn-index"
description: |-
The Dyn provider is used to interact with the resources supported by Dyn. The provider needs to be configured with the proper credentials before it can be used.
---
# Dyn Provider
The Dyn provider is used to interact with the
resources supported by Dyn. The provider needs to be configured
with the proper credentials before it can be used.
Use the navigation to the left to read about the available resources.
## Example Usage
```
# Configure the Dyn provider
provider "dyn" {
customer_name = "${var.dyn_customer_name}"
username = "${var.dyn_username}"
password = "${var.dyn_password}"
}
# Create a record
resource "dyn_record" "www" {
...
}
```
## Argument Reference
The following arguments are supported:
* `customer_name` - (Required) The Dyn customer name. It must be provided, but it can also be sourced from the `DYN_CUSTOMER_NAME` environment variable.
* `username` - (Required) The Dyn username. It must be provided, but it can also be sourced from the `DYN_USERNAME` environment variable.
* `password` - (Required) The Dyn password. It must be provided, but it can also be sourced from the `DYN_PASSWORD` environment variable.
| website/source/docs/providers/dyn/index.html.markdown | 0 | https://github.com/hashicorp/terraform/commit/42a3800ec2586326012ae83cb957adaa751adc9d | [
0.0001743927859934047,
0.00017031449533533305,
0.00016808684449642897,
0.00016938918270170689,
0.0000024275700525322463
] |
{
"id": 3,
"code_window": [
"\t\t}\n",
"\n",
"\t\tnetConf, err := vnetClient.GetVirtualNetworkConfiguration()\n",
"\t\tif err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t// This is desirable - if there is no network config there can be no gateways\n",
"\t\t\tif management.IsResourceNotFoundError(err) {\n",
"\t\t\t\tcontinue\n",
"\t\t\t}\n"
],
"file_path": "builtin/providers/azure/resource_azure_local_network_test.go",
"type": "add",
"edit_start_line_idx": 111
} | package aws
import (
"bytes"
"fmt"
"log"
"regexp"
"strings"
"time"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/aws/aws-sdk-go/service/rds"
)
func resourceAwsDbParameterGroup() *schema.Resource {
return &schema.Resource{
Create: resourceAwsDbParameterGroupCreate,
Read: resourceAwsDbParameterGroupRead,
Update: resourceAwsDbParameterGroupUpdate,
Delete: resourceAwsDbParameterGroupDelete,
Schema: map[string]*schema.Schema{
"arn": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"name": &schema.Schema{
Type: schema.TypeString,
ForceNew: true,
Required: true,
ValidateFunc: validateDbParamGroupName,
},
"family": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"description": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"parameter": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
ForceNew: false,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"value": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"apply_method": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Default: "immediate",
// this parameter is not actually state, but a
// meta-parameter describing how the RDS API call
// to modify the parameter group should be made.
// Future reads of the resource from AWS don't tell
// us what we used for apply_method previously, so
// by squashing state to an empty string we avoid
// needing to do an update for every future run.
StateFunc: func(interface{}) string { return "" },
},
},
},
Set: resourceAwsDbParameterHash,
},
"tags": tagsSchema(),
},
}
}
func resourceAwsDbParameterGroupCreate(d *schema.ResourceData, meta interface{}) error {
rdsconn := meta.(*AWSClient).rdsconn
tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{}))
createOpts := rds.CreateDBParameterGroupInput{
DBParameterGroupName: aws.String(d.Get("name").(string)),
DBParameterGroupFamily: aws.String(d.Get("family").(string)),
Description: aws.String(d.Get("description").(string)),
Tags: tags,
}
log.Printf("[DEBUG] Create DB Parameter Group: %#v", createOpts)
_, err := rdsconn.CreateDBParameterGroup(&createOpts)
if err != nil {
return fmt.Errorf("Error creating DB Parameter Group: %s", err)
}
d.Partial(true)
d.SetPartial("name")
d.SetPartial("family")
d.SetPartial("description")
d.Partial(false)
d.SetId(*createOpts.DBParameterGroupName)
log.Printf("[INFO] DB Parameter Group ID: %s", d.Id())
return resourceAwsDbParameterGroupUpdate(d, meta)
}
func resourceAwsDbParameterGroupRead(d *schema.ResourceData, meta interface{}) error {
rdsconn := meta.(*AWSClient).rdsconn
describeOpts := rds.DescribeDBParameterGroupsInput{
DBParameterGroupName: aws.String(d.Id()),
}
describeResp, err := rdsconn.DescribeDBParameterGroups(&describeOpts)
if err != nil {
return err
}
if len(describeResp.DBParameterGroups) != 1 ||
*describeResp.DBParameterGroups[0].DBParameterGroupName != d.Id() {
return fmt.Errorf("Unable to find Parameter Group: %#v", describeResp.DBParameterGroups)
}
d.Set("name", describeResp.DBParameterGroups[0].DBParameterGroupName)
d.Set("family", describeResp.DBParameterGroups[0].DBParameterGroupFamily)
d.Set("description", describeResp.DBParameterGroups[0].Description)
// Only include user customized parameters as there's hundreds of system/default ones
describeParametersOpts := rds.DescribeDBParametersInput{
DBParameterGroupName: aws.String(d.Id()),
Source: aws.String("user"),
}
describeParametersResp, err := rdsconn.DescribeDBParameters(&describeParametersOpts)
if err != nil {
return err
}
d.Set("parameter", flattenParameters(describeParametersResp.Parameters))
paramGroup := describeResp.DBParameterGroups[0]
arn, err := buildRDSPGARN(d, meta)
if err != nil {
name := "<empty>"
if paramGroup.DBParameterGroupName != nil && *paramGroup.DBParameterGroupName != "" {
name = *paramGroup.DBParameterGroupName
}
log.Printf("[DEBUG] Error building ARN for DB Parameter Group, not setting Tags for Param Group %s", name)
} else {
d.Set("arn", arn)
resp, err := rdsconn.ListTagsForResource(&rds.ListTagsForResourceInput{
ResourceName: aws.String(arn),
})
if err != nil {
log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn)
}
var dt []*rds.Tag
if len(resp.TagList) > 0 {
dt = resp.TagList
}
d.Set("tags", tagsToMapRDS(dt))
}
return nil
}
func resourceAwsDbParameterGroupUpdate(d *schema.ResourceData, meta interface{}) error {
rdsconn := meta.(*AWSClient).rdsconn
d.Partial(true)
if d.HasChange("parameter") {
o, n := d.GetChange("parameter")
if o == nil {
o = new(schema.Set)
}
if n == nil {
n = new(schema.Set)
}
os := o.(*schema.Set)
ns := n.(*schema.Set)
// Expand the "parameter" set to aws-sdk-go compat []rds.Parameter
parameters, err := expandParameters(ns.Difference(os).List())
if err != nil {
return err
}
if len(parameters) > 0 {
modifyOpts := rds.ModifyDBParameterGroupInput{
DBParameterGroupName: aws.String(d.Get("name").(string)),
Parameters: parameters,
}
log.Printf("[DEBUG] Modify DB Parameter Group: %s", modifyOpts)
_, err = rdsconn.ModifyDBParameterGroup(&modifyOpts)
if err != nil {
return fmt.Errorf("Error modifying DB Parameter Group: %s", err)
}
}
d.SetPartial("parameter")
}
if arn, err := buildRDSPGARN(d, meta); err == nil {
if err := setTagsRDS(rdsconn, d, arn); err != nil {
return err
} else {
d.SetPartial("tags")
}
}
d.Partial(false)
return resourceAwsDbParameterGroupRead(d, meta)
}
func resourceAwsDbParameterGroupDelete(d *schema.ResourceData, meta interface{}) error {
stateConf := &resource.StateChangeConf{
Pending: []string{"pending"},
Target: "destroyed",
Refresh: resourceAwsDbParameterGroupDeleteRefreshFunc(d, meta),
Timeout: 3 * time.Minute,
MinTimeout: 1 * time.Second,
}
_, err := stateConf.WaitForState()
return err
}
func resourceAwsDbParameterGroupDeleteRefreshFunc(
d *schema.ResourceData,
meta interface{}) resource.StateRefreshFunc {
rdsconn := meta.(*AWSClient).rdsconn
return func() (interface{}, string, error) {
deleteOpts := rds.DeleteDBParameterGroupInput{
DBParameterGroupName: aws.String(d.Id()),
}
if _, err := rdsconn.DeleteDBParameterGroup(&deleteOpts); err != nil {
rdserr, ok := err.(awserr.Error)
if !ok {
return d, "error", err
}
if rdserr.Code() != "DBParameterGroupNotFoundFault" {
return d, "error", err
}
}
return d, "destroyed", nil
}
}
func resourceAwsDbParameterHash(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})
buf.WriteString(fmt.Sprintf("%s-", m["name"].(string)))
// Store the value as a lower case string, to match how we store them in flattenParameters
buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["value"].(string))))
return hashcode.String(buf.String())
}
func buildRDSPGARN(d *schema.ResourceData, meta interface{}) (string, error) {
iamconn := meta.(*AWSClient).iamconn
region := meta.(*AWSClient).region
// An zero value GetUserInput{} defers to the currently logged in user
resp, err := iamconn.GetUser(&iam.GetUserInput{})
if err != nil {
return "", err
}
userARN := *resp.User.Arn
accountID := strings.Split(userARN, ":")[4]
arn := fmt.Sprintf("arn:aws:rds:%s:%s:pg:%s", region, accountID, d.Id())
return arn, nil
}
func validateDbParamGroupName(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"only lowercase alphanumeric characters and hyphens allowed in %q", k))
}
if !regexp.MustCompile(`^[a-z]`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"first character of %q must be a letter", k))
}
if regexp.MustCompile(`--`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"%q cannot contain two consecutive hyphens", k))
}
if regexp.MustCompile(`-$`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"%q cannot end with a hyphen", k))
}
if len(value) > 255 {
errors = append(errors, fmt.Errorf(
"%q cannot be greater than 255 characters", k))
}
return
}
| builtin/providers/aws/resource_aws_db_parameter_group.go | 0 | https://github.com/hashicorp/terraform/commit/42a3800ec2586326012ae83cb957adaa751adc9d | [
0.0006277160719037056,
0.00019714355585165322,
0.00016428472008556128,
0.00017172834486700594,
0.00008475057984469458
] |
{
"id": 3,
"code_window": [
"\t\t}\n",
"\n",
"\t\tnetConf, err := vnetClient.GetVirtualNetworkConfiguration()\n",
"\t\tif err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t// This is desirable - if there is no network config there can be no gateways\n",
"\t\t\tif management.IsResourceNotFoundError(err) {\n",
"\t\t\t\tcontinue\n",
"\t\t\t}\n"
],
"file_path": "builtin/providers/azure/resource_azure_local_network_test.go",
"type": "add",
"edit_start_line_idx": 111
} | package aws
import (
"fmt"
"math/rand"
"strings"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/elasticache"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccAWSElasticacheCluster_basic(t *testing.T) {
var ec elasticache.CacheCluster
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSElasticacheClusterDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSElasticacheClusterConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"),
testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec),
resource.TestCheckResourceAttr(
"aws_elasticache_cluster.bar", "cache_nodes.0.id", "0001"),
),
},
},
})
}
func TestAccAWSElasticacheCluster_snapshotsWithUpdates(t *testing.T) {
var ec elasticache.CacheCluster
ri := genRandInt()
preConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshots, ri, ri, ri)
postConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshotsUpdated, ri, ri, ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSElasticacheClusterDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: preConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"),
testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec),
resource.TestCheckResourceAttr(
"aws_elasticache_cluster.bar", "snapshot_window", "05:00-09:00"),
resource.TestCheckResourceAttr(
"aws_elasticache_cluster.bar", "snapshot_retention_limit", "3"),
),
},
resource.TestStep{
Config: postConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"),
testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec),
resource.TestCheckResourceAttr(
"aws_elasticache_cluster.bar", "snapshot_window", "07:00-09:00"),
resource.TestCheckResourceAttr(
"aws_elasticache_cluster.bar", "snapshot_retention_limit", "7"),
),
},
},
})
}
func TestAccAWSElasticacheCluster_decreasingCacheNodes(t *testing.T) {
var ec elasticache.CacheCluster
ri := genRandInt()
preConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfigDecreasingNodes, ri, ri, ri)
postConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfigDecreasingNodes_update, ri, ri, ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSElasticacheClusterDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: preConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"),
testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec),
resource.TestCheckResourceAttr(
"aws_elasticache_cluster.bar", "num_cache_nodes", "3"),
),
},
resource.TestStep{
Config: postConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"),
testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec),
resource.TestCheckResourceAttr(
"aws_elasticache_cluster.bar", "num_cache_nodes", "1"),
),
},
},
})
}
func TestAccAWSElasticacheCluster_vpc(t *testing.T) {
var csg elasticache.CacheSubnetGroup
var ec elasticache.CacheCluster
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSElasticacheClusterDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSElasticacheClusterInVPCConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSElasticacheSubnetGroupExists("aws_elasticache_subnet_group.bar", &csg),
testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec),
testAccCheckAWSElasticacheClusterAttributes(&ec),
),
},
},
})
}
func testAccCheckAWSElasticacheClusterAttributes(v *elasticache.CacheCluster) resource.TestCheckFunc {
return func(s *terraform.State) error {
if v.NotificationConfiguration == nil {
return fmt.Errorf("Expected NotificationConfiguration for ElastiCache Cluster (%s)", *v.CacheClusterId)
}
if strings.ToLower(*v.NotificationConfiguration.TopicStatus) != "active" {
return fmt.Errorf("Expected NotificationConfiguration status to be 'active', got (%s)", *v.NotificationConfiguration.TopicStatus)
}
return nil
}
}
func testAccCheckAWSElasticacheClusterDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).elasticacheconn
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_elasticache_cluster" {
continue
}
res, err := conn.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{
CacheClusterId: aws.String(rs.Primary.ID),
})
if err != nil {
// Verify the error is what we want
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "CacheClusterNotFound" {
continue
}
return err
}
if len(res.CacheClusters) > 0 {
return fmt.Errorf("still exist.")
}
}
return nil
}
func testAccCheckAWSElasticacheClusterExists(n string, v *elasticache.CacheCluster) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No cache cluster ID is set")
}
conn := testAccProvider.Meta().(*AWSClient).elasticacheconn
resp, err := conn.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{
CacheClusterId: aws.String(rs.Primary.ID),
})
if err != nil {
return fmt.Errorf("Elasticache error: %v", err)
}
for _, c := range resp.CacheClusters {
if *c.CacheClusterId == rs.Primary.ID {
*v = *c
}
}
return nil
}
}
func genRandInt() int {
return rand.New(rand.NewSource(time.Now().UnixNano())).Int() % 1000
}
var testAccAWSElasticacheClusterConfig = fmt.Sprintf(`
provider "aws" {
region = "us-east-1"
}
resource "aws_security_group" "bar" {
name = "tf-test-security-group-%03d"
description = "tf-test-security-group-descr"
ingress {
from_port = -1
to_port = -1
protocol = "icmp"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_elasticache_security_group" "bar" {
name = "tf-test-security-group-%03d"
description = "tf-test-security-group-descr"
security_group_names = ["${aws_security_group.bar.name}"]
}
resource "aws_elasticache_cluster" "bar" {
cluster_id = "tf-test-%03d"
engine = "memcached"
node_type = "cache.m1.small"
num_cache_nodes = 1
port = 11211
parameter_group_name = "default.memcached1.4"
security_group_names = ["${aws_elasticache_security_group.bar.name}"]
}
`, genRandInt(), genRandInt(), genRandInt())
var testAccAWSElasticacheClusterConfig_snapshots = `
provider "aws" {
region = "us-east-1"
}
resource "aws_security_group" "bar" {
name = "tf-test-security-group-%03d"
description = "tf-test-security-group-descr"
ingress {
from_port = -1
to_port = -1
protocol = "icmp"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_elasticache_security_group" "bar" {
name = "tf-test-security-group-%03d"
description = "tf-test-security-group-descr"
security_group_names = ["${aws_security_group.bar.name}"]
}
resource "aws_elasticache_cluster" "bar" {
cluster_id = "tf-test-%03d"
engine = "redis"
node_type = "cache.m1.small"
num_cache_nodes = 1
port = 6379
parameter_group_name = "default.redis2.8"
security_group_names = ["${aws_elasticache_security_group.bar.name}"]
snapshot_window = "05:00-09:00"
snapshot_retention_limit = 3
}
`
var testAccAWSElasticacheClusterConfig_snapshotsUpdated = `
provider "aws" {
region = "us-east-1"
}
resource "aws_security_group" "bar" {
name = "tf-test-security-group-%03d"
description = "tf-test-security-group-descr"
ingress {
from_port = -1
to_port = -1
protocol = "icmp"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_elasticache_security_group" "bar" {
name = "tf-test-security-group-%03d"
description = "tf-test-security-group-descr"
security_group_names = ["${aws_security_group.bar.name}"]
}
resource "aws_elasticache_cluster" "bar" {
cluster_id = "tf-test-%03d"
engine = "redis"
node_type = "cache.m1.small"
num_cache_nodes = 1
port = 6379
parameter_group_name = "default.redis2.8"
security_group_names = ["${aws_elasticache_security_group.bar.name}"]
snapshot_window = "07:00-09:00"
snapshot_retention_limit = 7
apply_immediately = true
}
`
var testAccAWSElasticacheClusterConfigDecreasingNodes = `
provider "aws" {
region = "us-east-1"
}
resource "aws_security_group" "bar" {
name = "tf-test-security-group-%03d"
description = "tf-test-security-group-descr"
ingress {
from_port = -1
to_port = -1
protocol = "icmp"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_elasticache_security_group" "bar" {
name = "tf-test-security-group-%03d"
description = "tf-test-security-group-descr"
security_group_names = ["${aws_security_group.bar.name}"]
}
resource "aws_elasticache_cluster" "bar" {
cluster_id = "tf-test-%03d"
engine = "memcached"
node_type = "cache.m1.small"
num_cache_nodes = 3
port = 11211
parameter_group_name = "default.memcached1.4"
security_group_names = ["${aws_elasticache_security_group.bar.name}"]
}
`
var testAccAWSElasticacheClusterConfigDecreasingNodes_update = `
provider "aws" {
region = "us-east-1"
}
resource "aws_security_group" "bar" {
name = "tf-test-security-group-%03d"
description = "tf-test-security-group-descr"
ingress {
from_port = -1
to_port = -1
protocol = "icmp"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_elasticache_security_group" "bar" {
name = "tf-test-security-group-%03d"
description = "tf-test-security-group-descr"
security_group_names = ["${aws_security_group.bar.name}"]
}
resource "aws_elasticache_cluster" "bar" {
cluster_id = "tf-test-%03d"
engine = "memcached"
node_type = "cache.m1.small"
num_cache_nodes = 1
port = 11211
parameter_group_name = "default.memcached1.4"
security_group_names = ["${aws_elasticache_security_group.bar.name}"]
apply_immediately = true
}
`
var testAccAWSElasticacheClusterInVPCConfig = fmt.Sprintf(`
resource "aws_vpc" "foo" {
cidr_block = "192.168.0.0/16"
tags {
Name = "tf-test"
}
}
resource "aws_subnet" "foo" {
vpc_id = "${aws_vpc.foo.id}"
cidr_block = "192.168.0.0/20"
availability_zone = "us-west-2a"
tags {
Name = "tf-test"
}
}
resource "aws_elasticache_subnet_group" "bar" {
name = "tf-test-cache-subnet-%03d"
description = "tf-test-cache-subnet-group-descr"
subnet_ids = ["${aws_subnet.foo.id}"]
}
resource "aws_security_group" "bar" {
name = "tf-test-security-group-%03d"
description = "tf-test-security-group-descr"
vpc_id = "${aws_vpc.foo.id}"
ingress {
from_port = -1
to_port = -1
protocol = "icmp"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_elasticache_cluster" "bar" {
// Including uppercase letters in this name to ensure
// that we correctly handle the fact that the API
// normalizes names to lowercase.
cluster_id = "tf-TEST-%03d"
node_type = "cache.m1.small"
num_cache_nodes = 1
engine = "redis"
engine_version = "2.8.19"
port = 6379
subnet_group_name = "${aws_elasticache_subnet_group.bar.name}"
security_group_ids = ["${aws_security_group.bar.id}"]
parameter_group_name = "default.redis2.8"
notification_topic_arn = "${aws_sns_topic.topic_example.arn}"
}
resource "aws_sns_topic" "topic_example" {
name = "tf-ecache-cluster-test"
}
`, genRandInt(), genRandInt(), genRandInt())
| builtin/providers/aws/resource_aws_elasticache_cluster_test.go | 0 | https://github.com/hashicorp/terraform/commit/42a3800ec2586326012ae83cb957adaa751adc9d | [
0.000755955814383924,
0.00019438187882769853,
0.00016510415298398584,
0.00017019746883306652,
0.00009390444029122591
] |
{
"id": 4,
"code_window": [
"\n",
"import (\n",
"\t\"fmt\"\n",
"\t\"testing\"\n",
"\n",
"\t\"github.com/Azure/azure-sdk-for-go/management/virtualnetwork\"\n",
"\t\"github.com/hashicorp/terraform/helper/resource\"\n",
"\t\"github.com/hashicorp/terraform/terraform\"\n",
")\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/Azure/azure-sdk-for-go/management\"\n"
],
"file_path": "builtin/providers/azure/resource_azure_virtual_network_test.go",
"type": "add",
"edit_start_line_idx": 6
} | package azure
import (
"fmt"
"testing"
"github.com/Azure/azure-sdk-for-go/management/virtualnetwork"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccAzureVirtualNetwork_basic(t *testing.T) {
var network virtualnetwork.VirtualNetworkSite
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAzureVirtualNetworkDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAzureVirtualNetwork_basic,
Check: resource.ComposeTestCheckFunc(
testAccCheckAzureVirtualNetworkExists(
"azure_virtual_network.foo", &network),
testAccCheckAzureVirtualNetworkAttributes(&network),
resource.TestCheckResourceAttr(
"azure_virtual_network.foo", "name", "terraform-vnet"),
resource.TestCheckResourceAttr(
"azure_virtual_network.foo", "location", "West US"),
resource.TestCheckResourceAttr(
"azure_virtual_network.foo", "address_space.0", "10.1.2.0/24"),
resource.TestCheckResourceAttr(
"azure_virtual_network.foo", "subnet.1787288781.name", "subnet1"),
resource.TestCheckResourceAttr(
"azure_virtual_network.foo", "subnet.1787288781.address_prefix", "10.1.2.0/25"),
),
},
},
})
}
func TestAccAzureVirtualNetwork_advanced(t *testing.T) {
var network virtualnetwork.VirtualNetworkSite
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAzureVirtualNetworkDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAzureVirtualNetwork_advanced,
Check: resource.ComposeTestCheckFunc(
testAccCheckAzureVirtualNetworkExists(
"azure_virtual_network.foo", &network),
testAccCheckAzureVirtualNetworkAttributes(&network),
resource.TestCheckResourceAttr(
"azure_virtual_network.foo", "name", "terraform-vnet"),
resource.TestCheckResourceAttr(
"azure_virtual_network.foo", "location", "West US"),
resource.TestCheckResourceAttr(
"azure_virtual_network.foo", "address_space.0", "10.1.2.0/24"),
resource.TestCheckResourceAttr(
"azure_virtual_network.foo", "subnet.33778499.name", "subnet1"),
resource.TestCheckResourceAttr(
"azure_virtual_network.foo", "subnet.33778499.address_prefix", "10.1.2.0/25"),
resource.TestCheckResourceAttr(
"azure_virtual_network.foo", "subnet.33778499.security_group", "terraform-security-group1"),
),
},
},
})
}
func TestAccAzureVirtualNetwork_update(t *testing.T) {
var network virtualnetwork.VirtualNetworkSite
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAzureVirtualNetworkDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAzureVirtualNetwork_advanced,
Check: resource.ComposeTestCheckFunc(
testAccCheckAzureVirtualNetworkExists(
"azure_virtual_network.foo", &network),
testAccCheckAzureVirtualNetworkAttributes(&network),
resource.TestCheckResourceAttr(
"azure_virtual_network.foo", "name", "terraform-vnet"),
resource.TestCheckResourceAttr(
"azure_virtual_network.foo", "location", "West US"),
resource.TestCheckResourceAttr(
"azure_virtual_network.foo", "address_space.0", "10.1.2.0/24"),
resource.TestCheckResourceAttr(
"azure_virtual_network.foo", "subnet.33778499.name", "subnet1"),
resource.TestCheckResourceAttr(
"azure_virtual_network.foo", "subnet.33778499.address_prefix", "10.1.2.0/25"),
resource.TestCheckResourceAttr(
"azure_virtual_network.foo", "subnet.33778499.security_group", "terraform-security-group1"),
),
},
resource.TestStep{
Config: testAccAzureVirtualNetwork_update,
Check: resource.ComposeTestCheckFunc(
testAccCheckAzureVirtualNetworkExists(
"azure_virtual_network.foo", &network),
testAccCheckAzureVirtualNetworkAttributes(&network),
resource.TestCheckResourceAttr(
"azure_virtual_network.foo", "name", "terraform-vnet"),
resource.TestCheckResourceAttr(
"azure_virtual_network.foo", "location", "West US"),
resource.TestCheckResourceAttr(
"azure_virtual_network.foo", "address_space.0", "10.1.3.0/24"),
resource.TestCheckResourceAttr(
"azure_virtual_network.foo", "subnet.514595123.name", "subnet1"),
resource.TestCheckResourceAttr(
"azure_virtual_network.foo", "subnet.514595123.address_prefix", "10.1.3.128/25"),
resource.TestCheckResourceAttr(
"azure_virtual_network.foo", "subnet.514595123.security_group", "terraform-security-group2"),
),
},
},
})
}
func testAccCheckAzureVirtualNetworkExists(
n string,
network *virtualnetwork.VirtualNetworkSite) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No Virtual Network ID is set")
}
vnetClient := testAccProvider.Meta().(*Client).vnetClient
nc, err := vnetClient.GetVirtualNetworkConfiguration()
if err != nil {
return err
}
for _, n := range nc.Configuration.VirtualNetworkSites {
if n.Name == rs.Primary.ID {
*network = n
return nil
}
}
return fmt.Errorf("Virtual Network not found")
}
}
func testAccCheckAzureVirtualNetworkAttributes(
network *virtualnetwork.VirtualNetworkSite) resource.TestCheckFunc {
return func(s *terraform.State) error {
if network.Name != "terraform-vnet" {
return fmt.Errorf("Bad name: %s", network.Name)
}
if network.Location != "West US" {
return fmt.Errorf("Bad location: %s", network.Location)
}
return nil
}
}
func testAccCheckAzureVirtualNetworkDestroy(s *terraform.State) error {
vnetClient := testAccProvider.Meta().(*Client).vnetClient
for _, rs := range s.RootModule().Resources {
if rs.Type != "azure_virtual_network" {
continue
}
if rs.Primary.ID == "" {
return fmt.Errorf("No Virtual Network ID is set")
}
nc, err := vnetClient.GetVirtualNetworkConfiguration()
if err != nil {
return fmt.Errorf("Error retrieving Virtual Network Configuration: %s", err)
}
for _, n := range nc.Configuration.VirtualNetworkSites {
if n.Name == rs.Primary.ID {
return fmt.Errorf("Virtual Network %s still exists", rs.Primary.ID)
}
}
}
return nil
}
const testAccAzureVirtualNetwork_basic = `
resource "azure_virtual_network" "foo" {
name = "terraform-vnet"
address_space = ["10.1.2.0/24"]
location = "West US"
subnet {
name = "subnet1"
address_prefix = "10.1.2.0/25"
}
}`
const testAccAzureVirtualNetwork_advanced = `
resource "azure_security_group" "foo" {
name = "terraform-security-group1"
location = "West US"
}
resource "azure_security_group_rule" "foo" {
name = "terraform-secgroup-rule"
security_group_names = ["${azure_security_group.foo.name}"]
type = "Inbound"
action = "Deny"
priority = 200
source_address_prefix = "100.0.0.0/32"
source_port_range = "1000"
destination_address_prefix = "10.0.0.0/32"
destination_port_range = "1000"
protocol = "TCP"
}
resource "azure_virtual_network" "foo" {
name = "terraform-vnet"
address_space = ["10.1.2.0/24"]
location = "West US"
subnet {
name = "subnet1"
address_prefix = "10.1.2.0/25"
security_group = "${azure_security_group.foo.name}"
}
}`
const testAccAzureVirtualNetwork_update = `
resource "azure_security_group" "foo" {
name = "terraform-security-group1"
location = "West US"
}
resource "azure_security_group_rule" "foo" {
name = "terraform-secgroup-rule"
security_group_names = ["${azure_security_group.foo.name}"]
type = "Inbound"
action = "Deny"
priority = 200
source_address_prefix = "100.0.0.0/32"
source_port_range = "1000"
destination_address_prefix = "10.0.0.0/32"
destination_port_range = "1000"
protocol = "TCP"
}
resource "azure_security_group" "bar" {
name = "terraform-security-group2"
location = "West US"
}
resource "azure_virtual_network" "foo" {
name = "terraform-vnet"
address_space = ["10.1.3.0/24"]
location = "West US"
subnet {
name = "subnet1"
address_prefix = "10.1.3.128/25"
security_group = "${azure_security_group.bar.name}"
}
}`
| builtin/providers/azure/resource_azure_virtual_network_test.go | 1 | https://github.com/hashicorp/terraform/commit/42a3800ec2586326012ae83cb957adaa751adc9d | [
0.7357369661331177,
0.028006428852677345,
0.00016316119581460953,
0.00042442139238119125,
0.13623693585395813
] |
{
"id": 4,
"code_window": [
"\n",
"import (\n",
"\t\"fmt\"\n",
"\t\"testing\"\n",
"\n",
"\t\"github.com/Azure/azure-sdk-for-go/management/virtualnetwork\"\n",
"\t\"github.com/hashicorp/terraform/helper/resource\"\n",
"\t\"github.com/hashicorp/terraform/terraform\"\n",
")\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/Azure/azure-sdk-for-go/management\"\n"
],
"file_path": "builtin/providers/azure/resource_azure_virtual_network_test.go",
"type": "add",
"edit_start_line_idx": 6
} | ---
layout: "aws"
page_title: "AWS: aws_ami_from_instance"
sidebar_current: "docs-aws-resource-ami-from-instance"
description: |-
Creates an Amazon Machine Image (AMI) from an EBS-backed EC2 instance
---
# aws\_ami\_from\_instance
The "AMI from instance" resource allows the creation of an Amazon Machine
Image (AMI) modelled after an existing EBS-backed EC2 instance.
The created AMI will refer to implicitly-created snapshots of the instance's
EBS volumes and mimick its assigned block device configuration at the time
the resource is created.
This resource is best applied to an instance that is stopped when this instance
is created, so that the contents of the created image are predictable. When
applied to an instance that is running, *the instance will be stopped before taking
the snapshots and then started back up again*, resulting in a period of
downtime.
Note that the source instance is inspected only at the initial creation of this
resource. Ongoing updates to the referenced instance will not be propagated into
the generated AMI. Users may taint or otherwise recreate the resource in order
to produce a fresh snapshot.
## Example Usage
```
resource "aws_ami_from_instance" "example" {
name = "terraform-example"
source_instance_id = "i-xxxxxxxx"
}
```
## Argument Reference
The following arguments are supported:
* `name` - (Required) A region-unique name for the AMI.
* `source_instance_id` - (Required) The id of the instance to use as the basis of the AMI.
* `snapshot_without_reboot` - (Optional) Boolean that overrides the behavior of stopping
the instance before snapshotting. This is risky since it may cause a snapshot of an
inconsistent filesystem state, but can be used to avoid downtime if the user otherwise
guarantees that no filesystem writes will be underway at the time of snapshot.
## Attributes Reference
The following attributes are exported:
* `id` - The ID of the created AMI.
This resource also exports a full set of attributes corresponding to the arguments of the
`aws_ami` resource, allowing the properties of the created AMI to be used elsewhere in the
configuration.
| website/source/docs/providers/aws/r/ami_from_instance.html.markdown | 0 | https://github.com/hashicorp/terraform/commit/42a3800ec2586326012ae83cb957adaa751adc9d | [
0.00026198464911431074,
0.0001825017243390903,
0.0001654732768656686,
0.00016698703984729946,
0.00003555862349458039
] |
{
"id": 4,
"code_window": [
"\n",
"import (\n",
"\t\"fmt\"\n",
"\t\"testing\"\n",
"\n",
"\t\"github.com/Azure/azure-sdk-for-go/management/virtualnetwork\"\n",
"\t\"github.com/hashicorp/terraform/helper/resource\"\n",
"\t\"github.com/hashicorp/terraform/terraform\"\n",
")\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/Azure/azure-sdk-for-go/management\"\n"
],
"file_path": "builtin/providers/azure/resource_azure_virtual_network_test.go",
"type": "add",
"edit_start_line_idx": 6
} | module "aws_instance" "web" {
source = "foo"
}
module "aws_instance" "web" {
source = "bar"
}
| config/test-fixtures/validate-dup-module/main.tf | 0 | https://github.com/hashicorp/terraform/commit/42a3800ec2586326012ae83cb957adaa751adc9d | [
0.00017292752454522997,
0.00017292752454522997,
0.00017292752454522997,
0.00017292752454522997,
0
] |
{
"id": 4,
"code_window": [
"\n",
"import (\n",
"\t\"fmt\"\n",
"\t\"testing\"\n",
"\n",
"\t\"github.com/Azure/azure-sdk-for-go/management/virtualnetwork\"\n",
"\t\"github.com/hashicorp/terraform/helper/resource\"\n",
"\t\"github.com/hashicorp/terraform/terraform\"\n",
")\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/Azure/azure-sdk-for-go/management\"\n"
],
"file_path": "builtin/providers/azure/resource_azure_virtual_network_test.go",
"type": "add",
"edit_start_line_idx": 6
} | ---
layout: "intro"
page_title: "Terraform vs. Boto, Fog, etc."
sidebar_current: "vs-other-boto"
description: |-
Libraries like Boto, Fog, etc. are used to provide native access to cloud providers and services by using their APIs. Some libraries are focused on specific clouds, while others attempt to bridge them all and mask the semantic differences. Using a client library only provides low-level access to APIs, requiring application developers to create their own tooling to build and manage their infrastructure.
---
# Terraform vs. Boto, Fog, etc.
Libraries like Boto, Fog, etc. are used to provide native access
to cloud providers and services by using their APIs. Some
libraries are focused on specific clouds, while others attempt
to bridge them all and mask the semantic differences. Using a client
library only provides low-level access to APIs, requiring application
developers to create their own tooling to build and manage their infrastructure.
Terraform is not intended to give low-level programmatic access to
providers, but instead provides a high level syntax for describing
how cloud resources and services should be created, provisioned, and
combined. Terraform is very flexible, using a plugin-based model to
support providers and provisioners, giving it the ability to support
almost any service that exposes APIs.
| website/source/intro/vs/boto.html.markdown | 0 | https://github.com/hashicorp/terraform/commit/42a3800ec2586326012ae83cb957adaa751adc9d | [
0.00016964934184215963,
0.00016482283535879105,
0.0001605577563168481,
0.00016426140791736543,
0.0000037327945392462425
] |
{
"id": 5,
"code_window": [
"\t\t\treturn fmt.Errorf(\"No Virtual Network ID is set\")\n",
"\t\t}\n",
"\n",
"\t\tnc, err := vnetClient.GetVirtualNetworkConfiguration()\n",
"\t\tif err != nil {\n",
"\t\t\treturn fmt.Errorf(\"Error retrieving Virtual Network Configuration: %s\", err)\n",
"\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif management.IsResourceNotFoundError(err) {\n",
"\t\t\t\t// This is desirable - no configuration = no networks\n",
"\t\t\t\tcontinue\n",
"\t\t\t}\n"
],
"file_path": "builtin/providers/azure/resource_azure_virtual_network_test.go",
"type": "add",
"edit_start_line_idx": 187
} | package azure
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccAzureLocalNetworkConnectionBasic(t *testing.T) {
name := "azure_local_network_connection.foo"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccAzureLocalNetworkConnectionDestroyed,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAzureLocalNetworkConnectionBasic,
Check: resource.ComposeTestCheckFunc(
testAccAzureLocalNetworkConnectionExists(name),
resource.TestCheckResourceAttr(name, "name", "terraform-local-network-connection"),
resource.TestCheckResourceAttr(name, "vpn_gateway_address", "10.11.12.13"),
resource.TestCheckResourceAttr(name, "address_space_prefixes.0", "10.10.10.0/31"),
resource.TestCheckResourceAttr(name, "address_space_prefixes.1", "10.10.10.1/31"),
),
},
},
})
}
func TestAccAzureLocalNetworkConnectionUpdate(t *testing.T) {
name := "azure_local_network_connection.foo"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccAzureLocalNetworkConnectionDestroyed,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAzureLocalNetworkConnectionBasic,
Check: resource.ComposeTestCheckFunc(
testAccAzureLocalNetworkConnectionExists(name),
resource.TestCheckResourceAttr(name, "name", "terraform-local-network-connection"),
resource.TestCheckResourceAttr(name, "vpn_gateway_address", "10.11.12.13"),
resource.TestCheckResourceAttr(name, "address_space_prefixes.0", "10.10.10.0/31"),
resource.TestCheckResourceAttr(name, "address_space_prefixes.1", "10.10.10.1/31"),
),
},
resource.TestStep{
Config: testAccAzureLocalNetworkConnectionUpdate,
Check: resource.ComposeTestCheckFunc(
testAccAzureLocalNetworkConnectionExists(name),
resource.TestCheckResourceAttr(name, "name", "terraform-local-network-connection"),
resource.TestCheckResourceAttr(name, "vpn_gateway_address", "10.11.12.14"),
resource.TestCheckResourceAttr(name, "address_space_prefixes.0", "10.10.10.2/30"),
resource.TestCheckResourceAttr(name, "address_space_prefixes.1", "10.10.10.3/30"),
),
},
},
})
}
// testAccAzureLocalNetworkConnectionExists checks whether the given local network
// connection exists on Azure.
func testAccAzureLocalNetworkConnectionExists(name string) resource.TestCheckFunc {
return func(s *terraform.State) error {
resource, ok := s.RootModule().Resources[name]
if !ok {
return fmt.Errorf("Azure Local Network Connection not found: %s", name)
}
if resource.Primary.ID == "" {
return fmt.Errorf("Azure Local Network Connection ID not set.")
}
vnetClient := testAccProvider.Meta().(*Client).vnetClient
netConf, err := vnetClient.GetVirtualNetworkConfiguration()
if err != nil {
return err
}
for _, lnet := range netConf.Configuration.LocalNetworkSites {
if lnet.Name == resource.Primary.ID {
return nil
}
break
}
return fmt.Errorf("Local Network Connection not found: %s", name)
}
}
// testAccAzureLocalNetworkConnectionDestroyed checks whether the local network
// connection has been destroyed on Azure or not.
func testAccAzureLocalNetworkConnectionDestroyed(s *terraform.State) error {
vnetClient := testAccProvider.Meta().(*Client).vnetClient
for _, resource := range s.RootModule().Resources {
if resource.Type != "azure_local_network_connection" {
continue
}
if resource.Primary.ID == "" {
return fmt.Errorf("Azure Local Network Connection ID not set.")
}
netConf, err := vnetClient.GetVirtualNetworkConfiguration()
if err != nil {
return err
}
for _, lnet := range netConf.Configuration.LocalNetworkSites {
if lnet.Name == resource.Primary.ID {
return fmt.Errorf("Azure Local Network Connection still exists.")
}
}
}
return nil
}
const testAccAzureLocalNetworkConnectionBasic = `
resource "azure_local_network_connection" "foo" {
name = "terraform-local-network-connection"
vpn_gateway_address = "10.11.12.13"
address_space_prefixes = ["10.10.10.0/31", "10.10.10.1/31"]
}
`
const testAccAzureLocalNetworkConnectionUpdate = `
resource "azure_local_network_connection" "foo" {
name = "terraform-local-network-connection"
vpn_gateway_address = "10.11.12.14"
address_space_prefixes = ["10.10.10.2/30", "10.10.10.3/30"]
}
`
| builtin/providers/azure/resource_azure_local_network_test.go | 1 | https://github.com/hashicorp/terraform/commit/42a3800ec2586326012ae83cb957adaa751adc9d | [
0.9945005178451538,
0.21161016821861267,
0.00016450669500045478,
0.0003910216619260609,
0.40075695514678955
] |
{
"id": 5,
"code_window": [
"\t\t\treturn fmt.Errorf(\"No Virtual Network ID is set\")\n",
"\t\t}\n",
"\n",
"\t\tnc, err := vnetClient.GetVirtualNetworkConfiguration()\n",
"\t\tif err != nil {\n",
"\t\t\treturn fmt.Errorf(\"Error retrieving Virtual Network Configuration: %s\", err)\n",
"\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif management.IsResourceNotFoundError(err) {\n",
"\t\t\t\t// This is desirable - no configuration = no networks\n",
"\t\t\t\tcontinue\n",
"\t\t\t}\n"
],
"file_path": "builtin/providers/azure/resource_azure_virtual_network_test.go",
"type": "add",
"edit_start_line_idx": 187
} | resource "aws_instance" "bar" {
id = "foo"
num = "2"
}
| terraform/test-fixtures/apply-taint/main.tf | 0 | https://github.com/hashicorp/terraform/commit/42a3800ec2586326012ae83cb957adaa751adc9d | [
0.0001723267196211964,
0.0001723267196211964,
0.0001723267196211964,
0.0001723267196211964,
0
] |
{
"id": 5,
"code_window": [
"\t\t\treturn fmt.Errorf(\"No Virtual Network ID is set\")\n",
"\t\t}\n",
"\n",
"\t\tnc, err := vnetClient.GetVirtualNetworkConfiguration()\n",
"\t\tif err != nil {\n",
"\t\t\treturn fmt.Errorf(\"Error retrieving Virtual Network Configuration: %s\", err)\n",
"\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif management.IsResourceNotFoundError(err) {\n",
"\t\t\t\t// This is desirable - no configuration = no networks\n",
"\t\t\t\tcontinue\n",
"\t\t\t}\n"
],
"file_path": "builtin/providers/azure/resource_azure_virtual_network_test.go",
"type": "add",
"edit_start_line_idx": 187
} | ---
layout: "docker"
page_title: "Provider: Docker"
sidebar_current: "docs-docker-index"
description: |-
The Docker provider is used to interact with Docker containers and images.
---
# Docker Provider
The Docker provider is used to interact with Docker containers and images.
It uses the Docker API to manage the lifecycle of Docker containers. Because
the Docker provider uses the Docker API, it is immediately compatible not
only with single server Docker but Swarm and any additional Docker-compatible
API hosts.
Use the navigation to the left to read about the available resources.
<div class="alert alert-block alert-info">
<strong>Note:</strong> The Docker provider is new as of Terraform 0.4.
It is ready to be used but many features are still being added. If there
is a Docker feature missing, please report it in the GitHub repo.
</div>
## Example Usage
```
# Configure the Docker provider
provider "docker" {
host = "tcp://127.0.0.1:1234/"
}
# Create a container
resource "docker_container" "foo" {
image = "${docker_image.ubuntu.latest}"
name = "foo"
}
resource "docker_image" "ubuntu" {
name = "ubuntu:latest"
}
```
## Argument Reference
The following arguments are supported:
* `host` - (Required) This is the address to the Docker host. If this is
blank, the `DOCKER_HOST` environment variable will also be read.
* `cert_path` - (Optional) Path to a directory with certificate information
for connecting to the Docker host via TLS. If this is blank, the
`DOCKER_CERT_PATH` will also be checked.
| website/source/docs/providers/docker/index.html.markdown | 0 | https://github.com/hashicorp/terraform/commit/42a3800ec2586326012ae83cb957adaa751adc9d | [
0.00017606340406928211,
0.00016950913413893431,
0.00016194819181691855,
0.0001707740593701601,
0.000004522329618339427
] |
{
"id": 5,
"code_window": [
"\t\t\treturn fmt.Errorf(\"No Virtual Network ID is set\")\n",
"\t\t}\n",
"\n",
"\t\tnc, err := vnetClient.GetVirtualNetworkConfiguration()\n",
"\t\tif err != nil {\n",
"\t\t\treturn fmt.Errorf(\"Error retrieving Virtual Network Configuration: %s\", err)\n",
"\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif management.IsResourceNotFoundError(err) {\n",
"\t\t\t\t// This is desirable - no configuration = no networks\n",
"\t\t\t\tcontinue\n",
"\t\t\t}\n"
],
"file_path": "builtin/providers/azure/resource_azure_virtual_network_test.go",
"type": "add",
"edit_start_line_idx": 187
} | # Hello
| config/module/test-fixtures/basic/foo/main.tf | 0 | https://github.com/hashicorp/terraform/commit/42a3800ec2586326012ae83cb957adaa751adc9d | [
0.00017596641555428505,
0.00017596641555428505,
0.00017596641555428505,
0.00017596641555428505,
0
] |
{
"id": 1,
"code_window": [
"}\n",
"\n",
"func rooted(rel, root string) (string, error) {\n",
"\t// The root must not be empty.\n",
"\tif root == \"\" {\n",
"\t\treturn \"\", ErrInvalidFilename\n",
"\t}\n",
"\n",
"\tvar err error\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn \"\", errInvalidFilenameEmpty\n"
],
"file_path": "lib/fs/basicfs.go",
"type": "replace",
"edit_start_line_idx": 97
} | // Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package fs
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
)
var errNoHome = errors.New("no home directory found - set $HOME (or the platform equivalent)")
func ExpandTilde(path string) (string, error) {
if path == "~" {
return getHomeDir()
}
path = filepath.FromSlash(path)
if !strings.HasPrefix(path, fmt.Sprintf("~%c", PathSeparator)) {
return path, nil
}
home, err := getHomeDir()
if err != nil {
return "", err
}
return filepath.Join(home, path[2:]), nil
}
func getHomeDir() (string, error) {
if runtime.GOOS == "windows" {
// Legacy -- we prioritize this for historical reasons, whereas
// os.UserHomeDir uses %USERPROFILE% always.
home := filepath.Join(os.Getenv("HomeDrive"), os.Getenv("HomePath"))
if home != "" {
return home, nil
}
}
return os.UserHomeDir()
}
var windowsDisallowedCharacters = string([]rune{
'<', '>', ':', '"', '|', '?', '*',
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31,
})
func WindowsInvalidFilename(name string) bool {
// None of the path components should end in space or period, or be a
// reserved name.
// (https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file)
for _, part := range strings.Split(name, `\`) {
if len(part) == 0 {
continue
}
switch part[len(part)-1] {
case ' ', '.':
// Names ending in space or period are not valid.
return true
}
switch part {
case "CON", "PRN", "AUX", "NUL",
"COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9",
"LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9":
// These reserved names are not valid.
return true
}
}
// The path must not contain any disallowed characters
return strings.ContainsAny(name, windowsDisallowedCharacters)
}
// IsParent compares paths purely lexicographically, meaning it returns false
// if path and parent aren't both absolute or relative.
func IsParent(path, parent string) bool {
if parent == path {
// Twice the same root on windows would not be caught at the end.
return false
}
if filepath.IsAbs(path) != filepath.IsAbs(parent) {
return false
}
if parent == "" || parent == "." {
// The empty string is the parent of everything except the empty
// string and ".". (Avoids panic in the last step.)
return path != "" && path != "."
}
if parent == "/" {
// The root is the parent of everything except itself, which would
// not be caught below.
return path != "/"
}
if parent[len(parent)-1] != PathSeparator {
parent += string(PathSeparator)
}
return strings.HasPrefix(path, parent)
}
func CommonPrefix(first, second string) string {
if filepath.IsAbs(first) != filepath.IsAbs(second) {
// Whatever
return ""
}
firstParts := strings.Split(filepath.Clean(first), string(PathSeparator))
secondParts := strings.Split(filepath.Clean(second), string(PathSeparator))
isAbs := filepath.IsAbs(first) && filepath.IsAbs(second)
count := len(firstParts)
if len(secondParts) < len(firstParts) {
count = len(secondParts)
}
common := make([]string, 0, count)
for i := 0; i < count; i++ {
if firstParts[i] != secondParts[i] {
break
}
common = append(common, firstParts[i])
}
if isAbs {
if runtime.GOOS == "windows" && isVolumeNameOnly(common) {
// Because strings.Split strips out path separators, if we're at the volume name, we end up without a separator
// Wedge an empty element to be joined with.
common = append(common, "")
} else if len(common) == 1 {
// If isAbs on non Windows, first element in both first and second is "", hence joining that returns nothing.
return string(PathSeparator)
}
}
// This should only be true on Windows when drive letters are different or when paths are relative.
// In case of UNC paths we should end up with more than a single element hence joining is fine
if len(common) == 0 {
return ""
}
// This has to be strings.Join, because filepath.Join([]string{"", "", "?", "C:", "Audrius"}...) returns garbage
result := strings.Join(common, string(PathSeparator))
return filepath.Clean(result)
}
func isVolumeNameOnly(parts []string) bool {
isNormalVolumeName := len(parts) == 1 && strings.HasSuffix(parts[0], ":")
isUNCVolumeName := len(parts) == 4 && strings.HasSuffix(parts[3], ":")
return isNormalVolumeName || isUNCVolumeName
}
| lib/fs/util.go | 1 | https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb | [
0.9876148700714111,
0.1601121574640274,
0.00017435371410101652,
0.0013957740738987923,
0.3287210762500763
] |
{
"id": 1,
"code_window": [
"}\n",
"\n",
"func rooted(rel, root string) (string, error) {\n",
"\t// The root must not be empty.\n",
"\tif root == \"\" {\n",
"\t\treturn \"\", ErrInvalidFilename\n",
"\t}\n",
"\n",
"\tvar err error\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn \"\", errInvalidFilenameEmpty\n"
],
"file_path": "lib/fs/basicfs.go",
"type": "replace",
"edit_start_line_idx": 97
} | // Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package ur
import "golang.org/x/sys/unix"
func memorySize() int64 {
mem, err := unix.SysctlUint64("hw.memsize")
if err != nil {
return 0
}
return int64(mem)
}
| lib/ur/memsize_darwin.go | 0 | https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb | [
0.0011329827830195427,
0.0006534461863338947,
0.0001739095605444163,
0.0006534461863338947,
0.00047953659668564796
] |
{
"id": 1,
"code_window": [
"}\n",
"\n",
"func rooted(rel, root string) (string, error) {\n",
"\t// The root must not be empty.\n",
"\tif root == \"\" {\n",
"\t\treturn \"\", ErrInvalidFilename\n",
"\t}\n",
"\n",
"\tvar err error\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn \"\", errInvalidFilenameEmpty\n"
],
"file_path": "lib/fs/basicfs.go",
"type": "replace",
"edit_start_line_idx": 97
} | // Copyright (C) 2017 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package syncthing
import "syscall"
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa379649(v=vs.85).aspx
const securityLocalSystemRID = "S-1-5-18"
func isSuperUser() bool {
tok, err := syscall.OpenCurrentProcessToken()
if err != nil {
l.Debugln("OpenCurrentProcessToken:", err)
return false
}
defer tok.Close()
user, err := tok.GetTokenUser()
if err != nil {
l.Debugln("GetTokenUser:", err)
return false
}
if user.User.Sid == nil {
l.Debugln("sid is nil")
return false
}
sid, err := user.User.Sid.String()
if err != nil {
l.Debugln("Sid.String():", err)
return false
}
l.Debugf("SID: %q", sid)
return sid == securityLocalSystemRID
}
| lib/syncthing/superuser_windows.go | 0 | https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb | [
0.0007799385348334908,
0.0003753135388251394,
0.000173218606505543,
0.0001921888324432075,
0.0002495564694982022
] |
{
"id": 1,
"code_window": [
"}\n",
"\n",
"func rooted(rel, root string) (string, error) {\n",
"\t// The root must not be empty.\n",
"\tif root == \"\" {\n",
"\t\treturn \"\", ErrInvalidFilename\n",
"\t}\n",
"\n",
"\tvar err error\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn \"\", errInvalidFilenameEmpty\n"
],
"file_path": "lib/fs/basicfs.go",
"type": "replace",
"edit_start_line_idx": 97
} | // ************************************************************
// This file is automatically generated by genxdr. Do not edit.
// ************************************************************
package protocol
import (
"github.com/calmh/xdr"
)
/*
header Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| magic |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| message Type |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| message Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct header {
unsigned int magic;
int messageType;
int messageLength;
}
*/
func (o header) XDRSize() int {
return 4 + 4 + 4
}
func (o header) MarshalXDR() ([]byte, error) {
buf := make([]byte, o.XDRSize())
m := &xdr.Marshaller{Data: buf}
return buf, o.MarshalXDRInto(m)
}
func (o header) MustMarshalXDR() []byte {
bs, err := o.MarshalXDR()
if err != nil {
panic(err)
}
return bs
}
func (o header) MarshalXDRInto(m *xdr.Marshaller) error {
m.MarshalUint32(o.magic)
m.MarshalUint32(uint32(o.messageType))
m.MarshalUint32(uint32(o.messageLength))
return m.Error
}
func (o *header) UnmarshalXDR(bs []byte) error {
u := &xdr.Unmarshaller{Data: bs}
return o.UnmarshalXDRFrom(u)
}
func (o *header) UnmarshalXDRFrom(u *xdr.Unmarshaller) error {
o.magic = u.UnmarshalUint32()
o.messageType = int32(u.UnmarshalUint32())
o.messageLength = int32(u.UnmarshalUint32())
return u.Error
}
/*
Ping Structure:
(contains no fields)
struct Ping {
}
*/
func (o Ping) XDRSize() int {
return 0
}
func (o Ping) MarshalXDR() ([]byte, error) {
return nil, nil
}
func (o Ping) MustMarshalXDR() []byte {
return nil
}
func (o Ping) MarshalXDRInto(m *xdr.Marshaller) error {
return nil
}
func (o *Ping) UnmarshalXDR(bs []byte) error {
return nil
}
func (o *Ping) UnmarshalXDRFrom(u *xdr.Unmarshaller) error {
return nil
}
/*
Pong Structure:
(contains no fields)
struct Pong {
}
*/
func (o Pong) XDRSize() int {
return 0
}
func (o Pong) MarshalXDR() ([]byte, error) {
return nil, nil
}
func (o Pong) MustMarshalXDR() []byte {
return nil
}
func (o Pong) MarshalXDRInto(m *xdr.Marshaller) error {
return nil
}
func (o *Pong) UnmarshalXDR(bs []byte) error {
return nil
}
func (o *Pong) UnmarshalXDRFrom(u *xdr.Unmarshaller) error {
return nil
}
/*
JoinRelayRequest Structure:
(contains no fields)
struct JoinRelayRequest {
}
*/
func (o JoinRelayRequest) XDRSize() int {
return 0
}
func (o JoinRelayRequest) MarshalXDR() ([]byte, error) {
return nil, nil
}
func (o JoinRelayRequest) MustMarshalXDR() []byte {
return nil
}
func (o JoinRelayRequest) MarshalXDRInto(m *xdr.Marshaller) error {
return nil
}
func (o *JoinRelayRequest) UnmarshalXDR(bs []byte) error {
return nil
}
func (o *JoinRelayRequest) UnmarshalXDRFrom(u *xdr.Unmarshaller) error {
return nil
}
/*
RelayFull Structure:
(contains no fields)
struct RelayFull {
}
*/
func (o RelayFull) XDRSize() int {
return 0
}
func (o RelayFull) MarshalXDR() ([]byte, error) {
return nil, nil
}
func (o RelayFull) MustMarshalXDR() []byte {
return nil
}
func (o RelayFull) MarshalXDRInto(m *xdr.Marshaller) error {
return nil
}
func (o *RelayFull) UnmarshalXDR(bs []byte) error {
return nil
}
func (o *RelayFull) UnmarshalXDRFrom(u *xdr.Unmarshaller) error {
return nil
}
/*
JoinSessionRequest Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Key (length + padded data) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct JoinSessionRequest {
opaque Key<32>;
}
*/
func (o JoinSessionRequest) XDRSize() int {
return 4 + len(o.Key) + xdr.Padding(len(o.Key))
}
func (o JoinSessionRequest) MarshalXDR() ([]byte, error) {
buf := make([]byte, o.XDRSize())
m := &xdr.Marshaller{Data: buf}
return buf, o.MarshalXDRInto(m)
}
func (o JoinSessionRequest) MustMarshalXDR() []byte {
bs, err := o.MarshalXDR()
if err != nil {
panic(err)
}
return bs
}
func (o JoinSessionRequest) MarshalXDRInto(m *xdr.Marshaller) error {
if l := len(o.Key); l > 32 {
return xdr.ElementSizeExceeded("Key", l, 32)
}
m.MarshalBytes(o.Key)
return m.Error
}
func (o *JoinSessionRequest) UnmarshalXDR(bs []byte) error {
u := &xdr.Unmarshaller{Data: bs}
return o.UnmarshalXDRFrom(u)
}
func (o *JoinSessionRequest) UnmarshalXDRFrom(u *xdr.Unmarshaller) error {
o.Key = u.UnmarshalBytesMax(32)
return u.Error
}
/*
Response Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Code |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Message (length + padded data) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct Response {
int Code;
string Message<>;
}
*/
func (o Response) XDRSize() int {
return 4 +
4 + len(o.Message) + xdr.Padding(len(o.Message))
}
func (o Response) MarshalXDR() ([]byte, error) {
buf := make([]byte, o.XDRSize())
m := &xdr.Marshaller{Data: buf}
return buf, o.MarshalXDRInto(m)
}
func (o Response) MustMarshalXDR() []byte {
bs, err := o.MarshalXDR()
if err != nil {
panic(err)
}
return bs
}
func (o Response) MarshalXDRInto(m *xdr.Marshaller) error {
m.MarshalUint32(uint32(o.Code))
m.MarshalString(o.Message)
return m.Error
}
func (o *Response) UnmarshalXDR(bs []byte) error {
u := &xdr.Unmarshaller{Data: bs}
return o.UnmarshalXDRFrom(u)
}
func (o *Response) UnmarshalXDRFrom(u *xdr.Unmarshaller) error {
o.Code = int32(u.UnmarshalUint32())
o.Message = u.UnmarshalString()
return u.Error
}
/*
ConnectRequest Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ ID (length + padded data) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct ConnectRequest {
opaque ID<32>;
}
*/
func (o ConnectRequest) XDRSize() int {
return 4 + len(o.ID) + xdr.Padding(len(o.ID))
}
func (o ConnectRequest) MarshalXDR() ([]byte, error) {
buf := make([]byte, o.XDRSize())
m := &xdr.Marshaller{Data: buf}
return buf, o.MarshalXDRInto(m)
}
func (o ConnectRequest) MustMarshalXDR() []byte {
bs, err := o.MarshalXDR()
if err != nil {
panic(err)
}
return bs
}
func (o ConnectRequest) MarshalXDRInto(m *xdr.Marshaller) error {
if l := len(o.ID); l > 32 {
return xdr.ElementSizeExceeded("ID", l, 32)
}
m.MarshalBytes(o.ID)
return m.Error
}
func (o *ConnectRequest) UnmarshalXDR(bs []byte) error {
u := &xdr.Unmarshaller{Data: bs}
return o.UnmarshalXDRFrom(u)
}
func (o *ConnectRequest) UnmarshalXDRFrom(u *xdr.Unmarshaller) error {
o.ID = u.UnmarshalBytesMax(32)
return u.Error
}
/*
SessionInvitation Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ From (length + padded data) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Key (length + padded data) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Address (length + padded data) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| 16 zero bits | Port |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Server Socket (V=0 or 1) |V|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct SessionInvitation {
opaque From<32>;
opaque Key<32>;
opaque Address<32>;
unsigned int Port;
bool ServerSocket;
}
*/
func (o SessionInvitation) XDRSize() int {
return 4 + len(o.From) + xdr.Padding(len(o.From)) +
4 + len(o.Key) + xdr.Padding(len(o.Key)) +
4 + len(o.Address) + xdr.Padding(len(o.Address)) + 4 + 4
}
func (o SessionInvitation) MarshalXDR() ([]byte, error) {
buf := make([]byte, o.XDRSize())
m := &xdr.Marshaller{Data: buf}
return buf, o.MarshalXDRInto(m)
}
func (o SessionInvitation) MustMarshalXDR() []byte {
bs, err := o.MarshalXDR()
if err != nil {
panic(err)
}
return bs
}
func (o SessionInvitation) MarshalXDRInto(m *xdr.Marshaller) error {
if l := len(o.From); l > 32 {
return xdr.ElementSizeExceeded("From", l, 32)
}
m.MarshalBytes(o.From)
if l := len(o.Key); l > 32 {
return xdr.ElementSizeExceeded("Key", l, 32)
}
m.MarshalBytes(o.Key)
if l := len(o.Address); l > 32 {
return xdr.ElementSizeExceeded("Address", l, 32)
}
m.MarshalBytes(o.Address)
m.MarshalUint16(o.Port)
m.MarshalBool(o.ServerSocket)
return m.Error
}
func (o *SessionInvitation) UnmarshalXDR(bs []byte) error {
u := &xdr.Unmarshaller{Data: bs}
return o.UnmarshalXDRFrom(u)
}
func (o *SessionInvitation) UnmarshalXDRFrom(u *xdr.Unmarshaller) error {
o.From = u.UnmarshalBytesMax(32)
o.Key = u.UnmarshalBytesMax(32)
o.Address = u.UnmarshalBytesMax(32)
o.Port = u.UnmarshalUint16()
o.ServerSocket = u.UnmarshalBool()
return u.Error
}
| lib/relay/protocol/packets_xdr.go | 0 | https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb | [
0.011602074839174747,
0.00044590956531465054,
0.00016692823555786163,
0.00017833443416748196,
0.0016639998648315668
] |
{
"id": 2,
"code_window": [
"\n",
"\tif strings.HasPrefix(file, pathSep+pathSep) {\n",
"\t\t// The relative path may pretend to be an absolute path within\n",
"\t\t// the root, but the double path separator on Windows implies\n",
"\t\t// something else and is out of spec.\n",
"\t\treturn \"\", ErrNotRelative\n",
"\t}\n",
"\n",
"\t// The relative path should be clean from internal dotdots and similar\n",
"\t// funkyness.\n",
"\tfile = filepath.Clean(file)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn \"\", errNotRelative\n"
],
"file_path": "lib/fs/filesystem.go",
"type": "replace",
"edit_start_line_idx": 236
} | // Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package fs
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
)
var errNoHome = errors.New("no home directory found - set $HOME (or the platform equivalent)")
func ExpandTilde(path string) (string, error) {
if path == "~" {
return getHomeDir()
}
path = filepath.FromSlash(path)
if !strings.HasPrefix(path, fmt.Sprintf("~%c", PathSeparator)) {
return path, nil
}
home, err := getHomeDir()
if err != nil {
return "", err
}
return filepath.Join(home, path[2:]), nil
}
func getHomeDir() (string, error) {
if runtime.GOOS == "windows" {
// Legacy -- we prioritize this for historical reasons, whereas
// os.UserHomeDir uses %USERPROFILE% always.
home := filepath.Join(os.Getenv("HomeDrive"), os.Getenv("HomePath"))
if home != "" {
return home, nil
}
}
return os.UserHomeDir()
}
var windowsDisallowedCharacters = string([]rune{
'<', '>', ':', '"', '|', '?', '*',
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31,
})
func WindowsInvalidFilename(name string) bool {
// None of the path components should end in space or period, or be a
// reserved name.
// (https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file)
for _, part := range strings.Split(name, `\`) {
if len(part) == 0 {
continue
}
switch part[len(part)-1] {
case ' ', '.':
// Names ending in space or period are not valid.
return true
}
switch part {
case "CON", "PRN", "AUX", "NUL",
"COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9",
"LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9":
// These reserved names are not valid.
return true
}
}
// The path must not contain any disallowed characters
return strings.ContainsAny(name, windowsDisallowedCharacters)
}
// IsParent compares paths purely lexicographically, meaning it returns false
// if path and parent aren't both absolute or relative.
func IsParent(path, parent string) bool {
if parent == path {
// Twice the same root on windows would not be caught at the end.
return false
}
if filepath.IsAbs(path) != filepath.IsAbs(parent) {
return false
}
if parent == "" || parent == "." {
// The empty string is the parent of everything except the empty
// string and ".". (Avoids panic in the last step.)
return path != "" && path != "."
}
if parent == "/" {
// The root is the parent of everything except itself, which would
// not be caught below.
return path != "/"
}
if parent[len(parent)-1] != PathSeparator {
parent += string(PathSeparator)
}
return strings.HasPrefix(path, parent)
}
func CommonPrefix(first, second string) string {
if filepath.IsAbs(first) != filepath.IsAbs(second) {
// Whatever
return ""
}
firstParts := strings.Split(filepath.Clean(first), string(PathSeparator))
secondParts := strings.Split(filepath.Clean(second), string(PathSeparator))
isAbs := filepath.IsAbs(first) && filepath.IsAbs(second)
count := len(firstParts)
if len(secondParts) < len(firstParts) {
count = len(secondParts)
}
common := make([]string, 0, count)
for i := 0; i < count; i++ {
if firstParts[i] != secondParts[i] {
break
}
common = append(common, firstParts[i])
}
if isAbs {
if runtime.GOOS == "windows" && isVolumeNameOnly(common) {
// Because strings.Split strips out path separators, if we're at the volume name, we end up without a separator
// Wedge an empty element to be joined with.
common = append(common, "")
} else if len(common) == 1 {
// If isAbs on non Windows, first element in both first and second is "", hence joining that returns nothing.
return string(PathSeparator)
}
}
// This should only be true on Windows when drive letters are different or when paths are relative.
// In case of UNC paths we should end up with more than a single element hence joining is fine
if len(common) == 0 {
return ""
}
// This has to be strings.Join, because filepath.Join([]string{"", "", "?", "C:", "Audrius"}...) returns garbage
result := strings.Join(common, string(PathSeparator))
return filepath.Clean(result)
}
func isVolumeNameOnly(parts []string) bool {
isNormalVolumeName := len(parts) == 1 && strings.HasSuffix(parts[0], ":")
isUNCVolumeName := len(parts) == 4 && strings.HasSuffix(parts[3], ":")
return isNormalVolumeName || isUNCVolumeName
}
| lib/fs/util.go | 1 | https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb | [
0.01049560122191906,
0.002892830176278949,
0.00016717448306735605,
0.002063617343083024,
0.003017094684764743
] |
{
"id": 2,
"code_window": [
"\n",
"\tif strings.HasPrefix(file, pathSep+pathSep) {\n",
"\t\t// The relative path may pretend to be an absolute path within\n",
"\t\t// the root, but the double path separator on Windows implies\n",
"\t\t// something else and is out of spec.\n",
"\t\treturn \"\", ErrNotRelative\n",
"\t}\n",
"\n",
"\t// The relative path should be clean from internal dotdots and similar\n",
"\t// funkyness.\n",
"\tfile = filepath.Clean(file)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn \"\", errNotRelative\n"
],
"file_path": "lib/fs/filesystem.go",
"type": "replace",
"edit_start_line_idx": 236
} | baz
| lib/scanner/testdata/dir3/cfile | 0 | https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb | [
0.0001673058868618682,
0.0001673058868618682,
0.0001673058868618682,
0.0001673058868618682,
0
] |
{
"id": 2,
"code_window": [
"\n",
"\tif strings.HasPrefix(file, pathSep+pathSep) {\n",
"\t\t// The relative path may pretend to be an absolute path within\n",
"\t\t// the root, but the double path separator on Windows implies\n",
"\t\t// something else and is out of spec.\n",
"\t\treturn \"\", ErrNotRelative\n",
"\t}\n",
"\n",
"\t// The relative path should be clean from internal dotdots and similar\n",
"\t// funkyness.\n",
"\tfile = filepath.Clean(file)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn \"\", errNotRelative\n"
],
"file_path": "lib/fs/filesystem.go",
"type": "replace",
"edit_start_line_idx": 236
} | // Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package watchaggregator
import (
"github.com/syncthing/syncthing/lib/logger"
)
var (
l = logger.DefaultLogger.NewFacility("watchaggregator", "Filesystem event watcher")
)
| lib/watchaggregator/debug.go | 0 | https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb | [
0.00017714127898216248,
0.00017413892783224583,
0.0001711365912342444,
0.00017413892783224583,
0.0000030023438739590347
] |
{
"id": 2,
"code_window": [
"\n",
"\tif strings.HasPrefix(file, pathSep+pathSep) {\n",
"\t\t// The relative path may pretend to be an absolute path within\n",
"\t\t// the root, but the double path separator on Windows implies\n",
"\t\t// something else and is out of spec.\n",
"\t\treturn \"\", ErrNotRelative\n",
"\t}\n",
"\n",
"\t// The relative path should be clean from internal dotdots and similar\n",
"\t// funkyness.\n",
"\tfile = filepath.Clean(file)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn \"\", errNotRelative\n"
],
"file_path": "lib/fs/filesystem.go",
"type": "replace",
"edit_start_line_idx": 236
} | // Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
// +build !linux,!android,!windows
package fs
import "os"
func (*BasicFilesystem) underlyingLstat(name string) (fi os.FileInfo, err error) {
return os.Lstat(name)
}
| lib/fs/basicfs_lstat_regular.go | 0 | https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb | [
0.0001753756368998438,
0.00017043345724232495,
0.00016549126303289086,
0.00017043345724232495,
0.000004942186933476478
] |
{
"id": 4,
"code_window": [
"\t}\n",
"\tif strings.HasPrefix(file, \"..\"+pathSep) {\n",
"\t\treturn \"\", ErrNotRelative\n",
"\t}\n",
"\n",
"\tif strings.HasPrefix(file, pathSep) {\n",
"\t\tif file == pathSep {\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn \"\", errNotRelative\n"
],
"file_path": "lib/fs/filesystem.go",
"type": "replace",
"edit_start_line_idx": 249
} | // Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package fs
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
)
var errNoHome = errors.New("no home directory found - set $HOME (or the platform equivalent)")
func ExpandTilde(path string) (string, error) {
if path == "~" {
return getHomeDir()
}
path = filepath.FromSlash(path)
if !strings.HasPrefix(path, fmt.Sprintf("~%c", PathSeparator)) {
return path, nil
}
home, err := getHomeDir()
if err != nil {
return "", err
}
return filepath.Join(home, path[2:]), nil
}
func getHomeDir() (string, error) {
if runtime.GOOS == "windows" {
// Legacy -- we prioritize this for historical reasons, whereas
// os.UserHomeDir uses %USERPROFILE% always.
home := filepath.Join(os.Getenv("HomeDrive"), os.Getenv("HomePath"))
if home != "" {
return home, nil
}
}
return os.UserHomeDir()
}
var windowsDisallowedCharacters = string([]rune{
'<', '>', ':', '"', '|', '?', '*',
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31,
})
func WindowsInvalidFilename(name string) bool {
// None of the path components should end in space or period, or be a
// reserved name.
// (https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file)
for _, part := range strings.Split(name, `\`) {
if len(part) == 0 {
continue
}
switch part[len(part)-1] {
case ' ', '.':
// Names ending in space or period are not valid.
return true
}
switch part {
case "CON", "PRN", "AUX", "NUL",
"COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9",
"LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9":
// These reserved names are not valid.
return true
}
}
// The path must not contain any disallowed characters
return strings.ContainsAny(name, windowsDisallowedCharacters)
}
// IsParent compares paths purely lexicographically, meaning it returns false
// if path and parent aren't both absolute or relative.
func IsParent(path, parent string) bool {
if parent == path {
// Twice the same root on windows would not be caught at the end.
return false
}
if filepath.IsAbs(path) != filepath.IsAbs(parent) {
return false
}
if parent == "" || parent == "." {
// The empty string is the parent of everything except the empty
// string and ".". (Avoids panic in the last step.)
return path != "" && path != "."
}
if parent == "/" {
// The root is the parent of everything except itself, which would
// not be caught below.
return path != "/"
}
if parent[len(parent)-1] != PathSeparator {
parent += string(PathSeparator)
}
return strings.HasPrefix(path, parent)
}
func CommonPrefix(first, second string) string {
if filepath.IsAbs(first) != filepath.IsAbs(second) {
// Whatever
return ""
}
firstParts := strings.Split(filepath.Clean(first), string(PathSeparator))
secondParts := strings.Split(filepath.Clean(second), string(PathSeparator))
isAbs := filepath.IsAbs(first) && filepath.IsAbs(second)
count := len(firstParts)
if len(secondParts) < len(firstParts) {
count = len(secondParts)
}
common := make([]string, 0, count)
for i := 0; i < count; i++ {
if firstParts[i] != secondParts[i] {
break
}
common = append(common, firstParts[i])
}
if isAbs {
if runtime.GOOS == "windows" && isVolumeNameOnly(common) {
// Because strings.Split strips out path separators, if we're at the volume name, we end up without a separator
// Wedge an empty element to be joined with.
common = append(common, "")
} else if len(common) == 1 {
// If isAbs on non Windows, first element in both first and second is "", hence joining that returns nothing.
return string(PathSeparator)
}
}
// This should only be true on Windows when drive letters are different or when paths are relative.
// In case of UNC paths we should end up with more than a single element hence joining is fine
if len(common) == 0 {
return ""
}
// This has to be strings.Join, because filepath.Join([]string{"", "", "?", "C:", "Audrius"}...) returns garbage
result := strings.Join(common, string(PathSeparator))
return filepath.Clean(result)
}
func isVolumeNameOnly(parts []string) bool {
isNormalVolumeName := len(parts) == 1 && strings.HasSuffix(parts[0], ":")
isUNCVolumeName := len(parts) == 4 && strings.HasSuffix(parts[3], ":")
return isNormalVolumeName || isUNCVolumeName
}
| lib/fs/util.go | 1 | https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb | [
0.0004098998906556517,
0.00021214786102063954,
0.00016612444596830755,
0.00017864241090137511,
0.00007421974441967905
] |
{
"id": 4,
"code_window": [
"\t}\n",
"\tif strings.HasPrefix(file, \"..\"+pathSep) {\n",
"\t\treturn \"\", ErrNotRelative\n",
"\t}\n",
"\n",
"\tif strings.HasPrefix(file, pathSep) {\n",
"\t\tif file == pathSep {\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn \"\", errNotRelative\n"
],
"file_path": "lib/fs/filesystem.go",
"type": "replace",
"edit_start_line_idx": 249
} | // Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
//go:generate go run ../../proto/scripts/protofmt.go database.proto
//go:generate protoc -I ../../ -I . --gogofast_out=. database.proto
package main
import (
"log"
"sort"
"time"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/util"
)
type clock interface {
Now() time.Time
}
type defaultClock struct{}
func (defaultClock) Now() time.Time {
return time.Now()
}
type database interface {
put(key string, rec DatabaseRecord) error
merge(key string, addrs []DatabaseAddress, seen int64) error
get(key string) (DatabaseRecord, error)
}
type levelDBStore struct {
db *leveldb.DB
inbox chan func()
stop chan struct{}
clock clock
marshalBuf []byte
}
func newLevelDBStore(dir string) (*levelDBStore, error) {
db, err := leveldb.OpenFile(dir, levelDBOptions)
if err != nil {
return nil, err
}
return &levelDBStore{
db: db,
inbox: make(chan func(), 16),
stop: make(chan struct{}),
clock: defaultClock{},
}, nil
}
func (s *levelDBStore) put(key string, rec DatabaseRecord) error {
t0 := time.Now()
defer func() {
databaseOperationSeconds.WithLabelValues(dbOpPut).Observe(time.Since(t0).Seconds())
}()
rc := make(chan error)
s.inbox <- func() {
size := rec.Size()
if len(s.marshalBuf) < size {
s.marshalBuf = make([]byte, size)
}
n, _ := rec.MarshalTo(s.marshalBuf)
rc <- s.db.Put([]byte(key), s.marshalBuf[:n], nil)
}
err := <-rc
if err != nil {
databaseOperations.WithLabelValues(dbOpPut, dbResError).Inc()
} else {
databaseOperations.WithLabelValues(dbOpPut, dbResSuccess).Inc()
}
return err
}
func (s *levelDBStore) merge(key string, addrs []DatabaseAddress, seen int64) error {
t0 := time.Now()
defer func() {
databaseOperationSeconds.WithLabelValues(dbOpMerge).Observe(time.Since(t0).Seconds())
}()
rc := make(chan error)
newRec := DatabaseRecord{
Addresses: addrs,
Seen: seen,
}
s.inbox <- func() {
// grab the existing record
oldRec, err := s.get(key)
if err != nil {
// "not found" is not an error from get, so this is serious
// stuff only
rc <- err
return
}
newRec = merge(newRec, oldRec)
// We replicate s.put() functionality here ourselves instead of
// calling it because we want to serialize our get above together
// with the put in the same function.
size := newRec.Size()
if len(s.marshalBuf) < size {
s.marshalBuf = make([]byte, size)
}
n, _ := newRec.MarshalTo(s.marshalBuf)
rc <- s.db.Put([]byte(key), s.marshalBuf[:n], nil)
}
err := <-rc
if err != nil {
databaseOperations.WithLabelValues(dbOpMerge, dbResError).Inc()
} else {
databaseOperations.WithLabelValues(dbOpMerge, dbResSuccess).Inc()
}
return err
}
func (s *levelDBStore) get(key string) (DatabaseRecord, error) {
t0 := time.Now()
defer func() {
databaseOperationSeconds.WithLabelValues(dbOpGet).Observe(time.Since(t0).Seconds())
}()
keyBs := []byte(key)
val, err := s.db.Get(keyBs, nil)
if err == leveldb.ErrNotFound {
databaseOperations.WithLabelValues(dbOpGet, dbResNotFound).Inc()
return DatabaseRecord{}, nil
}
if err != nil {
databaseOperations.WithLabelValues(dbOpGet, dbResError).Inc()
return DatabaseRecord{}, err
}
var rec DatabaseRecord
if err := rec.Unmarshal(val); err != nil {
databaseOperations.WithLabelValues(dbOpGet, dbResUnmarshalError).Inc()
return DatabaseRecord{}, nil
}
rec.Addresses = expire(rec.Addresses, s.clock.Now().UnixNano())
databaseOperations.WithLabelValues(dbOpGet, dbResSuccess).Inc()
return rec, nil
}
func (s *levelDBStore) Serve() {
t := time.NewTimer(0)
defer t.Stop()
defer s.db.Close()
// Start the statistics serve routine. It will exit with us when
// statisticsTrigger is closed.
statisticsTrigger := make(chan struct{})
statisticsDone := make(chan struct{})
go s.statisticsServe(statisticsTrigger, statisticsDone)
loop:
for {
select {
case fn := <-s.inbox:
// Run function in serialized order.
fn()
case <-t.C:
// Trigger the statistics routine to do its thing in the
// background.
statisticsTrigger <- struct{}{}
case <-statisticsDone:
// The statistics routine is done with one iteratation, schedule
// the next.
t.Reset(databaseStatisticsInterval)
case <-s.stop:
// We're done.
close(statisticsTrigger)
break loop
}
}
// Also wait for statisticsServe to return
<-statisticsDone
}
func (s *levelDBStore) statisticsServe(trigger <-chan struct{}, done chan<- struct{}) {
defer close(done)
for range trigger {
t0 := time.Now()
nowNanos := t0.UnixNano()
cutoff24h := t0.Add(-24 * time.Hour).UnixNano()
cutoff1w := t0.Add(-7 * 24 * time.Hour).UnixNano()
cutoff2Mon := t0.Add(-60 * 24 * time.Hour).UnixNano()
current, last24h, last1w, inactive, errors := 0, 0, 0, 0, 0
iter := s.db.NewIterator(&util.Range{}, nil)
for iter.Next() {
// Attempt to unmarshal the record and count the
// failure if there's something wrong with it.
var rec DatabaseRecord
if err := rec.Unmarshal(iter.Value()); err != nil {
errors++
continue
}
// If there are addresses that have not expired it's a current
// record, otherwise account it based on when it was last seen
// (last 24 hours or last week) or finally as inactice.
switch {
case len(expire(rec.Addresses, nowNanos)) > 0:
current++
case rec.Seen > cutoff24h:
last24h++
case rec.Seen > cutoff1w:
last1w++
case rec.Seen > cutoff2Mon:
inactive++
case rec.Missed < cutoff2Mon:
// It hasn't been seen lately and we haven't recorded
// someone asking for this device in a long time either;
// delete the record.
if err := s.db.Delete(iter.Key(), nil); err != nil {
databaseOperations.WithLabelValues(dbOpDelete, dbResError).Inc()
} else {
databaseOperations.WithLabelValues(dbOpDelete, dbResSuccess).Inc()
}
default:
inactive++
}
}
iter.Release()
databaseKeys.WithLabelValues("current").Set(float64(current))
databaseKeys.WithLabelValues("last24h").Set(float64(last24h))
databaseKeys.WithLabelValues("last1w").Set(float64(last1w))
databaseKeys.WithLabelValues("inactive").Set(float64(inactive))
databaseKeys.WithLabelValues("error").Set(float64(errors))
databaseStatisticsSeconds.Set(time.Since(t0).Seconds())
// Signal that we are done and can be scheduled again.
done <- struct{}{}
}
}
func (s *levelDBStore) Stop() {
close(s.stop)
}
// merge returns the merged result of the two database records a and b. The
// result is the union of the two address sets, with the newer expiry time
// chosen for any duplicates.
func merge(a, b DatabaseRecord) DatabaseRecord {
// Both lists must be sorted for this to work.
if !sort.IsSorted(databaseAddressOrder(a.Addresses)) {
log.Println("Warning: bug: addresses not correctly sorted in merge")
a.Addresses = sortedAddressCopy(a.Addresses)
}
if !sort.IsSorted(databaseAddressOrder(b.Addresses)) {
// no warning because this is the side we read from disk and it may
// legitimately predate correct sorting.
b.Addresses = sortedAddressCopy(b.Addresses)
}
res := DatabaseRecord{
Addresses: make([]DatabaseAddress, 0, len(a.Addresses)+len(b.Addresses)),
Seen: a.Seen,
}
if b.Seen > a.Seen {
res.Seen = b.Seen
}
aIdx := 0
bIdx := 0
aAddrs := a.Addresses
bAddrs := b.Addresses
loop:
for {
switch {
case aIdx == len(aAddrs) && bIdx == len(bAddrs):
// both lists are exhausted, we are done
break loop
case aIdx == len(aAddrs):
// a is exhausted, pick from b and continue
res.Addresses = append(res.Addresses, bAddrs[bIdx])
bIdx++
continue
case bIdx == len(bAddrs):
// b is exhausted, pick from a and continue
res.Addresses = append(res.Addresses, aAddrs[aIdx])
aIdx++
continue
}
// We have values left on both sides.
aVal := aAddrs[aIdx]
bVal := bAddrs[bIdx]
switch {
case aVal.Address == bVal.Address:
// update for same address, pick newer
if aVal.Expires > bVal.Expires {
res.Addresses = append(res.Addresses, aVal)
} else {
res.Addresses = append(res.Addresses, bVal)
}
aIdx++
bIdx++
case aVal.Address < bVal.Address:
// a is smallest, pick it and continue
res.Addresses = append(res.Addresses, aVal)
aIdx++
default:
// b is smallest, pick it and continue
res.Addresses = append(res.Addresses, bVal)
bIdx++
}
}
return res
}
// expire returns the list of addresses after removing expired entries.
// Expiration happen in place, so the slice given as the parameter is
// destroyed. Internal order is not preserved.
func expire(addrs []DatabaseAddress, now int64) []DatabaseAddress {
i := 0
for i < len(addrs) {
if addrs[i].Expires < now {
// This item is expired. Replace it with the last in the list
// (noop if we are at the last item).
addrs[i] = addrs[len(addrs)-1]
// Wipe the last item of the list to release references to
// strings and stuff.
addrs[len(addrs)-1] = DatabaseAddress{}
// Shorten the slice.
addrs = addrs[:len(addrs)-1]
continue
}
i++
}
return addrs
}
func sortedAddressCopy(addrs []DatabaseAddress) []DatabaseAddress {
sorted := make([]DatabaseAddress, len(addrs))
copy(sorted, addrs)
sort.Sort(databaseAddressOrder(sorted))
return sorted
}
type databaseAddressOrder []DatabaseAddress
func (s databaseAddressOrder) Less(a, b int) bool {
return s[a].Address < s[b].Address
}
func (s databaseAddressOrder) Swap(a, b int) {
s[a], s[b] = s[b], s[a]
}
func (s databaseAddressOrder) Len() int {
return len(s)
}
| cmd/stdiscosrv/database.go | 0 | https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb | [
0.0001775736891431734,
0.00017110827320721,
0.0001615809160284698,
0.0001725839829305187,
0.00000397270332541666
] |
{
"id": 4,
"code_window": [
"\t}\n",
"\tif strings.HasPrefix(file, \"..\"+pathSep) {\n",
"\t\treturn \"\", ErrNotRelative\n",
"\t}\n",
"\n",
"\tif strings.HasPrefix(file, pathSep) {\n",
"\t\tif file == pathSep {\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn \"\", errNotRelative\n"
],
"file_path": "lib/fs/filesystem.go",
"type": "replace",
"edit_start_line_idx": 249
} | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: lib/config/pullorder.proto
package config
import (
fmt "fmt"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type PullOrder int32
const (
PullOrderRandom PullOrder = 0
PullOrderAlphabetic PullOrder = 1
PullOrderSmallestFirst PullOrder = 2
PullOrderLargestFirst PullOrder = 3
PullOrderOldestFirst PullOrder = 4
PullOrderNewestFirst PullOrder = 5
)
var PullOrder_name = map[int32]string{
0: "PULL_ORDER_RANDOM",
1: "PULL_ORDER_ALPHABETIC",
2: "PULL_ORDER_SMALLEST_FIRST",
3: "PULL_ORDER_LARGEST_FIRST",
4: "PULL_ORDER_OLDEST_FIRST",
5: "PULL_ORDER_NEWEST_FIRST",
}
var PullOrder_value = map[string]int32{
"PULL_ORDER_RANDOM": 0,
"PULL_ORDER_ALPHABETIC": 1,
"PULL_ORDER_SMALLEST_FIRST": 2,
"PULL_ORDER_LARGEST_FIRST": 3,
"PULL_ORDER_OLDEST_FIRST": 4,
"PULL_ORDER_NEWEST_FIRST": 5,
}
func (PullOrder) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_2fa3f5222a7755bf, []int{0}
}
func init() {
proto.RegisterEnum("config.PullOrder", PullOrder_name, PullOrder_value)
}
func init() { proto.RegisterFile("lib/config/pullorder.proto", fileDescriptor_2fa3f5222a7755bf) }
var fileDescriptor_2fa3f5222a7755bf = []byte{
// 343 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xca, 0xc9, 0x4c, 0xd2,
0x4f, 0xce, 0xcf, 0x4b, 0xcb, 0x4c, 0xd7, 0x2f, 0x28, 0xcd, 0xc9, 0xc9, 0x2f, 0x4a, 0x49, 0x2d,
0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x83, 0x88, 0x4b, 0x29, 0x17, 0xa5, 0x16, 0xe4,
0x17, 0xeb, 0x83, 0x05, 0x93, 0x4a, 0xd3, 0xf4, 0xd3, 0xf3, 0xd3, 0xf3, 0xc1, 0x1c, 0x30, 0x0b,
0xa2, 0x58, 0xeb, 0x32, 0x13, 0x17, 0x67, 0x40, 0x69, 0x4e, 0x8e, 0x3f, 0xc8, 0x00, 0x21, 0x2d,
0x2e, 0xc1, 0x80, 0x50, 0x1f, 0x9f, 0x78, 0xff, 0x20, 0x17, 0xd7, 0xa0, 0xf8, 0x20, 0x47, 0x3f,
0x17, 0x7f, 0x5f, 0x01, 0x06, 0x29, 0xe1, 0xae, 0xb9, 0x0a, 0xfc, 0x70, 0x55, 0x41, 0x89, 0x79,
0x29, 0xf9, 0xb9, 0x42, 0x46, 0x5c, 0xa2, 0x48, 0x6a, 0x1d, 0x7d, 0x02, 0x3c, 0x1c, 0x9d, 0x5c,
0x43, 0x3c, 0x9d, 0x05, 0x18, 0xa5, 0xc4, 0xbb, 0xe6, 0x2a, 0x08, 0xc3, 0xd5, 0x3b, 0xe6, 0x14,
0x64, 0x24, 0x26, 0xa5, 0x96, 0x64, 0x26, 0x0b, 0x59, 0x72, 0x49, 0x22, 0xe9, 0x09, 0xf6, 0x75,
0xf4, 0xf1, 0x71, 0x0d, 0x0e, 0x89, 0x77, 0xf3, 0x0c, 0x0a, 0x0e, 0x11, 0x60, 0x92, 0x92, 0xea,
0x9a, 0xab, 0x20, 0x06, 0xd7, 0x17, 0x9c, 0x9b, 0x98, 0x93, 0x93, 0x5a, 0x5c, 0xe2, 0x96, 0x59,
0x54, 0x5c, 0x22, 0x64, 0xce, 0x25, 0x81, 0xa4, 0xd5, 0xc7, 0x31, 0xc8, 0x1d, 0xa1, 0x93, 0x59,
0x4a, 0xb2, 0x6b, 0xae, 0x82, 0x28, 0x5c, 0xa7, 0x4f, 0x62, 0x51, 0x3a, 0x5c, 0xa3, 0x29, 0x97,
0x38, 0x92, 0x46, 0x7f, 0x1f, 0x17, 0x84, 0x3e, 0x16, 0x29, 0x89, 0xae, 0xb9, 0x0a, 0x22, 0x70,
0x7d, 0xfe, 0x39, 0x29, 0x38, 0xb4, 0xf9, 0xb9, 0x86, 0x23, 0xb4, 0xb1, 0xa2, 0x69, 0xf3, 0x4b,
0x2d, 0x87, 0x69, 0x93, 0x62, 0x59, 0xb1, 0x44, 0x8e, 0xc1, 0xc9, 0xfd, 0xc4, 0x43, 0x39, 0x86,
0x0b, 0x0f, 0xe5, 0x18, 0x5e, 0x3c, 0x92, 0x63, 0x98, 0xf0, 0x58, 0x8e, 0x61, 0xc1, 0x63, 0x39,
0xc6, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0xd2, 0x4c, 0xcf, 0x2c, 0xc9, 0x28,
0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x2f, 0xae, 0xcc, 0x4b, 0x2e, 0xc9, 0xc8, 0xcc, 0x4b, 0x47,
0x62, 0x21, 0xe2, 0x36, 0x89, 0x0d, 0x1c, 0x4b, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x34,
0x1f, 0xbc, 0x31, 0xf0, 0x01, 0x00, 0x00,
}
| lib/config/pullorder.pb.go | 0 | https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb | [
0.00017462407413404435,
0.00016841541219037026,
0.00016403163317590952,
0.00016840350872371346,
0.000003178015958837932
] |
{
"id": 4,
"code_window": [
"\t}\n",
"\tif strings.HasPrefix(file, \"..\"+pathSep) {\n",
"\t\treturn \"\", ErrNotRelative\n",
"\t}\n",
"\n",
"\tif strings.HasPrefix(file, pathSep) {\n",
"\t\tif file == pathSep {\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn \"\", errNotRelative\n"
],
"file_path": "lib/fs/filesystem.go",
"type": "replace",
"edit_start_line_idx": 249
} | // Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package osutil_test
import (
"io/ioutil"
"os"
"path/filepath"
"runtime"
"testing"
"github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/osutil"
)
func TestTraversesSymlink(t *testing.T) {
tmpDir, err := ioutil.TempDir(".", ".test-TraversesSymlink-")
if err != nil {
panic("Failed to create temporary testing dir")
}
defer os.RemoveAll(tmpDir)
testFs := fs.NewFilesystem(fs.FilesystemTypeBasic, tmpDir)
testFs.MkdirAll("a/b/c", 0755)
if err = fs.DebugSymlinkForTestsOnly(testFs, testFs, filepath.Join("a", "b"), filepath.Join("a", "l")); err != nil {
if runtime.GOOS == "windows" {
t.Skip("Symlinks aren't working")
}
t.Fatal(err)
}
// a/l -> b, so a/l/c should resolve by normal stat
info, err := testFs.Lstat("a/l/c")
if err != nil {
t.Fatal("unexpected error", err)
}
if !info.IsDir() {
t.Fatal("error in setup, a/l/c should be a directory")
}
cases := []struct {
name string
traverses bool
}{
// Exist
{".", false},
{"a", false},
{"a/b", false},
{"a/b/c", false},
// Don't exist
{"x", false},
{"a/x", false},
{"a/b/x", false},
{"a/x/c", false},
// Symlink or behind symlink
{"a/l", true},
{"a/l/c", true},
// Non-existing behind a symlink
{"a/l/x", true},
}
for _, tc := range cases {
if res := osutil.TraversesSymlink(testFs, tc.name); tc.traverses == (res == nil) {
t.Errorf("TraversesSymlink(%q) = %v, should be %v", tc.name, res, tc.traverses)
}
}
}
func TestIssue4875(t *testing.T) {
tmpDir, err := ioutil.TempDir("", ".test-Issue4875-")
if err != nil {
panic("Failed to create temporary testing dir")
}
defer os.RemoveAll(tmpDir)
testFs := fs.NewFilesystem(fs.FilesystemTypeBasic, tmpDir)
testFs.MkdirAll(filepath.Join("a", "b", "c"), 0755)
if err = fs.DebugSymlinkForTestsOnly(testFs, testFs, filepath.Join("a", "b"), filepath.Join("a", "l")); err != nil {
if runtime.GOOS == "windows" {
t.Skip("Symlinks aren't working")
}
t.Fatal(err)
}
// a/l -> b, so a/l/c should resolve by normal stat
info, err := testFs.Lstat("a/l/c")
if err != nil {
t.Fatal("unexpected error", err)
}
if !info.IsDir() {
t.Fatal("error in setup, a/l/c should be a directory")
}
testFs = fs.NewFilesystem(fs.FilesystemTypeBasic, filepath.Join(tmpDir, "a/l"))
if err := osutil.TraversesSymlink(testFs, "."); err != nil {
t.Error(`TraversesSymlink on filesystem with symlink at root returned error for ".":`, err)
}
}
var traversesSymlinkResult error
func BenchmarkTraversesSymlink(b *testing.B) {
os.RemoveAll("testdata")
defer os.RemoveAll("testdata")
fs := fs.NewFilesystem(fs.FilesystemTypeBasic, "testdata")
fs.MkdirAll("a/b/c", 0755)
for i := 0; i < b.N; i++ {
traversesSymlinkResult = osutil.TraversesSymlink(fs, "a/b/c")
}
b.ReportAllocs()
}
| lib/osutil/traversessymlink_test.go | 0 | https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb | [
0.00017829204443842173,
0.0001700056454865262,
0.00016455329023301601,
0.0001703132875263691,
0.0000032501282021257794
] |
{
"id": 6,
"code_window": [
"\t\"path/filepath\"\n",
"\t\"runtime\"\n",
"\t\"strings\"\n",
")\n",
"\n",
"var errNoHome = errors.New(\"no home directory found - set $HOME (or the platform equivalent)\")\n",
"\n",
"func ExpandTilde(path string) (string, error) {\n",
"\tif path == \"~\" {\n",
"\t\treturn getHomeDir()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "lib/fs/util.go",
"type": "replace",
"edit_start_line_idx": 17
} | // Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package fs
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
)
var errNoHome = errors.New("no home directory found - set $HOME (or the platform equivalent)")
func ExpandTilde(path string) (string, error) {
if path == "~" {
return getHomeDir()
}
path = filepath.FromSlash(path)
if !strings.HasPrefix(path, fmt.Sprintf("~%c", PathSeparator)) {
return path, nil
}
home, err := getHomeDir()
if err != nil {
return "", err
}
return filepath.Join(home, path[2:]), nil
}
func getHomeDir() (string, error) {
if runtime.GOOS == "windows" {
// Legacy -- we prioritize this for historical reasons, whereas
// os.UserHomeDir uses %USERPROFILE% always.
home := filepath.Join(os.Getenv("HomeDrive"), os.Getenv("HomePath"))
if home != "" {
return home, nil
}
}
return os.UserHomeDir()
}
var windowsDisallowedCharacters = string([]rune{
'<', '>', ':', '"', '|', '?', '*',
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31,
})
func WindowsInvalidFilename(name string) bool {
// None of the path components should end in space or period, or be a
// reserved name.
// (https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file)
for _, part := range strings.Split(name, `\`) {
if len(part) == 0 {
continue
}
switch part[len(part)-1] {
case ' ', '.':
// Names ending in space or period are not valid.
return true
}
switch part {
case "CON", "PRN", "AUX", "NUL",
"COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9",
"LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9":
// These reserved names are not valid.
return true
}
}
// The path must not contain any disallowed characters
return strings.ContainsAny(name, windowsDisallowedCharacters)
}
// IsParent compares paths purely lexicographically, meaning it returns false
// if path and parent aren't both absolute or relative.
func IsParent(path, parent string) bool {
if parent == path {
// Twice the same root on windows would not be caught at the end.
return false
}
if filepath.IsAbs(path) != filepath.IsAbs(parent) {
return false
}
if parent == "" || parent == "." {
// The empty string is the parent of everything except the empty
// string and ".". (Avoids panic in the last step.)
return path != "" && path != "."
}
if parent == "/" {
// The root is the parent of everything except itself, which would
// not be caught below.
return path != "/"
}
if parent[len(parent)-1] != PathSeparator {
parent += string(PathSeparator)
}
return strings.HasPrefix(path, parent)
}
func CommonPrefix(first, second string) string {
if filepath.IsAbs(first) != filepath.IsAbs(second) {
// Whatever
return ""
}
firstParts := strings.Split(filepath.Clean(first), string(PathSeparator))
secondParts := strings.Split(filepath.Clean(second), string(PathSeparator))
isAbs := filepath.IsAbs(first) && filepath.IsAbs(second)
count := len(firstParts)
if len(secondParts) < len(firstParts) {
count = len(secondParts)
}
common := make([]string, 0, count)
for i := 0; i < count; i++ {
if firstParts[i] != secondParts[i] {
break
}
common = append(common, firstParts[i])
}
if isAbs {
if runtime.GOOS == "windows" && isVolumeNameOnly(common) {
// Because strings.Split strips out path separators, if we're at the volume name, we end up without a separator
// Wedge an empty element to be joined with.
common = append(common, "")
} else if len(common) == 1 {
// If isAbs on non Windows, first element in both first and second is "", hence joining that returns nothing.
return string(PathSeparator)
}
}
// This should only be true on Windows when drive letters are different or when paths are relative.
// In case of UNC paths we should end up with more than a single element hence joining is fine
if len(common) == 0 {
return ""
}
// This has to be strings.Join, because filepath.Join([]string{"", "", "?", "C:", "Audrius"}...) returns garbage
result := strings.Join(common, string(PathSeparator))
return filepath.Clean(result)
}
func isVolumeNameOnly(parts []string) bool {
isNormalVolumeName := len(parts) == 1 && strings.HasSuffix(parts[0], ":")
isUNCVolumeName := len(parts) == 4 && strings.HasSuffix(parts[3], ":")
return isNormalVolumeName || isUNCVolumeName
}
| lib/fs/util.go | 1 | https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb | [
0.9981372356414795,
0.1198204979300499,
0.00016966309340205044,
0.0015962253091856837,
0.3186633288860321
] |
{
"id": 6,
"code_window": [
"\t\"path/filepath\"\n",
"\t\"runtime\"\n",
"\t\"strings\"\n",
")\n",
"\n",
"var errNoHome = errors.New(\"no home directory found - set $HOME (or the platform equivalent)\")\n",
"\n",
"func ExpandTilde(path string) (string, error) {\n",
"\tif path == \"~\" {\n",
"\t\treturn getHomeDir()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "lib/fs/util.go",
"type": "replace",
"edit_start_line_idx": 17
} | // Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
// +build integration
package integration
import (
"log"
"os"
"testing"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/rc"
)
func TestSymlinks(t *testing.T) {
if !symlinksSupported() {
t.Skip("symlinks unsupported")
}
// Use no versioning
id, _ := protocol.DeviceIDFromString(id2)
cfg, _, _ := config.Load("h2/config.xml", id, events.NoopLogger)
fld := cfg.Folders()["default"]
fld.Versioning = config.VersioningConfiguration{}
cfg.SetFolder(fld)
os.Rename("h2/config.xml", "h2/config.xml.orig")
defer os.Rename("h2/config.xml.orig", "h2/config.xml")
cfg.Save()
testSymlinks(t)
}
func TestSymlinksSimpleVersioning(t *testing.T) {
if !symlinksSupported() {
t.Skip("symlinks unsupported")
}
// Use simple versioning
id, _ := protocol.DeviceIDFromString(id2)
cfg, _, _ := config.Load("h2/config.xml", id, events.NoopLogger)
fld := cfg.Folders()["default"]
fld.Versioning = config.VersioningConfiguration{
Type: "simple",
Params: map[string]string{"keep": "5"},
}
cfg.SetFolder(fld)
os.Rename("h2/config.xml", "h2/config.xml.orig")
defer os.Rename("h2/config.xml.orig", "h2/config.xml")
cfg.Save()
testSymlinks(t)
}
func TestSymlinksStaggeredVersioning(t *testing.T) {
if !symlinksSupported() {
t.Skip("symlinks unsupported")
}
// Use staggered versioning
id, _ := protocol.DeviceIDFromString(id2)
cfg, _, _ := config.Load("h2/config.xml", id, events.NoopLogger)
fld := cfg.Folders()["default"]
fld.Versioning = config.VersioningConfiguration{
Type: "staggered",
}
cfg.SetFolder(fld)
os.Rename("h2/config.xml", "h2/config.xml.orig")
defer os.Rename("h2/config.xml.orig", "h2/config.xml")
cfg.Save()
testSymlinks(t)
}
// testSymlinks exercises symlink handling between two syncing instances: it
// creates files, directories and symlinks in s1, verifies they sync to s2,
// then mutates them (removal, retargeting, replacing files/dirs with links
// and vice versa) and verifies the changes sync as well.
//
// All failures use t.Fatal rather than log.Fatal: log.Fatal calls os.Exit,
// which would skip the deferred checkedStop cleanup and bypass the testing
// framework's failure reporting.
func testSymlinks(t *testing.T) {
	log.Println("Cleaning...")
	err := removeAll("s1", "s2", "h1/index*", "h2/index*")
	if err != nil {
		t.Fatal(err)
	}
	log.Println("Generating files...")
	err = generateFiles("s1", 100, 20, "../LICENSE")
	if err != nil {
		t.Fatal(err)
	}
	// A file that we will replace with a symlink later
	fd, err := os.Create("s1/fileToReplace")
	if err != nil {
		t.Fatal(err)
	}
	fd.Close()
	// A directory that we will replace with a symlink later
	err = os.Mkdir("s1/dirToReplace", 0755)
	if err != nil {
		t.Fatal(err)
	}
	// A file and a symlink to that file
	fd, err = os.Create("s1/file")
	if err != nil {
		t.Fatal(err)
	}
	fd.Close()
	err = os.Symlink("file", "s1/fileLink")
	if err != nil {
		t.Fatal(err)
	}
	// A directory and a symlink to that directory
	err = os.Mkdir("s1/dir", 0755)
	if err != nil {
		t.Fatal(err)
	}
	err = os.Symlink("dir", "s1/dirLink")
	if err != nil {
		t.Fatal(err)
	}
	// A link to something in the repo that does not exist
	err = os.Symlink("does/not/exist", "s1/noneLink")
	if err != nil {
		t.Fatal(err)
	}
	// A link we will replace with a file later
	err = os.Symlink("does/not/exist", "s1/repFileLink")
	if err != nil {
		t.Fatal(err)
	}
	// A link we will replace with a directory later
	err = os.Symlink("does/not/exist", "s1/repDirLink")
	if err != nil {
		t.Fatal(err)
	}
	// A link we will remove later
	err = os.Symlink("does/not/exist", "s1/removeLink")
	if err != nil {
		t.Fatal(err)
	}
	// Verify that the files and symlinks sync to the other side
	sender := startInstance(t, 1)
	defer checkedStop(t, sender)
	receiver := startInstance(t, 2)
	defer checkedStop(t, receiver)
	sender.ResumeAll()
	receiver.ResumeAll()
	log.Println("Syncing...")
	rc.AwaitSync("default", sender, receiver)
	log.Println("Comparing directories...")
	err = compareDirectories("s1", "s2")
	if err != nil {
		t.Fatal(err)
	}
	log.Println("Making some changes...")
	// Remove one symlink
	err = os.Remove("s1/fileLink")
	if err != nil {
		t.Fatal(err)
	}
	// Change the target of another
	err = os.Remove("s1/dirLink")
	if err != nil {
		t.Fatal(err)
	}
	err = os.Symlink("file", "s1/dirLink")
	if err != nil {
		t.Fatal(err)
	}
	// Replace one with a file
	err = os.Remove("s1/repFileLink")
	if err != nil {
		t.Fatal(err)
	}
	fd, err = os.Create("s1/repFileLink")
	if err != nil {
		t.Fatal(err)
	}
	fd.Close()
	// Replace one with a directory
	err = os.Remove("s1/repDirLink")
	if err != nil {
		t.Fatal(err)
	}
	err = os.Mkdir("s1/repDirLink", 0755)
	if err != nil {
		t.Fatal(err)
	}
	// Replace a file with a symlink
	err = os.Remove("s1/fileToReplace")
	if err != nil {
		t.Fatal(err)
	}
	err = os.Symlink("somewhere/non/existent", "s1/fileToReplace")
	if err != nil {
		t.Fatal(err)
	}
	// Replace a directory with a symlink
	err = os.RemoveAll("s1/dirToReplace")
	if err != nil {
		t.Fatal(err)
	}
	err = os.Symlink("somewhere/non/existent", "s1/dirToReplace")
	if err != nil {
		t.Fatal(err)
	}
	// Remove a broken symlink
	err = os.Remove("s1/removeLink")
	if err != nil {
		t.Fatal(err)
	}
	// Sync these changes and recheck
	log.Println("Syncing...")
	if err := sender.Rescan("default"); err != nil {
		t.Fatal(err)
	}
	rc.AwaitSync("default", sender, receiver)
	log.Println("Comparing directories...")
	err = compareDirectories("s1", "s2")
	if err != nil {
		t.Fatal(err)
	}
}
| test/symlink_test.go | 0 | https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb | [
0.00024013016081880778,
0.00017207692144438624,
0.00016566034173592925,
0.00016948346456047148,
0.000013624599887407385
] |
{
"id": 6,
"code_window": [
"\t\"path/filepath\"\n",
"\t\"runtime\"\n",
"\t\"strings\"\n",
")\n",
"\n",
"var errNoHome = errors.New(\"no home directory found - set $HOME (or the platform equivalent)\")\n",
"\n",
"func ExpandTilde(path string) (string, error) {\n",
"\tif path == \"~\" {\n",
"\t\treturn getHomeDir()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "lib/fs/util.go",
"type": "replace",
"edit_start_line_idx": 17
} | // Copyright (C) 2015 Audrius Butkevicius and Contributors (see the CONTRIBUTORS file).
package client
import (
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"sort"
"time"
"github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/rand"
"github.com/syncthing/syncthing/lib/relay/protocol"
)
type dynamicClient struct {
commonClient
pooladdr *url.URL
certs []tls.Certificate
timeout time.Duration
client RelayClient
}
func newDynamicClient(uri *url.URL, certs []tls.Certificate, invitations chan protocol.SessionInvitation, timeout time.Duration) RelayClient {
c := &dynamicClient{
pooladdr: uri,
certs: certs,
timeout: timeout,
}
c.commonClient = newCommonClient(invitations, c.serve, fmt.Sprintf("dynamicClient@%p", c))
return c
}
func (c *dynamicClient) serve(ctx context.Context) error {
uri := *c.pooladdr
// Trim off the `dynamic+` prefix
uri.Scheme = uri.Scheme[8:]
l.Debugln(c, "looking up dynamic relays")
req, err := http.NewRequest("GET", uri.String(), nil)
if err != nil {
l.Debugln(c, "failed to lookup dynamic relays", err)
return err
}
req.Cancel = ctx.Done()
data, err := http.DefaultClient.Do(req)
if err != nil {
l.Debugln(c, "failed to lookup dynamic relays", err)
return err
}
var ann dynamicAnnouncement
err = json.NewDecoder(data.Body).Decode(&ann)
data.Body.Close()
if err != nil {
l.Debugln(c, "failed to lookup dynamic relays", err)
return err
}
var addrs []string
for _, relayAnn := range ann.Relays {
ruri, err := url.Parse(relayAnn.URL)
if err != nil {
l.Debugln(c, "failed to parse dynamic relay address", relayAnn.URL, err)
continue
}
l.Debugln(c, "found", ruri)
addrs = append(addrs, ruri.String())
}
for _, addr := range relayAddressesOrder(ctx, addrs) {
select {
case <-ctx.Done():
l.Debugln(c, "stopping")
return nil
default:
ruri, err := url.Parse(addr)
if err != nil {
l.Debugln(c, "skipping relay", addr, err)
continue
}
client := newStaticClient(ruri, c.certs, c.invitations, c.timeout)
c.mut.Lock()
c.client = client
c.mut.Unlock()
c.client.Serve()
c.mut.Lock()
c.client = nil
c.mut.Unlock()
}
}
l.Debugln(c, "could not find a connectable relay")
return errors.New("could not find a connectable relay")
}
// Stop terminates the currently active static relay client, if any, and
// then stops the dynamic client itself.
func (c *dynamicClient) Stop() {
	c.mut.RLock()
	if c.client != nil {
		c.client.Stop()
	}
	// commonClient.Stop is deliberately called outside the lock.
	c.mut.RUnlock()
	c.commonClient.Stop()
}
// Error reports the error state of the active relay client, falling back
// to the dynamic client's own error state when no relay is connected.
func (c *dynamicClient) Error() error {
	c.mut.RLock()
	defer c.mut.RUnlock()
	if c.client != nil {
		return c.client.Error()
	}
	return c.commonClient.Error()
}
// Latency reports the measured latency of the active relay client, or a
// pessimistic one hour when no relay is currently connected.
func (c *dynamicClient) Latency() time.Duration {
	c.mut.RLock()
	defer c.mut.RUnlock()
	if c.client != nil {
		return c.client.Latency()
	}
	return time.Hour
}
// String returns a debug identifier for this client, including the URI of
// the relay currently in use (if any) and the pool address.
func (c *dynamicClient) String() string {
	return fmt.Sprintf("DynamicClient:%p:%s@%s", c, c.URI(), c.pooladdr)
}
// URI returns the URI of the relay currently in use, or nil when no relay
// is connected.
func (c *dynamicClient) URI() *url.URL {
	c.mut.RLock()
	defer c.mut.RUnlock()
	if c.client != nil {
		return c.client.URI()
	}
	return nil
}
// dynamicAnnouncement is the relay list received from the relay pool
// server, in the form:
// {"relays": [{"url": "relay://10.20.30.40:5060"}, ...]}
type dynamicAnnouncement struct {
	Relays []struct {
		URL string
	}
}
// relayAddressesOrder checks the latency to each relay, rounds latency down to
// the closest 50ms, and puts them in buckets of 50ms latency ranges. Then
// shuffles each bucket, and returns all addresses starting with the ones from
// the lowest latency bucket, ending with the highest latency bucket.
func relayAddressesOrder(ctx context.Context, input []string) []string {
	buckets := make(map[int][]string)
	for _, relay := range input {
		latency, err := osutil.GetLatencyForURL(ctx, relay)
		if err != nil {
			// Unreachable relays get a huge latency so they sort last.
			latency = time.Hour
		}
		id := int(latency/time.Millisecond) / 50
		buckets[id] = append(buckets[id], relay)
		// Bail out between measurements if we are being stopped.
		select {
		case <-ctx.Done():
			return nil
		default:
		}
	}
	// Shuffle within each bucket, then emit buckets in ascending latency order.
	var ids []int
	for id, bucket := range buckets {
		rand.Shuffle(bucket)
		ids = append(ids, id)
	}
	sort.Ints(ids)
	addresses := make([]string, 0, len(input))
	for _, id := range ids {
		addresses = append(addresses, buckets[id]...)
	}
	return addresses
}
| lib/relay/client/dynamic.go | 0 | https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb | [
0.001194777898490429,
0.00028595729963853955,
0.00016440227045677602,
0.0001716522965580225,
0.0003064599877689034
] |
{
"id": 6,
"code_window": [
"\t\"path/filepath\"\n",
"\t\"runtime\"\n",
"\t\"strings\"\n",
")\n",
"\n",
"var errNoHome = errors.New(\"no home directory found - set $HOME (or the platform equivalent)\")\n",
"\n",
"func ExpandTilde(path string) (string, error) {\n",
"\tif path == \"~\" {\n",
"\t\treturn getHomeDir()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "lib/fs/util.go",
"type": "replace",
"edit_start_line_idx": 17
} | /syncthing
/stdiscosrv
syncthing.exe
stdiscosrv.exe
*.tar.gz
*.zip
*.asc
*.deb
.jshintrc
coverage.out
files/pidx
bin
perfstats*.csv
coverage.xml
syncthing.sig
RELEASE
deb
*.bz2
/repos
/proto/scripts/protoc-gen-gosyncthing
| .gitignore | 0 | https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb | [
0.0001729380601318553,
0.00016893574502319098,
0.0001627340679988265,
0.00017113513604272157,
0.000004446595994522795
] |
{
"id": 8,
"code_window": [
"\t\tswitch part[len(part)-1] {\n",
"\t\tcase ' ', '.':\n",
"\t\t\t// Names ending in space or period are not valid.\n",
"\t\t\treturn true\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t\t\treturn errInvalidFilenameWindowsSpacePeriod\n"
],
"file_path": "lib/fs/util.go",
"type": "replace",
"edit_start_line_idx": 68
} | // Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package fs
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
)
var errNoHome = errors.New("no home directory found - set $HOME (or the platform equivalent)")
func ExpandTilde(path string) (string, error) {
if path == "~" {
return getHomeDir()
}
path = filepath.FromSlash(path)
if !strings.HasPrefix(path, fmt.Sprintf("~%c", PathSeparator)) {
return path, nil
}
home, err := getHomeDir()
if err != nil {
return "", err
}
return filepath.Join(home, path[2:]), nil
}
func getHomeDir() (string, error) {
if runtime.GOOS == "windows" {
// Legacy -- we prioritize this for historical reasons, whereas
// os.UserHomeDir uses %USERPROFILE% always.
home := filepath.Join(os.Getenv("HomeDrive"), os.Getenv("HomePath"))
if home != "" {
return home, nil
}
}
return os.UserHomeDir()
}
var windowsDisallowedCharacters = string([]rune{
'<', '>', ':', '"', '|', '?', '*',
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31,
})
func WindowsInvalidFilename(name string) bool {
// None of the path components should end in space or period, or be a
// reserved name.
// (https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file)
for _, part := range strings.Split(name, `\`) {
if len(part) == 0 {
continue
}
switch part[len(part)-1] {
case ' ', '.':
// Names ending in space or period are not valid.
return true
}
switch part {
case "CON", "PRN", "AUX", "NUL",
"COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9",
"LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9":
// These reserved names are not valid.
return true
}
}
// The path must not contain any disallowed characters
return strings.ContainsAny(name, windowsDisallowedCharacters)
}
// IsParent compares paths purely lexicographically, meaning it returns false
// if path and parent aren't both absolute or relative.
func IsParent(path, parent string) bool {
if parent == path {
// Twice the same root on windows would not be caught at the end.
return false
}
if filepath.IsAbs(path) != filepath.IsAbs(parent) {
return false
}
if parent == "" || parent == "." {
// The empty string is the parent of everything except the empty
// string and ".". (Avoids panic in the last step.)
return path != "" && path != "."
}
if parent == "/" {
// The root is the parent of everything except itself, which would
// not be caught below.
return path != "/"
}
if parent[len(parent)-1] != PathSeparator {
parent += string(PathSeparator)
}
return strings.HasPrefix(path, parent)
}
func CommonPrefix(first, second string) string {
if filepath.IsAbs(first) != filepath.IsAbs(second) {
// Whatever
return ""
}
firstParts := strings.Split(filepath.Clean(first), string(PathSeparator))
secondParts := strings.Split(filepath.Clean(second), string(PathSeparator))
isAbs := filepath.IsAbs(first) && filepath.IsAbs(second)
count := len(firstParts)
if len(secondParts) < len(firstParts) {
count = len(secondParts)
}
common := make([]string, 0, count)
for i := 0; i < count; i++ {
if firstParts[i] != secondParts[i] {
break
}
common = append(common, firstParts[i])
}
if isAbs {
if runtime.GOOS == "windows" && isVolumeNameOnly(common) {
// Because strings.Split strips out path separators, if we're at the volume name, we end up without a separator
// Wedge an empty element to be joined with.
common = append(common, "")
} else if len(common) == 1 {
// If isAbs on non Windows, first element in both first and second is "", hence joining that returns nothing.
return string(PathSeparator)
}
}
// This should only be true on Windows when drive letters are different or when paths are relative.
// In case of UNC paths we should end up with more than a single element hence joining is fine
if len(common) == 0 {
return ""
}
// This has to be strings.Join, because filepath.Join([]string{"", "", "?", "C:", "Audrius"}...) returns garbage
result := strings.Join(common, string(PathSeparator))
return filepath.Clean(result)
}
func isVolumeNameOnly(parts []string) bool {
isNormalVolumeName := len(parts) == 1 && strings.HasSuffix(parts[0], ":")
isUNCVolumeName := len(parts) == 4 && strings.HasSuffix(parts[3], ":")
return isNormalVolumeName || isUNCVolumeName
}
| lib/fs/util.go | 1 | https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb | [
0.9927910566329956,
0.060213033109903336,
0.00016646775475237519,
0.0001822319027269259,
0.2332199513912201
] |
{
"id": 8,
"code_window": [
"\t\tswitch part[len(part)-1] {\n",
"\t\tcase ' ', '.':\n",
"\t\t\t// Names ending in space or period are not valid.\n",
"\t\t\treturn true\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t\t\treturn errInvalidFilenameWindowsSpacePeriod\n"
],
"file_path": "lib/fs/util.go",
"type": "replace",
"edit_start_line_idx": 68
} | .daterangepicker {
position: absolute;
color: inherit;
background-color: #fff;
border-radius: 4px;
border: 1px solid #ddd;
width: 278px;
max-width: none;
padding: 0;
margin-top: 7px;
top: 100px;
left: 20px;
z-index: 3001;
display: none;
font-family: arial;
font-size: 15px;
line-height: 1em;
}
.daterangepicker:before, .daterangepicker:after {
position: absolute;
display: inline-block;
border-bottom-color: rgba(0, 0, 0, 0.2);
content: '';
}
.daterangepicker:before {
top: -7px;
border-right: 7px solid transparent;
border-left: 7px solid transparent;
border-bottom: 7px solid #ccc;
}
.daterangepicker:after {
top: -6px;
border-right: 6px solid transparent;
border-bottom: 6px solid #fff;
border-left: 6px solid transparent;
}
.daterangepicker.opensleft:before {
right: 9px;
}
.daterangepicker.opensleft:after {
right: 10px;
}
.daterangepicker.openscenter:before {
left: 0;
right: 0;
width: 0;
margin-left: auto;
margin-right: auto;
}
.daterangepicker.openscenter:after {
left: 0;
right: 0;
width: 0;
margin-left: auto;
margin-right: auto;
}
.daterangepicker.opensright:before {
left: 9px;
}
.daterangepicker.opensright:after {
left: 10px;
}
.daterangepicker.drop-up {
margin-top: -7px;
}
.daterangepicker.drop-up:before {
top: initial;
bottom: -7px;
border-bottom: initial;
border-top: 7px solid #ccc;
}
.daterangepicker.drop-up:after {
top: initial;
bottom: -6px;
border-bottom: initial;
border-top: 6px solid #fff;
}
.daterangepicker.single .daterangepicker .ranges, .daterangepicker.single .calendar {
float: none;
}
.daterangepicker.single .drp-selected {
display: none;
}
.daterangepicker.show-calendar .calendar {
display: block;
}
.daterangepicker.show-calendar .drp-buttons {
display: block;
}
.daterangepicker.auto-apply .drp-buttons {
display: none;
}
.daterangepicker .calendar {
display: none;
max-width: 270px;
}
.daterangepicker .calendar.left {
padding: 12px 0 12px 8px;
}
.daterangepicker .calendar.right {
padding: 12px 8px;
}
.daterangepicker .calendar.single .calendar-table {
border: none;
}
.daterangepicker .calendar-table .next span, .daterangepicker .calendar-table .prev span {
color: #fff;
border: solid black;
border-width: 0 2px 2px 0;
border-radius: 0;
display: inline-block;
padding: 3px;
}
.daterangepicker .calendar-table .next span {
transform: rotate(-45deg);
-webkit-transform: rotate(-45deg);
}
.daterangepicker .calendar-table .prev span {
transform: rotate(135deg);
-webkit-transform: rotate(135deg);
}
.daterangepicker .calendar-table th, .daterangepicker .calendar-table td {
white-space: nowrap;
text-align: center;
vertical-align: middle;
min-width: 32px;
width: 32px;
height: 24px;
line-height: 24px;
font-size: 12px;
border-radius: 4px;
border: 1px solid transparent;
white-space: nowrap;
cursor: pointer;
}
.daterangepicker .calendar-table {
border: 1px solid #fff;
border-radius: 4px;
background-color: #fff;
}
.daterangepicker .calendar-table table {
width: 100%;
margin: 0;
border-spacing: 0;
border-collapse: collapse;
}
.daterangepicker td.available:hover, .daterangepicker th.available:hover {
background-color: #eee;
border-color: transparent;
color: inherit;
}
.daterangepicker td.week, .daterangepicker th.week {
font-size: 80%;
color: #ccc;
}
.daterangepicker td.off, .daterangepicker td.off.in-range, .daterangepicker td.off.start-date, .daterangepicker td.off.end-date {
background-color: #fff;
border-color: transparent;
color: #999;
}
.daterangepicker td.in-range {
background-color: #ebf4f8;
border-color: transparent;
color: #000;
border-radius: 0;
}
.daterangepicker td.start-date {
border-radius: 4px 0 0 4px;
}
.daterangepicker td.end-date {
border-radius: 0 4px 4px 0;
}
.daterangepicker td.start-date.end-date {
border-radius: 4px;
}
.daterangepicker td.active, .daterangepicker td.active:hover {
background-color: #357ebd;
border-color: transparent;
color: #fff;
}
.daterangepicker th.month {
width: auto;
}
.daterangepicker td.disabled, .daterangepicker option.disabled {
color: #999;
cursor: not-allowed;
text-decoration: line-through;
}
.daterangepicker select.monthselect, .daterangepicker select.yearselect {
font-size: 12px;
padding: 1px;
height: auto;
margin: 0;
cursor: default;
}
.daterangepicker select.monthselect {
margin-right: 2%;
width: 56%;
}
.daterangepicker select.yearselect {
width: 40%;
}
.daterangepicker select.hourselect, .daterangepicker select.minuteselect, .daterangepicker select.secondselect, .daterangepicker select.ampmselect {
width: 50px;
margin: 0 auto;
background: #eee;
border: 1px solid #eee;
padding: 2px;
outline: 0;
font-size: 12px;
}
.daterangepicker .calendar-time {
text-align: center;
margin: 4px auto 0 auto;
line-height: 30px;
position: relative;
}
.daterangepicker .calendar-time select.disabled {
color: #ccc;
cursor: not-allowed;
}
.daterangepicker .drp-buttons {
clear: both;
text-align: right;
padding: 8px 12px;
border-top: 1px solid #ddd;
display: none;
line-height: 12px;
vertical-align: middle;
}
.daterangepicker .drp-selected {
display: inline-block;
font-size: 12px;
padding-right: 8px;
}
.daterangepicker .drp-buttons .btn {
margin-left: 8px;
font-size: 12px;
font-weight: bold;
padding: 4px 8px;
}
.daterangepicker.show-ranges .calendar.left {
border-left: 1px solid #ddd;
}
.daterangepicker .ranges {
float: none;
text-align: left;
margin: 0;
}
.daterangepicker.show-calendar .ranges {
margin-top: 12px;
}
.daterangepicker .ranges ul {
list-style: none;
margin: 0 auto;
padding: 0;
width: 100%;
}
.daterangepicker .ranges li {
font-size: 12px;
padding: 8px 12px;
cursor: pointer;
}
.daterangepicker .ranges li:hover {
background-color: #eee;
}
.daterangepicker .ranges li.active {
background-color: #08c;
color: #fff;
}
/* Larger Screen Styling */
@media (min-width: 564px) {
.daterangepicker {
width: auto; }
.daterangepicker .ranges ul {
width: 160px; }
.daterangepicker.single .ranges ul {
width: 100%; }
.daterangepicker.single .calendar.left {
clear: none; }
.daterangepicker.single.ltr .ranges, .daterangepicker.single.ltr .calendar {
float: left; }
.daterangepicker.single.rtl .ranges, .daterangepicker.single.rtl .calendar {
float: right; }
.daterangepicker.ltr {
direction: ltr;
text-align: left; }
.daterangepicker.ltr .calendar.left {
clear: left;
margin-right: 0; }
.daterangepicker.ltr .calendar.left .calendar-table {
border-right: none;
border-top-right-radius: 0;
border-bottom-right-radius: 0; }
.daterangepicker.ltr .calendar.right {
margin-left: 0; }
.daterangepicker.ltr .calendar.right .calendar-table {
border-left: none;
border-top-left-radius: 0;
border-bottom-left-radius: 0; }
.daterangepicker.ltr .left .daterangepicker_input {
padding-right: 12px; }
.daterangepicker.ltr .calendar.left .calendar-table {
padding-right: 12px; }
.daterangepicker.ltr .ranges, .daterangepicker.ltr .calendar {
float: left; }
.daterangepicker.rtl {
direction: rtl;
text-align: right; }
.daterangepicker.rtl .calendar.left {
clear: right;
margin-left: 0; }
.daterangepicker.rtl .calendar.left .calendar-table {
border-left: none;
border-top-left-radius: 0;
border-bottom-left-radius: 0; }
.daterangepicker.rtl .calendar.right {
margin-right: 0; }
.daterangepicker.rtl .calendar.right .calendar-table {
border-right: none;
border-top-right-radius: 0;
border-bottom-right-radius: 0; }
.daterangepicker.rtl .left .daterangepicker_input {
padding-left: 12px; }
.daterangepicker.rtl .calendar.left .calendar-table {
padding-left: 12px; }
.daterangepicker.rtl .ranges, .daterangepicker.rtl .calendar {
text-align: right;
float: right; } }
@media (min-width: 730px) {
.daterangepicker .ranges {
width: auto; }
.daterangepicker.ltr .ranges {
float: left; }
.daterangepicker.rtl .ranges {
float: right; }
.daterangepicker .calendar.left {
clear: none !important; } }
| gui/default/vendor/daterangepicker/daterangepicker.css | 0 | https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb | [
0.0001742724416544661,
0.00017038380610756576,
0.0001638492103666067,
0.00017078725795727223,
0.0000025232570806110743
] |
{
"id": 8,
"code_window": [
"\t\tswitch part[len(part)-1] {\n",
"\t\tcase ' ', '.':\n",
"\t\t\t// Names ending in space or period are not valid.\n",
"\t\t\treturn true\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t\t\treturn errInvalidFilenameWindowsSpacePeriod\n"
],
"file_path": "lib/fs/util.go",
"type": "replace",
"edit_start_line_idx": 68
} | // Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
// +build integration
package integration
import (
"log"
"os"
"testing"
"github.com/syncthing/syncthing/lib/rc"
)
func TestScanSubdir(t *testing.T) {
log.Println("Cleaning...")
err := removeAll("s1", "s2", "h1/index*", "h2/index*")
if err != nil {
t.Fatal(err)
}
log.Println("Generating files...")
err = generateFiles("s1", 10, 10, "../LICENSE")
if err != nil {
t.Fatal(err)
}
// 1. Scan a single file in a known directory "file1.txt"
// 2. Scan a single file in an unknown directory "filetest/file1.txt"
// 3. Scan a single file in a deep unknown directory "filetest/1/2/3/4/5/6/7/file1.txt"
// 4. Scan a directory in a deep unknown directory "dirtest/1/2/3/4/5/6/7"
// 5. Scan a deleted file in a known directory "filetest/file1.txt"
// 6. Scan a deleted file in a deep unknown directory "rmdirtest/1/2/3/4/5/6/7"
// 7. 'Accidentally' forget to scan 1 of the 2 files in a known directory
// Verify that the files and directories sync to the other side
sender := startInstance(t, 1)
defer checkedStop(t, sender)
receiver := startInstance(t, 2)
defer checkedStop(t, receiver)
sender.ResumeAll()
receiver.ResumeAll()
log.Println("Syncing...")
rc.AwaitSync("default", sender, receiver)
// Delay scans for the moment
if err := sender.RescanDelay("default", 86400); err != nil {
t.Fatal(err)
}
log.Println("Comparing directories...")
err = compareDirectories("s1", "s2")
if err != nil {
t.Fatal(err)
}
// 1
log.Println("Creating new file...")
if fd, err := os.Create("s1/file1.txt"); err != nil {
t.Fatal(err)
} else {
fd.Close()
}
if err := sender.RescanSub("default", "file1.txt", 86400); err != nil {
t.Fatal(err)
}
log.Println("Syncing...")
rc.AwaitSync("default", sender, receiver)
log.Println("Comparing directories...")
err = compareDirectories("s1", "s2")
if err != nil {
t.Fatal(err)
}
// 2
log.Println("Creating a file in an unknown directory")
os.MkdirAll("s1/filetest", 0755)
if fd, err := os.Create("s1/filetest/file1.txt"); err != nil {
t.Fatal(err)
} else {
fd.Close()
}
if err := sender.RescanSub("default", "filetest/file1.txt", 86400); err != nil {
t.Fatal(err)
}
log.Println("Syncing...")
rc.AwaitSync("default", sender, receiver)
log.Println("Comparing directories...")
err = compareDirectories("s1", "s2")
if err != nil {
t.Fatal(err)
}
// 3
log.Println("Creating a file in an unknown deep directory")
os.MkdirAll("s1/filetest/1/2/3/4/5/6/7", 0755)
if fd, err := os.Create("s1/filetest/1/2/3/4/5/6/7/file1.txt"); err != nil {
t.Fatal(err)
} else {
fd.Close()
}
if err := sender.RescanSub("default", "filetest/1/2/3/4/5/6/7/file1.txt", 86400); err != nil {
t.Fatal(err)
}
log.Println("Syncing...")
rc.AwaitSync("default", sender, receiver)
log.Println("Comparing directories...")
err = compareDirectories("s1", "s2")
if err != nil {
t.Fatal(err)
}
// 4
log.Println("Creating a directory in an unknown directory")
err = os.MkdirAll("s1/dirtest/1/2/3/4/5/6/7", 0755)
if err != nil {
t.Fatal(err)
}
if err := sender.RescanSub("default", "dirtest/1/2/3/4/5/6/7", 86400); err != nil {
t.Fatal(err)
}
log.Println("Syncing...")
rc.AwaitSync("default", sender, receiver)
log.Println("Comparing directories...")
err = compareDirectories("s1", "s2")
if err != nil {
t.Fatal(err)
}
// 5
log.Println("Scan a deleted file in a known directory")
if err := os.Remove("s1/filetest/file1.txt"); err != nil {
t.Fatal(err)
}
if err := sender.RescanSub("default", "filetest/file1.txt", 86400); err != nil {
t.Fatal(err)
}
log.Println("Syncing...")
rc.AwaitSync("default", sender, receiver)
log.Println("Comparing directories...")
err = compareDirectories("s1", "s2")
if err != nil {
t.Fatal(err)
}
// 6
log.Println("Scan a deleted file in an unknown directory")
if err := sender.RescanSub("default", "rmdirtest/1/2/3/4/5/6/7", 86400); err != nil {
t.Fatal(err)
}
log.Println("Syncing...")
rc.AwaitSync("default", sender, receiver)
log.Println("Comparing directories...")
err = compareDirectories("s1", "s2")
if err != nil {
t.Fatal(err)
}
// 7
log.Println("'Accidentally' forget to scan 1 of the 2 files")
if fd, err := os.Create("s1/filetest/1/2/3/4/5/6/7/file2.txt"); err != nil {
t.Fatal(err)
} else {
fd.Close()
}
if fd, err := os.Create("s1/filetest/1/2/3/4/5/6/7/file3.txt"); err != nil {
t.Fatal(err)
} else {
fd.Close()
}
if err := sender.RescanSub("default", "filetest/1/2/3/4/5/6/7/file2.txt", 86400); err != nil {
t.Fatal(err)
}
log.Println("Syncing...")
rc.AwaitSync("default", sender, receiver)
log.Println("Comparing directories...")
err = compareDirectories("s1", "s2")
if err == nil {
t.Fatal("filetest/1/2/3/4/5/6/7/file3.txt should not be synced")
}
os.Remove("s1/filetest/1/2/3/4/5/6/7/file3.txt")
err = compareDirectories("s1", "s2")
if err != nil {
t.Fatal(err)
}
}
| test/scan_test.go | 0 | https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb | [
0.00017424502584617585,
0.0001705629110801965,
0.000164490076713264,
0.0001706846960587427,
0.0000022760179945180425
] |
{
"id": 8,
"code_window": [
"\t\tswitch part[len(part)-1] {\n",
"\t\tcase ' ', '.':\n",
"\t\t\t// Names ending in space or period are not valid.\n",
"\t\t\treturn true\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t\t\treturn errInvalidFilenameWindowsSpacePeriod\n"
],
"file_path": "lib/fs/util.go",
"type": "replace",
"edit_start_line_idx": 68
} | // Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
// +build !solaris,!windows
package main
import (
"fmt"
"os"
"runtime"
"syscall"
"time"
"github.com/syncthing/syncthing/lib/protocol"
)
func init() {
if innerProcess && os.Getenv("STPERFSTATS") != "" {
go savePerfStats(fmt.Sprintf("perfstats-%d.csv", syscall.Getpid()))
}
}
func savePerfStats(file string) {
fd, err := os.Create(file)
if err != nil {
panic(err)
}
var prevUsage int64
var prevTime int64
var rusage syscall.Rusage
var memstats runtime.MemStats
var prevIn, prevOut int64
t0 := time.Now()
for t := range time.NewTicker(250 * time.Millisecond).C {
if err := syscall.Getrusage(syscall.RUSAGE_SELF, &rusage); err != nil {
continue
}
curTime := time.Now().UnixNano()
timeDiff := curTime - prevTime
curUsage := rusage.Utime.Nano() + rusage.Stime.Nano()
usageDiff := curUsage - prevUsage
cpuUsagePercent := 100 * float64(usageDiff) / float64(timeDiff)
prevTime = curTime
prevUsage = curUsage
in, out := protocol.TotalInOut()
var inRate, outRate float64
if timeDiff > 0 {
inRate = float64(in-prevIn) / (float64(timeDiff) / 1e9) // bytes per second
outRate = float64(out-prevOut) / (float64(timeDiff) / 1e9) // bytes per second
}
prevIn, prevOut = in, out
runtime.ReadMemStats(&memstats)
startms := int(t.Sub(t0).Seconds() * 1000)
fmt.Fprintf(fd, "%d\t%f\t%d\t%d\t%.0f\t%.0f\n", startms, cpuUsagePercent, memstats.Alloc, memstats.Sys-memstats.HeapReleased, inRate, outRate)
}
}
| cmd/syncthing/perfstats_unix.go | 0 | https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb | [
0.00017353652219753712,
0.00016927465912885964,
0.0001669002085691318,
0.00016827126091811806,
0.0000022311776319838827
] |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.