hunk (dict) | file (string, 0 to 11.8M chars) | file_path (string, 2 to 234 chars) | label (int64, 0 or 1) | commit_url (string, 74 to 103 chars) | dependency_score (sequence of 5 floats) |
---|---|---|---|---|---|
{
"id": 1,
"code_window": [
"\t\tclient := tfplugin.Client(meta)\n",
"\t\treturn newProvisionerClient(client)\n",
"\t}\n",
"}\n",
"\n",
"func internalProvisionerFactory(client *plugin.Client) terraform.ProvisionerFactory {\n",
"\treturn func() (provisioners.Interface, error) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"func internalProvisionerFactory(meta discovery.PluginMeta) terraform.ProvisionerFactory {\n"
],
"file_path": "command/plugins.go",
"type": "replace",
"edit_start_line_idx": 399
} | package plugin
import (
"context"
"errors"
"io"
"log"
"sync"
plugin "github.com/hashicorp/go-plugin"
"github.com/hashicorp/terraform/configs/configschema"
"github.com/hashicorp/terraform/plugin/convert"
"github.com/hashicorp/terraform/plugin/proto"
"github.com/hashicorp/terraform/provisioners"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/msgpack"
"google.golang.org/grpc"
)
// GRPCProvisionerPlugin is the plugin.GRPCPlugin implementation.
type GRPCProvisionerPlugin struct {
plugin.Plugin
GRPCProvisioner func() proto.ProvisionerServer
}
func (p *GRPCProvisionerPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
return &GRPCProvisioner{
client: proto.NewProvisionerClient(c),
ctx: ctx,
}, nil
}
func (p *GRPCProvisionerPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error {
proto.RegisterProvisionerServer(s, p.GRPCProvisioner())
return nil
}
// provisioners.Interface grpc implementation
type GRPCProvisioner struct {
// PluginClient provides a reference to the plugin.Client which controls the plugin process.
// This allows the GRPCProvider a way to shutdown the plugin process.
PluginClient *plugin.Client
client proto.ProvisionerClient
ctx context.Context
// Cache the schema since we need it for serialization in each method call.
mu sync.Mutex
schema *configschema.Block
}
func (p *GRPCProvisioner) GetSchema() (resp provisioners.GetSchemaResponse) {
p.mu.Lock()
defer p.mu.Unlock()
if p.schema != nil {
return provisioners.GetSchemaResponse{
Provisioner: p.schema,
}
}
protoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProvisionerSchema_Request))
if err != nil {
resp.Diagnostics = resp.Diagnostics.Append(err)
return resp
}
if protoResp.Provisioner == nil {
resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provisioner schema"))
return resp
}
resp.Provisioner = convert.ProtoToConfigSchema(protoResp.Provisioner.Block)
p.schema = resp.Provisioner
return resp
}
func (p *GRPCProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) (resp provisioners.ValidateProvisionerConfigResponse) {
schema := p.GetSchema()
if schema.Diagnostics.HasErrors() {
resp.Diagnostics = resp.Diagnostics.Append(schema.Diagnostics)
return resp
}
mp, err := msgpack.Marshal(r.Config, schema.Provisioner.ImpliedType())
if err != nil {
resp.Diagnostics = resp.Diagnostics.Append(err)
return resp
}
protoReq := &proto.ValidateProvisionerConfig_Request{
Config: &proto.DynamicValue{Msgpack: mp},
}
protoResp, err := p.client.ValidateProvisionerConfig(p.ctx, protoReq)
if err != nil {
resp.Diagnostics = resp.Diagnostics.Append(err)
return resp
}
resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
return resp
}
func (p *GRPCProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) {
schema := p.GetSchema()
if schema.Diagnostics.HasErrors() {
resp.Diagnostics = resp.Diagnostics.Append(schema.Diagnostics)
return resp
}
mp, err := msgpack.Marshal(r.Config, schema.Provisioner.ImpliedType())
if err != nil {
resp.Diagnostics = resp.Diagnostics.Append(err)
return resp
}
// connection is always assumed to be a simple string map
connMP, err := msgpack.Marshal(r.Connection, cty.Map(cty.String))
if err != nil {
resp.Diagnostics = resp.Diagnostics.Append(err)
return resp
}
protoReq := &proto.ProvisionResource_Request{
Config: &proto.DynamicValue{Msgpack: mp},
Connection: &proto.DynamicValue{Msgpack: connMP},
}
outputClient, err := p.client.ProvisionResource(p.ctx, protoReq)
if err != nil {
resp.Diagnostics = resp.Diagnostics.Append(err)
return resp
}
for {
rcv, err := outputClient.Recv()
if rcv != nil {
r.UIOutput.Output(rcv.Output)
}
if err != nil {
if err != io.EOF {
resp.Diagnostics = resp.Diagnostics.Append(err)
}
break
}
if len(rcv.Diagnostics) > 0 {
resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(rcv.Diagnostics))
break
}
}
return resp
}
func (p *GRPCProvisioner) Stop() error {
protoResp, err := p.client.Stop(p.ctx, &proto.Stop_Request{})
if err != nil {
return err
}
if protoResp.Error != "" {
return errors.New(protoResp.Error)
}
return nil
}
func (p *GRPCProvisioner) Close() error {
// check this since it's not automatically inserted during plugin creation
if p.PluginClient == nil {
log.Println("[DEBUG] provider has no plugin.Client")
return nil
}
p.PluginClient.Kill()
return nil
}
| plugin/grpc_provisioner.go | 1 | https://github.com/hashicorp/terraform/commit/52c0032aedf1c3355ffbec1e566a2b0539ae019e | [
0.4163050353527069,
0.06168704479932785,
0.00016650838369969279,
0.009801466017961502,
0.12554030120372772
] |
{
"id": 1,
"code_window": [
"\t\tclient := tfplugin.Client(meta)\n",
"\t\treturn newProvisionerClient(client)\n",
"\t}\n",
"}\n",
"\n",
"func internalProvisionerFactory(client *plugin.Client) terraform.ProvisionerFactory {\n",
"\treturn func() (provisioners.Interface, error) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"func internalProvisionerFactory(meta discovery.PluginMeta) terraform.ProvisionerFactory {\n"
],
"file_path": "command/plugins.go",
"type": "replace",
"edit_start_line_idx": 399
} | module "child" {
source = "./child"
}
resource "aws_instance" "foo" {
num = "2"
}
resource "aws_instance" "bar" {
foo = "bar"
}
| terraform/test-fixtures/apply-module/main.tf | 0 | https://github.com/hashicorp/terraform/commit/52c0032aedf1c3355ffbec1e566a2b0539ae019e | [
0.00017536642553750426,
0.00017330938135273755,
0.00017125233716797084,
0.00017330938135273755,
0.00000205704418476671
] |
{
"id": 1,
"code_window": [
"\t\tclient := tfplugin.Client(meta)\n",
"\t\treturn newProvisionerClient(client)\n",
"\t}\n",
"}\n",
"\n",
"func internalProvisionerFactory(client *plugin.Client) terraform.ProvisionerFactory {\n",
"\treturn func() (provisioners.Interface, error) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"func internalProvisionerFactory(meta discovery.PluginMeta) terraform.ProvisionerFactory {\n"
],
"file_path": "command/plugins.go",
"type": "replace",
"edit_start_line_idx": 399
} | // Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux,!gccgo
package unix
// SyscallNoError may be used instead of Syscall for syscalls that don't fail.
func SyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr)
// RawSyscallNoError may be used instead of RawSyscall for syscalls that don't
// fail.
func RawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr)
| vendor/golang.org/x/sys/unix/syscall_linux_gc.go | 0 | https://github.com/hashicorp/terraform/commit/52c0032aedf1c3355ffbec1e566a2b0539ae019e | [
0.00028208730509504676,
0.00022515305317938328,
0.00016821878671180457,
0.00022515305317938328,
0.000056934259191621095
] |
{
"id": 1,
"code_window": [
"\t\tclient := tfplugin.Client(meta)\n",
"\t\treturn newProvisionerClient(client)\n",
"\t}\n",
"}\n",
"\n",
"func internalProvisionerFactory(client *plugin.Client) terraform.ProvisionerFactory {\n",
"\treturn func() (provisioners.Interface, error) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"func internalProvisionerFactory(meta discovery.PluginMeta) terraform.ProvisionerFactory {\n"
],
"file_path": "command/plugins.go",
"type": "replace",
"edit_start_line_idx": 399
} | Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.
| vendor/github.com/hashicorp/vault/LICENSE | 0 | https://github.com/hashicorp/terraform/commit/52c0032aedf1c3355ffbec1e566a2b0539ae019e | [
0.00017783566727302969,
0.00017385152750648558,
0.00016870952094905078,
0.00017411846783943474,
0.0000023552718175778864
] |
{
"id": 2,
"code_window": [
"\treturn func() (provisioners.Interface, error) {\n",
"\t\treturn newProvisionerClient(client)\n",
"\t}\n",
"}\n",
"\n",
"func newProvisionerClient(client *plugin.Client) (provisioners.Interface, error) {\n"
],
"labels": [
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tclient, err := internalPluginClient(\"provisioner\", meta.Name)\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, fmt.Errorf(\"[WARN] failed to build command line for internal plugin %q: %s\", meta.Name, err)\n",
"\t\t}\n"
],
"file_path": "command/plugins.go",
"type": "add",
"edit_start_line_idx": 401
} | package command
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
plugin "github.com/hashicorp/go-plugin"
"github.com/kardianos/osext"
terraformProvider "github.com/hashicorp/terraform/builtin/providers/terraform"
tfplugin "github.com/hashicorp/terraform/plugin"
"github.com/hashicorp/terraform/plugin/discovery"
"github.com/hashicorp/terraform/providers"
"github.com/hashicorp/terraform/provisioners"
"github.com/hashicorp/terraform/terraform"
)
// multiVersionProviderResolver is an implementation of
// terraform.ResourceProviderResolver that matches the given version constraints
// against a set of versioned provider plugins to find the newest version of
// each that satisfies the given constraints.
type multiVersionProviderResolver struct {
Available discovery.PluginMetaSet
// Internal is a map that overrides the usual plugin selection process
// for internal plugins. These plugins do not support version constraints
// (will produce an error if one is set). This should be used only in
// exceptional circumstances since it forces the provider's release
// schedule to be tied to that of Terraform Core.
Internal map[string]providers.Factory
}
func choosePlugins(avail discovery.PluginMetaSet, internal map[string]providers.Factory, reqd discovery.PluginRequirements) map[string]discovery.PluginMeta {
candidates := avail.ConstrainVersions(reqd)
ret := map[string]discovery.PluginMeta{}
for name, metas := range candidates {
// If the provider is in our internal map then we ignore any
// discovered plugins for it since these are dealt with separately.
if _, isInternal := internal[name]; isInternal {
continue
}
if len(metas) == 0 {
continue
}
ret[name] = metas.Newest()
}
return ret
}
func (r *multiVersionProviderResolver) ResolveProviders(
reqd discovery.PluginRequirements,
) (map[string]providers.Factory, []error) {
factories := make(map[string]providers.Factory, len(reqd))
var errs []error
chosen := choosePlugins(r.Available, r.Internal, reqd)
for name, req := range reqd {
if factory, isInternal := r.Internal[name]; isInternal {
if !req.Versions.Unconstrained() {
errs = append(errs, fmt.Errorf("provider.%s: this provider is built in to Terraform and so it does not support version constraints", name))
continue
}
factories[name] = factory
continue
}
if newest, available := chosen[name]; available {
digest, err := newest.SHA256()
if err != nil {
errs = append(errs, fmt.Errorf("provider.%s: failed to load plugin to verify its signature: %s", name, err))
continue
}
if !reqd[name].AcceptsSHA256(digest) {
errs = append(errs, fmt.Errorf("provider.%s: new or changed plugin executable", name))
continue
}
factories[name] = providerFactory(newest)
} else {
msg := fmt.Sprintf("provider.%s: no suitable version installed", name)
required := req.Versions.String()
// no version is unconstrained
if required == "" {
required = "(any version)"
}
foundVersions := []string{}
for meta := range r.Available.WithName(name) {
foundVersions = append(foundVersions, fmt.Sprintf("%q", meta.Version))
}
found := "none"
if len(foundVersions) > 0 {
found = strings.Join(foundVersions, ", ")
}
msg += fmt.Sprintf("\n version requirements: %q\n versions installed: %s", required, found)
errs = append(errs, errors.New(msg))
}
}
return factories, errs
}
// store the user-supplied path for plugin discovery
func (m *Meta) storePluginPath(pluginPath []string) error {
if len(pluginPath) == 0 {
return nil
}
path := filepath.Join(m.DataDir(), PluginPathFile)
// remove the plugin dir record if the path was set to an empty string
if len(pluginPath) == 1 && (pluginPath[0] == "") {
err := os.Remove(path)
if !os.IsNotExist(err) {
return err
}
return nil
}
js, err := json.MarshalIndent(pluginPath, "", " ")
if err != nil {
return err
}
// if this fails, so will WriteFile
os.MkdirAll(m.DataDir(), 0755)
return ioutil.WriteFile(path, js, 0644)
}
// Load the user-defined plugin search path into Meta.pluginPath if the file
// exists.
func (m *Meta) loadPluginPath() ([]string, error) {
js, err := ioutil.ReadFile(filepath.Join(m.DataDir(), PluginPathFile))
if os.IsNotExist(err) {
return nil, nil
}
if err != nil {
return nil, err
}
var pluginPath []string
if err := json.Unmarshal(js, &pluginPath); err != nil {
return nil, err
}
return pluginPath, nil
}
// the default location for automatically installed plugins
func (m *Meta) pluginDir() string {
return filepath.Join(m.DataDir(), "plugins", fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH))
}
// pluginDirs return a list of directories to search for plugins.
//
// Earlier entries in this slice get priority over later when multiple copies
// of the same plugin version are found, but newer versions always override
// older versions where both satisfy the provider version constraints.
func (m *Meta) pluginDirs(includeAutoInstalled bool) []string {
// user defined paths take precedence
if len(m.pluginPath) > 0 {
return m.pluginPath
}
// When searching the following directories, earlier entries get precedence
// if the same plugin version is found twice, but newer versions will
// always get preference below regardless of where they are coming from.
// TODO: Add auto-install dir, default vendor dir and optional override
// vendor dir(s).
dirs := []string{"."}
// Look in the same directory as the Terraform executable.
// If found, this replaces what we found in the config path.
exePath, err := osext.Executable()
if err != nil {
log.Printf("[ERROR] Error discovering exe directory: %s", err)
} else {
dirs = append(dirs, filepath.Dir(exePath))
}
// add the user vendor directory
dirs = append(dirs, DefaultPluginVendorDir)
if includeAutoInstalled {
dirs = append(dirs, m.pluginDir())
}
dirs = append(dirs, m.GlobalPluginDirs...)
return dirs
}
func (m *Meta) pluginCache() discovery.PluginCache {
dir := m.PluginCacheDir
if dir == "" {
return nil // cache disabled
}
dir = filepath.Join(dir, pluginMachineName)
return discovery.NewLocalPluginCache(dir)
}
// providerPluginSet returns the set of valid providers that were discovered in
// the defined search paths.
func (m *Meta) providerPluginSet() discovery.PluginMetaSet {
plugins := discovery.FindPlugins("provider", m.pluginDirs(true))
// Add providers defined in the legacy .terraformrc,
if m.PluginOverrides != nil {
for k, v := range m.PluginOverrides.Providers {
log.Printf("[DEBUG] found plugin override in .terraformrc: %q, %q", k, v)
}
plugins = plugins.OverridePaths(m.PluginOverrides.Providers)
}
plugins, _ = plugins.ValidateVersions()
for p := range plugins {
log.Printf("[DEBUG] found valid plugin: %q, %q, %q", p.Name, p.Version, p.Path)
}
return plugins
}
// providerPluginAutoInstalledSet returns the set of providers that exist
// within the auto-install directory.
func (m *Meta) providerPluginAutoInstalledSet() discovery.PluginMetaSet {
plugins := discovery.FindPlugins("provider", []string{m.pluginDir()})
plugins, _ = plugins.ValidateVersions()
for p := range plugins {
log.Printf("[DEBUG] found valid plugin: %q", p.Name)
}
return plugins
}
// providerPluginManuallyInstalledSet returns the set of providers that exist
// in all locations *except* the auto-install directory.
func (m *Meta) providerPluginManuallyInstalledSet() discovery.PluginMetaSet {
plugins := discovery.FindPlugins("provider", m.pluginDirs(false))
// Add providers defined in the legacy .terraformrc,
if m.PluginOverrides != nil {
for k, v := range m.PluginOverrides.Providers {
log.Printf("[DEBUG] found plugin override in .terraformrc: %q, %q", k, v)
}
plugins = plugins.OverridePaths(m.PluginOverrides.Providers)
}
plugins, _ = plugins.ValidateVersions()
for p := range plugins {
log.Printf("[DEBUG] found valid plugin: %q, %q, %q", p.Name, p.Version, p.Path)
}
return plugins
}
func (m *Meta) providerResolver() providers.Resolver {
return &multiVersionProviderResolver{
Available: m.providerPluginSet(),
Internal: m.internalProviders(),
}
}
func (m *Meta) internalProviders() map[string]providers.Factory {
return map[string]providers.Factory{
"terraform": func() (providers.Interface, error) {
return terraformProvider.NewProvider(), nil
},
}
}
// filter the requirements returning only the providers that we can't resolve
func (m *Meta) missingPlugins(avail discovery.PluginMetaSet, reqd discovery.PluginRequirements) discovery.PluginRequirements {
missing := make(discovery.PluginRequirements)
candidates := avail.ConstrainVersions(reqd)
internal := m.internalProviders()
for name, versionSet := range reqd {
// internal providers can't be missing
if _, ok := internal[name]; ok {
continue
}
log.Printf("[DEBUG] plugin requirements: %q=%q", name, versionSet.Versions)
if metas := candidates[name]; metas.Count() == 0 {
missing[name] = versionSet
}
}
return missing
}
func (m *Meta) provisionerFactories() map[string]terraform.ProvisionerFactory {
dirs := m.pluginDirs(true)
plugins := discovery.FindPlugins("provisioner", dirs)
plugins, _ = plugins.ValidateVersions()
// For now our goal is to just find the latest version of each plugin
// we have on the system. All provisioners should be at version 0.0.0
// currently, so there should actually only be one instance of each plugin
// name here, even though the discovery interface forces us to pretend
// that might not be true.
factories := make(map[string]terraform.ProvisionerFactory)
// Wire up the internal provisioners first. These might be overridden
// by discovered provisioners below.
for name := range InternalProvisioners {
client, err := internalPluginClient("provisioner", name)
if err != nil {
log.Printf("[WARN] failed to build command line for internal plugin %q: %s", name, err)
continue
}
factories[name] = internalProvisionerFactory(client)
}
byName := plugins.ByName()
for name, metas := range byName {
// Since we validated versions above and we partitioned the sets
// by name, we're guaranteed that the metas in our set all have
// valid versions and that there's at least one meta.
newest := metas.Newest()
factories[name] = provisionerFactory(newest)
}
return factories
}
func internalPluginClient(kind, name string) (*plugin.Client, error) {
cmdLine, err := BuildPluginCommandString(kind, name)
if err != nil {
return nil, err
}
// See the docstring for BuildPluginCommandString for why we need to do
// this split here.
cmdArgv := strings.Split(cmdLine, TFSPACE)
cfg := &plugin.ClientConfig{
Cmd: exec.Command(cmdArgv[0], cmdArgv[1:]...),
HandshakeConfig: tfplugin.Handshake,
Managed: true,
VersionedPlugins: tfplugin.VersionedPlugins,
AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},
}
return plugin.NewClient(cfg), nil
}
func providerFactory(meta discovery.PluginMeta) providers.Factory {
return func() (providers.Interface, error) {
client := tfplugin.Client(meta)
// Request the RPC client so we can get the provider
// so we can build the actual RPC-implemented provider.
rpcClient, err := client.Client()
if err != nil {
return nil, err
}
raw, err := rpcClient.Dispense(tfplugin.ProviderPluginName)
if err != nil {
return nil, err
}
// store the client so that the plugin can kill the child process
p := raw.(*tfplugin.GRPCProvider)
p.PluginClient = client
return p, nil
}
}
func provisionerFactory(meta discovery.PluginMeta) terraform.ProvisionerFactory {
return func() (provisioners.Interface, error) {
client := tfplugin.Client(meta)
return newProvisionerClient(client)
}
}
func internalProvisionerFactory(client *plugin.Client) terraform.ProvisionerFactory {
return func() (provisioners.Interface, error) {
return newProvisionerClient(client)
}
}
func newProvisionerClient(client *plugin.Client) (provisioners.Interface, error) {
// Request the RPC client so we can get the provisioner
// so we can build the actual RPC-implemented provisioner.
rpcClient, err := client.Client()
if err != nil {
return nil, err
}
raw, err := rpcClient.Dispense(tfplugin.ProvisionerPluginName)
if err != nil {
return nil, err
}
// store the client so that the plugin can kill the child process
p := raw.(*tfplugin.GRPCProvisioner)
p.PluginClient = client
return p, nil
}
| command/plugins.go | 1 | https://github.com/hashicorp/terraform/commit/52c0032aedf1c3355ffbec1e566a2b0539ae019e | [
0.9983325600624084,
0.10722310841083527,
0.00016226025763899088,
0.00018377580272499472,
0.2951671779155731
] |
{
"id": 2,
"code_window": [
"\treturn func() (provisioners.Interface, error) {\n",
"\t\treturn newProvisionerClient(client)\n",
"\t}\n",
"}\n",
"\n",
"func newProvisionerClient(client *plugin.Client) (provisioners.Interface, error) {\n"
],
"labels": [
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tclient, err := internalPluginClient(\"provisioner\", meta.Name)\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, fmt.Errorf(\"[WARN] failed to build command line for internal plugin %q: %s\", meta.Name, err)\n",
"\t\t}\n"
],
"file_path": "command/plugins.go",
"type": "add",
"edit_start_line_idx": 401
} |
output "foo" {
value = "hello"
}
output "bar" {
value = local.bar
}
output "baz" {
value = "ssshhhhhhh"
sensitive = true
}
output "cheeze_pizza" {
description = "Nothing special"
value = "🍕"
}
output "π" {
value = 3.14159265359
depends_on = [
pizza.cheese,
]
}
| configs/test-fixtures/valid-files/outputs.tf | 0 | https://github.com/hashicorp/terraform/commit/52c0032aedf1c3355ffbec1e566a2b0539ae019e | [
0.00017605989705771208,
0.00017499639943707734,
0.00017391984874848276,
0.0001750094525050372,
8.737198413655278e-7
] |
{
"id": 2,
"code_window": [
"\treturn func() (provisioners.Interface, error) {\n",
"\t\treturn newProvisionerClient(client)\n",
"\t}\n",
"}\n",
"\n",
"func newProvisionerClient(client *plugin.Client) (provisioners.Interface, error) {\n"
],
"labels": [
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tclient, err := internalPluginClient(\"provisioner\", meta.Name)\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, fmt.Errorf(\"[WARN] failed to build command line for internal plugin %q: %s\", meta.Name, err)\n",
"\t\t}\n"
],
"file_path": "command/plugins.go",
"type": "add",
"edit_start_line_idx": 401
} | {
"version": 1,
"serial": 1,
"modules": [
{
"path": [
"root"
],
"outputs": {
"numbers": "0,1"
},
"resources": {
"null_resource.bar": {
"type": "null_resource",
"depends_on": [
"null_resource.foo"
],
"primary": {
"id": "6456912646020570139",
"attributes": {
"id": "6456912646020570139",
"triggers.#": "1",
"triggers.whaaat": "0,1"
}
}
},
"null_resource.foo.0": {
"type": "null_resource",
"primary": {
"id": "3597404161631769617",
"attributes": {
"id": "3597404161631769617",
"triggers.#": "1",
"triggers.what": "0"
}
}
},
"null_resource.foo.1": {
"type": "null_resource",
"primary": {
"id": "3214385801340650197",
"attributes": {
"id": "3214385801340650197",
"triggers.#": "1",
"triggers.what": "1"
}
}
}
}
}
]
}
| states/statefile/testdata/roundtrip/v1-simple.in.tfstate | 0 | https://github.com/hashicorp/terraform/commit/52c0032aedf1c3355ffbec1e566a2b0539ae019e | [
0.00017440225929021835,
0.00017035093333106488,
0.0001674356171861291,
0.00016999263607431203,
0.0000022378740140993614
] |
{
"id": 2,
"code_window": [
"\treturn func() (provisioners.Interface, error) {\n",
"\t\treturn newProvisionerClient(client)\n",
"\t}\n",
"}\n",
"\n",
"func newProvisionerClient(client *plugin.Client) (provisioners.Interface, error) {\n"
],
"labels": [
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tclient, err := internalPluginClient(\"provisioner\", meta.Name)\n",
"\t\tif err != nil {\n",
"\t\t\treturn nil, fmt.Errorf(\"[WARN] failed to build command line for internal plugin %q: %s\", meta.Name, err)\n",
"\t\t}\n"
],
"file_path": "command/plugins.go",
"type": "add",
"edit_start_line_idx": 401
} | ---
layout: "docs"
page_title: "Provisioner: chef"
sidebar_current: "docs-provisioners-chef"
description: |-
The `chef` provisioner installs, configures and runs the Chef client on a resource.
---
# Chef Provisioner
The `chef` provisioner installs, configures and runs the Chef Client on a remote
resource. The `chef` provisioner supports both `ssh` and `winrm` type
[connections](/docs/provisioners/connection.html).
## Requirements
The `chef` provisioner has some prerequisites for specific connection types:
* For `ssh` type connections, `cURL` must be available on the remote host.
* For `winrm` connections, `PowerShell 2.0` must be available on the remote host.
Without these prerequisites, your provisioning execution will fail.
## Example usage
```hcl
resource "aws_instance" "web" {
# ...
provisioner "chef" {
attributes_json = <<-EOF
{
"key": "value",
"app": {
"cluster1": {
"nodes": [
"webserver1",
"webserver2"
]
}
}
}
EOF
environment = "_default"
run_list = ["cookbook::recipe"]
node_name = "webserver1"
secret_key = "${file("../encrypted_data_bag_secret")}"
server_url = "https://chef.company.com/organizations/org1"
recreate_client = true
user_name = "bork"
user_key = "${file("../bork.pem")}"
version = "12.4.1"
# If you have a self signed cert on your chef server change this to :verify_none
ssl_verify_mode = ":verify_peer"
}
}
```
## Argument Reference
The following arguments are supported:
* `attributes_json (string)` - (Optional) A raw JSON string with initial node attributes
for the new node. These can also be loaded from a file on disk using
[the `file` function](/docs/configuration/functions/file.html).
* `channel (string)` - (Optional) The Chef Client release channel to install from. If not
set, the `stable` channel will be used.
* `client_options (array)` - (Optional) A list of optional Chef Client configuration
options. See the [Chef Client ](https://docs.chef.io/config_rb_client.html) documentation
for all available options.
* `disable_reporting (boolean)` - (Optional) If `true` the Chef Client will not try to send
reporting data (used by Chef Reporting) to the Chef Server (defaults to `false`).
* `environment (string)` - (Optional) The Chef environment the new node will be joining
(defaults to `_default`).
* `fetch_chef_certificates (boolean)` (Optional) If `true` the SSL certificates configured
on your Chef Server will be fetched and trusted. See the knife [ssl_fetch](https://docs.chef.io/knife_ssl_fetch.html)
documentation for more details.
* `log_to_file (boolean)` - (Optional) If `true`, the output of the initial Chef Client run
will be logged to a local file instead of the console. The file will be created in a
subdirectory called `logfiles` created in your current directory. The filename will be
the `node_name` of the new node.
* `use_policyfile (boolean)` - (Optional) If `true`, use the policy files to bootstrap the
node. Setting `policy_group` and `policy_name` are required if this is `true`. (defaults to
`false`).
* `policy_group (string)` - (Optional) The name of a policy group that exists on the Chef
server. Required if `use_policyfile` is set; `policy_name` must also be specified.
* `policy_name (string)` - (Optional) The name of a policy, as identified by the `name`
setting in a Policyfile.rb file. Required if `use_policyfile` is set; `policy_group`
must also be specified.
* `http_proxy (string)` - (Optional) The proxy server for Chef Client HTTP connections.
* `https_proxy (string)` - (Optional) The proxy server for Chef Client HTTPS connections.
* `named_run_list (string)` - (Optional) The name of an alternate run-list to invoke during the
initial Chef Client run. The run-list must already exist in the Policyfile that defines
`policy_name`. Only applies when `use_policyfile` is `true`.
* `no_proxy (array)` - (Optional) A list of URLs that should bypass the proxy.
* `node_name (string)` - (Required) The name of the node to register with the Chef Server.
* `ohai_hints (array)` - (Optional) A list with
[Ohai hints](https://docs.chef.io/ohai.html#hints) to upload to the node.
* `os_type (string)` - (Optional) The OS type of the node. Valid options are: `linux` and
`windows`. If not supplied, the connection type will be used to determine the OS type (`ssh`
will assume `linux` and `winrm` will assume `windows`).
* `prevent_sudo (boolean)` - (Optional) Prevent the use of the `sudo` command while installing, configuring
and running the initial Chef Client run. This option is only used with `ssh` type
[connections](/docs/provisioners/connection.html).
* `recreate_client (boolean)` - (Optional) If `true`, first delete any existing Chef Node and
Client before registering the new Chef Client.
* `run_list (array)` - (Optional) A list with recipes that will be invoked during the initial
Chef Client run. The run-list will also be saved to the Chef Server after a successful
initial run. Required if `use_policyfile` is `false`; ignored when `use_policyfile` is `true`
(see `named_run_list` to specify a run-list defined in a Policyfile).
* `secret_key (string)` - (Optional) The contents of the secret key that is used
by the Chef Client to decrypt data bags on the Chef Server. The key will be uploaded to the remote
machine. This can also be loaded from a file on disk using
[the `file` function](/docs/configuration/functions/file.html).
* `server_url (string)` - (Required) The URL to the Chef server. This includes the path to
the organization. See the example.
* `skip_install (boolean)` - (Optional) Skip the installation of Chef Client on the remote
machine. This assumes Chef Client is already installed when you run the `chef`
provisioner.
* `skip_register (boolean)` - (Optional) Skip the registration of Chef Client on the remote
machine. This assumes Chef Client is already registered and the private key (`client.pem`)
is available in the default Chef configuration directory when you run the `chef`
provisioner.
* `ssl_verify_mode (string)` - (Optional) Used to set the verify mode for Chef Client HTTPS
requests. The options are `:verify_none`, or `:verify_peer` which is default.
* `user_name (string)` - (Required) The name of an existing Chef user to register
the new Chef Client and optionally configure Chef Vaults.
* `user_key (string)` - (Required) The contents of the user key that will be used to
authenticate with the Chef Server. This can also be loaded from a file on disk using
[the `file` function](/docs/configuration/functions/file.html).
* `vault_json (string)` - (Optional) A raw JSON string with Chef Vaults and Items to which the new node
should have access. These can also be loaded from a file on disk using
[the `file` function](/docs/configuration/functions/file.html).
* `version (string)` - (Optional) The Chef Client version to install on the remote machine.
If not set, the latest available version will be installed.
| website/docs/provisioners/chef.html.markdown | 0 | https://github.com/hashicorp/terraform/commit/52c0032aedf1c3355ffbec1e566a2b0539ae019e | [
0.004119533114135265,
0.0007022703066468239,
0.00016321752627845854,
0.00016818592848721892,
0.0011713558342307806
] |
{
"id": 3,
"code_window": [
"\tinstanceState := &terraform.InstanceState{\n",
"\t\tEphemeral: terraform.EphemeralState{\n",
"\t\t\tConnInfo: conn,\n",
"\t\t},\n",
"\t}\n",
"\n",
"\terr = s.provisioner.Apply(uiOutput{srv}, instanceState, resourceConfig)\n",
"\tif err != nil {\n",
"\t\tsrvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err)\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tMeta: make(map[string]interface{}),\n"
],
"file_path": "helper/plugin/grpc_provisioner.go",
"type": "add",
"edit_start_line_idx": 126
} | package command
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
plugin "github.com/hashicorp/go-plugin"
"github.com/kardianos/osext"
terraformProvider "github.com/hashicorp/terraform/builtin/providers/terraform"
tfplugin "github.com/hashicorp/terraform/plugin"
"github.com/hashicorp/terraform/plugin/discovery"
"github.com/hashicorp/terraform/providers"
"github.com/hashicorp/terraform/provisioners"
"github.com/hashicorp/terraform/terraform"
)
// multiVersionProviderResolver is an implementation of
// terraform.ResourceProviderResolver that matches the given version constraints
// against a set of versioned provider plugins to find the newest version of
// each that satisfies the given constraints.
type multiVersionProviderResolver struct {
Available discovery.PluginMetaSet
// Internal is a map that overrides the usual plugin selection process
// for internal plugins. These plugins do not support version constraints
// (will produce an error if one is set). This should be used only in
// exceptional circumstances since it forces the provider's release
// schedule to be tied to that of Terraform Core.
Internal map[string]providers.Factory
}
func choosePlugins(avail discovery.PluginMetaSet, internal map[string]providers.Factory, reqd discovery.PluginRequirements) map[string]discovery.PluginMeta {
candidates := avail.ConstrainVersions(reqd)
ret := map[string]discovery.PluginMeta{}
for name, metas := range candidates {
// If the provider is in our internal map then we ignore any
// discovered plugins for it since these are dealt with separately.
if _, isInternal := internal[name]; isInternal {
continue
}
if len(metas) == 0 {
continue
}
ret[name] = metas.Newest()
}
return ret
}
func (r *multiVersionProviderResolver) ResolveProviders(
reqd discovery.PluginRequirements,
) (map[string]providers.Factory, []error) {
factories := make(map[string]providers.Factory, len(reqd))
var errs []error
chosen := choosePlugins(r.Available, r.Internal, reqd)
for name, req := range reqd {
if factory, isInternal := r.Internal[name]; isInternal {
if !req.Versions.Unconstrained() {
errs = append(errs, fmt.Errorf("provider.%s: this provider is built in to Terraform and so it does not support version constraints", name))
continue
}
factories[name] = factory
continue
}
if newest, available := chosen[name]; available {
digest, err := newest.SHA256()
if err != nil {
errs = append(errs, fmt.Errorf("provider.%s: failed to load plugin to verify its signature: %s", name, err))
continue
}
if !reqd[name].AcceptsSHA256(digest) {
errs = append(errs, fmt.Errorf("provider.%s: new or changed plugin executable", name))
continue
}
factories[name] = providerFactory(newest)
} else {
msg := fmt.Sprintf("provider.%s: no suitable version installed", name)
required := req.Versions.String()
// no version is unconstrained
if required == "" {
required = "(any version)"
}
foundVersions := []string{}
for meta := range r.Available.WithName(name) {
foundVersions = append(foundVersions, fmt.Sprintf("%q", meta.Version))
}
found := "none"
if len(foundVersions) > 0 {
found = strings.Join(foundVersions, ", ")
}
msg += fmt.Sprintf("\n version requirements: %q\n versions installed: %s", required, found)
errs = append(errs, errors.New(msg))
}
}
return factories, errs
}
// store the user-supplied path for plugin discovery
func (m *Meta) storePluginPath(pluginPath []string) error {
if len(pluginPath) == 0 {
return nil
}
path := filepath.Join(m.DataDir(), PluginPathFile)
// remove the plugin dir record if the path was set to an empty string
if len(pluginPath) == 1 && (pluginPath[0] == "") {
err := os.Remove(path)
if !os.IsNotExist(err) {
return err
}
return nil
}
js, err := json.MarshalIndent(pluginPath, "", " ")
if err != nil {
return err
}
// if this fails, so will WriteFile
os.MkdirAll(m.DataDir(), 0755)
return ioutil.WriteFile(path, js, 0644)
}
// Load the user-defined plugin search path into Meta.pluginPath if the file
// exists.
func (m *Meta) loadPluginPath() ([]string, error) {
js, err := ioutil.ReadFile(filepath.Join(m.DataDir(), PluginPathFile))
if os.IsNotExist(err) {
return nil, nil
}
if err != nil {
return nil, err
}
var pluginPath []string
if err := json.Unmarshal(js, &pluginPath); err != nil {
return nil, err
}
return pluginPath, nil
}
// the default location for automatically installed plugins
func (m *Meta) pluginDir() string {
return filepath.Join(m.DataDir(), "plugins", fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH))
}
// pluginDirs return a list of directories to search for plugins.
//
// Earlier entries in this slice get priority over later when multiple copies
// of the same plugin version are found, but newer versions always override
// older versions where both satisfy the provider version constraints.
func (m *Meta) pluginDirs(includeAutoInstalled bool) []string {
// user defined paths take precedence
if len(m.pluginPath) > 0 {
return m.pluginPath
}
// When searching the following directories, earlier entries get precedence
// if the same plugin version is found twice, but newer versions will
// always get preference below regardless of where they are coming from.
// TODO: Add auto-install dir, default vendor dir and optional override
// vendor dir(s).
dirs := []string{"."}
// Look in the same directory as the Terraform executable.
// If found, this replaces what we found in the config path.
exePath, err := osext.Executable()
if err != nil {
log.Printf("[ERROR] Error discovering exe directory: %s", err)
} else {
dirs = append(dirs, filepath.Dir(exePath))
}
// add the user vendor directory
dirs = append(dirs, DefaultPluginVendorDir)
if includeAutoInstalled {
dirs = append(dirs, m.pluginDir())
}
dirs = append(dirs, m.GlobalPluginDirs...)
return dirs
}
func (m *Meta) pluginCache() discovery.PluginCache {
dir := m.PluginCacheDir
if dir == "" {
return nil // cache disabled
}
dir = filepath.Join(dir, pluginMachineName)
return discovery.NewLocalPluginCache(dir)
}
// providerPluginSet returns the set of valid providers that were discovered in
// the defined search paths.
func (m *Meta) providerPluginSet() discovery.PluginMetaSet {
plugins := discovery.FindPlugins("provider", m.pluginDirs(true))
// Add providers defined in the legacy .terraformrc,
if m.PluginOverrides != nil {
for k, v := range m.PluginOverrides.Providers {
log.Printf("[DEBUG] found plugin override in .terraformrc: %q, %q", k, v)
}
plugins = plugins.OverridePaths(m.PluginOverrides.Providers)
}
plugins, _ = plugins.ValidateVersions()
for p := range plugins {
log.Printf("[DEBUG] found valid plugin: %q, %q, %q", p.Name, p.Version, p.Path)
}
return plugins
}
// providerPluginAutoInstalledSet returns the set of providers that exist
// within the auto-install directory.
func (m *Meta) providerPluginAutoInstalledSet() discovery.PluginMetaSet {
plugins := discovery.FindPlugins("provider", []string{m.pluginDir()})
plugins, _ = plugins.ValidateVersions()
for p := range plugins {
log.Printf("[DEBUG] found valid plugin: %q", p.Name)
}
return plugins
}
// providerPluginManuallyInstalledSet returns the set of providers that exist
// in all locations *except* the auto-install directory.
func (m *Meta) providerPluginManuallyInstalledSet() discovery.PluginMetaSet {
plugins := discovery.FindPlugins("provider", m.pluginDirs(false))
// Add providers defined in the legacy .terraformrc,
if m.PluginOverrides != nil {
for k, v := range m.PluginOverrides.Providers {
log.Printf("[DEBUG] found plugin override in .terraformrc: %q, %q", k, v)
}
plugins = plugins.OverridePaths(m.PluginOverrides.Providers)
}
plugins, _ = plugins.ValidateVersions()
for p := range plugins {
log.Printf("[DEBUG] found valid plugin: %q, %q, %q", p.Name, p.Version, p.Path)
}
return plugins
}
func (m *Meta) providerResolver() providers.Resolver {
return &multiVersionProviderResolver{
Available: m.providerPluginSet(),
Internal: m.internalProviders(),
}
}
func (m *Meta) internalProviders() map[string]providers.Factory {
return map[string]providers.Factory{
"terraform": func() (providers.Interface, error) {
return terraformProvider.NewProvider(), nil
},
}
}
// filter the requirements returning only the providers that we can't resolve
func (m *Meta) missingPlugins(avail discovery.PluginMetaSet, reqd discovery.PluginRequirements) discovery.PluginRequirements {
missing := make(discovery.PluginRequirements)
candidates := avail.ConstrainVersions(reqd)
internal := m.internalProviders()
for name, versionSet := range reqd {
// internal providers can't be missing
if _, ok := internal[name]; ok {
continue
}
log.Printf("[DEBUG] plugin requirements: %q=%q", name, versionSet.Versions)
if metas := candidates[name]; metas.Count() == 0 {
missing[name] = versionSet
}
}
return missing
}
func (m *Meta) provisionerFactories() map[string]terraform.ProvisionerFactory {
dirs := m.pluginDirs(true)
plugins := discovery.FindPlugins("provisioner", dirs)
plugins, _ = plugins.ValidateVersions()
// For now our goal is to just find the latest version of each plugin
// we have on the system. All provisioners should be at version 0.0.0
// currently, so there should actually only be one instance of each plugin
// name here, even though the discovery interface forces us to pretend
// that might not be true.
factories := make(map[string]terraform.ProvisionerFactory)
// Wire up the internal provisioners first. These might be overridden
// by discovered provisioners below.
for name := range InternalProvisioners {
client, err := internalPluginClient("provisioner", name)
if err != nil {
log.Printf("[WARN] failed to build command line for internal plugin %q: %s", name, err)
continue
}
factories[name] = internalProvisionerFactory(client)
}
byName := plugins.ByName()
for name, metas := range byName {
// Since we validated versions above and we partitioned the sets
// by name, we're guaranteed that the metas in our set all have
// valid versions and that there's at least one meta.
newest := metas.Newest()
factories[name] = provisionerFactory(newest)
}
return factories
}
func internalPluginClient(kind, name string) (*plugin.Client, error) {
cmdLine, err := BuildPluginCommandString(kind, name)
if err != nil {
return nil, err
}
// See the docstring for BuildPluginCommandString for why we need to do
// this split here.
cmdArgv := strings.Split(cmdLine, TFSPACE)
cfg := &plugin.ClientConfig{
Cmd: exec.Command(cmdArgv[0], cmdArgv[1:]...),
HandshakeConfig: tfplugin.Handshake,
Managed: true,
VersionedPlugins: tfplugin.VersionedPlugins,
AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},
}
return plugin.NewClient(cfg), nil
}
func providerFactory(meta discovery.PluginMeta) providers.Factory {
return func() (providers.Interface, error) {
client := tfplugin.Client(meta)
// Request the RPC client so we can get the provider
// so we can build the actual RPC-implemented provider.
rpcClient, err := client.Client()
if err != nil {
return nil, err
}
raw, err := rpcClient.Dispense(tfplugin.ProviderPluginName)
if err != nil {
return nil, err
}
// store the client so that the plugin can kill the child process
p := raw.(*tfplugin.GRPCProvider)
p.PluginClient = client
return p, nil
}
}
func provisionerFactory(meta discovery.PluginMeta) terraform.ProvisionerFactory {
return func() (provisioners.Interface, error) {
client := tfplugin.Client(meta)
return newProvisionerClient(client)
}
}
func internalProvisionerFactory(client *plugin.Client) terraform.ProvisionerFactory {
return func() (provisioners.Interface, error) {
return newProvisionerClient(client)
}
}
func newProvisionerClient(client *plugin.Client) (provisioners.Interface, error) {
// Request the RPC client so we can get the provisioner
// so we can build the actual RPC-implemented provisioner.
rpcClient, err := client.Client()
if err != nil {
return nil, err
}
raw, err := rpcClient.Dispense(tfplugin.ProvisionerPluginName)
if err != nil {
return nil, err
}
// store the client so that the plugin can kill the child process
p := raw.(*tfplugin.GRPCProvisioner)
p.PluginClient = client
return p, nil
}
| command/plugins.go | 1 | https://github.com/hashicorp/terraform/commit/52c0032aedf1c3355ffbec1e566a2b0539ae019e | [
0.9845955967903137,
0.025203686207532883,
0.0001623803545953706,
0.0001732290256768465,
0.14826856553554535
] |
{
"id": 3,
"code_window": [
"\tinstanceState := &terraform.InstanceState{\n",
"\t\tEphemeral: terraform.EphemeralState{\n",
"\t\t\tConnInfo: conn,\n",
"\t\t},\n",
"\t}\n",
"\n",
"\terr = s.provisioner.Apply(uiOutput{srv}, instanceState, resourceConfig)\n",
"\tif err != nil {\n",
"\t\tsrvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err)\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tMeta: make(map[string]interface{}),\n"
],
"file_path": "helper/plugin/grpc_provisioner.go",
"type": "add",
"edit_start_line_idx": 126
} | package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/efs"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsEfsMountTarget() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsEfsMountTargetRead,
Schema: map[string]*schema.Schema{
"mount_target_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"file_system_id": {
Type: schema.TypeString,
Computed: true,
},
"ip_address": {
Type: schema.TypeString,
Computed: true,
},
"security_groups": {
Type: schema.TypeSet,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
Computed: true,
},
"subnet_id": {
Type: schema.TypeString,
Computed: true,
},
"network_interface_id": {
Type: schema.TypeString,
Computed: true,
},
"dns_name": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func dataSourceAwsEfsMountTargetRead(d *schema.ResourceData, meta interface{}) error {
efsconn := meta.(*AWSClient).efsconn
describeEfsOpts := &efs.DescribeMountTargetsInput{
MountTargetId: aws.String(d.Get("mount_target_id").(string)),
}
log.Printf("[DEBUG] Reading EFS Mount Target: %s", describeEfsOpts)
resp, err := efsconn.DescribeMountTargets(describeEfsOpts)
if err != nil {
return fmt.Errorf("Error retrieving EFS Mount Target: %s", err)
}
if len(resp.MountTargets) != 1 {
return fmt.Errorf("Search returned %d results, please revise so only one is returned", len(resp.MountTargets))
}
mt := resp.MountTargets[0]
log.Printf("[DEBUG] Found EFS mount target: %#v", mt)
d.SetId(*mt.MountTargetId)
d.Set("file_system_id", mt.FileSystemId)
d.Set("ip_address", mt.IpAddress)
d.Set("subnet_id", mt.SubnetId)
d.Set("network_interface_id", mt.NetworkInterfaceId)
sgResp, err := efsconn.DescribeMountTargetSecurityGroups(&efs.DescribeMountTargetSecurityGroupsInput{
MountTargetId: aws.String(d.Id()),
})
if err != nil {
return err
}
err = d.Set("security_groups", schema.NewSet(schema.HashString, flattenStringList(sgResp.SecurityGroups)))
if err != nil {
return err
}
if err := d.Set("dns_name", resourceAwsEfsMountTargetDnsName(*mt.FileSystemId, meta.(*AWSClient).region)); err != nil {
return fmt.Errorf("Error setting dns_name error: %#v", err)
}
return nil
}
| vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_efs_mount_target.go | 0 | https://github.com/hashicorp/terraform/commit/52c0032aedf1c3355ffbec1e566a2b0539ae019e | [
0.7278649210929871,
0.0915670245885849,
0.00016519014025107026,
0.0001758639409672469,
0.2191227227449417
] |
{
"id": 3,
"code_window": [
"\tinstanceState := &terraform.InstanceState{\n",
"\t\tEphemeral: terraform.EphemeralState{\n",
"\t\t\tConnInfo: conn,\n",
"\t\t},\n",
"\t}\n",
"\n",
"\terr = s.provisioner.Apply(uiOutput{srv}, instanceState, resourceConfig)\n",
"\tif err != nil {\n",
"\t\tsrvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err)\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tMeta: make(map[string]interface{}),\n"
],
"file_path": "helper/plugin/grpc_provisioner.go",
"type": "add",
"edit_start_line_idx": 126
} | package uuid
import (
"crypto/rand"
"encoding/hex"
"fmt"
)
// GenerateRandomBytes is used to generate random bytes of given size.
func GenerateRandomBytes(size int) ([]byte, error) {
buf := make([]byte, size)
if _, err := rand.Read(buf); err != nil {
return nil, fmt.Errorf("failed to read random bytes: %v", err)
}
return buf, nil
}
// GenerateUUID is used to generate a random UUID
func GenerateUUID() (string, error) {
buf, err := GenerateRandomBytes(16)
if err != nil {
return "", err
}
return FormatUUID(buf)
}
func FormatUUID(buf []byte) (string, error) {
if len(buf) != 16 {
return "", fmt.Errorf("wrong length byte slice (%d)", len(buf))
}
return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
buf[0:4],
buf[4:6],
buf[6:8],
buf[8:10],
buf[10:16]), nil
}
func ParseUUID(uuid string) ([]byte, error) {
if len(uuid) != 36 {
return nil, fmt.Errorf("uuid string is wrong length")
}
hyph := []byte("-")
if uuid[8] != hyph[0] ||
uuid[13] != hyph[0] ||
uuid[18] != hyph[0] ||
uuid[23] != hyph[0] {
return nil, fmt.Errorf("uuid is improperly formatted")
}
hexStr := uuid[0:8] + uuid[9:13] + uuid[14:18] + uuid[19:23] + uuid[24:36]
ret, err := hex.DecodeString(hexStr)
if err != nil {
return nil, err
}
if len(ret) != 16 {
return nil, fmt.Errorf("decoded hex is the wrong length")
}
return ret, nil
}
| vendor/github.com/hashicorp/go-uuid/uuid.go | 0 | https://github.com/hashicorp/terraform/commit/52c0032aedf1c3355ffbec1e566a2b0539ae019e | [
0.00951291061937809,
0.0015123613411560655,
0.00016928721743170172,
0.00017608708003535867,
0.003266229759901762
] |
{
"id": 3,
"code_window": [
"\tinstanceState := &terraform.InstanceState{\n",
"\t\tEphemeral: terraform.EphemeralState{\n",
"\t\t\tConnInfo: conn,\n",
"\t\t},\n",
"\t}\n",
"\n",
"\terr = s.provisioner.Apply(uiOutput{srv}, instanceState, resourceConfig)\n",
"\tif err != nil {\n",
"\t\tsrvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err)\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tMeta: make(map[string]interface{}),\n"
],
"file_path": "helper/plugin/grpc_provisioner.go",
"type": "add",
"edit_start_line_idx": 126
} | variable "foo" {
default = "${aws_instance.foo.bar}"
}
| config/test-fixtures/validate-var-default-interpolate/main.tf | 0 | https://github.com/hashicorp/terraform/commit/52c0032aedf1c3355ffbec1e566a2b0539ae019e | [
0.00017411929729860276,
0.00017411929729860276,
0.00017411929729860276,
0.00017411929729860276,
0
] |
{
"id": 4,
"code_window": [
"\tprotoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProvisionerSchema_Request))\n",
"\tif err != nil {\n",
"\t\tresp.Diagnostics = resp.Diagnostics.Append(err)\n",
"\t\treturn resp\n",
"\t}\n",
"\n",
"\tif protoResp.Provisioner == nil {\n",
"\t\tresp.Diagnostics = resp.Diagnostics.Append(errors.New(\"missing provisioner schema\"))\n",
"\t\treturn resp\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tresp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))\n"
],
"file_path": "plugin/grpc_provisioner.go",
"type": "add",
"edit_start_line_idx": 66
} | package plugin
import (
"context"
"errors"
"io"
"log"
"sync"
plugin "github.com/hashicorp/go-plugin"
"github.com/hashicorp/terraform/configs/configschema"
"github.com/hashicorp/terraform/plugin/convert"
"github.com/hashicorp/terraform/plugin/proto"
"github.com/hashicorp/terraform/provisioners"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/msgpack"
"google.golang.org/grpc"
)
// GRPCProvisionerPlugin is the plugin.GRPCPlugin implementation.
type GRPCProvisionerPlugin struct {
plugin.Plugin
GRPCProvisioner func() proto.ProvisionerServer
}
func (p *GRPCProvisionerPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
return &GRPCProvisioner{
client: proto.NewProvisionerClient(c),
ctx: ctx,
}, nil
}
func (p *GRPCProvisionerPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error {
proto.RegisterProvisionerServer(s, p.GRPCProvisioner())
return nil
}
// provisioners.Interface grpc implementation
type GRPCProvisioner struct {
// PluginClient provides a reference to the plugin.Client which controls the plugin process.
// This allows the GRPCProvider a way to shutdown the plugin process.
PluginClient *plugin.Client
client proto.ProvisionerClient
ctx context.Context
// Cache the schema since we need it for serialization in each method call.
mu sync.Mutex
schema *configschema.Block
}
func (p *GRPCProvisioner) GetSchema() (resp provisioners.GetSchemaResponse) {
p.mu.Lock()
defer p.mu.Unlock()
if p.schema != nil {
return provisioners.GetSchemaResponse{
Provisioner: p.schema,
}
}
protoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProvisionerSchema_Request))
if err != nil {
resp.Diagnostics = resp.Diagnostics.Append(err)
return resp
}
if protoResp.Provisioner == nil {
resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provisioner schema"))
return resp
}
resp.Provisioner = convert.ProtoToConfigSchema(protoResp.Provisioner.Block)
p.schema = resp.Provisioner
return resp
}
func (p *GRPCProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) (resp provisioners.ValidateProvisionerConfigResponse) {
schema := p.GetSchema()
if schema.Diagnostics.HasErrors() {
resp.Diagnostics = resp.Diagnostics.Append(schema.Diagnostics)
return resp
}
mp, err := msgpack.Marshal(r.Config, schema.Provisioner.ImpliedType())
if err != nil {
resp.Diagnostics = resp.Diagnostics.Append(err)
return resp
}
protoReq := &proto.ValidateProvisionerConfig_Request{
Config: &proto.DynamicValue{Msgpack: mp},
}
protoResp, err := p.client.ValidateProvisionerConfig(p.ctx, protoReq)
if err != nil {
resp.Diagnostics = resp.Diagnostics.Append(err)
return resp
}
resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
return resp
}
func (p *GRPCProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) {
schema := p.GetSchema()
if schema.Diagnostics.HasErrors() {
resp.Diagnostics = resp.Diagnostics.Append(schema.Diagnostics)
return resp
}
mp, err := msgpack.Marshal(r.Config, schema.Provisioner.ImpliedType())
if err != nil {
resp.Diagnostics = resp.Diagnostics.Append(err)
return resp
}
// connection is always assumed to be a simple string map
connMP, err := msgpack.Marshal(r.Connection, cty.Map(cty.String))
if err != nil {
resp.Diagnostics = resp.Diagnostics.Append(err)
return resp
}
protoReq := &proto.ProvisionResource_Request{
Config: &proto.DynamicValue{Msgpack: mp},
Connection: &proto.DynamicValue{Msgpack: connMP},
}
outputClient, err := p.client.ProvisionResource(p.ctx, protoReq)
if err != nil {
resp.Diagnostics = resp.Diagnostics.Append(err)
return resp
}
for {
rcv, err := outputClient.Recv()
if rcv != nil {
r.UIOutput.Output(rcv.Output)
}
if err != nil {
if err != io.EOF {
resp.Diagnostics = resp.Diagnostics.Append(err)
}
break
}
if len(rcv.Diagnostics) > 0 {
resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(rcv.Diagnostics))
break
}
}
return resp
}
func (p *GRPCProvisioner) Stop() error {
protoResp, err := p.client.Stop(p.ctx, &proto.Stop_Request{})
if err != nil {
return err
}
if protoResp.Error != "" {
return errors.New(protoResp.Error)
}
return nil
}
func (p *GRPCProvisioner) Close() error {
// check this since it's not automatically inserted during plugin creation
if p.PluginClient == nil {
log.Println("[DEBUG] provider has no plugin.Client")
return nil
}
p.PluginClient.Kill()
return nil
}
| plugin/grpc_provisioner.go | 1 | https://github.com/hashicorp/terraform/commit/52c0032aedf1c3355ffbec1e566a2b0539ae019e | [
0.9987214207649231,
0.18556277453899384,
0.00016775060794316232,
0.015770982950925827,
0.35289672017097473
] |
{
"id": 4,
"code_window": [
"\tprotoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProvisionerSchema_Request))\n",
"\tif err != nil {\n",
"\t\tresp.Diagnostics = resp.Diagnostics.Append(err)\n",
"\t\treturn resp\n",
"\t}\n",
"\n",
"\tif protoResp.Provisioner == nil {\n",
"\t\tresp.Diagnostics = resp.Diagnostics.Append(errors.New(\"missing provisioner schema\"))\n",
"\t\treturn resp\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tresp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))\n"
],
"file_path": "plugin/grpc_provisioner.go",
"type": "add",
"edit_start_line_idx": 66
} | /*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpc
import (
"errors"
"fmt"
"math/rand"
"net"
"sync"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/naming"
)
// Client API for LoadBalancer service.
// Mostly copied from generated pb.go file.
// To avoid circular dependency.
type loadBalancerClient struct {
cc *ClientConn
}
func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...CallOption) (*balanceLoadClientStream, error) {
desc := &StreamDesc{
StreamName: "BalanceLoad",
ServerStreams: true,
ClientStreams: true,
}
stream, err := NewClientStream(ctx, desc, c.cc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...)
if err != nil {
return nil, err
}
x := &balanceLoadClientStream{stream}
return x, nil
}
type balanceLoadClientStream struct {
ClientStream
}
func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) {
m := new(lbpb.LoadBalanceResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// NewGRPCLBBalancer creates a grpclb load balancer.
func NewGRPCLBBalancer(r naming.Resolver) Balancer {
return &balancer{
r: r,
}
}
type remoteBalancerInfo struct {
addr string
// the server name used for authentication with the remote LB server.
name string
}
// grpclbAddrInfo consists of the information of a backend server.
type grpclbAddrInfo struct {
addr Address
connected bool
// dropForRateLimiting indicates whether this particular request should be
// dropped by the client for rate limiting.
dropForRateLimiting bool
// dropForLoadBalancing indicates whether this particular request should be
// dropped by the client for load balancing.
dropForLoadBalancing bool
}
type balancer struct {
r naming.Resolver
target string
mu sync.Mutex
seq int // a sequence number to make sure addrCh does not get stale addresses.
w naming.Watcher
addrCh chan []Address
rbs []remoteBalancerInfo
addrs []*grpclbAddrInfo
next int
waitCh chan struct{}
done bool
expTimer *time.Timer
rand *rand.Rand
clientStats lbpb.ClientStats
}
func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error {
updates, err := w.Next()
if err != nil {
grpclog.Warningf("grpclb: failed to get next addr update from watcher: %v", err)
return err
}
b.mu.Lock()
defer b.mu.Unlock()
if b.done {
return ErrClientConnClosing
}
for _, update := range updates {
switch update.Op {
case naming.Add:
var exist bool
for _, v := range b.rbs {
// TODO: Is the same addr with different server name a different balancer?
if update.Addr == v.addr {
exist = true
break
}
}
if exist {
continue
}
md, ok := update.Metadata.(*naming.AddrMetadataGRPCLB)
if !ok {
// TODO: Revisit the handling here and may introduce some fallback mechanism.
grpclog.Errorf("The name resolution contains unexpected metadata %v", update.Metadata)
continue
}
switch md.AddrType {
case naming.Backend:
// TODO: Revisit the handling here and may introduce some fallback mechanism.
grpclog.Errorf("The name resolution does not give grpclb addresses")
continue
case naming.GRPCLB:
b.rbs = append(b.rbs, remoteBalancerInfo{
addr: update.Addr,
name: md.ServerName,
})
default:
grpclog.Errorf("Received unknow address type %d", md.AddrType)
continue
}
case naming.Delete:
for i, v := range b.rbs {
if update.Addr == v.addr {
copy(b.rbs[i:], b.rbs[i+1:])
b.rbs = b.rbs[:len(b.rbs)-1]
break
}
}
default:
grpclog.Errorf("Unknown update.Op %v", update.Op)
}
}
// TODO: Fall back to the basic round-robin load balancing if the resulting address is
// not a load balancer.
select {
case <-ch:
default:
}
ch <- b.rbs
return nil
}
func (b *balancer) serverListExpire(seq int) {
b.mu.Lock()
defer b.mu.Unlock()
	// TODO: gRPC internals do not clear the connections when the server list is stale.
// This means RPCs will keep using the existing server list until b receives new
// server list even though the list is expired. Revisit this behavior later.
if b.done || seq < b.seq {
return
}
b.next = 0
b.addrs = nil
// Ask grpc internals to close all the corresponding connections.
b.addrCh <- nil
}
func convertDuration(d *lbpb.Duration) time.Duration {
if d == nil {
return 0
}
return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
}
func (b *balancer) processServerList(l *lbpb.ServerList, seq int) {
if l == nil {
return
}
servers := l.GetServers()
expiration := convertDuration(l.GetExpirationInterval())
var (
sl []*grpclbAddrInfo
addrs []Address
)
for _, s := range servers {
md := metadata.Pairs("lb-token", s.LoadBalanceToken)
ip := net.IP(s.IpAddress)
ipStr := ip.String()
if ip.To4() == nil {
// Add square brackets to ipv6 addresses, otherwise net.Dial() and
// net.SplitHostPort() will return too many colons error.
ipStr = fmt.Sprintf("[%s]", ipStr)
}
addr := Address{
Addr: fmt.Sprintf("%s:%d", ipStr, s.Port),
Metadata: &md,
}
sl = append(sl, &grpclbAddrInfo{
addr: addr,
dropForRateLimiting: s.DropForRateLimiting,
dropForLoadBalancing: s.DropForLoadBalancing,
})
addrs = append(addrs, addr)
}
b.mu.Lock()
defer b.mu.Unlock()
if b.done || seq < b.seq {
return
}
if len(sl) > 0 {
// reset b.next to 0 when replacing the server list.
b.next = 0
b.addrs = sl
b.addrCh <- addrs
if b.expTimer != nil {
b.expTimer.Stop()
b.expTimer = nil
}
if expiration > 0 {
b.expTimer = time.AfterFunc(expiration, func() {
b.serverListExpire(seq)
})
}
}
return
}
func (b *balancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
case <-done:
return
}
b.mu.Lock()
stats := b.clientStats
b.clientStats = lbpb.ClientStats{} // Clear the stats.
b.mu.Unlock()
t := time.Now()
stats.Timestamp = &lbpb.Timestamp{
Seconds: t.Unix(),
Nanos: int32(t.Nanosecond()),
}
if err := s.Send(&lbpb.LoadBalanceRequest{
LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{
ClientStats: &stats,
},
}); err != nil {
grpclog.Errorf("grpclb: failed to send load report: %v", err)
return
}
}
}
func (b *balancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
stream, err := lbc.BalanceLoad(ctx)
if err != nil {
grpclog.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err)
return
}
b.mu.Lock()
if b.done {
b.mu.Unlock()
return
}
b.mu.Unlock()
initReq := &lbpb.LoadBalanceRequest{
LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
InitialRequest: &lbpb.InitialLoadBalanceRequest{
Name: b.target,
},
},
}
if err := stream.Send(initReq); err != nil {
grpclog.Errorf("grpclb: failed to send init request: %v", err)
// TODO: backoff on retry?
return true
}
reply, err := stream.Recv()
if err != nil {
grpclog.Errorf("grpclb: failed to recv init response: %v", err)
// TODO: backoff on retry?
return true
}
initResp := reply.GetInitialResponse()
if initResp == nil {
grpclog.Errorf("grpclb: reply from remote balancer did not include initial response.")
return
}
// TODO: Support delegation.
if initResp.LoadBalancerDelegate != "" {
// delegation
grpclog.Errorf("TODO: Delegation is not supported yet.")
return
}
streamDone := make(chan struct{})
defer close(streamDone)
b.mu.Lock()
b.clientStats = lbpb.ClientStats{} // Clear client stats.
b.mu.Unlock()
if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 {
go b.sendLoadReport(stream, d, streamDone)
}
// Retrieve the server list.
for {
reply, err := stream.Recv()
if err != nil {
grpclog.Errorf("grpclb: failed to recv server list: %v", err)
break
}
b.mu.Lock()
if b.done || seq < b.seq {
b.mu.Unlock()
return
}
b.seq++ // tick when receiving a new list of servers.
seq = b.seq
b.mu.Unlock()
if serverList := reply.GetServerList(); serverList != nil {
b.processServerList(serverList, seq)
}
}
return true
}
func (b *balancer) Start(target string, config BalancerConfig) error {
b.rand = rand.New(rand.NewSource(time.Now().Unix()))
// TODO: Fall back to the basic direct connection if there is no name resolver.
if b.r == nil {
return errors.New("there is no name resolver installed")
}
b.target = target
b.mu.Lock()
if b.done {
b.mu.Unlock()
return ErrClientConnClosing
}
b.addrCh = make(chan []Address)
w, err := b.r.Resolve(target)
if err != nil {
b.mu.Unlock()
grpclog.Errorf("grpclb: failed to resolve address: %v, err: %v", target, err)
return err
}
b.w = w
b.mu.Unlock()
balancerAddrsCh := make(chan []remoteBalancerInfo, 1)
// Spawn a goroutine to monitor the name resolution of remote load balancer.
go func() {
for {
if err := b.watchAddrUpdates(w, balancerAddrsCh); err != nil {
grpclog.Warningf("grpclb: the naming watcher stops working due to %v.\n", err)
close(balancerAddrsCh)
return
}
}
}()
// Spawn a goroutine to talk to the remote load balancer.
go func() {
var (
cc *ClientConn
// ccError is closed when there is an error in the current cc.
// A new rb should be picked from rbs and connected.
ccError chan struct{}
rb *remoteBalancerInfo
rbs []remoteBalancerInfo
rbIdx int
)
defer func() {
if ccError != nil {
select {
case <-ccError:
default:
close(ccError)
}
}
if cc != nil {
cc.Close()
}
}()
for {
var ok bool
select {
case rbs, ok = <-balancerAddrsCh:
if !ok {
return
}
foundIdx := -1
if rb != nil {
for i, trb := range rbs {
if trb == *rb {
foundIdx = i
break
}
}
}
if foundIdx >= 0 {
if foundIdx >= 1 {
// Move the address in use to the beginning of the list.
b.rbs[0], b.rbs[foundIdx] = b.rbs[foundIdx], b.rbs[0]
rbIdx = 0
}
continue // If found, don't dial new cc.
} else if len(rbs) > 0 {
// Pick a random one from the list, instead of always using the first one.
if l := len(rbs); l > 1 && rb != nil {
tmpIdx := b.rand.Intn(l - 1)
b.rbs[0], b.rbs[tmpIdx] = b.rbs[tmpIdx], b.rbs[0]
}
rbIdx = 0
rb = &rbs[0]
} else {
// foundIdx < 0 && len(rbs) <= 0.
rb = nil
}
case <-ccError:
ccError = nil
if rbIdx < len(rbs)-1 {
rbIdx++
rb = &rbs[rbIdx]
} else {
rb = nil
}
}
if rb == nil {
continue
}
if cc != nil {
cc.Close()
}
// Talk to the remote load balancer to get the server list.
var (
err error
dopts []DialOption
)
if creds := config.DialCreds; creds != nil {
if rb.name != "" {
if err := creds.OverrideServerName(rb.name); err != nil {
grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v", err)
continue
}
}
dopts = append(dopts, WithTransportCredentials(creds))
} else {
dopts = append(dopts, WithInsecure())
}
if dialer := config.Dialer; dialer != nil {
// WithDialer takes a different type of function, so we instead use a special DialOption here.
dopts = append(dopts, func(o *dialOptions) { o.copts.Dialer = dialer })
}
ccError = make(chan struct{})
cc, err = Dial(rb.addr, dopts...)
if err != nil {
grpclog.Warningf("grpclb: failed to setup a connection to the remote balancer %v: %v", rb.addr, err)
close(ccError)
continue
}
b.mu.Lock()
b.seq++ // tick when getting a new balancer address
seq := b.seq
b.next = 0
b.mu.Unlock()
go func(cc *ClientConn, ccError chan struct{}) {
lbc := &loadBalancerClient{cc}
b.callRemoteBalancer(lbc, seq)
cc.Close()
select {
case <-ccError:
default:
close(ccError)
}
}(cc, ccError)
}
}()
return nil
}
func (b *balancer) down(addr Address, err error) {
b.mu.Lock()
defer b.mu.Unlock()
for _, a := range b.addrs {
if addr == a.addr {
a.connected = false
break
}
}
}
func (b *balancer) Up(addr Address) func(error) {
b.mu.Lock()
defer b.mu.Unlock()
if b.done {
return nil
}
var cnt int
for _, a := range b.addrs {
if a.addr == addr {
if a.connected {
return nil
}
a.connected = true
}
if a.connected && !a.dropForRateLimiting && !a.dropForLoadBalancing {
cnt++
}
}
// addr is the only one which is connected. Notify the Get() callers who are blocking.
if cnt == 1 && b.waitCh != nil {
close(b.waitCh)
b.waitCh = nil
}
return func(err error) {
b.down(addr, err)
}
}
func (b *balancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
var ch chan struct{}
b.mu.Lock()
if b.done {
b.mu.Unlock()
err = ErrClientConnClosing
return
}
seq := b.seq
defer func() {
if err != nil {
return
}
put = func() {
s, ok := rpcInfoFromContext(ctx)
if !ok {
return
}
b.mu.Lock()
defer b.mu.Unlock()
if b.done || seq < b.seq {
return
}
b.clientStats.NumCallsFinished++
if !s.bytesSent {
b.clientStats.NumCallsFinishedWithClientFailedToSend++
} else if s.bytesReceived {
b.clientStats.NumCallsFinishedKnownReceived++
}
}
}()
b.clientStats.NumCallsStarted++
if len(b.addrs) > 0 {
if b.next >= len(b.addrs) {
b.next = 0
}
next := b.next
for {
a := b.addrs[next]
next = (next + 1) % len(b.addrs)
if a.connected {
if !a.dropForRateLimiting && !a.dropForLoadBalancing {
addr = a.addr
b.next = next
b.mu.Unlock()
return
}
if !opts.BlockingWait {
b.next = next
if a.dropForLoadBalancing {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithDropForLoadBalancing++
} else if a.dropForRateLimiting {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithDropForRateLimiting++
}
b.mu.Unlock()
err = Errorf(codes.Unavailable, "%s drops requests", a.addr.Addr)
return
}
}
if next == b.next {
// Has iterated all the possible address but none is connected.
break
}
}
}
if !opts.BlockingWait {
if len(b.addrs) == 0 {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithClientFailedToSend++
b.mu.Unlock()
err = Errorf(codes.Unavailable, "there is no address available")
return
}
// Returns the next addr on b.addrs for a failfast RPC.
addr = b.addrs[b.next].addr
b.next++
b.mu.Unlock()
return
}
// Wait on b.waitCh for non-failfast RPCs.
if b.waitCh == nil {
ch = make(chan struct{})
b.waitCh = ch
} else {
ch = b.waitCh
}
b.mu.Unlock()
for {
select {
case <-ctx.Done():
b.mu.Lock()
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithClientFailedToSend++
b.mu.Unlock()
err = ctx.Err()
return
case <-ch:
b.mu.Lock()
if b.done {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithClientFailedToSend++
b.mu.Unlock()
err = ErrClientConnClosing
return
}
if len(b.addrs) > 0 {
if b.next >= len(b.addrs) {
b.next = 0
}
next := b.next
for {
a := b.addrs[next]
next = (next + 1) % len(b.addrs)
if a.connected {
if !a.dropForRateLimiting && !a.dropForLoadBalancing {
addr = a.addr
b.next = next
b.mu.Unlock()
return
}
if !opts.BlockingWait {
b.next = next
if a.dropForLoadBalancing {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithDropForLoadBalancing++
} else if a.dropForRateLimiting {
b.clientStats.NumCallsFinished++
b.clientStats.NumCallsFinishedWithDropForRateLimiting++
}
b.mu.Unlock()
err = Errorf(codes.Unavailable, "drop requests for the addreess %s", a.addr.Addr)
return
}
}
if next == b.next {
// Has iterated all the possible address but none is connected.
break
}
}
}
// The newly added addr got removed by Down() again.
if b.waitCh == nil {
ch = make(chan struct{})
b.waitCh = ch
} else {
ch = b.waitCh
}
b.mu.Unlock()
}
}
}
func (b *balancer) Notify() <-chan []Address {
return b.addrCh
}
func (b *balancer) Close() error {
b.mu.Lock()
defer b.mu.Unlock()
if b.done {
return errBalancerClosed
}
b.done = true
if b.expTimer != nil {
b.expTimer.Stop()
}
if b.waitCh != nil {
close(b.waitCh)
}
if b.addrCh != nil {
close(b.addrCh)
}
if b.w != nil {
b.w.Close()
}
return nil
}
| vendor/google.golang.org/grpc/grpclb.go | 0 | https://github.com/hashicorp/terraform/commit/52c0032aedf1c3355ffbec1e566a2b0539ae019e | [
0.0003538666060194373,
0.00017451081657782197,
0.00016105332178995013,
0.00016894424334168434,
0.000026915948183159344
] |
{
"id": 4,
"code_window": [
"\tprotoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProvisionerSchema_Request))\n",
"\tif err != nil {\n",
"\t\tresp.Diagnostics = resp.Diagnostics.Append(err)\n",
"\t\treturn resp\n",
"\t}\n",
"\n",
"\tif protoResp.Provisioner == nil {\n",
"\t\tresp.Diagnostics = resp.Diagnostics.Append(errors.New(\"missing provisioner schema\"))\n",
"\t\treturn resp\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tresp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))\n"
],
"file_path": "plugin/grpc_provisioner.go",
"type": "add",
"edit_start_line_idx": 66
} | package swift
import (
"context"
"fmt"
"log"
"strconv"
"strings"
"time"
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/helper/schema"
tf_openstack "github.com/terraform-providers/terraform-provider-openstack/openstack"
)
// New creates a new backend for Swift remote state.
func New() backend.Backend {
s := &schema.Backend{
Schema: map[string]*schema.Schema{
"auth_url": &schema.Schema{
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("OS_AUTH_URL", nil),
Description: descriptions["auth_url"],
},
"user_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("OS_USER_ID", ""),
Description: descriptions["user_name"],
},
"user_name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("OS_USERNAME", ""),
Description: descriptions["user_name"],
},
"tenant_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.MultiEnvDefaultFunc([]string{
"OS_TENANT_ID",
"OS_PROJECT_ID",
}, ""),
Description: descriptions["tenant_id"],
},
"tenant_name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.MultiEnvDefaultFunc([]string{
"OS_TENANT_NAME",
"OS_PROJECT_NAME",
}, ""),
Description: descriptions["tenant_name"],
},
"password": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Sensitive: true,
DefaultFunc: schema.EnvDefaultFunc("OS_PASSWORD", ""),
Description: descriptions["password"],
},
"token": &schema.Schema{
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("OS_AUTH_TOKEN", ""),
Description: descriptions["token"],
},
"domain_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.MultiEnvDefaultFunc([]string{
"OS_USER_DOMAIN_ID",
"OS_PROJECT_DOMAIN_ID",
"OS_DOMAIN_ID",
}, ""),
Description: descriptions["domain_id"],
},
"domain_name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.MultiEnvDefaultFunc([]string{
"OS_USER_DOMAIN_NAME",
"OS_PROJECT_DOMAIN_NAME",
"OS_DOMAIN_NAME",
"OS_DEFAULT_DOMAIN",
}, ""),
Description: descriptions["domain_name"],
},
"region_name": &schema.Schema{
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""),
Description: descriptions["region_name"],
},
"insecure": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("OS_INSECURE", ""),
Description: descriptions["insecure"],
},
"endpoint_type": &schema.Schema{
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("OS_ENDPOINT_TYPE", ""),
},
"cacert_file": &schema.Schema{
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("OS_CACERT", ""),
Description: descriptions["cacert_file"],
},
"cert": &schema.Schema{
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("OS_CERT", ""),
Description: descriptions["cert"],
},
"key": &schema.Schema{
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("OS_KEY", ""),
Description: descriptions["key"],
},
"path": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: descriptions["path"],
Deprecated: "Use container instead",
ConflictsWith: []string{"container"},
},
"container": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: descriptions["container"],
},
"archive_path": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: descriptions["archive_path"],
Deprecated: "Use archive_container instead",
ConflictsWith: []string{"archive_container"},
},
"archive_container": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: descriptions["archive_container"],
},
"expire_after": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: descriptions["expire_after"],
},
},
}
result := &Backend{Backend: s}
result.Backend.ConfigureFunc = result.configure
return result
}
var descriptions map[string]string
func init() {
descriptions = map[string]string{
"auth_url": "The Identity authentication URL.",
"user_name": "Username to login with.",
"user_id": "User ID to login with.",
"tenant_id": "The ID of the Tenant (Identity v2) or Project (Identity v3)\n" +
"to login with.",
"tenant_name": "The name of the Tenant (Identity v2) or Project (Identity v3)\n" +
"to login with.",
"password": "Password to login with.",
"token": "Authentication token to use as an alternative to username/password.",
"domain_id": "The ID of the Domain to scope to (Identity v3).",
"domain_name": "The name of the Domain to scope to (Identity v3).",
"region_name": "The name of the Region to use.",
"insecure": "Trust self-signed certificates.",
"cacert_file": "A Custom CA certificate.",
"endpoint_type": "The catalog endpoint type to use.",
"cert": "A client certificate to authenticate with.",
"key": "A client private key to authenticate with.",
"path": "Swift container path to use.",
"container": "Swift container to create",
"archive_path": "Swift container path to archive state to.",
"archive_container": "Swift container to archive state to.",
"expire_after": "Archive object expiry duration.",
}
}
type Backend struct {
*schema.Backend
// Fields below are set from configure
client *gophercloud.ServiceClient
archive bool
archiveContainer string
expireSecs int
container string
}
func (b *Backend) configure(ctx context.Context) error {
if b.client != nil {
return nil
}
// Grab the resource data
data := schema.FromContextBackendConfig(ctx)
config := &tf_openstack.Config{
CACertFile: data.Get("cacert_file").(string),
ClientCertFile: data.Get("cert").(string),
ClientKeyFile: data.Get("key").(string),
DomainID: data.Get("domain_id").(string),
DomainName: data.Get("domain_name").(string),
EndpointType: data.Get("endpoint_type").(string),
IdentityEndpoint: data.Get("auth_url").(string),
Insecure: data.Get("insecure").(bool),
Password: data.Get("password").(string),
Token: data.Get("token").(string),
TenantID: data.Get("tenant_id").(string),
TenantName: data.Get("tenant_name").(string),
Username: data.Get("user_name").(string),
UserID: data.Get("user_id").(string),
}
if err := config.LoadAndValidate(); err != nil {
return err
}
// Assign Container
b.container = data.Get("container").(string)
if b.container == "" {
// Check deprecated field
b.container = data.Get("path").(string)
}
// Enable object archiving?
if archiveContainer, ok := data.GetOk("archive_container"); ok {
log.Printf("[DEBUG] Archive_container set, enabling object versioning")
b.archive = true
b.archiveContainer = archiveContainer.(string)
} else if archivePath, ok := data.GetOk("archive_path"); ok {
log.Printf("[DEBUG] Archive_path set, enabling object versioning")
b.archive = true
b.archiveContainer = archivePath.(string)
}
// Enable object expiry?
if expireRaw, ok := data.GetOk("expire_after"); ok {
expire := expireRaw.(string)
log.Printf("[DEBUG] Requested that remote state expires after %s", expire)
if strings.HasSuffix(expire, "d") {
log.Printf("[DEBUG] Got a days expire after duration. Converting to hours")
days, err := strconv.Atoi(expire[:len(expire)-1])
if err != nil {
return fmt.Errorf("Error converting expire_after value %s to int: %s", expire, err)
}
expire = fmt.Sprintf("%dh", days*24)
log.Printf("[DEBUG] Expire after %s hours", expire)
}
expireDur, err := time.ParseDuration(expire)
if err != nil {
log.Printf("[DEBUG] Error parsing duration %s: %s", expire, err)
return fmt.Errorf("Error parsing expire_after duration '%s': %s", expire, err)
}
log.Printf("[DEBUG] Seconds duration = %d", int(expireDur.Seconds()))
b.expireSecs = int(expireDur.Seconds())
}
objClient, err := openstack.NewObjectStorageV1(config.OsClient, gophercloud.EndpointOpts{
Region: data.Get("region_name").(string),
})
if err != nil {
return err
}
b.client = objClient
return nil
}
| backend/remote-state/swift/backend.go | 0 | https://github.com/hashicorp/terraform/commit/52c0032aedf1c3355ffbec1e566a2b0539ae019e | [
0.0015761511167511344,
0.00024341662356164306,
0.00016253248031716794,
0.0001711942022666335,
0.00028948241379112005
] |
{
"id": 4,
"code_window": [
"\tprotoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProvisionerSchema_Request))\n",
"\tif err != nil {\n",
"\t\tresp.Diagnostics = resp.Diagnostics.Append(err)\n",
"\t\treturn resp\n",
"\t}\n",
"\n",
"\tif protoResp.Provisioner == nil {\n",
"\t\tresp.Diagnostics = resp.Diagnostics.Append(errors.New(\"missing provisioner schema\"))\n",
"\t\treturn resp\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tresp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))\n"
],
"file_path": "plugin/grpc_provisioner.go",
"type": "add",
"edit_start_line_idx": 66
} | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package timeseries implements a time series structure for stats collection.
package timeseries // import "golang.org/x/net/internal/timeseries"
import (
"fmt"
"log"
"time"
)
const (
timeSeriesNumBuckets = 64
minuteHourSeriesNumBuckets = 60
)
var timeSeriesResolutions = []time.Duration{
1 * time.Second,
10 * time.Second,
1 * time.Minute,
10 * time.Minute,
1 * time.Hour,
6 * time.Hour,
24 * time.Hour, // 1 day
7 * 24 * time.Hour, // 1 week
4 * 7 * 24 * time.Hour, // 4 weeks
16 * 7 * 24 * time.Hour, // 16 weeks
}
var minuteHourSeriesResolutions = []time.Duration{
1 * time.Second,
1 * time.Minute,
}
// An Observable is a kind of data that can be aggregated in a time series.
type Observable interface {
Multiply(ratio float64) // Multiplies the data in self by a given ratio
Add(other Observable) // Adds the data from a different observation to self
Clear() // Clears the observation so it can be reused.
CopyFrom(other Observable) // Copies the contents of a given observation to self
}
// Float attaches the methods of Observable to a float64.
type Float float64
// NewFloat returns a Float.
func NewFloat() Observable {
f := Float(0)
return &f
}
// String returns the float as a string.
func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) }
// Value returns the float's value.
func (f *Float) Value() float64 { return float64(*f) }
func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) }
func (f *Float) Add(other Observable) {
o := other.(*Float)
*f += *o
}
func (f *Float) Clear() { *f = 0 }
func (f *Float) CopyFrom(other Observable) {
o := other.(*Float)
*f = *o
}
// A Clock tells the current time.
type Clock interface {
Time() time.Time
}
type defaultClock int
var defaultClockInstance defaultClock
func (defaultClock) Time() time.Time { return time.Now() }
// Information kept per level. Each level consists of a circular list of
// observations. The start of the level may be derived from end and the
// len(buckets) * sizeInMillis.
type tsLevel struct {
oldest int // index to oldest bucketed Observable
newest int // index to newest bucketed Observable
end time.Time // end timestamp for this level
size time.Duration // duration of the bucketed Observable
buckets []Observable // collections of observations
provider func() Observable // used for creating new Observable
}
func (l *tsLevel) Clear() {
l.oldest = 0
l.newest = len(l.buckets) - 1
l.end = time.Time{}
for i := range l.buckets {
if l.buckets[i] != nil {
l.buckets[i].Clear()
l.buckets[i] = nil
}
}
}
func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) {
l.size = size
l.provider = f
l.buckets = make([]Observable, numBuckets)
}
// Keeps a sequence of levels. Each level is responsible for storing data at
// a given resolution. For example, the first level stores data at a one
// minute resolution while the second level stores data at a one hour
// resolution.
// Each level is represented by a sequence of buckets. Each bucket spans an
// interval equal to the resolution of the level. New observations are added
// to the last bucket.
type timeSeries struct {
provider func() Observable // make more Observable
numBuckets int // number of buckets in each level
levels []*tsLevel // levels of bucketed Observable
lastAdd time.Time // time of last Observable tracked
total Observable // convenient aggregation of all Observable
clock Clock // Clock for getting current time
pending Observable // observations not yet bucketed
pendingTime time.Time // what time are we keeping in pending
dirty bool // if there are pending observations
}
// init initializes a level according to the supplied criteria.
func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) {
ts.provider = f
ts.numBuckets = numBuckets
ts.clock = clock
ts.levels = make([]*tsLevel, len(resolutions))
for i := range resolutions {
if i > 0 && resolutions[i-1] >= resolutions[i] {
log.Print("timeseries: resolutions must be monotonically increasing")
break
}
newLevel := new(tsLevel)
newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider)
ts.levels[i] = newLevel
}
ts.Clear()
}
// Clear removes all observations from the time series.
func (ts *timeSeries) Clear() {
ts.lastAdd = time.Time{}
ts.total = ts.resetObservation(ts.total)
ts.pending = ts.resetObservation(ts.pending)
ts.pendingTime = time.Time{}
ts.dirty = false
for i := range ts.levels {
ts.levels[i].Clear()
}
}
// Add records an observation at the current time.
func (ts *timeSeries) Add(observation Observable) {
ts.AddWithTime(observation, ts.clock.Time())
}
// AddWithTime records an observation at the specified time.
func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) {
smallBucketDuration := ts.levels[0].size
if t.After(ts.lastAdd) {
ts.lastAdd = t
}
if t.After(ts.pendingTime) {
ts.advance(t)
ts.mergePendingUpdates()
ts.pendingTime = ts.levels[0].end
ts.pending.CopyFrom(observation)
ts.dirty = true
} else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) {
// The observation is close enough to go into the pending bucket.
// This compensates for clock skewing and small scheduling delays
// by letting the update stay in the fast path.
ts.pending.Add(observation)
ts.dirty = true
} else {
ts.mergeValue(observation, t)
}
}
// mergeValue inserts the observation at the specified time in the past into all levels.
func (ts *timeSeries) mergeValue(observation Observable, t time.Time) {
for _, level := range ts.levels {
index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size)
if 0 <= index && index < ts.numBuckets {
bucketNumber := (level.oldest + index) % ts.numBuckets
if level.buckets[bucketNumber] == nil {
level.buckets[bucketNumber] = level.provider()
}
level.buckets[bucketNumber].Add(observation)
}
}
ts.total.Add(observation)
}
// mergePendingUpdates applies the pending updates into all levels.
func (ts *timeSeries) mergePendingUpdates() {
if ts.dirty {
ts.mergeValue(ts.pending, ts.pendingTime)
ts.pending = ts.resetObservation(ts.pending)
ts.dirty = false
}
}
// advance cycles the buckets at each level until the latest bucket in
// each level can hold the time specified.
func (ts *timeSeries) advance(t time.Time) {
if !t.After(ts.levels[0].end) {
return
}
for i := 0; i < len(ts.levels); i++ {
level := ts.levels[i]
if !level.end.Before(t) {
break
}
// If the time is sufficiently far, just clear the level and advance
// directly.
if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) {
for _, b := range level.buckets {
ts.resetObservation(b)
}
level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds())
}
for t.After(level.end) {
level.end = level.end.Add(level.size)
level.newest = level.oldest
level.oldest = (level.oldest + 1) % ts.numBuckets
ts.resetObservation(level.buckets[level.newest])
}
t = level.end
}
}
// Latest returns the sum of the num latest buckets from the level.
func (ts *timeSeries) Latest(level, num int) Observable {
now := ts.clock.Time()
if ts.levels[0].end.Before(now) {
ts.advance(now)
}
ts.mergePendingUpdates()
result := ts.provider()
l := ts.levels[level]
index := l.newest
for i := 0; i < num; i++ {
if l.buckets[index] != nil {
result.Add(l.buckets[index])
}
if index == 0 {
index = ts.numBuckets
}
index--
}
return result
}
// LatestBuckets returns a copy of the num latest buckets from level.
func (ts *timeSeries) LatestBuckets(level, num int) []Observable {
if level < 0 || level > len(ts.levels) {
log.Print("timeseries: bad level argument: ", level)
return nil
}
if num < 0 || num >= ts.numBuckets {
log.Print("timeseries: bad num argument: ", num)
return nil
}
results := make([]Observable, num)
now := ts.clock.Time()
if ts.levels[0].end.Before(now) {
ts.advance(now)
}
ts.mergePendingUpdates()
l := ts.levels[level]
index := l.newest
for i := 0; i < num; i++ {
result := ts.provider()
results[i] = result
if l.buckets[index] != nil {
result.CopyFrom(l.buckets[index])
}
if index == 0 {
index = ts.numBuckets
}
index -= 1
}
return results
}
// ScaleBy updates observations by scaling by factor.
func (ts *timeSeries) ScaleBy(factor float64) {
for _, l := range ts.levels {
for i := 0; i < ts.numBuckets; i++ {
l.buckets[i].Multiply(factor)
}
}
ts.total.Multiply(factor)
ts.pending.Multiply(factor)
}
// Range returns the sum of observations added over the specified time range.
// If start or finish times don't fall on bucket boundaries of the same
// level, then return values are approximate answers.
func (ts *timeSeries) Range(start, finish time.Time) Observable {
return ts.ComputeRange(start, finish, 1)[0]
}
// Recent returns the sum of observations from the last delta.
func (ts *timeSeries) Recent(delta time.Duration) Observable {
now := ts.clock.Time()
return ts.Range(now.Add(-delta), now)
}
// Total returns the total of all observations.
func (ts *timeSeries) Total() Observable {
ts.mergePendingUpdates()
return ts.total
}
// ComputeRange computes a specified number of values into a slice using
// the observations recorded over the specified time period. The return
// values are approximate if the start or finish times don't fall on the
// bucket boundaries at the same level or if the number of buckets spanning
// the range is not an integral multiple of num.
func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable {
if start.After(finish) {
log.Printf("timeseries: start > finish, %v>%v", start, finish)
return nil
}
if num < 0 {
log.Printf("timeseries: num < 0, %v", num)
return nil
}
results := make([]Observable, num)
for _, l := range ts.levels {
if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) {
ts.extract(l, start, finish, num, results)
return results
}
}
// Failed to find a level that covers the desired range. So just
// extract from the last level, even if it doesn't cover the entire
// desired range.
ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results)
return results
}
// RecentList returns the specified number of values in slice over the most
// recent time period of the specified range.
func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable {
if delta < 0 {
return nil
}
now := ts.clock.Time()
return ts.ComputeRange(now.Add(-delta), now, num)
}
// extract returns a slice of specified number of observations from a given
// level over a given range.
func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) {
ts.mergePendingUpdates()
srcInterval := l.size
dstInterval := finish.Sub(start) / time.Duration(num)
dstStart := start
srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets))
srcIndex := 0
// Where should scanning start?
if dstStart.After(srcStart) {
advance := dstStart.Sub(srcStart) / srcInterval
srcIndex += int(advance)
srcStart = srcStart.Add(advance * srcInterval)
}
	// The i'th value is computed as shown below.
// interval = (finish/start)/num
// i'th value = sum of observation in range
// [ start + i * interval,
// start + (i + 1) * interval )
for i := 0; i < num; i++ {
results[i] = ts.resetObservation(results[i])
dstEnd := dstStart.Add(dstInterval)
for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) {
srcEnd := srcStart.Add(srcInterval)
if srcEnd.After(ts.lastAdd) {
srcEnd = ts.lastAdd
}
if !srcEnd.Before(dstStart) {
srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets]
if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) {
// dst completely contains src.
if srcValue != nil {
results[i].Add(srcValue)
}
} else {
// dst partially overlaps src.
overlapStart := maxTime(srcStart, dstStart)
overlapEnd := minTime(srcEnd, dstEnd)
base := srcEnd.Sub(srcStart)
fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds()
used := ts.provider()
if srcValue != nil {
used.CopyFrom(srcValue)
}
used.Multiply(fraction)
results[i].Add(used)
}
if srcEnd.After(dstEnd) {
break
}
}
srcIndex++
srcStart = srcStart.Add(srcInterval)
}
dstStart = dstStart.Add(dstInterval)
}
}
// resetObservation clears the content so the struct may be reused.
func (ts *timeSeries) resetObservation(observation Observable) Observable {
if observation == nil {
observation = ts.provider()
} else {
observation.Clear()
}
return observation
}
// TimeSeries tracks data at granularities from 1 second to 16 weeks.
type TimeSeries struct {
timeSeries
}
// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable.
func NewTimeSeries(f func() Observable) *TimeSeries {
return NewTimeSeriesWithClock(f, defaultClockInstance)
}
// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for
// assigning timestamps.
func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries {
ts := new(TimeSeries)
ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock)
return ts
}
// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour.
type MinuteHourSeries struct {
timeSeries
}
// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable.
func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries {
return NewMinuteHourSeriesWithClock(f, defaultClockInstance)
}
// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for
// assigning timestamps.
func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries {
ts := new(MinuteHourSeries)
ts.timeSeries.init(minuteHourSeriesResolutions, f,
minuteHourSeriesNumBuckets, clock)
return ts
}
func (ts *MinuteHourSeries) Minute() Observable {
return ts.timeSeries.Latest(0, 60)
}
func (ts *MinuteHourSeries) Hour() Observable {
return ts.timeSeries.Latest(1, 60)
}
func minTime(a, b time.Time) time.Time {
if a.Before(b) {
return a
}
return b
}
func maxTime(a, b time.Time) time.Time {
if a.After(b) {
return a
}
return b
}
| vendor/golang.org/x/net/internal/timeseries/timeseries.go | 0 | https://github.com/hashicorp/terraform/commit/52c0032aedf1c3355ffbec1e566a2b0539ae019e | [
0.0001778846635716036,
0.000171648251125589,
0.00015942884783726186,
0.0001721695443848148,
0.000004007667030236917
] |
{
"id": 0,
"code_window": [
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvclient\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvserver\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvserver/allocator/storepool\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/roachpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/rpc\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/server/admin.go",
"type": "replace",
"edit_start_line_idx": 35
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package optionalnodeliveness
import (
"context"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/errorutil"
)
// Interface is the interface used in Container.
type Interface interface {
Self() (livenesspb.Liveness, bool)
GetLiveness(nodeID roachpb.NodeID) (liveness.Record, bool)
GetLivenessesFromKV(ctx context.Context) ([]livenesspb.Liveness, error)
IsAvailable(roachpb.NodeID) bool
IsAvailableNotDraining(roachpb.NodeID) bool
IsLive(roachpb.NodeID) (bool, error)
}
// Container optionally gives access to liveness information about
// the KV nodes. It is typically not available to anyone but the system tenant.
type Container struct {
w errorutil.TenantSQLDeprecatedWrapper
}
// MakeContainer initializes an Container wrapping a
// (possibly nil) *NodeLiveness.
//
// Use of node liveness from within the SQL layer is **deprecated**. Please do
// not introduce new uses of it.
//
// See TenantSQLDeprecatedWrapper for details.
func MakeContainer(nl Interface) Container {
return Container{
w: errorutil.MakeTenantSQLDeprecatedWrapper(nl, nl != nil),
}
}
// OptionalErr returns the NodeLiveness instance if available. Otherwise, it
// returns an error referring to the optionally passed in issues.
//
// Use of NodeLiveness from within the SQL layer is **deprecated**. Please do
// not introduce new uses of it.
func (nl *Container) OptionalErr(issue int) (Interface, error) {
v, err := nl.w.OptionalErr(issue)
if err != nil {
return nil, err
}
return v.(Interface), nil
}
var _ = (*Container)(nil).OptionalErr // silence unused lint
// Optional returns the NodeLiveness instance and true if available.
// Otherwise, returns nil and false. Prefer OptionalErr where possible.
//
// Use of NodeLiveness from within the SQL layer is **deprecated**. Please do
// not introduce new uses of it.
func (nl *Container) Optional(issue int) (Interface, bool) {
v, ok := nl.w.Optional()
if !ok {
return nil, false
}
return v.(Interface), true
}
| pkg/sql/optionalnodeliveness/node_liveness.go | 1 | https://github.com/cockroachdb/cockroach/commit/bd4638acbc9359bc0a02bbd05adf63ccc09fcde5 | [
0.021220337599515915,
0.0029598260298371315,
0.0001618413662072271,
0.00017255776037927717,
0.006913557182997465
] |
{
"id": 0,
"code_window": [
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvclient\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvserver\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvserver/allocator/storepool\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/roachpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/rpc\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/server/admin.go",
"type": "replace",
"edit_start_line_idx": 35
} | setup
CREATE TABLE t(
a INT PRIMARY KEY,
b INT,
C INT,
INDEX t_idx_b(b),
INDEX t_idx_c(c)
);
CREATE SEQUENCE sq1;
CREATE VIEW v AS SELECT a FROM t;
CREATE TYPE notmyworkday AS ENUM ('Monday', 'Tuesday');
CREATE FUNCTION f(a notmyworkday) RETURNS INT VOLATILE LANGUAGE SQL AS $$
SELECT a FROM t;
SELECT b FROM t@t_idx_b;
SELECT c FROM t@t_idx_c;
SELECT a FROM v;
SELECT nextval('sq1');
$$;
----
build
DROP FUNCTION f;
----
- [[Owner:{DescID: 109}, ABSENT], PUBLIC]
{descriptorId: 109, owner: root}
- [[UserPrivileges:{DescID: 109, Name: admin}, ABSENT], PUBLIC]
{descriptorId: 109, privileges: "2", userName: admin, withGrantOption: "2"}
- [[UserPrivileges:{DescID: 109, Name: root}, ABSENT], PUBLIC]
{descriptorId: 109, privileges: "2", userName: root, withGrantOption: "2"}
- [[Function:{DescID: 109}, ABSENT], PUBLIC]
{functionId: 109, params: [{class: {class: IN}, name: a, type: {closedTypeIds: [107, 108], type: {family: EnumFamily, oid: 100107, udtMetadata: {arrayTypeOid: 100108}}}}], returnType: {type: {family: IntFamily, oid: 20, width: 64}}}
- [[SchemaChild:{DescID: 109, ReferencedDescID: 101}, ABSENT], PUBLIC]
{childObjectId: 109, schemaId: 101}
- [[FunctionName:{DescID: 109}, ABSENT], PUBLIC]
{functionId: 109, name: f}
- [[FunctionVolatility:{DescID: 109}, ABSENT], PUBLIC]
{functionId: 109, volatility: {volatility: VOLATILE}}
- [[FunctionLeakProof:{DescID: 109}, ABSENT], PUBLIC]
{functionId: 109}
- [[FunctionNullInputBehavior:{DescID: 109}, ABSENT], PUBLIC]
{functionId: 109, nullInputBehavior: {nullInputBehavior: CALLED_ON_NULL_INPUT}}
- [[FunctionBody:{DescID: 109}, ABSENT], PUBLIC]
{body: "SELECT a FROM defaultdb.public.t;\nSELECT b FROM defaultdb.public.t@t_idx_b;\nSELECT c FROM defaultdb.public.t@t_idx_c;\nSELECT a FROM defaultdb.public.v;\nSELECT nextval(105:::REGCLASS);", functionId: 109, lang: {lang: SQL}, usesSequenceIds: [105], usesTables: [{columnIds: [1], tableId: 104}, {columnIds: [2], indexId: 2, tableId: 104}, {columnIds: [3], indexId: 3, tableId: 104}], usesTypeIds: [107, 108], usesViews: [{columnIds: [1], viewId: 106}]}
| pkg/sql/schemachanger/scbuild/testdata/drop_function | 0 | https://github.com/cockroachdb/cockroach/commit/bd4638acbc9359bc0a02bbd05adf63ccc09fcde5 | [
0.00017435320478398353,
0.00016989702999126166,
0.0001597677473910153,
0.00017388038395438343,
0.0000056936942200991325
] |
{
"id": 0,
"code_window": [
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvclient\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvserver\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvserver/allocator/storepool\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/roachpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/rpc\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/server/admin.go",
"type": "replace",
"edit_start_line_idx": 35
} | echo
----
db0.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
txn.SetIsoLevel(isolation.Serializable)
txn.DelRange(ctx, tk(2), tk(4), true /* @s1 */) // @<ts> <nil>
return nil
}) // @<ts> <nil>
// ^-- txnpb:<txn>
| pkg/kv/kvnemesis/testdata/TestApplier/txn-ssi-delrange | 0 | https://github.com/cockroachdb/cockroach/commit/bd4638acbc9359bc0a02bbd05adf63ccc09fcde5 | [
0.0001674083323450759,
0.0001674083323450759,
0.0001674083323450759,
0.0001674083323450759,
0
] |
{
"id": 0,
"code_window": [
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvclient\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvserver\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvserver/allocator/storepool\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/roachpb\"\n",
"\t\"github.com/cockroachdb/cockroach/pkg/rpc\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/server/admin.go",
"type": "replace",
"edit_start_line_idx": 35
} | // Copyright 2023 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package upgrades_test
import (
"context"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/upgrade/upgrades"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/stretchr/testify/assert"
)
func TestCreateActivityUpdateJobMigration(t *testing.T) {
skip.UnderStressRace(t)
defer leaktest.AfterTest(t)()
ctx := context.Background()
settings := cluster.MakeTestingClusterSettingsWithVersions(
clusterversion.TestingBinaryVersion,
clusterversion.TestingBinaryMinSupportedVersion,
false,
)
tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
Settings: settings,
Knobs: base.TestingKnobs{
Server: &server.TestingKnobs{
DisableAutomaticVersionUpgrade: make(chan struct{}),
BinaryVersionOverride: clusterversion.TestingBinaryMinSupportedVersion,
},
},
},
})
defer tc.Stopper().Stop(ctx)
db := tc.ServerConn(0)
defer db.Close()
// NB: this isn't actually doing anything, since the table is baked into the
// bootstrap schema, so this is really just showing the upgrade is idempotent,
// but this is in line with the other tests of createSystemTable upgrades.
upgrades.Upgrade(
t,
db,
clusterversion.V23_1CreateSystemActivityUpdateJob,
nil,
false,
)
row := db.QueryRow("SELECT count(*) FROM system.public.jobs WHERE id = 103")
assert.NotNil(t, row)
assert.NoError(t, row.Err())
var count int
err := row.Scan(&count)
assert.NoError(t, err)
assert.Equal(t, 1, count)
}
| pkg/upgrade/upgrades/system_activity_update_job_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/bd4638acbc9359bc0a02bbd05adf63ccc09fcde5 | [
0.0009711883612908423,
0.00035643443698063493,
0.0001629270554985851,
0.00017135079542640597,
0.0003251105663366616
] |
{
"id": 3,
"code_window": [
"// a slice containing the liveness record of all nodes that have ever been a part of the\n",
"// cluster.\n",
"func getLivenessResponse(\n",
"\tctx context.Context, nl optionalnodeliveness.Interface, now hlc.Timestamp, st *cluster.Settings,\n",
") (*serverpb.LivenessResponse, error) {\n",
"\tlivenesses, err := nl.GetLivenessesFromKV(ctx)\n",
"\tif err != nil {\n",
"\t\treturn nil, serverError(ctx, err)\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tnodeVitalityMap, err := nl.ScanNodeVitalityFromKV(ctx)\n",
"\n"
],
"file_path": "pkg/server/admin.go",
"type": "replace",
"edit_start_line_idx": 2056
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package optionalnodeliveness
import (
"context"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/errorutil"
)
// Interface is the interface used in Container.
type Interface interface {
Self() (livenesspb.Liveness, bool)
GetLiveness(nodeID roachpb.NodeID) (liveness.Record, bool)
GetLivenessesFromKV(ctx context.Context) ([]livenesspb.Liveness, error)
IsAvailable(roachpb.NodeID) bool
IsAvailableNotDraining(roachpb.NodeID) bool
IsLive(roachpb.NodeID) (bool, error)
}
// Container optionally gives access to liveness information about
// the KV nodes. It is typically not available to anyone but the system tenant.
type Container struct {
w errorutil.TenantSQLDeprecatedWrapper
}
// MakeContainer initializes an Container wrapping a
// (possibly nil) *NodeLiveness.
//
// Use of node liveness from within the SQL layer is **deprecated**. Please do
// not introduce new uses of it.
//
// See TenantSQLDeprecatedWrapper for details.
func MakeContainer(nl Interface) Container {
return Container{
w: errorutil.MakeTenantSQLDeprecatedWrapper(nl, nl != nil),
}
}
// OptionalErr returns the NodeLiveness instance if available. Otherwise, it
// returns an error referring to the optionally passed in issues.
//
// Use of NodeLiveness from within the SQL layer is **deprecated**. Please do
// not introduce new uses of it.
func (nl *Container) OptionalErr(issue int) (Interface, error) {
v, err := nl.w.OptionalErr(issue)
if err != nil {
return nil, err
}
return v.(Interface), nil
}
var _ = (*Container)(nil).OptionalErr // silence unused lint
// Optional returns the NodeLiveness instance and true if available.
// Otherwise, returns nil and false. Prefer OptionalErr where possible.
//
// Use of NodeLiveness from within the SQL layer is **deprecated**. Please do
// not introduce new uses of it.
func (nl *Container) Optional(issue int) (Interface, bool) {
v, ok := nl.w.Optional()
if !ok {
return nil, false
}
return v.(Interface), true
}
| pkg/sql/optionalnodeliveness/node_liveness.go | 1 | https://github.com/cockroachdb/cockroach/commit/bd4638acbc9359bc0a02bbd05adf63ccc09fcde5 | [
0.034676916897296906,
0.006787266582250595,
0.00017922898405231535,
0.0010346903000026941,
0.011019142344594002
] |
{
"id": 3,
"code_window": [
"// a slice containing the liveness record of all nodes that have ever been a part of the\n",
"// cluster.\n",
"func getLivenessResponse(\n",
"\tctx context.Context, nl optionalnodeliveness.Interface, now hlc.Timestamp, st *cluster.Settings,\n",
") (*serverpb.LivenessResponse, error) {\n",
"\tlivenesses, err := nl.GetLivenessesFromKV(ctx)\n",
"\tif err != nil {\n",
"\t\treturn nil, serverError(ctx, err)\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tnodeVitalityMap, err := nl.ScanNodeVitalityFromKV(ctx)\n",
"\n"
],
"file_path": "pkg/server/admin.go",
"type": "replace",
"edit_start_line_idx": 2056
} | /* setup */
CREATE DATABASE t;
CREATE TABLE t.test (k INT PRIMARY KEY, v INT, pi DECIMAL DEFAULT (DECIMAL '3.14'));
CREATE UNIQUE INDEX vidx ON t.test (v);
ALTER TABLE t.test ADD COLUMN x DECIMAL DEFAULT (DECIMAL '1.4') CHECK (x >= 0);
----
...
+database {0 0 t} -> 104
+schema {104 0 public} -> 105
+object {104 105 test} -> 106
/* test */
ALTER TABLE t.test DROP pi;
----
begin transaction #1
# begin StatementPhase
checking for feature: ALTER TABLE
increment telemetry for sql.schema.alter_table
increment telemetry for sql.schema.alter_table.drop_column
write *eventpb.AlterTable to event log:
mutationId: 1
sql:
descriptorId: 106
statement: ALTER TABLE ‹t›.‹public›.‹test› DROP COLUMN ‹pi›
tag: ALTER TABLE
user: root
tableName: t.public.test
## StatementPhase stage 1 of 1 with 10 MutationType ops
upsert descriptor #106
...
oid: 20
width: 64
- - defaultExpr: 3.14:::DECIMAL
- id: 3
- name: pi
- nullable: true
- type:
- family: DecimalFamily
- oid: 1700
- defaultExpr: 1.4:::DECIMAL
id: 4
...
- k
- v
- - pi
+ - crdb_internal_column_3_name_placeholder
- x
name: primary
...
version: 4
modificationTime: {}
+ mutations:
+ - column:
+ defaultExpr: 3.14:::DECIMAL
+ id: 3
+ name: crdb_internal_column_3_name_placeholder
+ nullable: true
+ type:
+ family: DecimalFamily
+ oid: 1700
+ direction: DROP
+ mutationId: 1
+ state: WRITE_ONLY
+ - direction: ADD
+ index:
+ constraintId: 7
+ createdExplicitly: true
+ encodingType: 1
+ foreignKey: {}
+ geoConfig: {}
+ id: 6
+ interleave: {}
+ keyColumnDirections:
+ - ASC
+ keyColumnIds:
+ - 1
+ keyColumnNames:
+ - k
+ name: crdb_internal_index_6_name_placeholder
+ partitioning: {}
+ sharded: {}
+ storeColumnIds:
+ - 2
+ - 4
+ storeColumnNames:
+ - v
+ - x
+ unique: true
+ version: 4
+ mutationId: 1
+ state: BACKFILLING
+ - direction: ADD
+ index:
+ constraintId: 8
+ createdExplicitly: true
+ encodingType: 1
+ foreignKey: {}
+ geoConfig: {}
+ id: 7
+ interleave: {}
+ keyColumnDirections:
+ - ASC
+ keyColumnIds:
+ - 1
+ keyColumnNames:
+ - k
+ name: crdb_internal_index_7_name_placeholder
+ partitioning: {}
+ sharded: {}
+ storeColumnIds:
+ - 2
+ - 4
+ storeColumnNames:
+ - v
+ - x
+ unique: true
+ useDeletePreservingEncoding: true
+ version: 4
+ mutationId: 1
+ state: DELETE_ONLY
name: test
nextColumnId: 5
- nextConstraintId: 7
+ nextConstraintId: 9
nextFamilyId: 1
- nextIndexId: 6
+ nextIndexId: 8
nextMutationId: 1
parentId: 104
...
storeColumnNames:
- v
- - pi
+ - crdb_internal_column_3_name_placeholder
- x
unique: true
...
time: {}
unexposedParentSchemaId: 105
- version: "16"
+ version: "17"
# end StatementPhase
# begin PreCommitPhase
## PreCommitPhase stage 1 of 2 with 1 MutationType op
undo all catalog changes within txn #1
persist all catalog changes to storage
## PreCommitPhase stage 2 of 2 with 14 MutationType ops
upsert descriptor #106
...
oid: 20
width: 64
- - defaultExpr: 3.14:::DECIMAL
- id: 3
- name: pi
- nullable: true
- type:
- family: DecimalFamily
- oid: 1700
- defaultExpr: 1.4:::DECIMAL
id: 4
...
createAsOfTime:
wallTime: "1640995200000000000"
+ declarativeSchemaChangerState:
+ authorization:
+ userName: root
+ currentStatuses: <redacted>
+ jobId: "1"
+ nameMapping:
+ columns:
+ "1": k
+ "2": v
+ "4": x
+ "4294967294": tableoid
+ "4294967295": crdb_internal_mvcc_timestamp
+ constraints:
+ "6": check_x
+ families:
+ "0": primary
+ id: 106
+ indexes:
+ "2": vidx
+ "6": test_pkey
+ name: test
+ relevantStatements:
+ - statement:
+ redactedStatement: ALTER TABLE ‹t›.‹public›.‹test› DROP COLUMN ‹pi›
+ statement: ALTER TABLE t.test DROP COLUMN pi
+ statementTag: ALTER TABLE
+ revertible: true
+ targetRanks: <redacted>
+ targets: <redacted>
families:
- columnIds:
...
- k
- v
- - pi
+ - crdb_internal_column_3_name_placeholder
- x
name: primary
...
version: 4
modificationTime: {}
+ mutations:
+ - column:
+ defaultExpr: 3.14:::DECIMAL
+ id: 3
+ name: crdb_internal_column_3_name_placeholder
+ nullable: true
+ type:
+ family: DecimalFamily
+ oid: 1700
+ direction: DROP
+ mutationId: 1
+ state: WRITE_ONLY
+ - direction: ADD
+ index:
+ constraintId: 7
+ createdExplicitly: true
+ encodingType: 1
+ foreignKey: {}
+ geoConfig: {}
+ id: 6
+ interleave: {}
+ keyColumnDirections:
+ - ASC
+ keyColumnIds:
+ - 1
+ keyColumnNames:
+ - k
+ name: crdb_internal_index_6_name_placeholder
+ partitioning: {}
+ sharded: {}
+ storeColumnIds:
+ - 2
+ - 4
+ storeColumnNames:
+ - v
+ - x
+ unique: true
+ version: 4
+ mutationId: 1
+ state: BACKFILLING
+ - direction: ADD
+ index:
+ constraintId: 8
+ createdExplicitly: true
+ encodingType: 1
+ foreignKey: {}
+ geoConfig: {}
+ id: 7
+ interleave: {}
+ keyColumnDirections:
+ - ASC
+ keyColumnIds:
+ - 1
+ keyColumnNames:
+ - k
+ name: crdb_internal_index_7_name_placeholder
+ partitioning: {}
+ sharded: {}
+ storeColumnIds:
+ - 2
+ - 4
+ storeColumnNames:
+ - v
+ - x
+ unique: true
+ useDeletePreservingEncoding: true
+ version: 4
+ mutationId: 1
+ state: DELETE_ONLY
name: test
nextColumnId: 5
- nextConstraintId: 7
+ nextConstraintId: 9
nextFamilyId: 1
- nextIndexId: 6
+ nextIndexId: 8
nextMutationId: 1
parentId: 104
...
storeColumnNames:
- v
- - pi
+ - crdb_internal_column_3_name_placeholder
- x
unique: true
...
time: {}
unexposedParentSchemaId: 105
- version: "16"
+ version: "17"
persist all catalog changes to storage
create job #1 (non-cancelable: false): "ALTER TABLE t.public.test DROP COLUMN pi"
descriptor IDs: [106]
# end PreCommitPhase
commit transaction #1
notified job registry to adopt jobs: [1]
# begin PostCommitPhase
begin transaction #2
commit transaction #2
begin transaction #3
## PostCommitPhase stage 1 of 7 with 3 MutationType ops
upsert descriptor #106
...
version: 4
mutationId: 1
- state: DELETE_ONLY
+ state: WRITE_ONLY
name: test
nextColumnId: 5
...
time: {}
unexposedParentSchemaId: 105
- version: "17"
+ version: "18"
persist all catalog changes to storage
update progress of schema change job #1: "PostCommitPhase stage 2 of 7 with 1 BackfillType op pending"
commit transaction #3
begin transaction #4
## PostCommitPhase stage 2 of 7 with 1 BackfillType op
backfill indexes [6] from index #4 in table #106
commit transaction #4
begin transaction #5
## PostCommitPhase stage 3 of 7 with 3 MutationType ops
upsert descriptor #106
...
version: 4
mutationId: 1
- state: BACKFILLING
+ state: DELETE_ONLY
- direction: ADD
index:
...
time: {}
unexposedParentSchemaId: 105
- version: "18"
+ version: "19"
persist all catalog changes to storage
update progress of schema change job #1: "PostCommitPhase stage 4 of 7 with 1 MutationType op pending"
commit transaction #5
begin transaction #6
## PostCommitPhase stage 4 of 7 with 3 MutationType ops
upsert descriptor #106
...
version: 4
mutationId: 1
- state: DELETE_ONLY
+ state: MERGING
- direction: ADD
index:
...
time: {}
unexposedParentSchemaId: 105
- version: "19"
+ version: "20"
persist all catalog changes to storage
update progress of schema change job #1: "PostCommitPhase stage 5 of 7 with 1 BackfillType op pending"
commit transaction #6
begin transaction #7
## PostCommitPhase stage 5 of 7 with 1 BackfillType op
merge temporary indexes [7] into backfilled indexes [6] in table #106
commit transaction #7
begin transaction #8
## PostCommitPhase stage 6 of 7 with 3 MutationType ops
upsert descriptor #106
...
version: 4
mutationId: 1
- state: MERGING
+ state: WRITE_ONLY
- direction: ADD
index:
...
time: {}
unexposedParentSchemaId: 105
- version: "20"
+ version: "21"
persist all catalog changes to storage
update progress of schema change job #1: "PostCommitPhase stage 7 of 7 with 1 ValidationType op pending"
commit transaction #8
begin transaction #9
## PostCommitPhase stage 7 of 7 with 1 ValidationType op
validate forward indexes [6] in table #106
commit transaction #9
begin transaction #10
## PostCommitNonRevertiblePhase stage 1 of 3 with 11 MutationType ops
upsert descriptor #106
...
statement: ALTER TABLE t.test DROP COLUMN pi
statementTag: ALTER TABLE
- revertible: true
targetRanks: <redacted>
targets: <redacted>
...
direction: DROP
mutationId: 1
- state: WRITE_ONLY
- - direction: ADD
+ state: DELETE_ONLY
+ - direction: DROP
index:
- constraintId: 7
+ constraintId: 8
createdExplicitly: true
encodingType: 1
foreignKey: {}
geoConfig: {}
- id: 6
+ id: 7
interleave: {}
keyColumnDirections:
...
keyColumnNames:
- k
- name: crdb_internal_index_6_name_placeholder
+ name: crdb_internal_index_7_name_placeholder
partitioning: {}
sharded: {}
...
- x
unique: true
+ useDeletePreservingEncoding: true
version: 4
mutationId: 1
- state: WRITE_ONLY
- - direction: ADD
+ state: DELETE_ONLY
+ - direction: DROP
index:
- constraintId: 8
+ constraintId: 4
createdExplicitly: true
encodingType: 1
foreignKey: {}
geoConfig: {}
- id: 7
+ id: 4
interleave: {}
keyColumnDirections:
...
keyColumnNames:
- k
- name: crdb_internal_index_7_name_placeholder
+ name: crdb_internal_index_4_name_placeholder
partitioning: {}
sharded: {}
storeColumnIds:
- 2
+ - 3
- 4
storeColumnNames:
- v
+ - crdb_internal_column_3_name_placeholder
- x
unique: true
- useDeletePreservingEncoding: true
version: 4
mutationId: 1
...
parentId: 104
primaryIndex:
- constraintId: 4
+ constraintId: 7
createdExplicitly: true
encodingType: 1
foreignKey: {}
geoConfig: {}
- id: 4
+ id: 6
interleave: {}
keyColumnDirections:
...
storeColumnIds:
- 2
- - 3
- 4
storeColumnNames:
- v
- - crdb_internal_column_3_name_placeholder
- x
unique: true
...
time: {}
unexposedParentSchemaId: 105
- version: "21"
+ version: "22"
persist all catalog changes to storage
update progress of schema change job #1: "PostCommitNonRevertiblePhase stage 2 of 3 with 6 MutationType ops pending"
set schema change job #1 to non-cancellable
commit transaction #10
begin transaction #11
## PostCommitNonRevertiblePhase stage 2 of 3 with 8 MutationType ops
upsert descriptor #106
...
- direction: DROP
index:
- constraintId: 8
- createdExplicitly: true
- encodingType: 1
- foreignKey: {}
- geoConfig: {}
- id: 7
- interleave: {}
- keyColumnDirections:
- - ASC
- keyColumnIds:
- - 1
- keyColumnNames:
- - k
- name: crdb_internal_index_7_name_placeholder
- partitioning: {}
- sharded: {}
- storeColumnIds:
- - 2
- - 4
- storeColumnNames:
- - v
- - x
- unique: true
- useDeletePreservingEncoding: true
- version: 4
- mutationId: 1
- state: DELETE_ONLY
- - direction: DROP
- index:
constraintId: 4
createdExplicitly: true
...
version: 4
mutationId: 1
- state: WRITE_ONLY
+ state: DELETE_ONLY
name: test
nextColumnId: 5
...
time: {}
unexposedParentSchemaId: 105
- version: "22"
+ version: "23"
persist all catalog changes to storage
update progress of schema change job #1: "PostCommitNonRevertiblePhase stage 3 of 3 with 5 MutationType ops pending"
commit transaction #11
begin transaction #12
## PostCommitNonRevertiblePhase stage 3 of 3 with 7 MutationType ops
upsert descriptor #106
...
createAsOfTime:
wallTime: "1640995200000000000"
- declarativeSchemaChangerState:
- authorization:
- userName: root
- currentStatuses: <redacted>
- jobId: "1"
- nameMapping:
- columns:
- "1": k
- "2": v
- "4": x
- "4294967294": tableoid
- "4294967295": crdb_internal_mvcc_timestamp
- constraints:
- "6": check_x
- families:
- "0": primary
- id: 106
- indexes:
- "2": vidx
- "6": test_pkey
- name: test
- relevantStatements:
- - statement:
- redactedStatement: ALTER TABLE ‹t›.‹public›.‹test› DROP COLUMN ‹pi›
- statement: ALTER TABLE t.test DROP COLUMN pi
- statementTag: ALTER TABLE
- targetRanks: <redacted>
- targets: <redacted>
families:
- columnIds:
- 1
- 2
- - 3
- 4
columnNames:
- k
- v
- - crdb_internal_column_3_name_placeholder
- x
name: primary
...
version: 4
modificationTime: {}
- mutations:
- - column:
- defaultExpr: 3.14:::DECIMAL
- id: 3
- name: crdb_internal_column_3_name_placeholder
- nullable: true
- type:
- family: DecimalFamily
- oid: 1700
- direction: DROP
- mutationId: 1
- state: DELETE_ONLY
- - direction: DROP
- index:
- constraintId: 4
- createdExplicitly: true
- encodingType: 1
- foreignKey: {}
- geoConfig: {}
- id: 4
- interleave: {}
- keyColumnDirections:
- - ASC
- keyColumnIds:
- - 1
- keyColumnNames:
- - k
- name: crdb_internal_index_4_name_placeholder
- partitioning: {}
- sharded: {}
- storeColumnIds:
- - 2
- - 3
- - 4
- storeColumnNames:
- - v
- - crdb_internal_column_3_name_placeholder
- - x
- unique: true
- version: 4
- mutationId: 1
- state: DELETE_ONLY
+ mutations: []
name: test
nextColumnId: 5
...
time: {}
unexposedParentSchemaId: 105
- version: "23"
+ version: "24"
persist all catalog changes to storage
create job #2 (non-cancelable: true): "GC for ALTER TABLE t.public.test DROP COLUMN pi"
descriptor IDs: [106]
update progress of schema change job #1: "all stages completed"
set schema change job #1 to non-cancellable
updated schema change job #1 descriptor IDs to []
write *eventpb.FinishSchemaChange to event log:
sc:
descriptorId: 106
commit transaction #12
notified job registry to adopt jobs: [2]
# end PostCommitPhase
| pkg/sql/schemachanger/testdata/end_to_end/drop_column_unique_index/drop_column_unique_index.side_effects | 0 | https://github.com/cockroachdb/cockroach/commit/bd4638acbc9359bc0a02bbd05adf63ccc09fcde5 | [
0.0001784532651072368,
0.00017571962962392718,
0.00017025411943905056,
0.0001763232285156846,
0.000001944426458067028
] |
{
"id": 3,
"code_window": [
"// a slice containing the liveness record of all nodes that have ever been a part of the\n",
"// cluster.\n",
"func getLivenessResponse(\n",
"\tctx context.Context, nl optionalnodeliveness.Interface, now hlc.Timestamp, st *cluster.Settings,\n",
") (*serverpb.LivenessResponse, error) {\n",
"\tlivenesses, err := nl.GetLivenessesFromKV(ctx)\n",
"\tif err != nil {\n",
"\t\treturn nil, serverError(ctx, err)\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tnodeVitalityMap, err := nl.ScanNodeVitalityFromKV(ctx)\n",
"\n"
],
"file_path": "pkg/server/admin.go",
"type": "replace",
"edit_start_line_idx": 2056
} | #! /usr/bin/env expect -f
source [file join [file dirname $argv0] common.tcl]
spawn /bin/bash
send "PS1=':''/# '\r"
eexpect ":/# "
start_test "Check that --max-disk-temp-storage works."
send "$argv start-single-node --insecure --store=path=logs/mystore --max-disk-temp-storage=10GiB\r"
eexpect "node starting"
interrupt
eexpect ":/# "
end_test
start_test "Check that --max-disk-temp-storage can be expressed as a percentage."
send "$argv start-single-node --insecure --store=path=logs/mystore --max-disk-temp-storage=10%\r"
eexpect "node starting"
interrupt
eexpect ":/# "
end_test
start_test "Check that --max-disk-temp-storage percentage works when the store is in-memory."
send "$argv start-single-node --insecure --store=type=mem,size=1GB --max-disk-temp-storage=10%\r"
eexpect "node starting"
interrupt
eexpect ":/# "
end_test
start_test "Check that memory max flags do not exceed available RAM."
send "$argv start-single-node --insecure --cache=.40 --max-sql-memory=.40\r"
eexpect "WARNING: the sum of --max-sql-memory"
eexpect "is larger than"
eexpect "of total RAM"
eexpect "increased risk"
eexpect "node starting"
interrupt
eexpect ":/# "
end_test
start_test "Check that not using --host nor --advertise causes a user warning."
send "$argv start-single-node --insecure\r"
eexpect "WARNING: neither --listen-addr nor --advertise-addr was specified"
eexpect "node starting"
interrupt
eexpect ":/# "
end_test
start_test "Check that using --advertise-addr does not cause a user warning."
send "$argv start-single-node --insecure --advertise-addr=172.31.11.189\r"
expect {
"WARNING: neither --listen-addr nor --advertise-addr was specified" {
report "unexpected WARNING: neither --listen-addr nor --advertise-addr was specified"
exit 1
}
}
eexpect "node starting"
interrupt
eexpect ":/# "
end_test
start_test "Check that --listening-url-file gets created with the right data"
send "$argv start-single-node --insecure --listening-url-file=foourl\r"
eexpect "node starting"
system "grep -q 'postgresql://.*@.*:\[0-9\]\[0-9\]*' foourl"
interrupt
eexpect ":/# "
end_test
start_test {Check that the "failed running SUBCOMMAND" message does not consider a flag the subcommand}
send "$argv --vmodule=*=2 start --garbage\r"
eexpect {Failed running "start"}
eexpect ":/# "
end_test
start_test {Check that the "failed running SUBCOMMAND" message handles nested subcommands}
send "$argv --vmodule=*=2 debug zip --garbage\r"
eexpect {Failed running "debug zip"}
eexpect ":/# "
end_test
start_test {Check that the "failed running SUBCOMMAND" message handles missing subcommands}
send "$argv --vmodule=*=2 --garbage\r"
eexpect {Failed running "cockroach"}
eexpect ":/# "
end_test
start_test "Check that start without --join errors out"
send "$argv start --insecure\r"
eexpect "ERROR: no --join flags provided to 'cockroach start'"
eexpect "HINT: Consider using 'cockroach init' or 'cockroach start-single-node' instead"
eexpect {Failed running "start"}
end_test
start_test "Check that demo start-up flags are reported to telemetry"
send "$argv demo --no-line-editor --no-example-database --echo-sql --logtostderr=WARNING\r"
eexpect "defaultdb>"
send "SELECT * FROM crdb_internal.feature_usage WHERE feature_name LIKE 'cli.demo.%' ORDER BY 1;\r"
eexpect feature_name
eexpect "cli.demo.explicitflags.echo-sql"
eexpect "cli.demo.explicitflags.logtostderr"
eexpect "cli.demo.explicitflags.no-example-database"
eexpect "cli.demo.runs"
eexpect "defaultdb>"
send_eof
eexpect ":/# "
end_test
start_test "Check that locality flags without a region tier warn"
send "$argv start-single-node --insecure --locality=data-center=us-east,zone=a\r"
eexpect "WARNING: The --locality flag does not contain a"
interrupt
eexpect ":/# "
end_test
start_server $argv
start_test "Check that server start-up flags are reported to telemetry"
send "$argv sql --insecure --no-line-editor\r"
eexpect "defaultdb>"
send "SELECT * FROM crdb_internal.feature_usage WHERE feature_name LIKE 'cli.start-single-node.%' ORDER BY 1;\r"
eexpect feature_name
eexpect "cli.start-single-node.explicitflags.insecure"
eexpect "cli.start-single-node.explicitflags.listening-url-file"
eexpect "cli.start-single-node.explicitflags.max-sql-memory"
eexpect "cli.start-single-node.runs"
eexpect "defaultdb>"
send_eof
eexpect ":/# "
end_test
start_test "Check that a client can connect using the URL env var"
send "export COCKROACH_URL=`cat server_url`;\r"
eexpect ":/# "
send "$argv sql --no-line-editor\r"
eexpect "defaultdb>"
send_eof
eexpect ":/# "
end_test
start_test "Check that an invalid URL in the env var produces a reasonable error"
send "export COCKROACH_URL=invalid_url;\r"
eexpect ":/# "
send "$argv sql --no-line-editor\r"
eexpect "ERROR"
eexpect "setting --url from COCKROACH_URL"
eexpect "invalid argument"
eexpect "unrecognized URL scheme"
eexpect ":/# "
send "unset COCKROACH_URL\r"
eexpect ":/# "
end_test
start_test "Check that common URL mistakes are detected and the user is informed"
send "$argv sql --no-line-editor --url='postgres://invalid:0/?-cinvalid'\r"
eexpect "warning: found raw URL parameter \"-cinvalid"
eexpect "are you sure"
eexpect ":/# "
send "$argv sql --no-line-editor --url='postgres://invalid:0/?options=-cluster=foo'\r"
eexpect "warning: found \"-cluster=\" in URL \"options\" field"
eexpect "are you sure"
eexpect ":/# "
end_test
stop_server $argv
start_test "Check that set GOMEMLIMIT env var without specifying --max-go-memory works"
send "export GOMEMLIMIT=1GiB;\r"
eexpect ":/# "
send "$argv start-single-node --insecure --store=path=logs/mystore\r"
eexpect "node starting"
interrupt
eexpect ":/# "
stop_server $argv
end_test
send "exit 0\r"
eexpect eof
| pkg/cli/interactive_tests/test_flags.tcl | 0 | https://github.com/cockroachdb/cockroach/commit/bd4638acbc9359bc0a02bbd05adf63ccc09fcde5 | [
0.0001764869666658342,
0.00017052698240149766,
0.00016345908807124943,
0.00017061058315448463,
0.0000031714992019260535
] |
{
"id": 3,
"code_window": [
"// a slice containing the liveness record of all nodes that have ever been a part of the\n",
"// cluster.\n",
"func getLivenessResponse(\n",
"\tctx context.Context, nl optionalnodeliveness.Interface, now hlc.Timestamp, st *cluster.Settings,\n",
") (*serverpb.LivenessResponse, error) {\n",
"\tlivenesses, err := nl.GetLivenessesFromKV(ctx)\n",
"\tif err != nil {\n",
"\t\treturn nil, serverError(ctx, err)\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tnodeVitalityMap, err := nl.ScanNodeVitalityFromKV(ctx)\n",
"\n"
],
"file_path": "pkg/server/admin.go",
"type": "replace",
"edit_start_line_idx": 2056
} | // Code generated by generate-staticcheck; DO NOT EDIT.
//go:build bazel
// +build bazel
package sa1010
import (
util "github.com/cockroachdb/cockroach/pkg/testutils/lint/passes/staticcheck"
"golang.org/x/tools/go/analysis"
"honnef.co/go/tools/staticcheck"
)
var Analyzer *analysis.Analyzer
func init() {
for _, analyzer := range staticcheck.Analyzers {
if analyzer.Analyzer.Name == "SA1010" {
Analyzer = analyzer.Analyzer
break
}
}
util.MungeAnalyzer(Analyzer)
}
| build/bazelutil/staticcheckanalyzers/sa1010/analyzer.go | 0 | https://github.com/cockroachdb/cockroach/commit/bd4638acbc9359bc0a02bbd05adf63ccc09fcde5 | [
0.0001749553921399638,
0.00017437244241591543,
0.00017398617637809366,
0.00017417575872968882,
4.1941083850360883e-7
] |
{
"id": 4,
"code_window": [
"\tif err != nil {\n",
"\t\treturn nil, serverError(ctx, err)\n",
"\t}\n",
"\n",
"\tthreshold := liveness.TimeUntilNodeDead.Get(&st.SV)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tlivenesses := make([]livenesspb.Liveness, 0, len(nodeVitalityMap))\n",
"\tstatusMap := make(map[roachpb.NodeID]livenesspb.NodeLivenessStatus, len(nodeVitalityMap))\n"
],
"file_path": "pkg/server/admin.go",
"type": "replace",
"edit_start_line_idx": 2061
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package optionalnodeliveness
import (
"context"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/errorutil"
)
// Interface is the interface used in Container.
type Interface interface {
Self() (livenesspb.Liveness, bool)
GetLiveness(nodeID roachpb.NodeID) (liveness.Record, bool)
GetLivenessesFromKV(ctx context.Context) ([]livenesspb.Liveness, error)
IsAvailable(roachpb.NodeID) bool
IsAvailableNotDraining(roachpb.NodeID) bool
IsLive(roachpb.NodeID) (bool, error)
}
// Container optionally gives access to liveness information about
// the KV nodes. It is typically not available to anyone but the system tenant.
type Container struct {
w errorutil.TenantSQLDeprecatedWrapper
}
// MakeContainer initializes an Container wrapping a
// (possibly nil) *NodeLiveness.
//
// Use of node liveness from within the SQL layer is **deprecated**. Please do
// not introduce new uses of it.
//
// See TenantSQLDeprecatedWrapper for details.
func MakeContainer(nl Interface) Container {
return Container{
w: errorutil.MakeTenantSQLDeprecatedWrapper(nl, nl != nil),
}
}
// OptionalErr returns the NodeLiveness instance if available. Otherwise, it
// returns an error referring to the optionally passed in issues.
//
// Use of NodeLiveness from within the SQL layer is **deprecated**. Please do
// not introduce new uses of it.
func (nl *Container) OptionalErr(issue int) (Interface, error) {
v, err := nl.w.OptionalErr(issue)
if err != nil {
return nil, err
}
return v.(Interface), nil
}
var _ = (*Container)(nil).OptionalErr // silence unused lint
// Optional returns the NodeLiveness instance and true if available.
// Otherwise, returns nil and false. Prefer OptionalErr where possible.
//
// Use of NodeLiveness from within the SQL layer is **deprecated**. Please do
// not introduce new uses of it.
func (nl *Container) Optional(issue int) (Interface, bool) {
v, ok := nl.w.Optional()
if !ok {
return nil, false
}
return v.(Interface), true
}
| pkg/sql/optionalnodeliveness/node_liveness.go | 1 | https://github.com/cockroachdb/cockroach/commit/bd4638acbc9359bc0a02bbd05adf63ccc09fcde5 | [
0.003894214052706957,
0.0013501751236617565,
0.00016740815772209316,
0.0004024384543299675,
0.0014424965484067798
] |
{
"id": 4,
"code_window": [
"\tif err != nil {\n",
"\t\treturn nil, serverError(ctx, err)\n",
"\t}\n",
"\n",
"\tthreshold := liveness.TimeUntilNodeDead.Get(&st.SV)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tlivenesses := make([]livenesspb.Liveness, 0, len(nodeVitalityMap))\n",
"\tstatusMap := make(map[roachpb.NodeID]livenesspb.NodeLivenessStatus, len(nodeVitalityMap))\n"
],
"file_path": "pkg/server/admin.go",
"type": "replace",
"edit_start_line_idx": 2061
} | # Test binding calls which reference names via either types or oid casts work.
# Before the changes made in the commit adding this test, this test would result
# in a nil pointer panic, since the txn was not initialized correctly.
# TODO(richardjcai): Support let command similar to logic tests to
# programmatically get the table id.
# The first table created has id 54.
send
Query {"String": "CREATE TABLE t (a INT PRIMARY KEY)"}
----
until
ReadyForQuery
----
{"Type":"CommandComplete","CommandTag":"CREATE TABLE"}
{"Type":"ReadyForQuery","TxStatus":"I"}
# 'S' for Statement
# 84 = ASCII 'T'
# ParameterFormatCodes = [0] for text format
send
Parse {"Name": "s7", "Query": "SELECT $1::REGCLASS::INT8"}
Describe {"ObjectType": "S", "Name": "s7"}
Sync
----
until
ReadyForQuery
----
{"Type":"ParseComplete"}
{"Type":"ParameterDescription","ParameterOIDs":[2205]}
{"Type":"RowDescription","Fields":[{"Name":"int8","TableOID":0,"TableAttributeNumber":0,"DataTypeOID":20,"DataTypeSize":8,"TypeModifier":-1,"Format":0}]}
{"Type":"ReadyForQuery","TxStatus":"I"}
# The below incantation used to trigger a code path which would nil the
# planner transaction but never set it. This was pretty much the only
# way you could do such a thing.
# This is crdb_only because Postgres does not support AS OF SYSTEM TIME.
send crdb_only
Query {"String": "BEGIN AS OF SYSTEM TIME '1s'"}
Sync
----
# There are two ReadyForQuerys because a simple query was followed by Sync.
until crdb_only
ErrorResponse
ReadyForQuery
ReadyForQuery
----
{"Type":"ErrorResponse","Code":"XXUUU"}
{"Type":"ReadyForQuery","TxStatus":"I"}
{"Type":"ReadyForQuery","TxStatus":"I"}
send crdb_only
Bind {"DestinationPortal": "p7", "PreparedStatement": "s7", "ParameterFormatCodes": [0], "Parameters": [{"text":"T"}]}
Execute {"Portal": "p7"}
Sync
----
until crdb_only
ReadyForQuery
----
{"Type":"BindComplete"}
{"Type":"DataRow","Values":[{"text":"104"}]}
{"Type":"CommandComplete","CommandTag":"SELECT 1"}
{"Type":"ReadyForQuery","TxStatus":"I"}
send
Query {"String": "DROP TABLE IF EXISTS tab"}
----
until ignore=NoticeResponse
ReadyForQuery
----
{"Type":"CommandComplete","CommandTag":"DROP TABLE"}
{"Type":"ReadyForQuery","TxStatus":"I"}
send
Query {"String": "BEGIN"}
----
until ignore=NoticeResponse
ReadyForQuery
----
{"Type":"CommandComplete","CommandTag":"BEGIN"}
{"Type":"ReadyForQuery","TxStatus":"T"}
send
Parse {"Name": "s8", "Query": "SELECT relname FROM pg_class WHERE oid = $1::regclass"}
Bind {"DestinationPortal": "p8", "PreparedStatement": "s8", "ResultFormatCodes": [0], "Parameters": [{"text":"t"}]}
Sync
----
until
ReadyForQuery
----
{"Type":"ParseComplete"}
{"Type":"BindComplete"}
{"Type":"ReadyForQuery","TxStatus":"T"}
send
Query {"String": "ALTER TABLE t RENAME TO tab"}
----
until
ReadyForQuery
----
{"Type":"CommandComplete","CommandTag":"ALTER TABLE"}
{"Type":"ReadyForQuery","TxStatus":"T"}
send
Execute {"Portal": "p8"}
Sync
----
until noncrdb_only
ReadyForQuery
----
{"Type":"DataRow","Values":[{"text":"t"}]}
{"Type":"CommandComplete","CommandTag":"SELECT 1"}
{"Type":"ReadyForQuery","TxStatus":"T"}
# Currently, CRDB differs in that it returns the new table name here, but the
# important part of this test is that it asserts that binding the placeholder
# parameter occurred before the table rename, which matches the Postgres
# behavior.
# TODO(rafi): To be fully correct, we still should return table name 't' here.
until crdb_only
ReadyForQuery
----
{"Type":"DataRow","Values":[{"text":"tab"}]}
{"Type":"CommandComplete","CommandTag":"SELECT 1"}
{"Type":"ReadyForQuery","TxStatus":"T"}
send
Query {"String": "COMMIT"}
----
until ignore=NoticeResponse
ReadyForQuery
----
{"Type":"CommandComplete","CommandTag":"COMMIT"}
{"Type":"ReadyForQuery","TxStatus":"I"}
| pkg/sql/pgwire/testdata/pgtest/bind_and_resolve | 0 | https://github.com/cockroachdb/cockroach/commit/bd4638acbc9359bc0a02bbd05adf63ccc09fcde5 | [
0.00031127603142522275,
0.00017797714099287987,
0.00016648933524265885,
0.0001681846333667636,
0.00003565773295122199
] |
{
"id": 4,
"code_window": [
"\tif err != nil {\n",
"\t\treturn nil, serverError(ctx, err)\n",
"\t}\n",
"\n",
"\tthreshold := liveness.TimeUntilNodeDead.Get(&st.SV)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tlivenesses := make([]livenesspb.Liveness, 0, len(nodeVitalityMap))\n",
"\tstatusMap := make(map[roachpb.NodeID]livenesspb.NodeLivenessStatus, len(nodeVitalityMap))\n"
],
"file_path": "pkg/server/admin.go",
"type": "replace",
"edit_start_line_idx": 2061
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package rowexec
// SubqueryExecMode is an enum to indicate the type of a subquery.
type SubqueryExecMode int
const (
// SubqueryExecModeExists indicates that the subquery is an argument to
// EXISTS. Result type is Bool.
SubqueryExecModeExists SubqueryExecMode = 1 + iota
// SubqueryExecModeAllRowsNormalized indicates that the subquery is an
// argument to IN, ANY, SOME, or ALL. Any number of rows are
// expected. The result type is tuple of rows. As a special case, if
// there is only one column selected, the result is a tuple of the
// selected values (instead of a tuple of 1-tuples).
SubqueryExecModeAllRowsNormalized
// SubqueryExecModeAllRows indicates that the subquery is an
// argument to an ARRAY constructor. Any number of rows are expected, and
// exactly one column is expected. Result type is a tuple
// of selected values.
SubqueryExecModeAllRows
// SubqueryExecModeOneRow indicates that the subquery is an argument to
// another function. At most 1 row is expected. The result type is a tuple of
// columns, unless there is exactly 1 column in which case the result type is
// that column's type. If there are no rows, the result is NULL.
SubqueryExecModeOneRow
)
| pkg/sql/rowexec/subquery.go | 0 | https://github.com/cockroachdb/cockroach/commit/bd4638acbc9359bc0a02bbd05adf63ccc09fcde5 | [
0.00022124290990177542,
0.00019117255578748882,
0.00017156351532321423,
0.0001859418989624828,
0.000019113767848466523
] |
{
"id": 4,
"code_window": [
"\tif err != nil {\n",
"\t\treturn nil, serverError(ctx, err)\n",
"\t}\n",
"\n",
"\tthreshold := liveness.TimeUntilNodeDead.Get(&st.SV)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tlivenesses := make([]livenesspb.Liveness, 0, len(nodeVitalityMap))\n",
"\tstatusMap := make(map[roachpb.NodeID]livenesspb.NodeLivenessStatus, len(nodeVitalityMap))\n"
],
"file_path": "pkg/server/admin.go",
"type": "replace",
"edit_start_line_idx": 2061
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"context"
"github.com/cockroachdb/cockroach/pkg/security/username"
"github.com/cockroachdb/cockroach/pkg/sql/decodeusername"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/roleoption"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/sql/sqlerrors"
"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
)
// RevokeRoleNode removes entries from the system.role_members table.
// This is called from REVOKE <ROLE>
type RevokeRoleNode struct {
roles []username.SQLUsername
members []username.SQLUsername
adminOption bool
}
// RevokeRole represents a GRANT ROLE statement.
func (p *planner) RevokeRole(ctx context.Context, n *tree.RevokeRole) (planNode, error) {
return p.RevokeRoleNode(ctx, n)
}
func (p *planner) RevokeRoleNode(ctx context.Context, n *tree.RevokeRole) (*RevokeRoleNode, error) {
sqltelemetry.IncIAMRevokeCounter(n.AdminOption)
ctx, span := tracing.ChildSpan(ctx, n.StatementTag())
defer span.Finish()
hasCreateRolePriv, err := p.HasRoleOption(ctx, roleoption.CREATEROLE)
if err != nil {
return nil, err
}
// check permissions on each role.
allRoles, err := p.MemberOfWithAdminOption(ctx, p.User())
if err != nil {
return nil, err
}
revokingRoleHasAdminOptionOnAdmin := allRoles[username.AdminRoleName()]
inputRoles, err := decodeusername.FromNameList(n.Roles)
if err != nil {
return nil, err
}
inputMembers, err := decodeusername.FromRoleSpecList(
p.SessionData(), username.PurposeValidation, n.Members,
)
if err != nil {
return nil, err
}
for _, r := range inputRoles {
// If the current user has CREATEROLE, and the role being revoked is not an
// admin there is no need to check if the user is allowed to grant/revoke
// membership in the role. However, if the role being revoked is an admin,
// then make sure the current user also has the admin option for that role.
revokedRoleIsAdmin, err := p.UserHasAdminRole(ctx, r)
if err != nil {
return nil, err
}
if hasCreateRolePriv && !revokedRoleIsAdmin {
continue
}
if hasAdminOption := allRoles[r]; !hasAdminOption && !revokingRoleHasAdminOptionOnAdmin {
if revokedRoleIsAdmin {
return nil, pgerror.Newf(pgcode.InsufficientPrivilege,
"%s must have admin option on role %q", p.User(), r)
}
return nil, pgerror.Newf(pgcode.InsufficientPrivilege,
"%s must have CREATEROLE or have admin option on role %q", p.User(), r)
}
}
// Check that roles exist.
// TODO(mberhault): just like GRANT/REVOKE privileges, we fetch the list of all roles.
// This is wasteful when we have a LOT of roles compared to the number of roles being operated on.
roles, err := p.GetAllRoles(ctx)
if err != nil {
return nil, err
}
for _, r := range inputRoles {
if _, ok := roles[r]; !ok {
return nil, sqlerrors.NewUndefinedUserError(r)
}
}
for _, m := range inputMembers {
if _, ok := roles[m]; !ok {
return nil, sqlerrors.NewUndefinedUserError(m)
}
}
return &RevokeRoleNode{
roles: inputRoles,
members: inputMembers,
adminOption: n.AdminOption,
}, nil
}
func (n *RevokeRoleNode) startExec(params runParams) error {
opName := "revoke-role"
var memberStmt string
if n.adminOption {
// ADMIN OPTION FOR is specified, we don't remove memberships just remove the admin option.
memberStmt = `UPDATE system.role_members SET "isAdmin" = false WHERE "role" = $1 AND "member" = $2`
} else {
// Admin option not specified: remove membership if it exists.
memberStmt = `DELETE FROM system.role_members WHERE "role" = $1 AND "member" = $2`
}
var rowsAffected int
for _, r := range n.roles {
for _, m := range n.members {
if r.IsAdminRole() && m.IsRootUser() {
// We use CodeObjectInUseError which is what happens if you tried to delete the current user in pg.
return pgerror.Newf(pgcode.ObjectInUse,
"role/user %s cannot be removed from role %s or lose the ADMIN OPTION",
username.RootUser, username.AdminRole)
}
affected, err := params.p.InternalSQLTxn().ExecEx(
params.ctx,
opName,
params.p.txn,
sessiondata.RootUserSessionDataOverride,
memberStmt,
r.Normalized(), m.Normalized(),
)
if err != nil {
return err
}
rowsAffected += affected
}
}
// We need to bump the table version to trigger a refresh if anything changed.
if rowsAffected > 0 {
if err := params.p.BumpRoleMembershipTableVersion(params.ctx); err != nil {
return err
}
}
return nil
}
// Next implements the planNode interface.
func (*RevokeRoleNode) Next(runParams) (bool, error) { return false, nil }
// Values implements the planNode interface.
func (*RevokeRoleNode) Values() tree.Datums { return tree.Datums{} }
// Close implements the planNode interface.
func (*RevokeRoleNode) Close(context.Context) {}
| pkg/sql/revoke_role.go | 0 | https://github.com/cockroachdb/cockroach/commit/bd4638acbc9359bc0a02bbd05adf63ccc09fcde5 | [
0.001352665713056922,
0.0002562045119702816,
0.00016078772023320198,
0.00016781818703748286,
0.0002720001502893865
] |
{
"id": 5,
"code_window": [
"\n",
"\tstatusMap := make(map[roachpb.NodeID]livenesspb.NodeLivenessStatus, len(livenesses))\n",
"\tfor _, liveness := range livenesses {\n",
"\t\tstatus := storepool.LivenessStatus(liveness, now, threshold)\n",
"\t\tstatusMap[liveness.NodeID] = status\n",
"\t}\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"\tfor nodeID, vitality := range nodeVitalityMap {\n",
"\t\tlivenesses = append(livenesses, vitality.GenLiveness())\n",
"\t\tstatusMap[nodeID] = vitality.LivenessStatus()\n"
],
"file_path": "pkg/server/admin.go",
"type": "replace",
"edit_start_line_idx": 2063
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package optionalnodeliveness
import (
"context"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/errorutil"
)
// Interface is the interface used in Container.
type Interface interface {
Self() (livenesspb.Liveness, bool)
GetLiveness(nodeID roachpb.NodeID) (liveness.Record, bool)
GetLivenessesFromKV(ctx context.Context) ([]livenesspb.Liveness, error)
IsAvailable(roachpb.NodeID) bool
IsAvailableNotDraining(roachpb.NodeID) bool
IsLive(roachpb.NodeID) (bool, error)
}
// Container optionally gives access to liveness information about
// the KV nodes. It is typically not available to anyone but the system tenant.
type Container struct {
w errorutil.TenantSQLDeprecatedWrapper
}
// MakeContainer initializes an Container wrapping a
// (possibly nil) *NodeLiveness.
//
// Use of node liveness from within the SQL layer is **deprecated**. Please do
// not introduce new uses of it.
//
// See TenantSQLDeprecatedWrapper for details.
func MakeContainer(nl Interface) Container {
return Container{
w: errorutil.MakeTenantSQLDeprecatedWrapper(nl, nl != nil),
}
}
// OptionalErr returns the NodeLiveness instance if available. Otherwise, it
// returns an error referring to the optionally passed in issues.
//
// Use of NodeLiveness from within the SQL layer is **deprecated**. Please do
// not introduce new uses of it.
func (nl *Container) OptionalErr(issue int) (Interface, error) {
v, err := nl.w.OptionalErr(issue)
if err != nil {
return nil, err
}
return v.(Interface), nil
}
var _ = (*Container)(nil).OptionalErr // silence unused lint
// Optional returns the NodeLiveness instance and true if available.
// Otherwise, returns nil and false. Prefer OptionalErr where possible.
//
// Use of NodeLiveness from within the SQL layer is **deprecated**. Please do
// not introduce new uses of it.
func (nl *Container) Optional(issue int) (Interface, bool) {
v, ok := nl.w.Optional()
if !ok {
return nil, false
}
return v.(Interface), true
}
| pkg/sql/optionalnodeliveness/node_liveness.go | 1 | https://github.com/cockroachdb/cockroach/commit/bd4638acbc9359bc0a02bbd05adf63ccc09fcde5 | [
0.4758371412754059,
0.06064515560865402,
0.0001748556678649038,
0.00020108750322833657,
0.1569371372461319
] |
{
"id": 5,
"code_window": [
"\n",
"\tstatusMap := make(map[roachpb.NodeID]livenesspb.NodeLivenessStatus, len(livenesses))\n",
"\tfor _, liveness := range livenesses {\n",
"\t\tstatus := storepool.LivenessStatus(liveness, now, threshold)\n",
"\t\tstatusMap[liveness.NodeID] = status\n",
"\t}\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"\tfor nodeID, vitality := range nodeVitalityMap {\n",
"\t\tlivenesses = append(livenesses, vitality.GenLiveness())\n",
"\t\tstatusMap[nodeID] = vitality.LivenessStatus()\n"
],
"file_path": "pkg/server/admin.go",
"type": "replace",
"edit_start_line_idx": 2063
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tests
import (
"context"
"fmt"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/cmd/roachtest/option"
"github.com/cockroachdb/cockroach/pkg/roachprod/logger"
"github.com/cockroachdb/cockroach/pkg/roachprod/prometheus"
gomock "github.com/golang/mock/gomock"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
)
func TestTPCCChaosEventProcessor(t *testing.T) {
ctx := context.Background()
startTime := time.Date(2020, 12, 25, 0, 0, 0, 0, time.UTC)
region1 := option.NodeListOption([]int{1, 2, 3})
portRegion1 := 2110
region2 := option.NodeListOption([]int{4, 5, 6})
portRegion2 := 2111
makeMetric := func(op string, errorOrSuccess string, port int) string {
return fmt.Sprintf(`workload_tpcc_%s_%s_total{instance=":%d"}`, op, errorOrSuccess, port)
}
firstPreShutdown := startTime.Add(90 * time.Second)
firstShutdownComplete := startTime.Add(95 * time.Second)
scrapeAfterFirstShutdownComplete := firstShutdownComplete.Add(prometheus.DefaultScrapeInterval)
firstPreStartup := startTime.Add(180 * time.Second)
scrapeBeforeFirstPreStartup := firstPreStartup.Add(-prometheus.DefaultScrapeInterval)
firstStartupComplete := startTime.Add(185 * time.Second)
scrapeAfterFirstStartupComplete := firstStartupComplete.Add(prometheus.DefaultScrapeInterval)
secondPreShutdown := startTime.Add(390 * time.Second)
scrapeBeforeSecondPreShutdown := secondPreShutdown.Add(-prometheus.DefaultScrapeInterval)
secondShutdownComplete := startTime.Add(395 * time.Second)
scrapeAfterSecondShutdownComplete := secondShutdownComplete.Add(prometheus.DefaultScrapeInterval)
secondPreStartup := startTime.Add(480 * time.Second)
scrapeBeforeSecondPreStartup := secondPreStartup.Add(-prometheus.DefaultScrapeInterval)
secondStartupComplete := startTime.Add(490 * time.Second)
metricA := "newOtan"
metricB := "otanLevel"
type expectPromQuery struct {
q string
t time.Time
retVal model.SampleValue
}
testCases := []struct {
desc string
chaosEvents []ChaosEvent
ops []string
workloadInstances []workloadInstance
mockPromQueries []expectPromQuery
expectedErrors []string
allowZeroSuccessDuringUptime bool
maxErrorsDuringUptime int
}{
{
desc: "everything is good",
chaosEvents: []ChaosEvent{
{Type: ChaosEventTypeStart, Time: startTime.Add(0 * time.Second)},
// Shutdown and restart region1.
{Type: ChaosEventTypePreShutdown, Time: firstPreShutdown, Target: region1},
{Type: ChaosEventTypeShutdownComplete, Time: firstShutdownComplete, Target: region1},
{Type: ChaosEventTypePreStartup, Time: firstPreStartup, Target: region1},
{Type: ChaosEventTypeStartupComplete, Time: firstStartupComplete, Target: region1},
// Shutdown and restart region2.
{Type: ChaosEventTypePreShutdown, Time: secondPreShutdown, Target: region2},
{Type: ChaosEventTypeShutdownComplete, Time: secondShutdownComplete, Target: region2},
{Type: ChaosEventTypePreStartup, Time: secondPreStartup, Target: region2},
{Type: ChaosEventTypeStartupComplete, Time: secondStartupComplete, Target: region2},
{Type: ChaosEventTypeEnd, Time: startTime.Add(600 * time.Second)},
},
ops: []string{metricA, metricB},
mockPromQueries: []expectPromQuery{
// Restart region1.
{q: makeMetric(metricA, "success", portRegion1), t: scrapeAfterFirstShutdownComplete, retVal: 100},
{q: makeMetric(metricA, "success", portRegion1), t: scrapeBeforeFirstPreStartup, retVal: 100},
{q: makeMetric(metricA, "error", portRegion1), t: scrapeAfterFirstShutdownComplete, retVal: 10},
{q: makeMetric(metricA, "error", portRegion1), t: scrapeBeforeFirstPreStartup, retVal: 1000},
{q: makeMetric(metricA, "success", portRegion2), t: scrapeAfterFirstShutdownComplete, retVal: 100},
{q: makeMetric(metricA, "success", portRegion2), t: scrapeBeforeFirstPreStartup, retVal: 1000},
{q: makeMetric(metricA, "error", portRegion2), t: scrapeAfterFirstShutdownComplete, retVal: 0},
{q: makeMetric(metricA, "error", portRegion2), t: scrapeBeforeFirstPreStartup, retVal: 0},
{q: makeMetric(metricB, "success", portRegion1), t: scrapeAfterFirstShutdownComplete, retVal: 100},
{q: makeMetric(metricB, "success", portRegion1), t: scrapeBeforeFirstPreStartup, retVal: 100},
{q: makeMetric(metricB, "error", portRegion1), t: scrapeAfterFirstShutdownComplete, retVal: 10},
{q: makeMetric(metricB, "error", portRegion1), t: scrapeBeforeFirstPreStartup, retVal: 1000},
{q: makeMetric(metricB, "success", portRegion2), t: scrapeAfterFirstShutdownComplete, retVal: 100},
{q: makeMetric(metricB, "success", portRegion2), t: scrapeBeforeFirstPreStartup, retVal: 1000},
{q: makeMetric(metricB, "error", portRegion2), t: scrapeAfterFirstShutdownComplete, retVal: 0},
{q: makeMetric(metricB, "error", portRegion2), t: scrapeBeforeFirstPreStartup, retVal: 0},
// Shutdown region2.
{q: makeMetric(metricA, "success", portRegion1), t: scrapeAfterFirstStartupComplete, retVal: 100},
{q: makeMetric(metricA, "success", portRegion1), t: scrapeBeforeSecondPreShutdown, retVal: 1000},
{q: makeMetric(metricA, "error", portRegion1), t: scrapeAfterFirstStartupComplete, retVal: 1000},
{q: makeMetric(metricA, "error", portRegion1), t: scrapeBeforeSecondPreShutdown, retVal: 1000},
{q: makeMetric(metricA, "success", portRegion2), t: scrapeAfterFirstStartupComplete, retVal: 100},
{q: makeMetric(metricA, "success", portRegion2), t: scrapeBeforeSecondPreShutdown, retVal: 1000},
{q: makeMetric(metricA, "error", portRegion2), t: scrapeAfterFirstStartupComplete, retVal: 1000},
{q: makeMetric(metricA, "error", portRegion2), t: scrapeBeforeSecondPreShutdown, retVal: 1000},
{q: makeMetric(metricB, "success", portRegion1), t: scrapeAfterFirstStartupComplete, retVal: 100},
{q: makeMetric(metricB, "success", portRegion1), t: scrapeBeforeSecondPreShutdown, retVal: 1000},
{q: makeMetric(metricB, "error", portRegion1), t: scrapeAfterFirstStartupComplete, retVal: 1000},
{q: makeMetric(metricB, "error", portRegion1), t: scrapeBeforeSecondPreShutdown, retVal: 1000},
{q: makeMetric(metricB, "success", portRegion2), t: scrapeAfterFirstStartupComplete, retVal: 100},
{q: makeMetric(metricB, "success", portRegion2), t: scrapeBeforeSecondPreShutdown, retVal: 1000},
{q: makeMetric(metricB, "error", portRegion2), t: scrapeAfterFirstStartupComplete, retVal: 1000},
{q: makeMetric(metricB, "error", portRegion2), t: scrapeBeforeSecondPreShutdown, retVal: 1000},
// Restart region2.
{q: makeMetric(metricA, "success", portRegion1), t: scrapeAfterSecondShutdownComplete, retVal: 1000},
{q: makeMetric(metricA, "success", portRegion1), t: scrapeBeforeSecondPreStartup, retVal: 10000},
{q: makeMetric(metricA, "error", portRegion1), t: scrapeAfterSecondShutdownComplete, retVal: 1000},
{q: makeMetric(metricA, "error", portRegion1), t: scrapeBeforeSecondPreStartup, retVal: 1000},
{q: makeMetric(metricA, "success", portRegion2), t: scrapeAfterSecondShutdownComplete, retVal: 1000},
{q: makeMetric(metricA, "success", portRegion2), t: scrapeBeforeSecondPreStartup, retVal: 1000},
{q: makeMetric(metricA, "error", portRegion2), t: scrapeAfterSecondShutdownComplete, retVal: 1000},
{q: makeMetric(metricA, "error", portRegion2), t: scrapeBeforeSecondPreStartup, retVal: 10000},
{q: makeMetric(metricB, "success", portRegion1), t: scrapeAfterSecondShutdownComplete, retVal: 1000},
{q: makeMetric(metricB, "success", portRegion1), t: scrapeBeforeSecondPreStartup, retVal: 10000},
{q: makeMetric(metricB, "error", portRegion1), t: scrapeAfterSecondShutdownComplete, retVal: 1000},
{q: makeMetric(metricB, "error", portRegion1), t: scrapeBeforeSecondPreStartup, retVal: 1000},
{q: makeMetric(metricB, "success", portRegion2), t: scrapeAfterSecondShutdownComplete, retVal: 1000},
{q: makeMetric(metricB, "success", portRegion2), t: scrapeBeforeSecondPreStartup, retVal: 1000},
{q: makeMetric(metricB, "error", portRegion2), t: scrapeAfterSecondShutdownComplete, retVal: 1000},
{q: makeMetric(metricB, "error", portRegion2), t: scrapeBeforeSecondPreStartup, retVal: 10000},
},
workloadInstances: []workloadInstance{
{
nodes: region1,
prometheusPort: portRegion1,
},
{
nodes: region2,
prometheusPort: portRegion2,
},
},
},
{
desc: "tolerate errors and no successes during uptime",
chaosEvents: []ChaosEvent{
{Type: ChaosEventTypeStart, Time: startTime.Add(0 * time.Second)},
// Shutdown and restart region1.
{Type: ChaosEventTypePreShutdown, Time: firstPreShutdown, Target: region1},
{Type: ChaosEventTypeShutdownComplete, Time: firstShutdownComplete, Target: region1},
{Type: ChaosEventTypePreStartup, Time: firstPreStartup, Target: region1},
{Type: ChaosEventTypeStartupComplete, Time: firstStartupComplete, Target: region1},
// Shutdown and restart region2.
{Type: ChaosEventTypePreShutdown, Time: secondPreShutdown, Target: region2},
{Type: ChaosEventTypeShutdownComplete, Time: secondShutdownComplete, Target: region2},
{Type: ChaosEventTypePreStartup, Time: secondPreStartup, Target: region2},
{Type: ChaosEventTypeStartupComplete, Time: secondStartupComplete, Target: region2},
{Type: ChaosEventTypeEnd, Time: startTime.Add(600 * time.Second)},
},
allowZeroSuccessDuringUptime: true,
maxErrorsDuringUptime: 5,
ops: []string{metricA, metricB},
mockPromQueries: []expectPromQuery{
// Restart region1.
{q: makeMetric(metricA, "success", portRegion1), t: scrapeAfterFirstShutdownComplete, retVal: 100},
{q: makeMetric(metricA, "success", portRegion1), t: scrapeBeforeFirstPreStartup, retVal: 100},
{q: makeMetric(metricA, "error", portRegion1), t: scrapeAfterFirstShutdownComplete, retVal: 10},
{q: makeMetric(metricA, "error", portRegion1), t: scrapeBeforeFirstPreStartup, retVal: 1000},
{q: makeMetric(metricA, "success", portRegion2), t: scrapeAfterFirstShutdownComplete, retVal: 100},
{q: makeMetric(metricA, "success", portRegion2), t: scrapeBeforeFirstPreStartup, retVal: 1000},
{q: makeMetric(metricA, "error", portRegion2), t: scrapeAfterFirstShutdownComplete, retVal: 0},
{q: makeMetric(metricA, "error", portRegion2), t: scrapeBeforeFirstPreStartup, retVal: 5}, // test that it still works with <= maxErrorsDuringUptime errors
{q: makeMetric(metricB, "success", portRegion1), t: scrapeAfterFirstShutdownComplete, retVal: 100},
{q: makeMetric(metricB, "success", portRegion1), t: scrapeBeforeFirstPreStartup, retVal: 100},
{q: makeMetric(metricB, "error", portRegion1), t: scrapeAfterFirstShutdownComplete, retVal: 10},
{q: makeMetric(metricB, "error", portRegion1), t: scrapeBeforeFirstPreStartup, retVal: 1000},
{q: makeMetric(metricB, "success", portRegion2), t: scrapeAfterFirstShutdownComplete, retVal: 100},
{q: makeMetric(metricB, "success", portRegion2), t: scrapeBeforeFirstPreStartup, retVal: 1000},
{q: makeMetric(metricB, "error", portRegion2), t: scrapeAfterFirstShutdownComplete, retVal: 0},
{q: makeMetric(metricB, "error", portRegion2), t: scrapeBeforeFirstPreStartup, retVal: 0},
// Shutdown region2.
{q: makeMetric(metricA, "success", portRegion1), t: scrapeAfterFirstStartupComplete, retVal: 100},
{q: makeMetric(metricA, "success", portRegion1), t: scrapeBeforeSecondPreShutdown, retVal: 100}, // no success should still succeed as allowZeroSuccessDuringUptime = true
{q: makeMetric(metricA, "error", portRegion1), t: scrapeAfterFirstStartupComplete, retVal: 1000},
{q: makeMetric(metricA, "error", portRegion1), t: scrapeBeforeSecondPreShutdown, retVal: 1000},
{q: makeMetric(metricA, "success", portRegion2), t: scrapeAfterFirstStartupComplete, retVal: 100},
{q: makeMetric(metricA, "success", portRegion2), t: scrapeBeforeSecondPreShutdown, retVal: 1000},
{q: makeMetric(metricA, "error", portRegion2), t: scrapeAfterFirstStartupComplete, retVal: 1000},
{q: makeMetric(metricA, "error", portRegion2), t: scrapeBeforeSecondPreShutdown, retVal: 1000},
{q: makeMetric(metricB, "success", portRegion1), t: scrapeAfterFirstStartupComplete, retVal: 100},
{q: makeMetric(metricB, "success", portRegion1), t: scrapeBeforeSecondPreShutdown, retVal: 1000},
{q: makeMetric(metricB, "error", portRegion1), t: scrapeAfterFirstStartupComplete, retVal: 1000},
{q: makeMetric(metricB, "error", portRegion1), t: scrapeBeforeSecondPreShutdown, retVal: 1000},
{q: makeMetric(metricB, "success", portRegion2), t: scrapeAfterFirstStartupComplete, retVal: 100},
{q: makeMetric(metricB, "success", portRegion2), t: scrapeBeforeSecondPreShutdown, retVal: 1000},
{q: makeMetric(metricB, "error", portRegion2), t: scrapeAfterFirstStartupComplete, retVal: 1000},
{q: makeMetric(metricB, "error", portRegion2), t: scrapeBeforeSecondPreShutdown, retVal: 1000},
// Restart region2.
{q: makeMetric(metricA, "success", portRegion1), t: scrapeAfterSecondShutdownComplete, retVal: 1000},
{q: makeMetric(metricA, "success", portRegion1), t: scrapeBeforeSecondPreStartup, retVal: 10000},
{q: makeMetric(metricA, "error", portRegion1), t: scrapeAfterSecondShutdownComplete, retVal: 1000},
{q: makeMetric(metricA, "error", portRegion1), t: scrapeBeforeSecondPreStartup, retVal: 1000},
{q: makeMetric(metricA, "success", portRegion2), t: scrapeAfterSecondShutdownComplete, retVal: 1000},
{q: makeMetric(metricA, "success", portRegion2), t: scrapeBeforeSecondPreStartup, retVal: 1000},
{q: makeMetric(metricA, "error", portRegion2), t: scrapeAfterSecondShutdownComplete, retVal: 1000},
{q: makeMetric(metricA, "error", portRegion2), t: scrapeBeforeSecondPreStartup, retVal: 10000},
{q: makeMetric(metricB, "success", portRegion1), t: scrapeAfterSecondShutdownComplete, retVal: 1000},
{q: makeMetric(metricB, "success", portRegion1), t: scrapeBeforeSecondPreStartup, retVal: 10000},
{q: makeMetric(metricB, "error", portRegion1), t: scrapeAfterSecondShutdownComplete, retVal: 1000},
{q: makeMetric(metricB, "error", portRegion1), t: scrapeBeforeSecondPreStartup, retVal: 1000},
{q: makeMetric(metricB, "success", portRegion2), t: scrapeAfterSecondShutdownComplete, retVal: 1000},
{q: makeMetric(metricB, "success", portRegion2), t: scrapeBeforeSecondPreStartup, retVal: 1000},
{q: makeMetric(metricB, "error", portRegion2), t: scrapeAfterSecondShutdownComplete, retVal: 1000},
{q: makeMetric(metricB, "error", portRegion2), t: scrapeBeforeSecondPreStartup, retVal: 10000},
},
workloadInstances: []workloadInstance{
{
nodes: region1,
prometheusPort: portRegion1,
},
{
nodes: region2,
prometheusPort: portRegion2,
},
},
},
{
desc: "unexpected node errors during shutdown",
chaosEvents: []ChaosEvent{
{Type: ChaosEventTypeStart, Time: startTime.Add(0 * time.Second)},
// Shutdown and restart region1.
{Type: ChaosEventTypePreShutdown, Time: firstPreShutdown, Target: region1},
{Type: ChaosEventTypeShutdownComplete, Time: firstShutdownComplete, Target: region1},
{Type: ChaosEventTypePreStartup, Time: firstPreStartup, Target: region1},
{Type: ChaosEventTypeStartupComplete, Time: firstStartupComplete, Target: region1},
{Type: ChaosEventTypeEnd, Time: startTime.Add(0 * time.Second)},
},
ops: []string{metricA, metricB},
mockPromQueries: []expectPromQuery{
// Restart region1.
{q: makeMetric(metricA, "success", portRegion1), t: scrapeAfterFirstShutdownComplete, retVal: 100},
{q: makeMetric(metricA, "success", portRegion1), t: scrapeBeforeFirstPreStartup, retVal: 100},
{q: makeMetric(metricA, "error", portRegion1), t: scrapeAfterFirstShutdownComplete, retVal: 10},
{q: makeMetric(metricA, "error", portRegion1), t: scrapeBeforeFirstPreStartup, retVal: 1000},
{q: makeMetric(metricA, "success", portRegion2), t: scrapeAfterFirstShutdownComplete, retVal: 100},
{q: makeMetric(metricA, "success", portRegion2), t: scrapeBeforeFirstPreStartup, retVal: 1000},
{q: makeMetric(metricA, "error", portRegion2), t: scrapeAfterFirstShutdownComplete, retVal: 0},
{q: makeMetric(metricA, "error", portRegion2), t: scrapeBeforeFirstPreStartup, retVal: 0},
{q: makeMetric(metricB, "success", portRegion1), t: scrapeAfterFirstShutdownComplete, retVal: 100},
{q: makeMetric(metricB, "success", portRegion1), t: scrapeBeforeFirstPreStartup, retVal: 100},
{q: makeMetric(metricB, "error", portRegion1), t: scrapeAfterFirstShutdownComplete, retVal: 10},
{q: makeMetric(metricB, "error", portRegion1), t: scrapeBeforeFirstPreStartup, retVal: 1000},
{q: makeMetric(metricB, "success", portRegion2), t: scrapeAfterFirstShutdownComplete, retVal: 100},
{q: makeMetric(metricB, "success", portRegion2), t: scrapeBeforeFirstPreStartup, retVal: 1000},
{q: makeMetric(metricB, "error", portRegion2), t: scrapeAfterFirstShutdownComplete, retVal: 0},
{q: makeMetric(metricB, "error", portRegion2), t: scrapeBeforeFirstPreStartup, retVal: 1000}, // should have had no errors during shutdown.
},
workloadInstances: []workloadInstance{
{
nodes: region1,
prometheusPort: portRegion1,
},
{
nodes: region2,
prometheusPort: portRegion2,
},
},
expectedErrors: []string{
fmt.Sprintf(
`error at from 2020-12-25T00:01:45Z, to 2020-12-25T00:02:50Z on metric %s: expected <=0 errors, found from 0.000000, to 1000.000000`,
makeMetric(metricB, "error", portRegion2),
),
},
},
{
desc: "unexpected node errors during shutdown with tolerance",
chaosEvents: []ChaosEvent{
{Type: ChaosEventTypeStart, Time: startTime.Add(0 * time.Second)},
// Shutdown and restart region1.
{Type: ChaosEventTypePreShutdown, Time: firstPreShutdown, Target: region1},
{Type: ChaosEventTypeShutdownComplete, Time: firstShutdownComplete, Target: region1},
{Type: ChaosEventTypePreStartup, Time: firstPreStartup, Target: region1},
{Type: ChaosEventTypeStartupComplete, Time: firstStartupComplete, Target: region1},
{Type: ChaosEventTypeEnd, Time: startTime.Add(0 * time.Second)},
},
ops: []string{metricA, metricB},
mockPromQueries: []expectPromQuery{
// Restart region1.
{q: makeMetric(metricA, "success", portRegion1), t: scrapeAfterFirstShutdownComplete, retVal: 100},
{q: makeMetric(metricA, "success", portRegion1), t: scrapeBeforeFirstPreStartup, retVal: 100},
{q: makeMetric(metricA, "error", portRegion1), t: scrapeAfterFirstShutdownComplete, retVal: 10},
{q: makeMetric(metricA, "error", portRegion1), t: scrapeBeforeFirstPreStartup, retVal: 1000},
{q: makeMetric(metricA, "success", portRegion2), t: scrapeAfterFirstShutdownComplete, retVal: 100},
{q: makeMetric(metricA, "success", portRegion2), t: scrapeBeforeFirstPreStartup, retVal: 1000},
{q: makeMetric(metricA, "error", portRegion2), t: scrapeAfterFirstShutdownComplete, retVal: 0},
{q: makeMetric(metricA, "error", portRegion2), t: scrapeBeforeFirstPreStartup, retVal: 0},
{q: makeMetric(metricB, "success", portRegion1), t: scrapeAfterFirstShutdownComplete, retVal: 100},
{q: makeMetric(metricB, "success", portRegion1), t: scrapeBeforeFirstPreStartup, retVal: 100},
{q: makeMetric(metricB, "error", portRegion1), t: scrapeAfterFirstShutdownComplete, retVal: 10},
{q: makeMetric(metricB, "error", portRegion1), t: scrapeBeforeFirstPreStartup, retVal: 1000},
{q: makeMetric(metricB, "success", portRegion2), t: scrapeAfterFirstShutdownComplete, retVal: 100},
{q: makeMetric(metricB, "success", portRegion2), t: scrapeBeforeFirstPreStartup, retVal: 1000},
{q: makeMetric(metricB, "error", portRegion2), t: scrapeAfterFirstShutdownComplete, retVal: 0},
{q: makeMetric(metricB, "error", portRegion2), t: scrapeBeforeFirstPreStartup, retVal: 6}, // should have had <= 5 errors during shutdown.
},
maxErrorsDuringUptime: 5,
workloadInstances: []workloadInstance{
{
nodes: region1,
prometheusPort: portRegion1,
},
{
nodes: region2,
prometheusPort: portRegion2,
},
},
expectedErrors: []string{
fmt.Sprintf(
`error at from 2020-12-25T00:01:45Z, to 2020-12-25T00:02:50Z on metric %s: expected <=5 errors, found from 0.000000, to 6.000000`,
makeMetric(metricB, "error", portRegion2),
),
},
},
{
desc: "unexpected node successes during shutdown",
chaosEvents: []ChaosEvent{
{Type: ChaosEventTypeStart, Time: startTime.Add(0 * time.Second)},
// Shutdown and restart region1.
{Type: ChaosEventTypePreShutdown, Time: firstPreShutdown, Target: region1},
{Type: ChaosEventTypeShutdownComplete, Time: firstShutdownComplete, Target: region1},
{Type: ChaosEventTypePreStartup, Time: firstPreStartup, Target: region1},
{Type: ChaosEventTypeStartupComplete, Time: firstStartupComplete, Target: region1},
{Type: ChaosEventTypeEnd, Time: startTime.Add(0 * time.Second)},
},
ops: []string{metricA, metricB},
mockPromQueries: []expectPromQuery{
// Restart region1.
{q: makeMetric(metricA, "success", portRegion1), t: scrapeAfterFirstShutdownComplete, retVal: 100},
{q: makeMetric(metricA, "success", portRegion1), t: scrapeBeforeFirstPreStartup, retVal: 1000}, // should have had no successes whilst shutdown.
{q: makeMetric(metricA, "success", portRegion2), t: scrapeAfterFirstShutdownComplete, retVal: 100},
{q: makeMetric(metricA, "success", portRegion2), t: scrapeBeforeFirstPreStartup, retVal: 1000},
{q: makeMetric(metricA, "error", portRegion2), t: scrapeAfterFirstShutdownComplete, retVal: 0},
{q: makeMetric(metricA, "error", portRegion2), t: scrapeBeforeFirstPreStartup, retVal: 0},
{q: makeMetric(metricB, "success", portRegion1), t: scrapeAfterFirstShutdownComplete, retVal: 100},
{q: makeMetric(metricB, "success", portRegion1), t: scrapeBeforeFirstPreStartup, retVal: 100},
{q: makeMetric(metricB, "error", portRegion1), t: scrapeAfterFirstShutdownComplete, retVal: 10},
{q: makeMetric(metricB, "error", portRegion1), t: scrapeBeforeFirstPreStartup, retVal: 10}, // should have had errors whilst shutdown.
{q: makeMetric(metricB, "success", portRegion2), t: scrapeAfterFirstShutdownComplete, retVal: 100},
{q: makeMetric(metricB, "success", portRegion2), t: scrapeBeforeFirstPreStartup, retVal: 1000},
{q: makeMetric(metricB, "error", portRegion2), t: scrapeAfterFirstShutdownComplete, retVal: 0},
{q: makeMetric(metricB, "error", portRegion2), t: scrapeBeforeFirstPreStartup, retVal: 0},
},
workloadInstances: []workloadInstance{
{
nodes: region1,
prometheusPort: portRegion1,
},
{
nodes: region2,
prometheusPort: portRegion2,
},
},
expectedErrors: []string{
fmt.Sprintf(
`error at from 2020-12-25T00:01:45Z, to 2020-12-25T00:02:50Z on metric %s: expected successes to not increase, found from 100.000000, to 1000.000000`,
makeMetric(metricA, "success", portRegion1),
),
fmt.Sprintf(
`error at from 2020-12-25T00:01:45Z, to 2020-12-25T00:02:50Z on metric %s: expected errors, found from 10.000000, to 10.000000`,
makeMetric(metricB, "error", portRegion1),
),
},
},
{
desc: "nodes have unexpected blips after startup",
chaosEvents: []ChaosEvent{
{Type: ChaosEventTypeStart, Time: startTime.Add(0 * time.Second)},
// Shutdown and restart region1.
{Type: ChaosEventTypePreShutdown, Time: firstPreShutdown, Target: region1},
{Type: ChaosEventTypeShutdownComplete, Time: firstShutdownComplete, Target: region1},
{Type: ChaosEventTypePreStartup, Time: firstPreStartup, Target: region1},
{Type: ChaosEventTypeStartupComplete, Time: firstStartupComplete, Target: region1},
// Shutdown and restart region2.
{Type: ChaosEventTypePreShutdown, Time: secondPreShutdown, Target: region2},
{Type: ChaosEventTypeShutdownComplete, Time: secondShutdownComplete, Target: region2},
},
ops: []string{metricA, metricB},
mockPromQueries: []expectPromQuery{
// Restart region1.
{q: makeMetric(metricA, "success", portRegion1), t: scrapeAfterFirstShutdownComplete, retVal: 100},
{q: makeMetric(metricA, "success", portRegion1), t: scrapeBeforeFirstPreStartup, retVal: 100},
{q: makeMetric(metricA, "error", portRegion1), t: scrapeAfterFirstShutdownComplete, retVal: 10},
{q: makeMetric(metricA, "error", portRegion1), t: scrapeBeforeFirstPreStartup, retVal: 1000},
{q: makeMetric(metricA, "success", portRegion2), t: scrapeAfterFirstShutdownComplete, retVal: 100},
{q: makeMetric(metricA, "success", portRegion2), t: scrapeBeforeFirstPreStartup, retVal: 1000},
{q: makeMetric(metricA, "error", portRegion2), t: scrapeAfterFirstShutdownComplete, retVal: 0},
{q: makeMetric(metricA, "error", portRegion2), t: scrapeBeforeFirstPreStartup, retVal: 0},
{q: makeMetric(metricB, "success", portRegion1), t: scrapeAfterFirstShutdownComplete, retVal: 100},
{q: makeMetric(metricB, "success", portRegion1), t: scrapeBeforeFirstPreStartup, retVal: 100},
{q: makeMetric(metricB, "error", portRegion1), t: scrapeAfterFirstShutdownComplete, retVal: 10},
{q: makeMetric(metricB, "error", portRegion1), t: scrapeBeforeFirstPreStartup, retVal: 1000},
{q: makeMetric(metricB, "success", portRegion2), t: scrapeAfterFirstShutdownComplete, retVal: 100},
{q: makeMetric(metricB, "success", portRegion2), t: scrapeBeforeFirstPreStartup, retVal: 1000},
{q: makeMetric(metricB, "error", portRegion2), t: scrapeAfterFirstShutdownComplete, retVal: 0},
{q: makeMetric(metricB, "error", portRegion2), t: scrapeBeforeFirstPreStartup, retVal: 0},
// Shutdown region2.
{q: makeMetric(metricA, "success", portRegion1), t: scrapeAfterFirstStartupComplete, retVal: 100},
{q: makeMetric(metricA, "success", portRegion1), t: scrapeBeforeSecondPreShutdown, retVal: 100}, // should have had successes as it was not the node shutdown.
{q: makeMetric(metricA, "success", portRegion2), t: scrapeAfterFirstStartupComplete, retVal: 100},
{q: makeMetric(metricA, "success", portRegion2), t: scrapeBeforeSecondPreShutdown, retVal: 1000},
{q: makeMetric(metricA, "error", portRegion2), t: scrapeAfterFirstStartupComplete, retVal: 1000},
{q: makeMetric(metricA, "error", portRegion2), t: scrapeBeforeSecondPreShutdown, retVal: 1000},
{q: makeMetric(metricB, "success", portRegion1), t: scrapeAfterFirstStartupComplete, retVal: 100},
{q: makeMetric(metricB, "success", portRegion1), t: scrapeBeforeSecondPreShutdown, retVal: 1000},
{q: makeMetric(metricB, "error", portRegion1), t: scrapeAfterFirstStartupComplete, retVal: 1000},
{q: makeMetric(metricB, "error", portRegion1), t: scrapeBeforeSecondPreShutdown, retVal: 1000},
{q: makeMetric(metricB, "success", portRegion2), t: scrapeAfterFirstStartupComplete, retVal: 100},
{q: makeMetric(metricB, "success", portRegion2), t: scrapeBeforeSecondPreShutdown, retVal: 1000},
{q: makeMetric(metricB, "error", portRegion2), t: scrapeAfterFirstStartupComplete, retVal: 1000},
{q: makeMetric(metricB, "error", portRegion2), t: scrapeBeforeSecondPreShutdown, retVal: 10000}, // unexpected errors during restart.
},
workloadInstances: []workloadInstance{
{
nodes: region1,
prometheusPort: portRegion1,
},
{
nodes: region2,
prometheusPort: portRegion2,
},
},
expectedErrors: []string{
fmt.Sprintf(
`error at from 2020-12-25T00:03:15Z, to 2020-12-25T00:06:20Z on metric %s: expected successes to be increasing, found from 100.000000, to 100.000000`,
makeMetric(metricA, "success", portRegion1),
),
fmt.Sprintf(
`error at from 2020-12-25T00:03:15Z, to 2020-12-25T00:06:20Z on metric %s: expected <=0 errors, found from 1000.000000, to 10000.000000`,
makeMetric(metricB, "error", portRegion2),
),
},
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
ch := make(chan ChaosEvent)
ep := tpccChaosEventProcessor{
workloadInstances: tc.workloadInstances,
ops: tc.ops,
ch: ch,
allowZeroSuccessDuringUptime: tc.allowZeroSuccessDuringUptime,
maxErrorsDuringUptime: tc.maxErrorsDuringUptime,
promClient: func(ctrl *gomock.Controller) prometheus.Client {
c := NewMockClient(ctrl)
e := c.EXPECT()
for _, m := range tc.mockPromQueries {
e.Query(ctx, m.q, m.t).Return(
model.Value(model.Vector{&model.Sample{Value: m.retVal}}),
nil,
nil,
)
}
return c
}(ctrl),
}
l, err := (&logger.Config{}).NewLogger("")
require.NoError(t, err)
ep.listen(ctx, l)
for _, chaosEvent := range tc.chaosEvents {
ch <- chaosEvent
}
close(ch)
if len(tc.expectedErrors) == 0 {
require.NoError(t, ep.err())
} else {
require.Error(t, ep.err())
// The first error found should be exposed.
require.EqualError(t, ep.err(), tc.expectedErrors[0])
				// Check that each of the combined errors matches.
require.Len(t, ep.errs, len(tc.expectedErrors))
for i, err := range ep.errs {
require.EqualError(t, err, tc.expectedErrors[i])
}
}
})
}
}
| pkg/cmd/roachtest/tests/drt_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/bd4638acbc9359bc0a02bbd05adf63ccc09fcde5 | [
0.0007482279324904084,
0.00019614708435256034,
0.0001639697002246976,
0.00017685859347693622,
0.00009787927410798147
] |
{
"id": 5,
"code_window": [
"\n",
"\tstatusMap := make(map[roachpb.NodeID]livenesspb.NodeLivenessStatus, len(livenesses))\n",
"\tfor _, liveness := range livenesses {\n",
"\t\tstatus := storepool.LivenessStatus(liveness, now, threshold)\n",
"\t\tstatusMap[liveness.NodeID] = status\n",
"\t}\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"\tfor nodeID, vitality := range nodeVitalityMap {\n",
"\t\tlivenesses = append(livenesses, vitality.GenLiveness())\n",
"\t\tstatusMap[nodeID] = vitality.LivenessStatus()\n"
],
"file_path": "pkg/server/admin.go",
"type": "replace",
"edit_start_line_idx": 2063
} | // Copyright 2022 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
import { createSelector } from "reselect";
import { AppState } from "src/store/reducers";
import { localStorageSelector } from "src/store/utils/selectors";
import { TxnInsightEvent } from "src/insights";
import { selectTransactionFingerprintID } from "src/selectors/common";
import { FixFingerprintHexValue } from "../../../util";
export const selectTransactionInsights = (state: AppState): TxnInsightEvent[] =>
state.adminUI?.txnInsights?.data?.results;
export const selectTransactionInsightsError = (state: AppState): Error | null =>
state.adminUI?.txnInsights?.lastError;
export const selectTransactionInsightsMaxApiReached = (
state: AppState,
): boolean => state.adminUI?.stmtInsights?.data?.maxSizeReached;
export const selectTxnInsightsByFingerprint = createSelector(
selectTransactionInsights,
selectTransactionFingerprintID,
(execInsights, fingerprintID) => {
if (fingerprintID == null) {
return null;
}
const id = FixFingerprintHexValue(BigInt(fingerprintID).toString(16));
return execInsights?.filter(txn => txn.transactionFingerprintID === id);
},
);
export const selectSortSetting = createSelector(
localStorageSelector,
localStorage => localStorage["sortSetting/InsightsPage"],
);
export const selectFilters = createSelector(
localStorageSelector,
localStorage => localStorage["filters/InsightsPage"],
);
// Show the data as 'Loading' when the request is in flight AND the
// data is invalid or null.
export const selectTransactionInsightsLoading = (state: AppState): boolean =>
state.adminUI?.txnInsights?.inFlight &&
(!state.adminUI?.txnInsights?.valid || !state.adminUI?.txnInsights?.data);
| pkg/ui/workspaces/cluster-ui/src/store/insights/transactionInsights/transactionInsights.selectors.ts | 0 | https://github.com/cockroachdb/cockroach/commit/bd4638acbc9359bc0a02bbd05adf63ccc09fcde5 | [
0.0001765224733389914,
0.00017431350715924054,
0.00017113267676904798,
0.00017468223813921213,
0.0000016845212940097554
] |
{
"id": 5,
"code_window": [
"\n",
"\tstatusMap := make(map[roachpb.NodeID]livenesspb.NodeLivenessStatus, len(livenesses))\n",
"\tfor _, liveness := range livenesses {\n",
"\t\tstatus := storepool.LivenessStatus(liveness, now, threshold)\n",
"\t\tstatusMap[liveness.NodeID] = status\n",
"\t}\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"\tfor nodeID, vitality := range nodeVitalityMap {\n",
"\t\tlivenesses = append(livenesses, vitality.GenLiveness())\n",
"\t\tstatusMap[nodeID] = vitality.LivenessStatus()\n"
],
"file_path": "pkg/server/admin.go",
"type": "replace",
"edit_start_line_idx": 2063
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package xform
import (
"context"
"math"
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/opt/distribution"
"github.com/cockroachdb/cockroach/pkg/sql/opt/memo"
"github.com/cockroachdb/cockroach/pkg/sql/opt/ordering"
"github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical"
"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/errors"
)
// CanProvidePhysicalProps returns true if the given expression can provide the
// required physical properties. The optimizer uses this to determine whether an
// expression provides a required physical property. If it does not, then the
// optimizer inserts an enforcer operator that is able to provide it.
//
// Some operators, like Select and Project, may not directly provide a required
// physical property, but do "pass through" the requirement to their input.
// Operators that do this should return true from the appropriate canProvide
// method and then pass through that property in the buildChildPhysicalProps
// method.
func CanProvidePhysicalProps(
ctx context.Context, evalCtx *eval.Context, e memo.RelExpr, required *physical.Required,
) bool {
// All operators can provide the Presentation and LimitHint properties, so no
// need to check for that.
canProvideOrdering := e.Op() == opt.SortOp || ordering.CanProvide(e, &required.Ordering)
canProvideDistribution := e.Op() == opt.DistributeOp || distribution.CanProvide(ctx, evalCtx, e, &required.Distribution)
return canProvideOrdering && canProvideDistribution
}
// BuildChildPhysicalProps returns the set of physical properties required of
// the nth child, based upon the properties required of the parent. For example,
// the Project operator passes through any ordering requirement to its child,
// but provides any presentation requirement.
//
// The childProps argument is allocated once by the caller and can be reused
// repeatedly as physical properties are derived for each child. On each call,
// buildChildPhysicalProps updates the childProps argument.
func BuildChildPhysicalProps(
mem *memo.Memo, parent memo.RelExpr, nth int, parentProps *physical.Required,
) *physical.Required {
var childProps physical.Required
// ScalarExprs don't support required physical properties; don't build
// physical properties for them.
if _, ok := parent.Child(nth).(opt.ScalarExpr); ok {
return mem.InternPhysicalProps(&childProps)
}
// Most operations don't require a presentation of their input; these are the
// exceptions.
switch parent.Op() {
case opt.ExplainOp:
childProps.Presentation = parent.(*memo.ExplainExpr).Props.Presentation
case opt.AlterTableSplitOp:
childProps.Presentation = parent.(*memo.AlterTableSplitExpr).Props.Presentation
case opt.AlterTableUnsplitOp:
childProps.Presentation = parent.(*memo.AlterTableUnsplitExpr).Props.Presentation
case opt.AlterTableRelocateOp:
childProps.Presentation = parent.(*memo.AlterTableRelocateExpr).Props.Presentation
case opt.AlterRangeRelocateOp:
childProps.Presentation = parent.(*memo.AlterRangeRelocateExpr).Props.Presentation
case opt.ControlJobsOp:
childProps.Presentation = parent.(*memo.ControlJobsExpr).Props.Presentation
case opt.CancelQueriesOp:
childProps.Presentation = parent.(*memo.CancelQueriesExpr).Props.Presentation
case opt.CancelSessionsOp:
childProps.Presentation = parent.(*memo.CancelSessionsExpr).Props.Presentation
case opt.ExportOp:
childProps.Presentation = parent.(*memo.ExportExpr).Props.Presentation
}
childProps.Ordering = ordering.BuildChildRequired(parent, &parentProps.Ordering, nth)
childProps.Distribution = distribution.BuildChildRequired(parent, &parentProps.Distribution, nth)
switch parent.Op() {
case opt.LimitOp:
if constLimit, ok := parent.(*memo.LimitExpr).Limit.(*memo.ConstExpr); ok {
childProps.LimitHint = float64(*constLimit.Value.(*tree.DInt))
if childProps.LimitHint <= 0 {
childProps.LimitHint = 1
}
}
case opt.OffsetOp:
if parentProps.LimitHint == 0 {
break
}
if constOffset, ok := parent.(*memo.OffsetExpr).Offset.(*memo.ConstExpr); ok {
childProps.LimitHint = parentProps.LimitHint + float64(*constOffset.Value.(*tree.DInt))
if childProps.LimitHint <= 0 {
childProps.LimitHint = 1
}
}
case opt.IndexJoinOp:
// For an index join, every input row results in exactly one output row.
childProps.LimitHint = parentProps.LimitHint
case opt.ExceptOp, opt.ExceptAllOp, opt.IntersectOp, opt.IntersectAllOp,
opt.UnionOp, opt.UnionAllOp, opt.LocalityOptimizedSearchOp:
// TODO(celine): Set operation limits need further thought; for example,
// the right child of an ExceptOp should not be limited.
childProps.LimitHint = parentProps.LimitHint
case opt.DistinctOnOp:
distinctCount := parent.Relational().Statistics().RowCount
if parentProps.LimitHint > 0 {
// TODO(mgartner): If the expression is a streaming DistinctOn, this
// estimated limit hint is much lower than it should be.
childProps.LimitHint = distinctOnLimitHint(distinctCount, parentProps.LimitHint)
}
case opt.GroupByOp:
if parentProps.LimitHint == 0 {
break
}
private := parent.Private().(*memo.GroupingPrivate)
groupingColCount := private.GroupingCols.Len()
if groupingColCount == 0 {
break
}
outputRows := parent.Relational().Statistics().RowCount
if outputRows == 0 || outputRows < parentProps.LimitHint {
break
}
// For streaming GroupBy expressions we can estimate the number of input
// rows needed to produce LimitHint output rows.
streamingType := private.GroupingOrderType(&parentProps.Ordering)
if streamingType != memo.NoStreaming {
if input, ok := parent.Child(nth).(memo.RelExpr); ok {
inputRows := input.Relational().Statistics().RowCount
childProps.LimitHint = streamingGroupByInputLimitHint(inputRows, outputRows, parentProps.LimitHint)
}
}
case opt.SelectOp, opt.LookupJoinOp:
// These operations are assumed to produce a constant number of output rows
// for each input row, independent of already-processed rows.
outputRows := parent.Relational().Statistics().RowCount
if outputRows == 0 || outputRows < parentProps.LimitHint {
break
}
if input, ok := parent.Child(nth).(memo.RelExpr); ok {
inputRows := input.Relational().Statistics().RowCount
switch parent.Op() {
case opt.SelectOp:
// outputRows / inputRows is roughly the number of output rows produced
// for each input row. Reduce the number of required input rows so that
// the expected number of output rows is equal to the parent limit hint.
childProps.LimitHint = parentProps.LimitHint * inputRows / outputRows
case opt.LookupJoinOp:
childProps.LimitHint = lookupJoinInputLimitHint(inputRows, outputRows, parentProps.LimitHint)
}
}
case opt.OrdinalityOp, opt.ProjectOp, opt.ProjectSetOp:
childProps.LimitHint = parentProps.LimitHint
case opt.TopKOp:
if parentProps.Ordering.Any() {
break
}
outputRows := parent.Relational().Statistics().RowCount
topk := parent.(*memo.TopKExpr)
k := float64(topk.K)
if outputRows == 0 || outputRows < k {
break
}
if input, ok := parent.Child(nth).(memo.RelExpr); ok {
inputRows := input.Relational().Statistics().RowCount
if limitHint := topKInputLimitHint(mem, topk, inputRows, outputRows, k); limitHint < inputRows {
childProps.LimitHint = limitHint
}
}
}
if childProps.LimitHint < 0 {
panic(errors.AssertionFailedf("negative limit hint"))
}
// If properties haven't changed, no need to re-intern them.
if childProps.Equals(parentProps) {
return parentProps
}
return mem.InternPhysicalProps(&childProps)
}
// distinctOnLimitHint returns a limit hint for the distinct operation. Given a
// table with distinctCount distinct rows, distinctOnLimitHint will return an
// estimated number of rows to scan that in most cases will yield at least
// neededRows distinct rows while still substantially reducing the number of
// unnecessarily scanned rows.
//
// Assume that when examining a row, each of the distinctCount possible values
// has an equal probability of appearing. The expected number of rows that must
// be examined to collect neededRows distinct rows is
//
// E[examined rows] = distinctCount * (H_{distinctCount} - H_{distinctCount-neededRows})
//
// where distinctCount > neededRows and H_{i} is the ith harmonic number. This
// is a variation on the coupon collector's problem:
// https://en.wikipedia.org/wiki/Coupon_collector%27s_problem
//
// Since values are not uniformly distributed in practice, the limit hint is
// calculated by multiplying E[examined rows] by an experimentally-chosen factor
// to provide a small overestimate of the actual number of rows needed in most
// cases.
//
// This method is least accurate when attempting to return all or nearly all the
// distinct values in the table, since the actual distribution of values becomes
// the primary factor in how long it takes to "collect" the least-likely values.
// As a result, cases where this limit hint may be poor (too low or more than
// twice as high as needed) tend to occur when distinctCount is very close to
// neededRows.
func distinctOnLimitHint(distinctCount, neededRows float64) float64 {
// The harmonic function below is not intended for values under 1 (for one,
// it's not monotonic until 0.5); make sure we never return negative results.
if neededRows >= distinctCount-1.0 {
return 0
}
// Return an approximation of the nth harmonic number.
H := func(n float64) float64 {
// Euler–Mascheroni constant; this is included for clarity but is canceled
// out in our formula below.
const gamma = 0.5772156649
return math.Log(n) + gamma + 1/(2*n)
}
// Coupon collector's estimate, for a uniformly-distributed table.
uniformPrediction := distinctCount * (H(distinctCount) - H(distinctCount-neededRows))
// This multiplier was chosen based on simulating the distinct operation on
// hundreds of thousands of nonuniformly distributed tables with values of
// neededRows and distinctCount ranging between 1 and 1000.
multiplier := 0.15*neededRows/(distinctCount-neededRows) + 1.2
// In 91.6% of trials, this scaled estimate was between a 0% and 30%
// overestimate, and in 97.5% it was between a 0% and 100% overestimate.
//
// In 1.8% of tests, the prediction was for an insufficient number of rows, and
// in 0.7% of tests, the predicted number of rows was more than twice the actual
// number required.
return uniformPrediction * multiplier
}
// BuildChildPhysicalPropsScalar is like BuildChildPhysicalProps, but for
// when the parent is a scalar expression.
func BuildChildPhysicalPropsScalar(mem *memo.Memo, parent opt.Expr, nth int) *physical.Required {
var childProps physical.Required
_, childIsRelExpr := parent.Child(nth).(memo.RelExpr)
switch parent.Op() {
case opt.ArrayFlattenOp:
if nth == 0 {
af := parent.(*memo.ArrayFlattenExpr)
childProps.Ordering.FromOrdering(af.Ordering)
// ArrayFlatten might have extra ordering columns. Use the Presentation property
// to get rid of them.
childProps.Presentation = physical.Presentation{
opt.AliasedColumn{
// Keep the existing label for the column.
Alias: mem.Metadata().ColumnMeta(af.RequestedCol).Alias,
ID: af.RequestedCol,
},
}
}
default:
if !childIsRelExpr {
return physical.MinRequired
}
}
if childIsRelExpr && mem.RootProps() != nil {
// A relational expression whose parent is a scalar expression should
// require the distribution of the root, because the result ends up in the
// local gateway region.
childProps.Distribution = mem.RootProps().Distribution
}
return mem.InternPhysicalProps(&childProps)
}
| pkg/sql/opt/xform/physical_props.go | 0 | https://github.com/cockroachdb/cockroach/commit/bd4638acbc9359bc0a02bbd05adf63ccc09fcde5 | [
0.0011914365459233522,
0.0002135569229722023,
0.00016228533058892936,
0.00017558310355525464,
0.00018279730284120888
] |
{
"id": 11,
"code_window": [
"\tIsLive(roachpb.NodeID) (bool, error)\n",
"}\n",
"\n",
"// Container optionally gives access to liveness information about\n",
"// the KV nodes. It is typically not available to anyone but the system tenant.\n",
"type Container struct {\n"
],
"labels": [
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tScanNodeVitalityFromKV(ctx context.Context) (livenesspb.NodeVitalityMap, error)\n"
],
"file_path": "pkg/sql/optionalnodeliveness/node_liveness.go",
"type": "add",
"edit_start_line_idx": 29
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package optionalnodeliveness
import (
"context"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/errorutil"
)
// Interface is the interface used in Container.
type Interface interface {
Self() (livenesspb.Liveness, bool)
GetLiveness(nodeID roachpb.NodeID) (liveness.Record, bool)
GetLivenessesFromKV(ctx context.Context) ([]livenesspb.Liveness, error)
IsAvailable(roachpb.NodeID) bool
IsAvailableNotDraining(roachpb.NodeID) bool
IsLive(roachpb.NodeID) (bool, error)
}
// Container optionally gives access to liveness information about
// the KV nodes. It is typically not available to anyone but the system tenant.
type Container struct {
w errorutil.TenantSQLDeprecatedWrapper
}
// MakeContainer initializes an Container wrapping a
// (possibly nil) *NodeLiveness.
//
// Use of node liveness from within the SQL layer is **deprecated**. Please do
// not introduce new uses of it.
//
// See TenantSQLDeprecatedWrapper for details.
func MakeContainer(nl Interface) Container {
return Container{
w: errorutil.MakeTenantSQLDeprecatedWrapper(nl, nl != nil),
}
}
// OptionalErr returns the NodeLiveness instance if available. Otherwise, it
// returns an error referring to the optionally passed in issues.
//
// Use of NodeLiveness from within the SQL layer is **deprecated**. Please do
// not introduce new uses of it.
func (nl *Container) OptionalErr(issue int) (Interface, error) {
v, err := nl.w.OptionalErr(issue)
if err != nil {
return nil, err
}
return v.(Interface), nil
}
var _ = (*Container)(nil).OptionalErr // silence unused lint
// Optional returns the NodeLiveness instance and true if available.
// Otherwise, returns nil and false. Prefer OptionalErr where possible.
//
// Use of NodeLiveness from within the SQL layer is **deprecated**. Please do
// not introduce new uses of it.
func (nl *Container) Optional(issue int) (Interface, bool) {
v, ok := nl.w.Optional()
if !ok {
return nil, false
}
return v.(Interface), true
}
| pkg/sql/optionalnodeliveness/node_liveness.go | 1 | https://github.com/cockroachdb/cockroach/commit/bd4638acbc9359bc0a02bbd05adf63ccc09fcde5 | [
0.9977109432220459,
0.6578092575073242,
0.00016419343592133373,
0.9442753791809082,
0.42473095655441284
] |
{
"id": 11,
"code_window": [
"\tIsLive(roachpb.NodeID) (bool, error)\n",
"}\n",
"\n",
"// Container optionally gives access to liveness information about\n",
"// the KV nodes. It is typically not available to anyone but the system tenant.\n",
"type Container struct {\n"
],
"labels": [
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tScanNodeVitalityFromKV(ctx context.Context) (livenesspb.NodeVitalityMap, error)\n"
],
"file_path": "pkg/sql/optionalnodeliveness/node_liveness.go",
"type": "add",
"edit_start_line_idx": 29
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package main
import (
"io"
"github.com/cockroachdb/cockroach/pkg/sql/opt/optgen/lang"
)
// explorerGen generates code for the explorer, which searches for logically
// equivalent expressions and adds them to the memo.
type explorerGen struct {
compiled *lang.CompiledExpr
md *metadata
w *matchWriter
ruleGen newRuleGen
}
func (g *explorerGen) generate(compiled *lang.CompiledExpr, w io.Writer) {
g.compiled = compiled
g.md = newMetadata(compiled, "xform")
g.w = &matchWriter{writer: w}
g.ruleGen.init(compiled, g.md, g.w)
g.w.writeIndent("package xform\n\n")
g.w.nestIndent("import (\n")
g.w.writeIndent("\"github.com/cockroachdb/cockroach/pkg/sql/opt\"\n")
g.w.writeIndent("\"github.com/cockroachdb/cockroach/pkg/sql/opt/memo\"\n")
g.w.writeIndent("\"github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical\"\n")
g.w.writeIndent("\"github.com/cockroachdb/cockroach/pkg/sql/sem/tree\"\n")
g.w.unnest(")\n\n")
g.genDispatcher()
g.genRuleFuncs()
}
// genDispatcher generates a switch statement that calls an exploration method
// for each define statement that has an explore rule defined. The code is
// similar to this:
//
// func (_e *explorer) exploreGroupMember(
// state *exploreState,
// member memo.RelExpr,
// ordinal int,
// ) (_fullyExplored bool) {
// switch t := member.(type) {
// case *memo.ScanNode:
// return _e.exploreScan(state, t, ordinal)
// case *memo.SelectNode:
// return _e.exploreSelect(state, t, ordinal)
// }
//
// // No rules for other operator types.
// return true
// }
func (g *explorerGen) genDispatcher() {
g.w.nestIndent("func (_e *explorer) exploreGroupMember(\n")
g.w.writeIndent("state *exploreState,\n")
g.w.writeIndent("member memo.RelExpr,\n")
g.w.writeIndent("ordinal int,\n")
g.w.writeIndent("required *physical.Required,\n")
g.w.unnest(") (_fullyExplored bool)")
g.w.nest(" {\n")
g.w.writeIndent("switch t := member.(type) {\n")
for _, define := range g.compiled.Defines {
// Only include exploration rules.
rules := g.compiled.LookupMatchingRules(string(define.Name)).WithTag("Explore")
if len(rules) > 0 {
opTyp := g.md.typeOf(define)
format := "case *%s: return _e.explore%s(state, t, ordinal, required)\n"
g.w.writeIndent(format, opTyp.name, define.Name)
}
}
g.w.writeIndent("}\n\n")
g.w.writeIndent("// No rules for other operator types.\n")
g.w.writeIndent("return true\n")
g.w.unnest("}\n\n")
}
// genRuleFuncs generates a method for each operator that has at least one
// explore rule defined. The code is similar to this:
//
// func (_e *explorer) exploreScan(
// _rootState *exploreState,
// _root *memo.ScanNode,
// _rootOrd int,
// _required *physical.Required,
// ) (_fullyExplored bool) {
// _fullyExplored = true
//
// ... exploration rule code goes here ...
//
// return _fullyExplored
// }
func (g *explorerGen) genRuleFuncs() {
for _, define := range g.compiled.Defines {
rules := g.compiled.LookupMatchingRules(string(define.Name)).WithTag("Explore")
if len(rules) == 0 {
continue
}
opTyp := g.md.typeOf(define)
g.w.nestIndent("func (_e *explorer) explore%s(\n", define.Name)
g.w.writeIndent("_rootState *exploreState,\n")
g.w.writeIndent("_root *%s,\n", opTyp.name)
g.w.writeIndent("_rootOrd int,\n")
g.w.writeIndent("_required *physical.Required,\n")
g.w.unnest(") (_fullyExplored bool)")
g.w.nest(" {\n")
g.w.writeIndent("opt.MaybeInjectOptimizerTestingPanic(_e.ctx, _e.evalCtx)\n")
g.w.writeIndent("_fullyExplored = true\n\n")
sortRulesByPriority(rules)
for _, rule := range rules {
g.ruleGen.genRule(rule)
}
g.w.writeIndent("return _fullyExplored\n")
g.w.unnest("}\n\n")
}
}
| pkg/sql/opt/optgen/cmd/optgen/explorer_gen.go | 0 | https://github.com/cockroachdb/cockroach/commit/bd4638acbc9359bc0a02bbd05adf63ccc09fcde5 | [
0.00017800615751184523,
0.00016992910241242498,
0.0001660525449551642,
0.00016903458163142204,
0.000003622893927968107
] |
{
"id": 11,
"code_window": [
"\tIsLive(roachpb.NodeID) (bool, error)\n",
"}\n",
"\n",
"// Container optionally gives access to liveness information about\n",
"// the KV nodes. It is typically not available to anyone but the system tenant.\n",
"type Container struct {\n"
],
"labels": [
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tScanNodeVitalityFromKV(ctx context.Context) (livenesspb.NodeVitalityMap, error)\n"
],
"file_path": "pkg/sql/optionalnodeliveness/node_liveness.go",
"type": "add",
"edit_start_line_idx": 29
} | // Copyright 2022 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"context"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
)
func (p *planner) updateComment(
ctx context.Context, objID descpb.ID, subID uint32, cmtType catalogkeys.CommentType, cmt string,
) error {
b := p.Txn().NewBatch()
if err := p.descCollection.WriteCommentToBatch(
ctx,
p.ExtendedEvalContext().Tracing.KVTracingEnabled(),
b,
catalogkeys.MakeCommentKey(uint32(objID), subID, cmtType),
cmt,
); err != nil {
return err
}
return p.Txn().Run(ctx, b)
}
func (p *planner) deleteComment(
ctx context.Context, objID descpb.ID, subID uint32, cmtType catalogkeys.CommentType,
) error {
b := p.Txn().NewBatch()
if err := p.descCollection.DeleteCommentInBatch(
ctx,
p.ExtendedEvalContext().Tracing.KVTracingEnabled(),
b,
catalogkeys.MakeCommentKey(uint32(objID), subID, cmtType),
); err != nil {
return err
}
return p.Txn().Run(ctx, b)
}
| pkg/sql/comment.go | 0 | https://github.com/cockroachdb/cockroach/commit/bd4638acbc9359bc0a02bbd05adf63ccc09fcde5 | [
0.00017830614524427801,
0.0001716896367724985,
0.00016472581773996353,
0.00017035122436936945,
0.000005362682259146823
] |
{
"id": 11,
"code_window": [
"\tIsLive(roachpb.NodeID) (bool, error)\n",
"}\n",
"\n",
"// Container optionally gives access to liveness information about\n",
"// the KV nodes. It is typically not available to anyone but the system tenant.\n",
"type Container struct {\n"
],
"labels": [
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tScanNodeVitalityFromKV(ctx context.Context) (livenesspb.NodeVitalityMap, error)\n"
],
"file_path": "pkg/sql/optionalnodeliveness/node_liveness.go",
"type": "add",
"edit_start_line_idx": 29
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package cli
import (
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/testutils/datapathutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/datadriven"
)
// This tests doctoring a secure cluster.
func TestDoctorCluster(t *testing.T) {
defer leaktest.AfterTest(t)()
c := NewCLITest(TestCLIParams{T: t})
defer c.Cleanup()
// Introduce a corruption in the descriptor table by adding a table and
// removing its parent.
c.RunWithArgs([]string{"sql", "-e", strings.Join([]string{
"CREATE TABLE to_drop (id INT)",
"DROP TABLE to_drop",
"CREATE TABLE foo (id INT)",
"INSERT INTO system.users VALUES ('node', NULL, true, 3)",
"GRANT node TO root",
"DELETE FROM system.namespace WHERE name = 'foo'",
"SELECT pg_catalog.pg_sleep(1)",
}, ";\n"),
})
t.Run("examine", func(t *testing.T) {
out, err := c.RunWithCapture("debug doctor examine cluster")
if err != nil {
t.Fatal(err)
}
// Using datadriven allows TESTFLAGS=-rewrite.
datadriven.RunTest(t, datapathutils.TestDataPath(t, "doctor", "test_examine_cluster"), func(t *testing.T, td *datadriven.TestData) string {
return out
})
})
}
// This tests the operation of zip over secure clusters.
func TestDoctorZipDir(t *testing.T) {
defer leaktest.AfterTest(t)()
c := NewCLITest(TestCLIParams{T: t, NoServer: true})
defer c.Cleanup()
t.Run("examine", func(t *testing.T) {
out, err := c.RunWithCapture("debug doctor examine zipdir testdata/doctor/debugzip 21.1-52")
if err != nil {
t.Fatal(err)
}
// Using datadriven allows TESTFLAGS=-rewrite.
datadriven.RunTest(t, datapathutils.TestDataPath(t, "doctor", "test_examine_zipdir"), func(t *testing.T, td *datadriven.TestData) string {
return out
})
})
t.Run("examine", func(t *testing.T) {
out, err := c.RunWithCapture("debug doctor examine zipdir testdata/doctor/debugzip-with-quotes")
if err != nil {
t.Fatal(err)
}
// Using datadriven allows TESTFLAGS=-rewrite.
datadriven.RunTest(t, datapathutils.TestDataPath(t, "doctor", "test_examine_zipdir_with_quotes"), func(t *testing.T, td *datadriven.TestData) string {
return out
})
})
t.Run("recreate", func(t *testing.T) {
out, err := c.RunWithCapture("debug doctor recreate zipdir testdata/doctor/debugzip")
if err != nil {
t.Fatal(err)
}
// Using datadriven allows TESTFLAGS=-rewrite.
datadriven.RunTest(t, datapathutils.TestDataPath(t, "doctor", "test_recreate_zipdir"), func(t *testing.T, td *datadriven.TestData) string {
return out
})
})
t.Run("recreate-json", func(t *testing.T) {
out, err := c.RunWithCapture("debug doctor recreate zipdir testdata/doctor/debugzip-json")
if err != nil {
t.Fatal(err)
}
// Using datadriven allows TESTFLAGS=-rewrite.
datadriven.RunTest(t, datapathutils.TestDataPath(t, "doctor", "test_recreate_zipdir-json"), func(t *testing.T, td *datadriven.TestData) string {
return out
})
})
t.Run("deprecated doctor zipdir with verbose", func(t *testing.T) {
out, err := c.RunWithCapture("debug doctor zipdir testdata/doctor/debugzip 21.11-52 --verbose")
if err != nil {
t.Fatal(err)
}
// Using datadriven allows TESTFLAGS=-rewrite.
datadriven.RunTest(t, datapathutils.TestDataPath(t, "doctor", "test_examine_zipdir_verbose"), func(t *testing.T, td *datadriven.TestData) string {
return out
})
})
}
| pkg/cli/doctor_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/bd4638acbc9359bc0a02bbd05adf63ccc09fcde5 | [
0.00017780462803784758,
0.0001714730024104938,
0.00016444428183604032,
0.00017303148342762142,
0.000003901026957464637
] |
{
"id": 0,
"code_window": [
"\treturn 0, 0, InvalidRange{}\n",
"}\n",
"\n",
"func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (xmv FileInfo, e error) {\n",
"\tmetaHashes := make([]string, len(metaArr))\n",
"\th := sha256.New()\n",
"\tfor i, meta := range metaArr {\n",
"\t\tif meta.IsValid() && meta.ModTime.Equal(modTime) && meta.DataDir == dataDir {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (FileInfo, error) {\n",
"\t// with less quorum return error.\n",
"\tif quorum < 2 {\n",
"\t\treturn FileInfo{}, errErasureReadQuorum\n",
"\t}\n"
],
"file_path": "cmd/erasure-metadata.go",
"type": "replace",
"edit_start_line_idx": 241
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"net/http"
"sort"
"time"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/replication"
"github.com/minio/minio/pkg/sync/errgroup"
)
const erasureAlgorithm = "rs-vandermonde"
// byObjectPartNumber is a collection satisfying sort.Interface.
type byObjectPartNumber []ObjectPartInfo
func (t byObjectPartNumber) Len() int { return len(t) }
func (t byObjectPartNumber) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t byObjectPartNumber) Less(i, j int) bool { return t[i].Number < t[j].Number }
// AddChecksumInfo adds a checksum of a part.
func (e *ErasureInfo) AddChecksumInfo(ckSumInfo ChecksumInfo) {
for i, sum := range e.Checksums {
if sum.PartNumber == ckSumInfo.PartNumber {
e.Checksums[i] = ckSumInfo
return
}
}
e.Checksums = append(e.Checksums, ckSumInfo)
}
// GetChecksumInfo - get checksum of a part.
func (e ErasureInfo) GetChecksumInfo(partNumber int) (ckSum ChecksumInfo) {
for _, sum := range e.Checksums {
if sum.PartNumber == partNumber {
// Return the checksum
return sum
}
}
return ChecksumInfo{}
}
// ShardFileSize - returns final erasure size from original size.
func (e ErasureInfo) ShardFileSize(totalLength int64) int64 {
if totalLength == 0 {
return 0
}
if totalLength == -1 {
return -1
}
numShards := totalLength / e.BlockSize
lastBlockSize := totalLength % e.BlockSize
lastShardSize := ceilFrac(lastBlockSize, int64(e.DataBlocks))
return numShards*e.ShardSize() + lastShardSize
}
// ShardSize - returns actual shard size from erasure blockSize.
func (e ErasureInfo) ShardSize() int64 {
return ceilFrac(e.BlockSize, int64(e.DataBlocks))
}
// IsValid - tells if erasure info fields are valid.
func (fi FileInfo) IsValid() bool {
if fi.Deleted {
// Delete marker has no data, no need to check
// for erasure coding information
return true
}
dataBlocks := fi.Erasure.DataBlocks
parityBlocks := fi.Erasure.ParityBlocks
correctIndexes := (fi.Erasure.Index > 0 &&
fi.Erasure.Index <= dataBlocks+parityBlocks &&
len(fi.Erasure.Distribution) == (dataBlocks+parityBlocks))
return ((dataBlocks >= parityBlocks) &&
(dataBlocks != 0) && (parityBlocks != 0) &&
correctIndexes)
}
// ToObjectInfo - Converts metadata to object info.
func (fi FileInfo) ToObjectInfo(bucket, object string) ObjectInfo {
object = decodeDirObject(object)
versionID := fi.VersionID
if (globalBucketVersioningSys.Enabled(bucket) || globalBucketVersioningSys.Suspended(bucket)) && versionID == "" {
versionID = nullVersionID
}
objInfo := ObjectInfo{
IsDir: HasSuffix(object, SlashSeparator),
Bucket: bucket,
Name: object,
VersionID: versionID,
IsLatest: fi.IsLatest,
DeleteMarker: fi.Deleted,
Size: fi.Size,
ModTime: fi.ModTime,
Legacy: fi.XLV1,
ContentType: fi.Metadata["content-type"],
ContentEncoding: fi.Metadata["content-encoding"],
NumVersions: fi.NumVersions,
SuccessorModTime: fi.SuccessorModTime,
}
// Update expires
var (
t time.Time
e error
)
if exp, ok := fi.Metadata["expires"]; ok {
if t, e = time.Parse(http.TimeFormat, exp); e == nil {
objInfo.Expires = t.UTC()
}
}
objInfo.backendType = BackendErasure
// Extract etag from metadata.
objInfo.ETag = extractETag(fi.Metadata)
// Add user tags to the object info
tags := fi.Metadata[xhttp.AmzObjectTagging]
if len(tags) != 0 {
objInfo.UserTags = tags
}
// Add replication status to the object info
objInfo.ReplicationStatus = replication.StatusType(fi.Metadata[xhttp.AmzBucketReplicationStatus])
if fi.Deleted {
objInfo.ReplicationStatus = replication.StatusType(fi.DeleteMarkerReplicationStatus)
}
objInfo.TransitionStatus = fi.TransitionStatus
objInfo.transitionedObjName = fi.TransitionedObjName
objInfo.TransitionTier = fi.TransitionTier
// etag/md5Sum has already been extracted. We need to
// remove to avoid it from appearing as part of
// response headers. e.g, X-Minio-* or X-Amz-*.
// Tags have also been extracted, we remove that as well.
objInfo.UserDefined = cleanMetadata(fi.Metadata)
// All the parts per object.
objInfo.Parts = fi.Parts
// Update storage class
if sc, ok := fi.Metadata[xhttp.AmzStorageClass]; ok {
objInfo.StorageClass = sc
} else {
objInfo.StorageClass = globalMinioDefaultStorageClass
}
objInfo.VersionPurgeStatus = fi.VersionPurgeStatus
// set restore status for transitioned object
restoreHdr, ok := fi.Metadata[xhttp.AmzRestore]
if ok {
if restoreStatus, err := parseRestoreObjStatus(restoreHdr); err == nil {
objInfo.RestoreOngoing = restoreStatus.Ongoing()
objInfo.RestoreExpires, _ = restoreStatus.Expiry()
}
}
// Success.
return objInfo
}
// objectPartIndex - returns the index of matching object part number.
func objectPartIndex(parts []ObjectPartInfo, partNumber int) int {
for i, part := range parts {
if partNumber == part.Number {
return i
}
}
return -1
}
// AddObjectPart - add a new object part in order.
func (fi *FileInfo) AddObjectPart(partNumber int, partETag string, partSize int64, actualSize int64) {
partInfo := ObjectPartInfo{
Number: partNumber,
ETag: partETag,
Size: partSize,
ActualSize: actualSize,
}
// Update part info if it already exists.
for i, part := range fi.Parts {
if partNumber == part.Number {
fi.Parts[i] = partInfo
return
}
}
// Proceed to include new part info.
fi.Parts = append(fi.Parts, partInfo)
// Parts in FileInfo should be in sorted order by part number.
sort.Sort(byObjectPartNumber(fi.Parts))
}
// ObjectToPartOffset - translate offset of an object to offset of its individual part.
func (fi FileInfo) ObjectToPartOffset(ctx context.Context, offset int64) (partIndex int, partOffset int64, err error) {
if offset == 0 {
// Special case - if offset is 0, then partIndex and partOffset are always 0.
return 0, 0, nil
}
partOffset = offset
// Seek until object offset maps to a particular part offset.
for i, part := range fi.Parts {
partIndex = i
// Offset is smaller than size we have reached the proper part offset.
if partOffset < part.Size {
return partIndex, partOffset, nil
}
		// Continue towards the next part.
partOffset -= part.Size
}
logger.LogIf(ctx, InvalidRange{})
// Offset beyond the size of the object return InvalidRange.
return 0, 0, InvalidRange{}
}
func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (xmv FileInfo, e error) {
metaHashes := make([]string, len(metaArr))
h := sha256.New()
for i, meta := range metaArr {
if meta.IsValid() && meta.ModTime.Equal(modTime) && meta.DataDir == dataDir {
for _, part := range meta.Parts {
h.Write([]byte(fmt.Sprintf("part.%d", part.Number)))
}
h.Write([]byte(fmt.Sprintf("%v", meta.Erasure.Distribution)))
// make sure that length of Data is same
h.Write([]byte(fmt.Sprintf("%v", len(meta.Data))))
metaHashes[i] = hex.EncodeToString(h.Sum(nil))
h.Reset()
}
}
metaHashCountMap := make(map[string]int)
for _, hash := range metaHashes {
if hash == "" {
continue
}
metaHashCountMap[hash]++
}
maxHash := ""
maxCount := 0
for hash, count := range metaHashCountMap {
if count > maxCount {
maxCount = count
maxHash = hash
}
}
if maxCount < quorum {
return FileInfo{}, errErasureReadQuorum
}
for i, hash := range metaHashes {
if hash == maxHash {
return metaArr[i], nil
}
}
return FileInfo{}, errErasureReadQuorum
}
// pickValidFileInfo - picks one valid FileInfo content and returns from a
// slice of FileInfo.
func pickValidFileInfo(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (xmv FileInfo, e error) {
return findFileInfoInQuorum(ctx, metaArr, modTime, dataDir, quorum)
}
// writeUniqueFileInfo - writes unique `xl.meta` content for each disk concurrently.
func writeUniqueFileInfo(ctx context.Context, disks []StorageAPI, bucket, prefix string, files []FileInfo, quorum int) ([]StorageAPI, error) {
g := errgroup.WithNErrs(len(disks))
// Start writing `xl.meta` to all disks in parallel.
for index := range disks {
index := index
g.Go(func() error {
if disks[index] == nil {
return errDiskNotFound
}
// Pick one FileInfo for a disk at index.
fi := files[index]
fi.Erasure.Index = index + 1
if fi.IsValid() {
return disks[index].WriteMetadata(ctx, bucket, prefix, fi)
}
return errCorruptedFormat
}, index)
}
// Wait for all the routines.
mErrs := g.Wait()
err := reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, quorum)
return evalDisks(disks, mErrs), err
}
// Returns per object readQuorum and writeQuorum
// readQuorum is the min required disks to read data.
// writeQuorum is the min required disks to write data.
func objectQuorumFromMeta(ctx context.Context, partsMetaData []FileInfo, errs []error, defaultParityCount int) (objectReadQuorum, objectWriteQuorum int, err error) {
// get the latest updated Metadata and a count of all the latest updated FileInfo(s)
latestFileInfo, err := getLatestFileInfo(ctx, partsMetaData, errs)
if err != nil {
return 0, 0, err
}
dataBlocks := latestFileInfo.Erasure.DataBlocks
parityBlocks := globalStorageClass.GetParityForSC(latestFileInfo.Metadata[xhttp.AmzStorageClass])
if parityBlocks <= 0 {
parityBlocks = defaultParityCount
}
writeQuorum := dataBlocks
if dataBlocks == parityBlocks {
writeQuorum++
}
	// Since all the valid erasure-coded metadata updated at the same time is equivalent, pass dataBlocks
	// from latestFileInfo to get the quorum.
return dataBlocks, writeQuorum, nil
}
| cmd/erasure-metadata.go | 1 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.9990906715393066,
0.1433478444814682,
0.00016670791956130415,
0.00027375557692721486,
0.34899652004241943
] |
{
"id": 0,
"code_window": [
"\treturn 0, 0, InvalidRange{}\n",
"}\n",
"\n",
"func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (xmv FileInfo, e error) {\n",
"\tmetaHashes := make([]string, len(metaArr))\n",
"\th := sha256.New()\n",
"\tfor i, meta := range metaArr {\n",
"\t\tif meta.IsValid() && meta.ModTime.Equal(modTime) && meta.DataDir == dataDir {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (FileInfo, error) {\n",
"\t// with less quorum return error.\n",
"\tif quorum < 2 {\n",
"\t\treturn FileInfo{}, errErasureReadQuorum\n",
"\t}\n"
],
"file_path": "cmd/erasure-metadata.go",
"type": "replace",
"edit_start_line_idx": 241
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"net/http"
"path"
"strings"
"sync"
"github.com/minio/madmin-go"
"github.com/minio/minio-go/v7/pkg/tags"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/lifecycle"
"github.com/minio/minio/pkg/bucket/replication"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/hash"
xioutil "github.com/minio/minio/pkg/ioutil"
"github.com/minio/minio/pkg/mimedb"
"github.com/minio/minio/pkg/sync/errgroup"
)
// list all errors which can be ignored in object operations.
var objectOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errUnformattedDisk)
/// Object Operations
func countOnlineDisks(onlineDisks []StorageAPI) (online int) {
for _, onlineDisk := range onlineDisks {
if onlineDisk != nil && onlineDisk.IsOnline() {
online++
}
}
return online
}
// CopyObject - copy object source object to destination object.
// if source object and destination object are same we only
// update metadata.
func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, err error) {
// This call shouldn't be used for anything other than metadata updates or adding self referential versions.
if !srcInfo.metadataOnly {
return oi, NotImplemented{}
}
defer NSUpdated(dstBucket, dstObject)
if !dstOpts.NoLock {
lk := er.NewNSLock(dstBucket, dstObject)
lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
if err != nil {
return oi, err
}
ctx = lkctx.Context()
defer lk.Unlock(lkctx.Cancel)
}
// Read metadata associated with the object from all disks.
storageDisks := er.getDisks()
metaArr, errs := readAllFileInfo(ctx, storageDisks, srcBucket, srcObject, srcOpts.VersionID, true)
// get Quorum for this object
readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount)
if err != nil {
return oi, toObjectErr(err, srcBucket, srcObject)
}
// List all online disks.
onlineDisks, modTime, dataDir := listOnlineDisks(storageDisks, metaArr, errs)
// Pick latest valid metadata.
fi, err := pickValidFileInfo(ctx, metaArr, modTime, dataDir, readQuorum)
if err != nil {
return oi, toObjectErr(err, srcBucket, srcObject)
}
if fi.Deleted {
if srcOpts.VersionID == "" {
return oi, toObjectErr(errFileNotFound, srcBucket, srcObject)
}
return fi.ToObjectInfo(srcBucket, srcObject), toObjectErr(errMethodNotAllowed, srcBucket, srcObject)
}
versionID := srcInfo.VersionID
if srcInfo.versionOnly {
versionID = dstOpts.VersionID
// preserve destination versionId if specified.
if versionID == "" {
versionID = mustGetUUID()
}
modTime = UTCNow()
}
fi.VersionID = versionID // set any new versionID we might have created
fi.ModTime = modTime // set modTime for the new versionID
if !dstOpts.MTime.IsZero() {
modTime = dstOpts.MTime
fi.ModTime = dstOpts.MTime
}
fi.Metadata = srcInfo.UserDefined
srcInfo.UserDefined["etag"] = srcInfo.ETag
// Update `xl.meta` content on each disks.
for index := range metaArr {
if metaArr[index].IsValid() {
metaArr[index].ModTime = modTime
metaArr[index].VersionID = versionID
metaArr[index].Metadata = srcInfo.UserDefined
}
}
// Write unique `xl.meta` for each disk.
if _, err = writeUniqueFileInfo(ctx, onlineDisks, srcBucket, srcObject, metaArr, writeQuorum); err != nil {
return oi, toObjectErr(err, srcBucket, srcObject)
}
return fi.ToObjectInfo(srcBucket, srcObject), nil
}
// GetObjectNInfo - returns object info and an object
// Read(Closer). When err != nil, the returned reader is always nil.
func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
var unlockOnDefer bool
var nsUnlocker = func() {}
defer func() {
if unlockOnDefer {
nsUnlocker()
}
}()
// Acquire lock
if lockType != noLock {
lock := er.NewNSLock(bucket, object)
switch lockType {
case writeLock:
lkctx, err := lock.GetLock(ctx, globalOperationTimeout)
if err != nil {
return nil, err
}
ctx = lkctx.Context()
nsUnlocker = func() { lock.Unlock(lkctx.Cancel) }
case readLock:
lkctx, err := lock.GetRLock(ctx, globalOperationTimeout)
if err != nil {
return nil, err
}
ctx = lkctx.Context()
nsUnlocker = func() { lock.RUnlock(lkctx.Cancel) }
}
unlockOnDefer = true
}
fi, metaArr, onlineDisks, err := er.getObjectFileInfo(ctx, bucket, object, opts, true)
if err != nil {
return nil, toObjectErr(err, bucket, object)
}
objInfo := fi.ToObjectInfo(bucket, object)
if objInfo.DeleteMarker {
if opts.VersionID == "" {
return &GetObjectReader{
ObjInfo: objInfo,
}, toObjectErr(errFileNotFound, bucket, object)
}
// Make sure to return object info to provide extra information.
return &GetObjectReader{
ObjInfo: objInfo,
}, toObjectErr(errMethodNotAllowed, bucket, object)
}
if objInfo.IsRemote() {
gr, err := getTransitionedObjectReader(ctx, bucket, object, rs, h, objInfo, opts)
if err != nil {
return nil, err
}
unlockOnDefer = false
return gr.WithCleanupFuncs(nsUnlocker), nil
}
fn, off, length, err := NewGetObjectReader(rs, objInfo, opts)
if err != nil {
return nil, err
}
unlockOnDefer = false
pr, pw := xioutil.WaitPipe()
go func() {
pw.CloseWithError(er.getObjectWithFileInfo(ctx, bucket, object, off, length, pw, fi, metaArr, onlineDisks))
}()
// Cleanup function to cause the go routine above to exit, in
// case of incomplete read.
pipeCloser := func() {
pr.CloseWithError(nil)
}
return fn(pr, h, opts.CheckPrecondFn, pipeCloser, nsUnlocker)
}
// GetObject - reads an object erasured coded across multiple
// disks. Supports additional parameters like offset and length
// which are synonymous with HTTP Range requests.
//
// startOffset indicates the starting read location of the object.
// length indicates the total length of the object.
func (er erasureObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) {
// Lock the object before reading.
lk := er.NewNSLock(bucket, object)
lkctx, err := lk.GetRLock(ctx, globalOperationTimeout)
if err != nil {
return err
}
ctx = lkctx.Context()
defer lk.RUnlock(lkctx.Cancel)
// Start offset cannot be negative.
if startOffset < 0 {
logger.LogIf(ctx, errUnexpected, logger.Application)
return errUnexpected
}
// Writer cannot be nil.
if writer == nil {
logger.LogIf(ctx, errUnexpected)
return errUnexpected
}
return er.getObject(ctx, bucket, object, startOffset, length, writer, opts)
}
func (er erasureObjects) getObjectWithFileInfo(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, fi FileInfo, metaArr []FileInfo, onlineDisks []StorageAPI) error {
// Reorder online disks based on erasure distribution order.
// Reorder parts metadata based on erasure distribution order.
onlineDisks, metaArr = shuffleDisksAndPartsMetadataByIndex(onlineDisks, metaArr, fi)
// For negative length read everything.
if length < 0 {
length = fi.Size - startOffset
}
// Reply back invalid range if the input offset and length fall out of range.
if startOffset > fi.Size || startOffset+length > fi.Size {
logger.LogIf(ctx, InvalidRange{startOffset, length, fi.Size}, logger.Application)
return InvalidRange{startOffset, length, fi.Size}
}
// Get start part index and offset.
partIndex, partOffset, err := fi.ObjectToPartOffset(ctx, startOffset)
if err != nil {
return InvalidRange{startOffset, length, fi.Size}
}
// Calculate endOffset according to length
endOffset := startOffset
if length > 0 {
endOffset += length - 1
}
// Get last part index to read given length.
lastPartIndex, _, err := fi.ObjectToPartOffset(ctx, endOffset)
if err != nil {
return InvalidRange{startOffset, length, fi.Size}
}
var totalBytesRead int64
erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize)
if err != nil {
return toObjectErr(err, bucket, object)
}
var healOnce sync.Once
	// Once we have obtained a common (i.e. latest) FileInfo, we should stick
	// to a single dataDir to read the content, to avoid reading from some other
	// dataDir that has a stale FileInfo{}; this ensures that we fail appropriately
	// during reads and expect the same dataDir everywhere.
dataDir := fi.DataDir
for ; partIndex <= lastPartIndex; partIndex++ {
if length == totalBytesRead {
break
}
partNumber := fi.Parts[partIndex].Number
// Save the current part name and size.
partSize := fi.Parts[partIndex].Size
partLength := partSize - partOffset
// partLength should be adjusted so that we don't write more data than what was requested.
if partLength > (length - totalBytesRead) {
partLength = length - totalBytesRead
}
tillOffset := erasure.ShardFileOffset(partOffset, partLength, partSize)
// Get the checksums of the current part.
readers := make([]io.ReaderAt, len(onlineDisks))
prefer := make([]bool, len(onlineDisks))
for index, disk := range onlineDisks {
if disk == OfflineDisk {
continue
}
if !metaArr[index].IsValid() {
continue
}
checksumInfo := metaArr[index].Erasure.GetChecksumInfo(partNumber)
partPath := pathJoin(object, dataDir, fmt.Sprintf("part.%d", partNumber))
readers[index] = newBitrotReader(disk, metaArr[index].Data, bucket, partPath, tillOffset,
checksumInfo.Algorithm, checksumInfo.Hash, erasure.ShardSize())
// Prefer local disks
prefer[index] = disk.Hostname() == ""
}
written, err := erasure.Decode(ctx, writer, readers, partOffset, partLength, partSize, prefer)
// Note: we should not be defer'ing the following closeBitrotReaders() call as
// we are inside a for loop i.e if we use defer, we would accumulate a lot of open files by the time
// we return from this function.
closeBitrotReaders(readers)
if err != nil {
// If we have successfully written all the content that was asked
// by the client, but we still see an error - this would mean
// that we have some parts or data blocks missing or corrupted
// - attempt a heal to successfully heal them for future calls.
if written == partLength {
var scan madmin.HealScanMode
if errors.Is(err, errFileNotFound) {
scan = madmin.HealNormalScan
logger.Info("Healing required, attempting to heal missing shards for %s", pathJoin(bucket, object, fi.VersionID))
} else if errors.Is(err, errFileCorrupt) {
scan = madmin.HealDeepScan
logger.Info("Healing required, attempting to heal bitrot for %s", pathJoin(bucket, object, fi.VersionID))
}
if scan == madmin.HealNormalScan || scan == madmin.HealDeepScan {
healOnce.Do(func() {
if _, healing := er.getOnlineDisksWithHealing(); !healing {
go healObject(bucket, object, fi.VersionID, scan)
}
})
}
}
if err != nil {
return toObjectErr(err, bucket, object)
}
}
for i, r := range readers {
if r == nil {
onlineDisks[i] = OfflineDisk
}
}
// Track total bytes read from disk and written to the client.
totalBytesRead += partLength
// partOffset will be valid only for the first part, hence reset it to 0 for
// the remaining parts.
partOffset = 0
} // End of read all parts loop.
// Return success.
return nil
}
// getObject wrapper for erasure GetObject
func (er erasureObjects) getObject(ctx context.Context, bucket, object string, startOffset, length int64, writer io.Writer, opts ObjectOptions) error {
fi, metaArr, onlineDisks, err := er.getObjectFileInfo(ctx, bucket, object, opts, true)
if err != nil {
return toObjectErr(err, bucket, object)
}
if fi.Deleted {
if opts.VersionID == "" {
return toObjectErr(errFileNotFound, bucket, object)
}
// Make sure to return object info to provide extra information.
return toObjectErr(errMethodNotAllowed, bucket, object)
}
return er.getObjectWithFileInfo(ctx, bucket, object, startOffset, length, writer, fi, metaArr, onlineDisks)
}
// GetObjectInfo - reads object metadata and replies back ObjectInfo.
func (er erasureObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (info ObjectInfo, err error) {
if !opts.NoLock {
// Lock the object before reading.
lk := er.NewNSLock(bucket, object)
lkctx, err := lk.GetRLock(ctx, globalOperationTimeout)
if err != nil {
return ObjectInfo{}, err
}
ctx = lkctx.Context()
defer lk.RUnlock(lkctx.Cancel)
}
return er.getObjectInfo(ctx, bucket, object, opts)
}
func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object string, opts ObjectOptions, readData bool) (fi FileInfo, metaArr []FileInfo, onlineDisks []StorageAPI, err error) {
disks := er.getDisks()
// Read metadata associated with the object from all disks.
metaArr, errs := readAllFileInfo(ctx, disks, bucket, object, opts.VersionID, readData)
readQuorum, _, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount)
if err != nil {
return fi, nil, nil, err
}
if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
if reducedErr == errErasureReadQuorum && bucket != minioMetaBucket {
if _, ok := isObjectDangling(metaArr, errs, nil); ok {
reducedErr = errFileNotFound
if opts.VersionID != "" {
reducedErr = errFileVersionNotFound
}
// Remove the dangling object only when:
// - This is a non versioned bucket
// - This is a versioned bucket and the version ID is passed, the reason
// is that we cannot fetch the ID of the latest version when we don't trust xl.meta
if !opts.Versioned || opts.VersionID != "" {
er.deleteObjectVersion(ctx, bucket, object, 1, FileInfo{
Name: object,
VersionID: opts.VersionID,
}, false)
}
}
}
return fi, nil, nil, toObjectErr(reducedErr, bucket, object)
}
// List all online disks.
onlineDisks, modTime, dataDir := listOnlineDisks(disks, metaArr, errs)
// Pick latest valid metadata.
fi, err = pickValidFileInfo(ctx, metaArr, modTime, dataDir, readQuorum)
if err != nil {
return fi, nil, nil, err
}
	// If one of the disks is offline, return right here; no need
	// to attempt a heal on the object.
if countErrs(errs, errDiskNotFound) > 0 {
return fi, metaArr, onlineDisks, nil
}
var missingBlocks int
for i, err := range errs {
if err != nil && errors.Is(err, errFileNotFound) {
missingBlocks++
continue
}
if metaArr[i].IsValid() && metaArr[i].ModTime.Equal(fi.ModTime) && metaArr[i].DataDir == fi.DataDir {
continue
}
missingBlocks++
}
// if missing metadata can be reconstructed, attempt to reconstruct.
if missingBlocks > 0 && missingBlocks < readQuorum {
if _, healing := er.getOnlineDisksWithHealing(); !healing {
go healObject(bucket, object, fi.VersionID, madmin.HealNormalScan)
}
}
return fi, metaArr, onlineDisks, nil
}
// getObjectInfo - wrapper for reading object metadata and constructs ObjectInfo.
func (er erasureObjects) getObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
fi, _, _, err := er.getObjectFileInfo(ctx, bucket, object, opts, false)
if err != nil {
return objInfo, toObjectErr(err, bucket, object)
}
objInfo = fi.ToObjectInfo(bucket, object)
if !fi.VersionPurgeStatus.Empty() {
// Make sure to return object info to provide extra information.
return objInfo, toObjectErr(errMethodNotAllowed, bucket, object)
}
if fi.Deleted {
if opts.VersionID == "" || opts.DeleteMarker {
return objInfo, toObjectErr(errFileNotFound, bucket, object)
}
// Make sure to return object info to provide extra information.
return objInfo, toObjectErr(errMethodNotAllowed, bucket, object)
}
return objInfo, nil
}
func undoRename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, isDir bool, errs []error) {
// Undo rename object on disks where RenameFile succeeded.
// If srcEntry/dstEntry are objects then add a trailing slash to copy
// over all the parts inside the object directory
if isDir {
srcEntry = retainSlash(srcEntry)
dstEntry = retainSlash(dstEntry)
}
g := errgroup.WithNErrs(len(disks))
for index, disk := range disks {
if disk == nil {
continue
}
index := index
g.Go(func() error {
if errs[index] == nil {
_ = disks[index].RenameFile(context.TODO(), dstBucket, dstEntry, srcBucket, srcEntry)
}
return nil
}, index)
}
g.Wait()
}
// Similar to rename but renames data from srcEntry to dstEntry at dataDir
func renameData(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry string, metadata []FileInfo, dstBucket, dstEntry string, writeQuorum int) ([]StorageAPI, error) {
defer NSUpdated(dstBucket, dstEntry)
g := errgroup.WithNErrs(len(disks))
// Rename file on all underlying storage disks.
for index := range disks {
index := index
g.Go(func() error {
if disks[index] == nil {
return errDiskNotFound
}
// Pick one FileInfo for a disk at index.
fi := metadata[index]
// Assign index when index is initialized
if fi.Erasure.Index == 0 {
fi.Erasure.Index = index + 1
}
if fi.IsValid() {
return disks[index].RenameData(ctx, srcBucket, srcEntry, fi, dstBucket, dstEntry)
}
return errFileCorrupt
}, index)
}
// Wait for all renames to finish.
errs := g.Wait()
// We can safely allow RenameFile errors up to len(er.getDisks()) - writeQuorum
// otherwise return failure. Cleanup successful renames.
err := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
return evalDisks(disks, errs), err
}
// rename - common function that renamePart and renameObject use to rename
// the respective underlying storage layer representations.
func rename(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, isDir bool, writeQuorum int, ignoredErr []error) ([]StorageAPI, error) {
if isDir {
dstEntry = retainSlash(dstEntry)
srcEntry = retainSlash(srcEntry)
}
defer NSUpdated(dstBucket, dstEntry)
g := errgroup.WithNErrs(len(disks))
// Rename file on all underlying storage disks.
for index := range disks {
index := index
g.Go(func() error {
if disks[index] == nil {
return errDiskNotFound
}
if err := disks[index].RenameFile(ctx, srcBucket, srcEntry, dstBucket, dstEntry); err != nil {
if !IsErrIgnored(err, ignoredErr...) {
return err
}
}
return nil
}, index)
}
// Wait for all renames to finish.
errs := g.Wait()
// We can safely allow RenameFile errors up to len(er.getDisks()) - writeQuorum
// otherwise return failure. Cleanup successful renames.
err := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
if err == errErasureWriteQuorum {
// Undo all the partial rename operations.
undoRename(disks, srcBucket, srcEntry, dstBucket, dstEntry, isDir, errs)
}
return evalDisks(disks, errs), err
}
// PutObject - creates an object upon reading from the input stream
// until EOF, erasure codes the data across all disk and additionally
// writes `xl.meta` which carries the necessary metadata for future
// object operations.
func (er erasureObjects) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
return er.putObject(ctx, bucket, object, data, opts)
}
// putObject wrapper for erasureObjects PutObject
func (er erasureObjects) putObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
data := r.Reader
// No metadata is set, allocate a new one.
if opts.UserDefined == nil {
opts.UserDefined = make(map[string]string)
}
storageDisks := er.getDisks()
parityDrives := len(storageDisks) / 2
if !opts.MaxParity {
// Get parity and data drive count based on storage class metadata
parityDrives = globalStorageClass.GetParityForSC(opts.UserDefined[xhttp.AmzStorageClass])
if parityDrives <= 0 {
parityDrives = er.defaultParityCount
}
}
dataDrives := len(storageDisks) - parityDrives
// we now know the number of blocks this object needs for data and parity.
// writeQuorum is dataBlocks + 1
writeQuorum := dataDrives
if dataDrives == parityDrives {
writeQuorum++
}
// Validate input data size and it can never be less than zero.
if data.Size() < -1 {
logger.LogIf(ctx, errInvalidArgument, logger.Application)
return ObjectInfo{}, toObjectErr(errInvalidArgument)
}
// Check if an object is present as one of the parent dir.
// -- FIXME. (needs a new kind of lock).
// -- FIXME (this also causes performance issue when disks are down).
if opts.ParentIsObject != nil && opts.ParentIsObject(ctx, bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(errFileParentIsFile, bucket, object)
}
// Initialize parts metadata
partsMetadata := make([]FileInfo, len(storageDisks))
fi := newFileInfo(pathJoin(bucket, object), dataDrives, parityDrives)
fi.VersionID = opts.VersionID
if opts.Versioned && fi.VersionID == "" {
fi.VersionID = mustGetUUID()
}
fi.DataDir = mustGetUUID()
uniqueID := mustGetUUID()
tempObj := uniqueID
// Initialize erasure metadata.
for index := range partsMetadata {
partsMetadata[index] = fi
}
// Order disks according to erasure distribution
var onlineDisks []StorageAPI
onlineDisks, partsMetadata = shuffleDisksAndPartsMetadata(storageDisks, partsMetadata, fi)
erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize)
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
	// Fetch a buffer for I/O; reuse one from the pool if possible, otherwise allocate a new one.
var buffer []byte
switch size := data.Size(); {
case size == 0:
		buffer = make([]byte, 1) // Allocate at least a byte to reach EOF
case size == -1:
if size := data.ActualSize(); size > 0 && size < fi.Erasure.BlockSize {
buffer = make([]byte, data.ActualSize()+256, data.ActualSize()*2+512)
} else {
buffer = er.bp.Get()
defer er.bp.Put(buffer)
}
case size >= fi.Erasure.BlockSize:
buffer = er.bp.Get()
defer er.bp.Put(buffer)
case size < fi.Erasure.BlockSize:
		// No need to allocate a full blockSizeV1 buffer if the incoming data is smaller.
buffer = make([]byte, size, 2*size+int64(fi.Erasure.ParityBlocks+fi.Erasure.DataBlocks-1))
}
if len(buffer) > int(fi.Erasure.BlockSize) {
buffer = buffer[:fi.Erasure.BlockSize]
}
partName := "part.1"
tempErasureObj := pathJoin(uniqueID, fi.DataDir, partName)
// Delete temporary object in the event of failure.
// If PutObject succeeded there would be no temporary
// object to delete.
var online int
defer func() {
if online != len(onlineDisks) {
er.deleteObject(context.Background(), minioMetaTmpBucket, tempObj, writeQuorum)
}
}()
shardFileSize := erasure.ShardFileSize(data.Size())
writers := make([]io.Writer, len(onlineDisks))
var inlineBuffers []*bytes.Buffer
if shardFileSize >= 0 {
if !opts.Versioned && shardFileSize < smallFileThreshold {
inlineBuffers = make([]*bytes.Buffer, len(onlineDisks))
} else if shardFileSize < smallFileThreshold/8 {
inlineBuffers = make([]*bytes.Buffer, len(onlineDisks))
}
}
for i, disk := range onlineDisks {
if disk == nil {
continue
}
if len(inlineBuffers) > 0 {
inlineBuffers[i] = bytes.NewBuffer(make([]byte, 0, shardFileSize))
writers[i] = newStreamingBitrotWriterBuffer(inlineBuffers[i], DefaultBitrotAlgorithm, erasure.ShardSize())
continue
}
writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tempErasureObj, shardFileSize, DefaultBitrotAlgorithm, erasure.ShardSize())
}
n, erasureErr := erasure.Encode(ctx, data, writers, buffer, writeQuorum)
closeBitrotWriters(writers)
if erasureErr != nil {
return ObjectInfo{}, toObjectErr(erasureErr, minioMetaTmpBucket, tempErasureObj)
}
// Should return IncompleteBody{} error when reader has fewer bytes
// than specified in request header.
if n < data.Size() {
return ObjectInfo{}, IncompleteBody{Bucket: bucket, Object: object}
}
if !opts.NoLock {
lk := er.NewNSLock(bucket, object)
lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
if err != nil {
return ObjectInfo{}, err
}
ctx = lkctx.Context()
defer lk.Unlock(lkctx.Cancel)
}
for i, w := range writers {
if w == nil {
onlineDisks[i] = nil
continue
}
if len(inlineBuffers) > 0 && inlineBuffers[i] != nil {
partsMetadata[i].Data = inlineBuffers[i].Bytes()
} else {
partsMetadata[i].Data = nil
}
partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize())
partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
PartNumber: 1,
Algorithm: DefaultBitrotAlgorithm,
Hash: bitrotWriterSum(w),
})
}
if opts.UserDefined["etag"] == "" {
opts.UserDefined["etag"] = r.MD5CurrentHexString()
}
// Guess content-type from the extension if possible.
if opts.UserDefined["content-type"] == "" {
opts.UserDefined["content-type"] = mimedb.TypeByExtension(path.Ext(object))
}
modTime := opts.MTime
if opts.MTime.IsZero() {
modTime = UTCNow()
}
// Fill all the necessary metadata.
// Update `xl.meta` content on each disks.
for index := range partsMetadata {
partsMetadata[index].Metadata = opts.UserDefined
partsMetadata[index].Size = n
partsMetadata[index].ModTime = modTime
}
// Rename the successfully written temporary object to final location.
if onlineDisks, err = renameData(ctx, onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, bucket, object, writeQuorum); err != nil {
logger.LogIf(ctx, err)
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
	// If a disk was initially offline or went offline
	// during this upload, send it to the MRF list.
for i := 0; i < len(onlineDisks); i++ {
if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
continue
}
er.addPartial(bucket, object, fi.VersionID)
break
}
for i := 0; i < len(onlineDisks); i++ {
if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
// Object info is the same in all disks, so we can pick
// the first meta from online disk
fi = partsMetadata[i]
break
}
}
online = countOnlineDisks(onlineDisks)
return fi.ToObjectInfo(bucket, object), nil
}
func (er erasureObjects) deleteObjectVersion(ctx context.Context, bucket, object string, writeQuorum int, fi FileInfo, forceDelMarker bool) error {
disks := er.getDisks()
g := errgroup.WithNErrs(len(disks))
for index := range disks {
index := index
g.Go(func() error {
if disks[index] == nil {
return errDiskNotFound
}
return disks[index].DeleteVersion(ctx, bucket, object, fi, forceDelMarker)
}, index)
}
// return errors if any during deletion
return reduceWriteQuorumErrs(ctx, g.Wait(), objectOpIgnoredErrs, writeQuorum)
}
// deleteObject - wrapper for delete object, deletes an object from
// all the disks in parallel, including `xl.meta` associated with the
// object.
func (er erasureObjects) deleteObject(ctx context.Context, bucket, object string, writeQuorum int) error {
var err error
disks := er.getDisks()
tmpObj := mustGetUUID()
if bucket == minioMetaTmpBucket {
tmpObj = object
} else {
		// Rename the current object while requiring write quorum, but also treat
		// an object not found on a given disk as a success, since that already
		// confirms the object doesn't have a part on that disk (already removed).
disks, err = rename(ctx, disks, bucket, object, minioMetaTmpBucket, tmpObj, true, writeQuorum,
[]error{errFileNotFound})
if err != nil {
return toObjectErr(err, bucket, object)
}
}
g := errgroup.WithNErrs(len(disks))
for index := range disks {
index := index
g.Go(func() error {
if disks[index] == nil {
return errDiskNotFound
}
return disks[index].Delete(ctx, minioMetaTmpBucket, tmpObj, true)
}, index)
}
// return errors if any during deletion
return reduceWriteQuorumErrs(ctx, g.Wait(), objectOpIgnoredErrs, writeQuorum)
}
// DeleteObjects deletes objects/versions in bulk, this function will still automatically split objects list
// into smaller bulks if some object names are found to be duplicated in the delete list, splitting
// into smaller bulks will avoid holding twice the write lock of the duplicated object names.
func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) {
errs := make([]error, len(objects))
dobjects := make([]DeletedObject, len(objects))
writeQuorums := make([]int, len(objects))
storageDisks := er.getDisks()
for i := range objects {
		// Assume (N/2 + 1) quorums for all objects.
		// This is a theoretical assumption such that
		// for deletes we do not need to honor storage
		// class for objects which have reduced quorum;
		// storage class only needs to be honored for
		// Read() requests alone, which we already do.
writeQuorums[i] = getWriteQuorum(len(storageDisks))
}
versions := make([]FileInfo, len(objects))
for i := range objects {
if objects[i].VersionID == "" {
modTime := opts.MTime
if opts.MTime.IsZero() {
modTime = UTCNow()
}
uuid := opts.VersionID
if uuid == "" {
uuid = mustGetUUID()
}
if opts.Versioned || opts.VersionSuspended {
versions[i] = FileInfo{
Name: objects[i].ObjectName,
ModTime: modTime,
Deleted: true, // delete marker
DeleteMarkerReplicationStatus: objects[i].DeleteMarkerReplicationStatus,
VersionPurgeStatus: objects[i].VersionPurgeStatus,
}
if opts.Versioned {
versions[i].VersionID = uuid
}
continue
}
}
versions[i] = FileInfo{
Name: objects[i].ObjectName,
VersionID: objects[i].VersionID,
DeleteMarkerReplicationStatus: objects[i].DeleteMarkerReplicationStatus,
VersionPurgeStatus: objects[i].VersionPurgeStatus,
}
}
// Initialize list of errors.
var delObjErrs = make([][]error, len(storageDisks))
var wg sync.WaitGroup
// Remove versions in bulk for each disk
for index, disk := range storageDisks {
wg.Add(1)
go func(index int, disk StorageAPI) {
defer wg.Done()
if disk == nil {
delObjErrs[index] = make([]error, len(versions))
for i := range versions {
delObjErrs[index][i] = errDiskNotFound
}
return
}
delObjErrs[index] = disk.DeleteVersions(ctx, bucket, versions)
}(index, disk)
}
wg.Wait()
// Reduce errors for each object
for objIndex := range objects {
diskErrs := make([]error, len(storageDisks))
// Iterate over disks to fetch the error
// of deleting of the current object
for i := range delObjErrs {
// delObjErrs[i] is not nil when disks[i] is also not nil
if delObjErrs[i] != nil {
diskErrs[i] = delObjErrs[i][objIndex]
}
}
err := reduceWriteQuorumErrs(ctx, diskErrs, objectOpIgnoredErrs, writeQuorums[objIndex])
if objects[objIndex].VersionID != "" {
errs[objIndex] = toObjectErr(err, bucket, objects[objIndex].ObjectName, objects[objIndex].VersionID)
} else {
errs[objIndex] = toObjectErr(err, bucket, objects[objIndex].ObjectName)
}
if errs[objIndex] == nil {
NSUpdated(bucket, objects[objIndex].ObjectName)
}
if versions[objIndex].Deleted {
dobjects[objIndex] = DeletedObject{
DeleteMarker: versions[objIndex].Deleted,
DeleteMarkerVersionID: versions[objIndex].VersionID,
DeleteMarkerMTime: DeleteMarkerMTime{versions[objIndex].ModTime},
DeleteMarkerReplicationStatus: versions[objIndex].DeleteMarkerReplicationStatus,
ObjectName: versions[objIndex].Name,
VersionPurgeStatus: versions[objIndex].VersionPurgeStatus,
}
} else {
dobjects[objIndex] = DeletedObject{
ObjectName: versions[objIndex].Name,
VersionID: versions[objIndex].VersionID,
VersionPurgeStatus: versions[objIndex].VersionPurgeStatus,
DeleteMarkerReplicationStatus: versions[objIndex].DeleteMarkerReplicationStatus,
}
}
}
// Check failed deletes across multiple objects
for _, version := range versions {
// Check if there is any offline disk and add it to the MRF list
for _, disk := range storageDisks {
if disk != nil && disk.IsOnline() {
// Skip attempted heal on online disks.
continue
}
			// For all other direct versionId references, we should
			// ensure no dangling file is left over.
er.addPartial(bucket, version.Name, version.VersionID)
break
}
}
return dobjects, errs
}
// DeleteObject - deletes an object; this call doesn't necessarily reply
// any error, as it is not necessary for the handler to reply back a
// response to the client request.
func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
versionFound := true
objInfo = ObjectInfo{VersionID: opts.VersionID} // version id needed in Delete API response.
goi, gerr := er.GetObjectInfo(ctx, bucket, object, opts)
if gerr != nil && goi.Name == "" {
switch gerr.(type) {
case InsufficientReadQuorum:
return objInfo, InsufficientWriteQuorum{}
}
// For delete marker replication, versionID being replicated will not exist on disk
if opts.DeleteMarker {
versionFound = false
} else {
return objInfo, gerr
}
}
defer NSUpdated(bucket, object)
// Acquire a write lock before deleting the object.
lk := er.NewNSLock(bucket, object)
lkctx, err := lk.GetLock(ctx, globalDeleteOperationTimeout)
if err != nil {
return ObjectInfo{}, err
}
ctx = lkctx.Context()
defer lk.Unlock(lkctx.Cancel)
storageDisks := er.getDisks()
writeQuorum := len(storageDisks)/2 + 1
var markDelete bool
// Determine whether to mark object deleted for replication
if goi.VersionID != "" {
markDelete = true
}
// Default deleteMarker to true if object is under versioning
deleteMarker := opts.Versioned
if opts.VersionID != "" {
// case where replica version needs to be deleted on target cluster
if versionFound && opts.DeleteMarkerReplicationStatus == replication.Replica.String() {
markDelete = false
}
if opts.VersionPurgeStatus.Empty() && opts.DeleteMarkerReplicationStatus == "" {
markDelete = false
}
if opts.VersionPurgeStatus == Complete {
markDelete = false
}
// determine if the version represents an object delete
// deleteMarker = true
if versionFound && !goi.DeleteMarker { // implies a versioned delete of object
deleteMarker = false
}
}
modTime := opts.MTime
if opts.MTime.IsZero() {
modTime = UTCNow()
}
if markDelete {
if opts.Versioned || opts.VersionSuspended {
fi := FileInfo{
Name: object,
Deleted: deleteMarker,
MarkDeleted: markDelete,
ModTime: modTime,
DeleteMarkerReplicationStatus: opts.DeleteMarkerReplicationStatus,
VersionPurgeStatus: opts.VersionPurgeStatus,
TransitionStatus: opts.Transition.Status,
ExpireRestored: opts.Transition.ExpireRestored,
}
if opts.Versioned {
fi.VersionID = mustGetUUID()
if opts.VersionID != "" {
fi.VersionID = opts.VersionID
}
}
// versioning suspended means we add `null`
// version as delete marker
// Add delete marker, since we don't have any version specified explicitly.
// Or if a particular version id needs to be replicated.
if err = er.deleteObjectVersion(ctx, bucket, object, writeQuorum, fi, opts.DeleteMarker); err != nil {
return objInfo, toObjectErr(err, bucket, object)
}
return fi.ToObjectInfo(bucket, object), nil
}
}
// Delete the object version on all disks.
if err = er.deleteObjectVersion(ctx, bucket, object, writeQuorum, FileInfo{
Name: object,
VersionID: opts.VersionID,
MarkDeleted: markDelete,
Deleted: deleteMarker,
ModTime: modTime,
DeleteMarkerReplicationStatus: opts.DeleteMarkerReplicationStatus,
VersionPurgeStatus: opts.VersionPurgeStatus,
TransitionStatus: opts.Transition.Status,
ExpireRestored: opts.Transition.ExpireRestored,
}, opts.DeleteMarker); err != nil {
return objInfo, toObjectErr(err, bucket, object)
}
for _, disk := range storageDisks {
if disk != nil && disk.IsOnline() {
continue
}
er.addPartial(bucket, object, opts.VersionID)
break
}
return ObjectInfo{
Bucket: bucket,
Name: object,
VersionID: opts.VersionID,
VersionPurgeStatus: opts.VersionPurgeStatus,
ReplicationStatus: replication.StatusType(opts.DeleteMarkerReplicationStatus),
}, nil
}
// Send the successful but partial upload/delete, however ignore
// if the channel is blocked by other items.
func (er erasureObjects) addPartial(bucket, object, versionID string) {
select {
case er.mrfOpCh <- partialOperation{bucket: bucket, object: object, versionID: versionID}:
default:
}
}
func (er erasureObjects) PutObjectMetadata(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
// Lock the object before updating tags.
lk := er.NewNSLock(bucket, object)
lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
if err != nil {
return ObjectInfo{}, err
}
ctx = lkctx.Context()
defer lk.Unlock(lkctx.Cancel)
disks := er.getDisks()
// Read metadata associated with the object from all disks.
metaArr, errs := readAllFileInfo(ctx, disks, bucket, object, opts.VersionID, false)
readQuorum, _, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount)
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
// List all online disks.
_, modTime, dataDir := listOnlineDisks(disks, metaArr, errs)
// Pick latest valid metadata.
fi, err := pickValidFileInfo(ctx, metaArr, modTime, dataDir, readQuorum)
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
if fi.Deleted {
if opts.VersionID == "" {
return ObjectInfo{}, toObjectErr(errFileNotFound, bucket, object)
}
return ObjectInfo{}, toObjectErr(errMethodNotAllowed, bucket, object)
}
for k, v := range opts.UserDefined {
fi.Metadata[k] = v
}
fi.ModTime = opts.MTime
fi.VersionID = opts.VersionID
if err = er.updateObjectMeta(ctx, bucket, object, fi); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
objInfo := fi.ToObjectInfo(bucket, object)
return objInfo, nil
}
// PutObjectTags - replace or add tags to an existing object
func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) (ObjectInfo, error) {
// Lock the object before updating tags.
lk := er.NewNSLock(bucket, object)
lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
if err != nil {
return ObjectInfo{}, err
}
ctx = lkctx.Context()
defer lk.Unlock(lkctx.Cancel)
disks := er.getDisks()
// Read metadata associated with the object from all disks.
metaArr, errs := readAllFileInfo(ctx, disks, bucket, object, opts.VersionID, false)
readQuorum, _, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount)
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
// List all online disks.
_, modTime, dataDir := listOnlineDisks(disks, metaArr, errs)
// Pick latest valid metadata.
fi, err := pickValidFileInfo(ctx, metaArr, modTime, dataDir, readQuorum)
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
if fi.Deleted {
if opts.VersionID == "" {
return ObjectInfo{}, toObjectErr(errFileNotFound, bucket, object)
}
return ObjectInfo{}, toObjectErr(errMethodNotAllowed, bucket, object)
}
fi.Metadata[xhttp.AmzObjectTagging] = tags
for k, v := range opts.UserDefined {
fi.Metadata[k] = v
}
if err = er.updateObjectMeta(ctx, bucket, object, fi); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
return fi.ToObjectInfo(bucket, object), nil
}
// updateObjectMeta will update the metadata of a file.
func (er erasureObjects) updateObjectMeta(ctx context.Context, bucket, object string, fi FileInfo) error {
if len(fi.Metadata) == 0 {
return nil
}
disks := er.getDisks()
g := errgroup.WithNErrs(len(disks))
// Start writing `xl.meta` to all disks in parallel.
for index := range disks {
index := index
g.Go(func() error {
if disks[index] == nil {
return errDiskNotFound
}
return disks[index].UpdateMetadata(ctx, bucket, object, fi)
}, index)
}
// Wait for all the routines.
mErrs := g.Wait()
return reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, getWriteQuorum(len(disks)))
}
// DeleteObjectTags - delete object tags from an existing object
func (er erasureObjects) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
return er.PutObjectTags(ctx, bucket, object, "", opts)
}
// GetObjectTags - get object tags from an existing object
func (er erasureObjects) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) {
// GetObjectInfo will return tag value as well
oi, err := er.GetObjectInfo(ctx, bucket, object, opts)
if err != nil {
return nil, err
}
return tags.ParseObjectTags(oi.UserTags)
}
// TransitionObject - transition object content to target tier.
func (er erasureObjects) TransitionObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
tgtClient, err := globalTierConfigMgr.getDriver(opts.Transition.Tier)
if err != nil {
return err
}
defer NSUpdated(bucket, object)
// Acquire write lock before starting to transition the object.
lk := er.NewNSLock(bucket, object)
lkctx, err := lk.GetLock(ctx, globalDeleteOperationTimeout)
if err != nil {
return err
}
ctx = lkctx.Context()
defer lk.Unlock(lkctx.Cancel)
fi, metaArr, onlineDisks, err := er.getObjectFileInfo(ctx, bucket, object, opts, true)
if err != nil {
return toObjectErr(err, bucket, object)
}
if fi.Deleted {
if opts.VersionID == "" {
return toObjectErr(errFileNotFound, bucket, object)
}
// Make sure to return object info to provide extra information.
return toObjectErr(errMethodNotAllowed, bucket, object)
}
// verify that the object queued for transition is identical to that on disk.
if !opts.MTime.Equal(fi.ModTime) || !strings.EqualFold(opts.Transition.ETag, extractETag(fi.Metadata)) {
return toObjectErr(errFileNotFound, bucket, object)
}
// if object already transitioned, return
if fi.TransitionStatus == lifecycle.TransitionComplete {
return nil
}
if fi.XLV1 {
if _, err = er.HealObject(ctx, bucket, object, "", madmin.HealOpts{NoLock: true}); err != nil {
return err
}
// Fetch FileInfo again. HealObject migrates object the latest
// format. Among other things this changes fi.DataDir and
// possibly fi.Data (if data is inlined).
fi, metaArr, onlineDisks, err = er.getObjectFileInfo(ctx, bucket, object, opts, true)
if err != nil {
return toObjectErr(err, bucket, object)
}
}
destObj, err := genTransitionObjName()
if err != nil {
return err
}
pr, pw := xioutil.WaitPipe()
go func() {
err := er.getObjectWithFileInfo(ctx, bucket, object, 0, fi.Size, pw, fi, metaArr, onlineDisks)
pw.CloseWithError(err)
}()
err = tgtClient.Put(ctx, destObj, pr, fi.Size)
pr.CloseWithError(err)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to transition %s/%s(%s) to %s tier: %w", bucket, object, opts.VersionID, opts.Transition.Tier, err))
return err
}
fi.TransitionStatus = lifecycle.TransitionComplete
fi.TransitionedObjName = destObj
fi.TransitionTier = opts.Transition.Tier
eventName := event.ObjectTransitionComplete
storageDisks := er.getDisks()
writeQuorum := len(storageDisks)/2 + 1
if err = er.deleteObjectVersion(ctx, bucket, object, writeQuorum, fi, false); err != nil {
eventName = event.ObjectTransitionFailed
}
for _, disk := range storageDisks {
if disk != nil && disk.IsOnline() {
continue
}
er.addPartial(bucket, object, opts.VersionID)
break
}
// Notify object deleted event.
sendEvent(eventArgs{
EventName: eventName,
BucketName: bucket,
Object: ObjectInfo{
Name: object,
VersionID: opts.VersionID,
},
Host: "Internal: [ILM-Transition]",
})
return err
}
// RestoreTransitionedObject - restore transitioned object content locally on this cluster.
// This is similar to PostObjectRestore from AWS GLACIER
// storage class. When PostObjectRestore API is called, a temporary copy of the object
// is restored locally to the bucket on source cluster until the restore expiry date.
// The copy that was transitioned continues to reside in the transitioned tier.
func (er erasureObjects) RestoreTransitionedObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
return er.restoreTransitionedObject(ctx, bucket, object, opts)
}
// update restore status header in the metadata
func (er erasureObjects) updateRestoreMetadata(ctx context.Context, bucket, object string, objInfo ObjectInfo, opts ObjectOptions, rerr error) error {
oi := objInfo.Clone()
oi.metadataOnly = true // Perform only metadata updates.
if rerr == nil {
oi.UserDefined[xhttp.AmzRestore] = completedRestoreObj(opts.Transition.RestoreExpiry).String()
} else { // allow retry in the case of failure to restore
delete(oi.UserDefined, xhttp.AmzRestore)
}
if _, err := er.CopyObject(ctx, bucket, object, bucket, object, oi, ObjectOptions{
VersionID: oi.VersionID,
}, ObjectOptions{
VersionID: oi.VersionID,
}); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update transition restore metadata for %s/%s(%s): %s", bucket, object, oi.VersionID, err))
return err
}
return nil
}
// restoreTransitionedObject for multipart object chunks the file stream from remote tier into the same number of parts
// as in the xl.meta for this version and rehydrates the part.n into the fi.DataDir for this version as in the xl.meta
func (er erasureObjects) restoreTransitionedObject(ctx context.Context, bucket string, object string, opts ObjectOptions) error {
setRestoreHeaderFn := func(oi ObjectInfo, rerr error) error {
er.updateRestoreMetadata(ctx, bucket, object, oi, opts, rerr)
return rerr
}
var oi ObjectInfo
// get the file info on disk for transitioned object
actualfi, _, _, err := er.getObjectFileInfo(ctx, bucket, object, opts, false)
if err != nil {
return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object))
}
oi = actualfi.ToObjectInfo(bucket, object)
ropts := putRestoreOpts(bucket, object, opts.Transition.RestoreRequest, oi)
if len(oi.Parts) == 1 {
var rs *HTTPRangeSpec
gr, err := getTransitionedObjectReader(ctx, bucket, object, rs, http.Header{}, oi, opts)
if err != nil {
return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object))
}
defer gr.Close()
hashReader, err := hash.NewReader(gr, gr.ObjInfo.Size, "", "", gr.ObjInfo.Size)
if err != nil {
return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object))
}
pReader := NewPutObjReader(hashReader)
ropts.UserDefined[xhttp.AmzRestore] = completedRestoreObj(opts.Transition.RestoreExpiry).String()
_, err = er.PutObject(ctx, bucket, object, pReader, ropts)
return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object))
}
uploadID, err := er.NewMultipartUpload(ctx, bucket, object, ropts)
if err != nil {
return setRestoreHeaderFn(oi, err)
}
var uploadedParts []CompletePart
var rs *HTTPRangeSpec
// get reader from the warm backend - note that even in the case of encrypted objects, this stream is still encrypted.
gr, err := getTransitionedObjectReader(ctx, bucket, object, rs, http.Header{}, oi, opts)
if err != nil {
return setRestoreHeaderFn(oi, err)
}
defer gr.Close()
// rehydrate the parts back on disk as per the original xl.meta prior to transition
for _, partInfo := range oi.Parts {
hr, err := hash.NewReader(gr, partInfo.Size, "", "", partInfo.Size)
if err != nil {
return setRestoreHeaderFn(oi, err)
}
pInfo, err := er.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, NewPutObjReader(hr), ObjectOptions{})
if err != nil {
return setRestoreHeaderFn(oi, err)
}
if pInfo.Size != partInfo.Size {
return setRestoreHeaderFn(oi, InvalidObjectState{Bucket: bucket, Object: object})
}
uploadedParts = append(uploadedParts, CompletePart{
PartNumber: pInfo.PartNumber,
ETag: pInfo.ETag,
})
}
_, err = er.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{
MTime: oi.ModTime})
return setRestoreHeaderFn(oi, err)
}
| cmd/erasure-object.go | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.9939250349998474,
0.05497758463025093,
0.00016500406491104513,
0.00020312928245402873,
0.1980738788843155
] |
{
"id": 0,
"code_window": [
"\treturn 0, 0, InvalidRange{}\n",
"}\n",
"\n",
"func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (xmv FileInfo, e error) {\n",
"\tmetaHashes := make([]string, len(metaArr))\n",
"\th := sha256.New()\n",
"\tfor i, meta := range metaArr {\n",
"\t\tif meta.IsValid() && meta.ModTime.Equal(modTime) && meta.DataDir == dataDir {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (FileInfo, error) {\n",
"\t// with less quorum return error.\n",
"\tif quorum < 2 {\n",
"\t\treturn FileInfo{}, errErasureReadQuorum\n",
"\t}\n"
],
"file_path": "cmd/erasure-metadata.go",
"type": "replace",
"edit_start_line_idx": 241
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package mimedb
import "testing"
func TestMimeLookup(t *testing.T) {
// Test mimeLookup.
contentType := DB["txt"].ContentType
if contentType != "text/plain" {
t.Fatalf("Invalid content type are found expected \"application/x-msdownload\", got %s", contentType)
}
compressible := DB["txt"].Compressible
if compressible {
t.Fatalf("Invalid content type are found expected \"false\", got %t", compressible)
}
}
func TestTypeByExtension(t *testing.T) {
// Test TypeByExtension.
contentType := TypeByExtension(".txt")
if contentType != "text/plain" {
t.Fatalf("Invalid content type are found expected \"text/plain\", got %s", contentType)
}
// Test non-existent type resolution
contentType = TypeByExtension(".abc")
if contentType != "application/octet-stream" {
t.Fatalf("Invalid content type are found expected \"application/octet-stream\", got %s", contentType)
}
}
| pkg/mimedb/db_test.go | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.00017713283887133002,
0.00017063623818103224,
0.00016502043581567705,
0.00016917700122576207,
0.000004724313384940615
] |
{
"id": 0,
"code_window": [
"\treturn 0, 0, InvalidRange{}\n",
"}\n",
"\n",
"func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (xmv FileInfo, e error) {\n",
"\tmetaHashes := make([]string, len(metaArr))\n",
"\th := sha256.New()\n",
"\tfor i, meta := range metaArr {\n",
"\t\tif meta.IsValid() && meta.ModTime.Equal(modTime) && meta.DataDir == dataDir {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (FileInfo, error) {\n",
"\t// with less quorum return error.\n",
"\tif quorum < 2 {\n",
"\t\treturn FileInfo{}, errErasureReadQuorum\n",
"\t}\n"
],
"file_path": "cmd/erasure-metadata.go",
"type": "replace",
"edit_start_line_idx": 241
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package iampolicy
import (
"encoding/json"
"fmt"
"sort"
"github.com/minio/minio-go/v7/pkg/set"
)
// ActionSet - set of actions.
type ActionSet map[Action]struct{}
// Clone clones ActionSet structure
func (actionSet ActionSet) Clone() ActionSet {
return NewActionSet(actionSet.ToSlice()...)
}
// Add - add action to the set.
func (actionSet ActionSet) Add(action Action) {
actionSet[action] = struct{}{}
}
// IsEmpty - returns if the current action set is empty
func (actionSet ActionSet) IsEmpty() bool {
return len(actionSet) == 0
}
// Match - matches object name with anyone of action pattern in action set.
func (actionSet ActionSet) Match(action Action) bool {
for r := range actionSet {
if r.Match(action) {
return true
}
// This is a special case where GetObjectVersion
// means GetObject is enabled implicitly.
switch r {
case GetObjectVersionAction:
if action == GetObjectAction {
return true
}
}
}
return false
}
// Equals - checks whether given action set is equal to current action set or not.
func (actionSet ActionSet) Equals(sactionSet ActionSet) bool {
// If length of set is not equal to length of given set, the
// set is not equal to given set.
if len(actionSet) != len(sactionSet) {
return false
}
// As both sets are equal in length, check each elements are equal.
for k := range actionSet {
if _, ok := sactionSet[k]; !ok {
return false
}
}
return true
}
// Intersection - returns actions available in both ActionSet.
func (actionSet ActionSet) Intersection(sset ActionSet) ActionSet {
nset := NewActionSet()
for k := range actionSet {
if _, ok := sset[k]; ok {
nset.Add(k)
}
}
return nset
}
// MarshalJSON - encodes ActionSet to JSON data.
func (actionSet ActionSet) MarshalJSON() ([]byte, error) {
if len(actionSet) == 0 {
return nil, Errorf("empty action set")
}
return json.Marshal(actionSet.ToSlice())
}
func (actionSet ActionSet) String() string {
actions := []string{}
for action := range actionSet {
actions = append(actions, string(action))
}
sort.Strings(actions)
return fmt.Sprintf("%v", actions)
}
// ToSlice - returns slice of actions from the action set.
func (actionSet ActionSet) ToSlice() []Action {
actions := []Action{}
for action := range actionSet {
actions = append(actions, action)
}
return actions
}
// ToAdminSlice - returns slice of admin actions from the action set.
func (actionSet ActionSet) ToAdminSlice() []AdminAction {
actions := []AdminAction{}
for action := range actionSet {
actions = append(actions, AdminAction(action))
}
return actions
}
// UnmarshalJSON - decodes JSON data to ActionSet.
func (actionSet *ActionSet) UnmarshalJSON(data []byte) error {
var sset set.StringSet
if err := json.Unmarshal(data, &sset); err != nil {
return err
}
if len(sset) == 0 {
return Errorf("empty action set")
}
*actionSet = make(ActionSet)
for _, s := range sset.ToSlice() {
actionSet.Add(Action(s))
}
return nil
}
// ValidateAdmin checks if all actions are valid Admin actions
func (actionSet ActionSet) ValidateAdmin() error {
for _, action := range actionSet.ToAdminSlice() {
if !action.IsValid() {
return Errorf("unsupported admin action '%v'", action)
}
}
return nil
}
// Validate checks if all actions are valid
func (actionSet ActionSet) Validate() error {
for _, action := range actionSet.ToSlice() {
if !action.IsValid() {
return Errorf("unsupported action '%v'", action)
}
}
return nil
}
// NewActionSet - creates new action set.
func NewActionSet(actions ...Action) ActionSet {
actionSet := make(ActionSet)
for _, action := range actions {
actionSet.Add(action)
}
return actionSet
}
| pkg/iam/policy/actionset.go | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.0021339883096516132,
0.0003012221131939441,
0.00016523960221093148,
0.00016961473738774657,
0.0004378753947094083
] |
{
"id": 1,
"code_window": [
"\t}\n",
"\n",
"\tfor i, hash := range metaHashes {\n",
"\t\tif hash == maxHash {\n",
"\t\t\treturn metaArr[i], nil\n",
"\t\t}\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif metaArr[i].IsValid() {\n",
"\t\t\t\treturn metaArr[i], nil\n",
"\t\t\t}\n"
],
"file_path": "cmd/erasure-metadata.go",
"type": "replace",
"edit_start_line_idx": 280
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"strconv"
"testing"
"time"
humanize "github.com/dustin/go-humanize"
)
const ActualSize = 1000
// Test FileInfo.AddObjectPart()
func TestAddObjectPart(t *testing.T) {
testCases := []struct {
partNum int
expectedIndex int
}{
{1, 0},
{2, 1},
{4, 2},
{5, 3},
{7, 4},
// Insert part.
{3, 2},
// Replace existing part.
{4, 3},
// Missing part.
{6, -1},
}
// Setup.
fi := newFileInfo("test-object", 8, 8)
fi.Erasure.Index = 1
if !fi.IsValid() {
t.Fatalf("unable to get xl meta")
}
// Test them.
for _, testCase := range testCases {
if testCase.expectedIndex > -1 {
partNumString := strconv.Itoa(testCase.partNum)
fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize)
}
if index := objectPartIndex(fi.Parts, testCase.partNum); index != testCase.expectedIndex {
t.Fatalf("%+v: expected = %d, got: %d", testCase, testCase.expectedIndex, index)
}
}
}
// Test objectPartIndex(). Generates sample FileInfo data and asserts
// the output of objectPartIndex() against the expected value.
func TestObjectPartIndex(t *testing.T) {
testCases := []struct {
partNum int
expectedIndex int
}{
{2, 1},
{1, 0},
{5, 3},
{4, 2},
{7, 4},
}
// Setup.
fi := newFileInfo("test-object", 8, 8)
fi.Erasure.Index = 1
if !fi.IsValid() {
t.Fatalf("unable to get xl meta")
}
// Add some parts for testing.
for _, testCase := range testCases {
partNumString := strconv.Itoa(testCase.partNum)
fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize)
}
// Add failure test case.
testCases = append(testCases, struct {
partNum int
expectedIndex int
}{6, -1})
// Test them.
for _, testCase := range testCases {
if index := objectPartIndex(fi.Parts, testCase.partNum); index != testCase.expectedIndex {
t.Fatalf("%+v: expected = %d, got: %d", testCase, testCase.expectedIndex, index)
}
}
}
// Test FileInfo.ObjectToPartOffset().
func TestObjectToPartOffset(t *testing.T) {
// Setup.
fi := newFileInfo("test-object", 8, 8)
fi.Erasure.Index = 1
if !fi.IsValid() {
t.Fatalf("unable to get xl meta")
}
// Add some parts for testing.
// Total size of all parts is 5,242,899 bytes.
for _, partNum := range []int{1, 2, 4, 5, 7} {
partNumString := strconv.Itoa(partNum)
fi.AddObjectPart(partNum, "etag."+partNumString, int64(partNum+humanize.MiByte), ActualSize)
}
testCases := []struct {
offset int64
expectedIndex int
expectedOffset int64
expectedErr error
}{
{0, 0, 0, nil},
{1 * humanize.MiByte, 0, 1 * humanize.MiByte, nil},
{1 + humanize.MiByte, 1, 0, nil},
{2 + humanize.MiByte, 1, 1, nil},
		// It's valid for a zero-sized object.
{-1, 0, -1, nil},
		// Max offset is always (size - 1).
{(1 + 2 + 4 + 5 + 7) + (5 * humanize.MiByte) - 1, 4, 1048582, nil},
// Error if offset is size.
{(1 + 2 + 4 + 5 + 7) + (5 * humanize.MiByte), 0, 0, InvalidRange{}},
}
// Test them.
for _, testCase := range testCases {
index, offset, err := fi.ObjectToPartOffset(context.Background(), testCase.offset)
if err != testCase.expectedErr {
t.Fatalf("%+v: expected = %s, got: %s", testCase, testCase.expectedErr, err)
}
if index != testCase.expectedIndex {
t.Fatalf("%+v: index: expected = %d, got: %d", testCase, testCase.expectedIndex, index)
}
if offset != testCase.expectedOffset {
t.Fatalf("%+v: offset: expected = %d, got: %d", testCase, testCase.expectedOffset, offset)
}
}
}
func TestFindFileInfoInQuorum(t *testing.T) {
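	// getNFInfo returns a slice of n FileInfo entries where only the first 'quorum' entries are populated; the
	// remaining entries stay zero-valued (invalid), letting the test exercise read-quorum failures.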
getNFInfo := func(n int, quorum int, t int64, dataDir string) []FileInfo {
fi := newFileInfo("test", 8, 8)
fi.AddObjectPart(1, "etag", 100, 100)
fi.ModTime = time.Unix(t, 0)
fi.DataDir = dataDir
fis := make([]FileInfo, n)
for i := range fis {
fis[i] = fi
fis[i].Erasure.Index = i + 1
quorum--
if quorum == 0 {
break
}
}
return fis
}
tests := []struct {
fis []FileInfo
modTime time.Time
dataDir string
expectedErr error
}{
{
fis: getNFInfo(16, 16, 1603863445, "36a21454-a2ca-11eb-bbaa-93a81c686f21"),
modTime: time.Unix(1603863445, 0),
dataDir: "36a21454-a2ca-11eb-bbaa-93a81c686f21",
expectedErr: nil,
},
{
fis: getNFInfo(16, 7, 1603863445, "36a21454-a2ca-11eb-bbaa-93a81c686f21"),
modTime: time.Unix(1603863445, 0),
dataDir: "36a21454-a2ca-11eb-bbaa-93a81c686f21",
expectedErr: errErasureReadQuorum,
},
}
for _, test := range tests {
test := test
t.Run("", func(t *testing.T) {
_, err := findFileInfoInQuorum(context.Background(), test.fis, test.modTime, test.dataDir, 8)
if err != test.expectedErr {
t.Errorf("Expected %s, got %s", test.expectedErr, err)
}
})
}
}
| cmd/erasure-metadata_test.go | 1 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.27777957916259766,
0.01338968425989151,
0.00016371723904740065,
0.00017113136709667742,
0.05911938101053238
] |
{
"id": 1,
"code_window": [
"\t}\n",
"\n",
"\tfor i, hash := range metaHashes {\n",
"\t\tif hash == maxHash {\n",
"\t\t\treturn metaArr[i], nil\n",
"\t\t}\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif metaArr[i].IsValid() {\n",
"\t\t\t\treturn metaArr[i], nil\n",
"\t\t\t}\n"
],
"file_path": "cmd/erasure-metadata.go",
"type": "replace",
"edit_start_line_idx": 280
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package target
import (
"context"
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"net"
"net/url"
"os"
"path/filepath"
"github.com/minio/minio/pkg/event"
xnet "github.com/minio/minio/pkg/net"
sarama "github.com/Shopify/sarama"
saramatls "github.com/Shopify/sarama/tools/tls"
)
// Kafka input constants
const (
KafkaBrokers = "brokers"
KafkaTopic = "topic"
KafkaQueueDir = "queue_dir"
KafkaQueueLimit = "queue_limit"
KafkaTLS = "tls"
KafkaTLSSkipVerify = "tls_skip_verify"
KafkaTLSClientAuth = "tls_client_auth"
KafkaSASL = "sasl"
KafkaSASLUsername = "sasl_username"
KafkaSASLPassword = "sasl_password"
KafkaSASLMechanism = "sasl_mechanism"
KafkaClientTLSCert = "client_tls_cert"
KafkaClientTLSKey = "client_tls_key"
KafkaVersion = "version"
EnvKafkaEnable = "MINIO_NOTIFY_KAFKA_ENABLE"
EnvKafkaBrokers = "MINIO_NOTIFY_KAFKA_BROKERS"
EnvKafkaTopic = "MINIO_NOTIFY_KAFKA_TOPIC"
EnvKafkaQueueDir = "MINIO_NOTIFY_KAFKA_QUEUE_DIR"
EnvKafkaQueueLimit = "MINIO_NOTIFY_KAFKA_QUEUE_LIMIT"
EnvKafkaTLS = "MINIO_NOTIFY_KAFKA_TLS"
EnvKafkaTLSSkipVerify = "MINIO_NOTIFY_KAFKA_TLS_SKIP_VERIFY"
EnvKafkaTLSClientAuth = "MINIO_NOTIFY_KAFKA_TLS_CLIENT_AUTH"
EnvKafkaSASLEnable = "MINIO_NOTIFY_KAFKA_SASL"
EnvKafkaSASLUsername = "MINIO_NOTIFY_KAFKA_SASL_USERNAME"
EnvKafkaSASLPassword = "MINIO_NOTIFY_KAFKA_SASL_PASSWORD"
EnvKafkaSASLMechanism = "MINIO_NOTIFY_KAFKA_SASL_MECHANISM"
EnvKafkaClientTLSCert = "MINIO_NOTIFY_KAFKA_CLIENT_TLS_CERT"
EnvKafkaClientTLSKey = "MINIO_NOTIFY_KAFKA_CLIENT_TLS_KEY"
EnvKafkaVersion = "MINIO_NOTIFY_KAFKA_VERSION"
)
// KafkaArgs - Kafka target arguments.
type KafkaArgs struct {
Enable bool `json:"enable"`
Brokers []xnet.Host `json:"brokers"`
Topic string `json:"topic"`
QueueDir string `json:"queueDir"`
QueueLimit uint64 `json:"queueLimit"`
Version string `json:"version"`
TLS struct {
Enable bool `json:"enable"`
RootCAs *x509.CertPool `json:"-"`
SkipVerify bool `json:"skipVerify"`
ClientAuth tls.ClientAuthType `json:"clientAuth"`
ClientTLSCert string `json:"clientTLSCert"`
ClientTLSKey string `json:"clientTLSKey"`
} `json:"tls"`
SASL struct {
Enable bool `json:"enable"`
User string `json:"username"`
Password string `json:"password"`
Mechanism string `json:"mechanism"`
} `json:"sasl"`
}
// Validate KafkaArgs fields
func (k KafkaArgs) Validate() error {
if !k.Enable {
return nil
}
if len(k.Brokers) == 0 {
return errors.New("no broker address found")
}
for _, b := range k.Brokers {
if _, err := xnet.ParseHost(b.String()); err != nil {
return err
}
}
if k.QueueDir != "" {
if !filepath.IsAbs(k.QueueDir) {
return errors.New("queueDir path should be absolute")
}
}
if k.Version != "" {
if _, err := sarama.ParseKafkaVersion(k.Version); err != nil {
return err
}
}
return nil
}
// KafkaTarget - Kafka target.
type KafkaTarget struct {
id event.TargetID
args KafkaArgs
producer sarama.SyncProducer
config *sarama.Config
store Store
loggerOnce func(ctx context.Context, err error, id interface{}, errKind ...interface{})
}
// ID - returns target ID.
func (target *KafkaTarget) ID() event.TargetID {
return target.id
}
// HasQueueStore - Checks if the queueStore has been configured for the target
func (target *KafkaTarget) HasQueueStore() bool {
return target.store != nil
}
// IsActive - Return true if target is up and active
func (target *KafkaTarget) IsActive() (bool, error) {
if !target.args.pingBrokers() {
return false, errNotConnected
}
return true, nil
}
// Save - saves the events to the store which will be replayed when the Kafka connection is active.
func (target *KafkaTarget) Save(eventData event.Event) error {
if target.store != nil {
return target.store.Put(eventData)
}
_, err := target.IsActive()
if err != nil {
return err
}
return target.send(eventData)
}
// send - sends an event to Kafka.
func (target *KafkaTarget) send(eventData event.Event) error {
if target.producer == nil {
return errNotConnected
}
objectName, err := url.QueryUnescape(eventData.S3.Object.Key)
if err != nil {
return err
}
key := eventData.S3.Bucket.Name + "/" + objectName
data, err := json.Marshal(event.Log{EventName: eventData.EventName, Key: key, Records: []event.Event{eventData}})
if err != nil {
return err
}
msg := sarama.ProducerMessage{
Topic: target.args.Topic,
Key: sarama.StringEncoder(key),
Value: sarama.ByteEncoder(data),
}
_, _, err = target.producer.SendMessage(&msg)
return err
}
// Send - reads an event from store and sends it to Kafka.
func (target *KafkaTarget) Send(eventKey string) error {
var err error
_, err = target.IsActive()
if err != nil {
return err
}
if target.producer == nil {
brokers := []string{}
for _, broker := range target.args.Brokers {
brokers = append(brokers, broker.String())
}
target.producer, err = sarama.NewSyncProducer(brokers, target.config)
if err != nil {
if err != sarama.ErrOutOfBrokers {
return err
}
return errNotConnected
}
}
eventData, eErr := target.store.Get(eventKey)
if eErr != nil {
		// The last event key in a successful batch will be sent in the channel at most once by replayEvents().
		// Such events will not exist and would have already been sent successfully.
if os.IsNotExist(eErr) {
return nil
}
return eErr
}
err = target.send(eventData)
if err != nil {
		// Sarama opens the circuit breaker after 3 consecutive connection failures.
if err == sarama.ErrLeaderNotAvailable || err.Error() == "circuit breaker is open" {
return errNotConnected
}
return err
}
// Delete the event from store.
return target.store.Del(eventKey)
}
// Close - closes the underlying Kafka connection.
func (target *KafkaTarget) Close() error {
if target.producer != nil {
return target.producer.Close()
}
return nil
}
// Check if at least one broker in the cluster is active
func (k KafkaArgs) pingBrokers() bool {
for _, broker := range k.Brokers {
_, dErr := net.Dial("tcp", broker.String())
if dErr == nil {
return true
}
}
return false
}
// NewKafkaTarget - creates new Kafka target with auth credentials.
func NewKafkaTarget(id string, args KafkaArgs, doneCh <-chan struct{}, loggerOnce func(ctx context.Context, err error, id interface{}, kind ...interface{}), test bool) (*KafkaTarget, error) {
config := sarama.NewConfig()
target := &KafkaTarget{
id: event.TargetID{ID: id, Name: "kafka"},
args: args,
loggerOnce: loggerOnce,
}
if args.Version != "" {
kafkaVersion, err := sarama.ParseKafkaVersion(args.Version)
if err != nil {
target.loggerOnce(context.Background(), err, target.ID())
return target, err
}
config.Version = kafkaVersion
}
config.Net.SASL.User = args.SASL.User
config.Net.SASL.Password = args.SASL.Password
initScramClient(args, config) // initializes configured scram client.
config.Net.SASL.Enable = args.SASL.Enable
tlsConfig, err := saramatls.NewConfig(args.TLS.ClientTLSCert, args.TLS.ClientTLSKey)
if err != nil {
target.loggerOnce(context.Background(), err, target.ID())
return target, err
}
config.Net.TLS.Enable = args.TLS.Enable
config.Net.TLS.Config = tlsConfig
config.Net.TLS.Config.InsecureSkipVerify = args.TLS.SkipVerify
config.Net.TLS.Config.ClientAuth = args.TLS.ClientAuth
config.Net.TLS.Config.RootCAs = args.TLS.RootCAs
config.Producer.RequiredAcks = sarama.WaitForAll
config.Producer.Retry.Max = 10
config.Producer.Return.Successes = true
target.config = config
brokers := []string{}
for _, broker := range args.Brokers {
brokers = append(brokers, broker.String())
}
var store Store
if args.QueueDir != "" {
queueDir := filepath.Join(args.QueueDir, storePrefix+"-kafka-"+id)
store = NewQueueStore(queueDir, args.QueueLimit)
if oErr := store.Open(); oErr != nil {
target.loggerOnce(context.Background(), oErr, target.ID())
return target, oErr
}
target.store = store
}
producer, err := sarama.NewSyncProducer(brokers, config)
if err != nil {
if store == nil || err != sarama.ErrOutOfBrokers {
target.loggerOnce(context.Background(), err, target.ID())
return target, err
}
}
target.producer = producer
if target.store != nil && !test {
// Replays the events from the store.
eventKeyCh := replayEvents(target.store, doneCh, target.loggerOnce, target.ID())
// Start replaying events from the store.
go sendEvents(target, eventKeyCh, doneCh, target.loggerOnce)
}
return target, nil
}
| pkg/event/target/kafka.go | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.0008803260279819369,
0.00020261455210857093,
0.0001635531079955399,
0.0001712637604214251,
0.00012921140296384692
] |
{
"id": 1,
"code_window": [
"\t}\n",
"\n",
"\tfor i, hash := range metaHashes {\n",
"\t\tif hash == maxHash {\n",
"\t\t\treturn metaArr[i], nil\n",
"\t\t}\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif metaArr[i].IsValid() {\n",
"\t\t\t\treturn metaArr[i], nil\n",
"\t\t\t}\n"
],
"file_path": "cmd/erasure-metadata.go",
"type": "replace",
"edit_start_line_idx": 280
} | -----BEGIN CERTIFICATE-----
MIIDqjCCApKgAwIBAgIJAOcv4FsrflS4MA0GCSqGSIb3DQEBCwUAMGoxCzAJBgNV
BAYTAlVTMQswCQYDVQQIDAJDQTEVMBMGA1UEBwwMUmVkd29vZCBDaXR5MQ4wDAYD
VQQKDAVNaW5pbzEUMBIGA1UECwwLRW5naW5lZXJpbmcxETAPBgNVBAMMCG1pbmlv
LmlvMB4XDTE4MDUyMDA4NDc0MFoXDTE5MDUyMDA4NDc0MFowajELMAkGA1UEBhMC
VVMxCzAJBgNVBAgMAkNBMRUwEwYDVQQHDAxSZWR3b29kIENpdHkxDjAMBgNVBAoM
BU1pbmlvMRQwEgYDVQQLDAtFbmdpbmVlcmluZzERMA8GA1UEAwwIbWluaW8uaW8w
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPszxaYwn+mIz6IGuUlmvW
wUs/yWTH4MC17qey2N5MqcxlfIWHUugcBsbGhi/e1druFW0s7YGMxp+G+Q1IezxX
+VmVaJCN8AgSowbYgpRdpRQ+mhGeQby0JcvO16fyPnUJBz3GGel2bcK8fcQyT0TV
apCiD9oURVmdvDSsRXz+EoPlOve8AWciHHgm1ItO5qdPRP5YtcJfLiwKnoYnpda2
d9SzmYk+Q2JFArooF7/A1DYz9bXCMo3qp0gQlMpSMDR+MCbxHBzBBr+fQG8QdDrz
WQ2slhniBhFDk0LuPCBLlSeIzkp+DoAGDXf3hWYhechlabZ7nfngg5erEz776WCF
AgMBAAGjUzBRMB0GA1UdDgQWBBRzC09a+3AlbFDg6BsvELolmO8jYjAfBgNVHSME
GDAWgBRzC09a+3AlbFDg6BsvELolmO8jYjAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
SIb3DQEBCwUAA4IBAQBl0cx7qbidKjhoZ1Iv4pCD8xHZgtuWEDApPoGuMtVS66jJ
+oj0ncD5xCtv9XqXtshE65FIsEWnDOIwa+kyjMnxHbFwxveWBT4W0twtqwbVs7NE
I0So6cEmSx4+rB0XorY6mIbD3O9YAStelNhB1jVfQfIMSByYkcGq2Fh+B1LHlOrz
06LJdwYMiILzK0c5fvjZvsDq/9EK+Xo66hphKjs5cl1t9WK7wKOCoZDt2lOTZqEq
UWYGPWlTAxSWQxO4WnvSKqFdsRi8fOO3KlDq1eNqeDSGGCI0DTGgJxidHIpfOPEF
s/zojgc5npE32/1n8og6gLcv7LIKelBfMhUrFTp7
-----END CERTIFICATE-----
| pkg/certs/public.crt | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.00899419654160738,
0.0031662918627262115,
0.00016755062097217888,
0.0003371285565663129,
0.0041215321980416775
] |
{
"id": 1,
"code_window": [
"\t}\n",
"\n",
"\tfor i, hash := range metaHashes {\n",
"\t\tif hash == maxHash {\n",
"\t\t\treturn metaArr[i], nil\n",
"\t\t}\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif metaArr[i].IsValid() {\n",
"\t\t\t\treturn metaArr[i], nil\n",
"\t\t\t}\n"
],
"file_path": "cmd/erasure-metadata.go",
"type": "replace",
"edit_start_line_idx": 280
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"encoding/base64"
"errors"
"fmt"
"net/http"
"strings"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/config/identity/openid"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
iampolicy "github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/wildcard"
)
const (
// STS API version.
stsAPIVersion = "2011-06-15"
stsVersion = "Version"
stsAction = "Action"
stsPolicy = "Policy"
stsToken = "Token"
stsWebIdentityToken = "WebIdentityToken"
stsDurationSeconds = "DurationSeconds"
stsLDAPUsername = "LDAPUsername"
stsLDAPPassword = "LDAPPassword"
// STS API action constants
clientGrants = "AssumeRoleWithClientGrants"
webIdentity = "AssumeRoleWithWebIdentity"
ldapIdentity = "AssumeRoleWithLDAPIdentity"
assumeRole = "AssumeRole"
stsRequestBodyLimit = 10 * (1 << 20) // 10 MiB
// JWT claim keys
expClaim = "exp"
subClaim = "sub"
issClaim = "iss"
// JWT claim to check the parent user
parentClaim = "parent"
// LDAP claim keys
ldapUser = "ldapUser"
)
// stsAPIHandlers implements and provides http handlers for AWS STS API.
type stsAPIHandlers struct{}
// registerSTSRouter - registers AWS STS compatible APIs.
func registerSTSRouter(router *mux.Router) {
// Initialize STS.
sts := &stsAPIHandlers{}
// STS Router
stsRouter := router.NewRoute().PathPrefix(SlashSeparator).Subrouter()
// Assume roles with no JWT, handles AssumeRole.
stsRouter.Methods(http.MethodPost).MatcherFunc(func(r *http.Request, rm *mux.RouteMatch) bool {
ctypeOk := wildcard.MatchSimple("application/x-www-form-urlencoded*", r.Header.Get(xhttp.ContentType))
authOk := wildcard.MatchSimple(signV4Algorithm+"*", r.Header.Get(xhttp.Authorization))
noQueries := len(r.URL.Query()) == 0
return ctypeOk && authOk && noQueries
}).HandlerFunc(httpTraceAll(sts.AssumeRole))
// Assume roles with JWT handler, handles both ClientGrants and WebIdentity.
stsRouter.Methods(http.MethodPost).MatcherFunc(func(r *http.Request, rm *mux.RouteMatch) bool {
ctypeOk := wildcard.MatchSimple("application/x-www-form-urlencoded*", r.Header.Get(xhttp.ContentType))
noQueries := len(r.URL.Query()) == 0
return ctypeOk && noQueries
}).HandlerFunc(httpTraceAll(sts.AssumeRoleWithSSO))
// AssumeRoleWithClientGrants
stsRouter.Methods(http.MethodPost).HandlerFunc(httpTraceAll(sts.AssumeRoleWithClientGrants)).
Queries(stsAction, clientGrants).
Queries(stsVersion, stsAPIVersion).
Queries(stsToken, "{Token:.*}")
// AssumeRoleWithWebIdentity
stsRouter.Methods(http.MethodPost).HandlerFunc(httpTraceAll(sts.AssumeRoleWithWebIdentity)).
Queries(stsAction, webIdentity).
Queries(stsVersion, stsAPIVersion).
Queries(stsWebIdentityToken, "{Token:.*}")
// AssumeRoleWithLDAPIdentity
stsRouter.Methods(http.MethodPost).HandlerFunc(httpTraceAll(sts.AssumeRoleWithLDAPIdentity)).
Queries(stsAction, ldapIdentity).
Queries(stsVersion, stsAPIVersion).
Queries(stsLDAPUsername, "{LDAPUsername:.*}").
Queries(stsLDAPPassword, "{LDAPPassword:.*}")
}
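// checkAssumeRoleAuth authenticates an STS AssumeRole request using AWS Signature V4 credentials and rejects
// temporary credentials, service accounts and requests that carry a session token.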
func checkAssumeRoleAuth(ctx context.Context, r *http.Request) (user auth.Credentials, isErrCodeSTS bool, stsErr STSErrorCode) {
switch getRequestAuthType(r) {
default:
return user, true, ErrSTSAccessDenied
case authTypeSigned:
s3Err := isReqAuthenticated(ctx, r, globalServerRegion, serviceSTS)
if s3Err != ErrNone {
return user, false, STSErrorCode(s3Err)
}
user, _, s3Err = getReqAccessKeyV4(r, globalServerRegion, serviceSTS)
if s3Err != ErrNone {
return user, false, STSErrorCode(s3Err)
}
// Temporary credentials or Service accounts cannot generate further temporary credentials.
if user.IsTemp() || user.IsServiceAccount() {
return user, true, ErrSTSAccessDenied
}
}
// Session tokens are not allowed in STS AssumeRole requests.
if getSessionToken(r) != "" {
return user, true, ErrSTSAccessDenied
}
return user, true, ErrSTSNone
}
// AssumeRole - implementation of AWS STS API AssumeRole to get temporary
// credentials for regular users on Minio.
// https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html
func (sts *stsAPIHandlers) AssumeRole(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AssumeRole")
user, isErrCodeSTS, stsErr := checkAssumeRoleAuth(ctx, r)
if stsErr != ErrSTSNone {
writeSTSErrorResponse(ctx, w, isErrCodeSTS, stsErr, nil)
return
}
if err := r.ParseForm(); err != nil {
writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, err)
return
}
if r.Form.Get(stsVersion) != stsAPIVersion {
writeSTSErrorResponse(ctx, w, true, ErrSTSMissingParameter, fmt.Errorf("Invalid STS API version %s, expecting %s", r.Form.Get(stsVersion), stsAPIVersion))
return
}
action := r.Form.Get(stsAction)
switch action {
case assumeRole:
default:
writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, fmt.Errorf("Unsupported action %s", action))
return
}
ctx = newContext(r, w, action)
defer logger.AuditLog(ctx, w, r, nil)
sessionPolicyStr := r.Form.Get(stsPolicy)
// https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html
// The plain text that you use for both inline and managed session
// policies shouldn't exceed 2048 characters.
if len(sessionPolicyStr) > 2048 {
writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, fmt.Errorf("Session policy shouldn't exceed 2048 characters"))
return
}
if len(sessionPolicyStr) > 0 {
sessionPolicy, err := iampolicy.ParseConfig(bytes.NewReader([]byte(sessionPolicyStr)))
if err != nil {
writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, err)
return
}
// Version in policy must not be empty
if sessionPolicy.Version == "" {
writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, fmt.Errorf("Version cannot be empty expecting '2012-10-17'"))
return
}
}
var err error
m := make(map[string]interface{})
m[expClaim], err = openid.GetDefaultExpiration(r.Form.Get(stsDurationSeconds))
if err != nil {
writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, err)
return
}
policies, err := globalIAMSys.PolicyDBGet(user.AccessKey, false)
if err != nil {
writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, err)
return
}
policyName := strings.Join(policies, ",")
// This policy is the policy associated with the user
// requesting for temporary credentials. The temporary
// credentials will inherit the same policy requirements.
m[iamPolicyClaimNameOpenID()] = policyName
if len(sessionPolicyStr) > 0 {
m[iampolicy.SessionPolicyName] = base64.StdEncoding.EncodeToString([]byte(sessionPolicyStr))
}
secret := globalActiveCred.SecretKey
cred, err := auth.GetNewCredentialsWithMetadata(m, secret)
if err != nil {
writeSTSErrorResponse(ctx, w, true, ErrSTSInternalError, err)
return
}
// Set the parent of the temporary access key, this is useful
// in obtaining service accounts by this cred.
cred.ParentUser = user.AccessKey
// Set the newly generated credentials.
if err = globalIAMSys.SetTempUser(cred.AccessKey, cred, policyName); err != nil {
writeSTSErrorResponse(ctx, w, true, ErrSTSInternalError, err)
return
}
// Notify all other MinIO peers to reload temp users
for _, nerr := range globalNotificationSys.LoadUser(cred.AccessKey, true) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
assumeRoleResponse := &AssumeRoleResponse{
Result: AssumeRoleResult{
Credentials: cred,
},
}
assumeRoleResponse.ResponseMetadata.RequestID = w.Header().Get(xhttp.AmzRequestID)
writeSuccessResponseXML(w, encodeResponse(assumeRoleResponse))
}
func (sts *stsAPIHandlers) AssumeRoleWithSSO(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AssumeRoleSSOCommon")
// Parse the incoming form data.
if err := r.ParseForm(); err != nil {
writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, err)
return
}
if r.Form.Get(stsVersion) != stsAPIVersion {
writeSTSErrorResponse(ctx, w, true, ErrSTSMissingParameter, fmt.Errorf("Invalid STS API version %s, expecting %s", r.Form.Get("Version"), stsAPIVersion))
return
}
action := r.Form.Get(stsAction)
switch action {
case ldapIdentity:
sts.AssumeRoleWithLDAPIdentity(w, r)
return
case clientGrants, webIdentity:
default:
writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, fmt.Errorf("Unsupported action %s", action))
return
}
ctx = newContext(r, w, action)
defer logger.AuditLog(ctx, w, r, nil)
if globalOpenIDValidators == nil {
writeSTSErrorResponse(ctx, w, true, ErrSTSNotInitialized, errServerNotInitialized)
return
}
v, err := globalOpenIDValidators.Get("jwt")
if err != nil {
writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, err)
return
}
token := r.Form.Get(stsToken)
if token == "" {
token = r.Form.Get(stsWebIdentityToken)
}
m, err := v.Validate(token, r.Form.Get(stsDurationSeconds))
if err != nil {
switch err {
case openid.ErrTokenExpired:
switch action {
case clientGrants:
writeSTSErrorResponse(ctx, w, true, ErrSTSClientGrantsExpiredToken, err)
case webIdentity:
writeSTSErrorResponse(ctx, w, true, ErrSTSWebIdentityExpiredToken, err)
}
return
case auth.ErrInvalidDuration:
writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, err)
return
}
writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, err)
return
}
var subFromToken string
if v, ok := m[subClaim]; ok {
subFromToken, _ = v.(string)
}
if subFromToken == "" {
writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, errors.New("STS JWT Token has `sub` claim missing, `sub` claim is mandatory"))
return
}
var issFromToken string
if v, ok := m[issClaim]; ok {
issFromToken, _ = v.(string)
}
// JWT has requested a custom claim with policy value set.
// This is a MinIO STS API specific value, this value should
// be set and configured on your identity provider as part of
// JWT custom claims.
var policyName string
policySet, ok := iampolicy.GetPoliciesFromClaims(m, iamPolicyClaimNameOpenID())
if ok {
policyName = globalIAMSys.CurrentPolicies(strings.Join(policySet.ToSlice(), ","))
}
if policyName == "" && globalPolicyOPA == nil {
writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue,
fmt.Errorf("%s claim missing from the JWT token, credentials will not be generated", iamPolicyClaimNameOpenID()))
return
}
m[iamPolicyClaimNameOpenID()] = policyName
sessionPolicyStr := r.Form.Get(stsPolicy)
// https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html
// The plain text that you use for both inline and managed session
// policies shouldn't exceed 2048 characters.
if len(sessionPolicyStr) > 2048 {
writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, fmt.Errorf("Session policy should not exceed 2048 characters"))
return
}
if len(sessionPolicyStr) > 0 {
sessionPolicy, err := iampolicy.ParseConfig(bytes.NewReader([]byte(sessionPolicyStr)))
if err != nil {
writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, err)
return
}
// Version in policy must not be empty
if sessionPolicy.Version == "" {
writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, fmt.Errorf("Invalid session policy version"))
return
}
m[iampolicy.SessionPolicyName] = base64.StdEncoding.EncodeToString([]byte(sessionPolicyStr))
}
secret := globalActiveCred.SecretKey
cred, err := auth.GetNewCredentialsWithMetadata(m, secret)
if err != nil {
writeSTSErrorResponse(ctx, w, true, ErrSTSInternalError, err)
return
}
// https://openid.net/specs/openid-connect-core-1_0.html#ClaimStability
// claim is only considered stable when subject and iss are used together
// this is to ensure that ParentUser doesn't change and we get to use
// parentUser as per the requirements for service accounts for OpenID
// based logins.
cred.ParentUser = "jwt:" + subFromToken + ":" + issFromToken
// Set the newly generated credentials.
if err = globalIAMSys.SetTempUser(cred.AccessKey, cred, policyName); err != nil {
writeSTSErrorResponse(ctx, w, true, ErrSTSInternalError, err)
return
}
// Notify all other MinIO peers to reload temp users
for _, nerr := range globalNotificationSys.LoadUser(cred.AccessKey, true) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
var encodedSuccessResponse []byte
switch action {
case clientGrants:
clientGrantsResponse := &AssumeRoleWithClientGrantsResponse{
Result: ClientGrantsResult{
Credentials: cred,
SubjectFromToken: subFromToken,
},
}
clientGrantsResponse.ResponseMetadata.RequestID = w.Header().Get(xhttp.AmzRequestID)
encodedSuccessResponse = encodeResponse(clientGrantsResponse)
case webIdentity:
webIdentityResponse := &AssumeRoleWithWebIdentityResponse{
Result: WebIdentityResult{
Credentials: cred,
SubjectFromWebIdentityToken: subFromToken,
},
}
webIdentityResponse.ResponseMetadata.RequestID = w.Header().Get(xhttp.AmzRequestID)
encodedSuccessResponse = encodeResponse(webIdentityResponse)
}
writeSuccessResponseXML(w, encodedSuccessResponse)
}
// AssumeRoleWithWebIdentity - implementation of AWS STS API supporting OAuth2.0
// users from web identity provider such as Facebook, Google, or any OpenID
// Connect-compatible identity provider.
//
// Eg:-
// $ curl https://minio:9000/?Action=AssumeRoleWithWebIdentity&WebIdentityToken=<jwt>
func (sts *stsAPIHandlers) AssumeRoleWithWebIdentity(w http.ResponseWriter, r *http.Request) {
sts.AssumeRoleWithSSO(w, r)
}
// AssumeRoleWithClientGrants - implementation of AWS STS extension API supporting
// OAuth2.0 client credential grants.
//
// Eg:-
// $ curl https://minio:9000/?Action=AssumeRoleWithClientGrants&Token=<jwt>
func (sts *stsAPIHandlers) AssumeRoleWithClientGrants(w http.ResponseWriter, r *http.Request) {
sts.AssumeRoleWithSSO(w, r)
}
// AssumeRoleWithLDAPIdentity - implements user auth against LDAP server
func (sts *stsAPIHandlers) AssumeRoleWithLDAPIdentity(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AssumeRoleWithLDAPIdentity")
defer logger.AuditLog(ctx, w, r, nil, stsLDAPPassword)
// Parse the incoming form data.
if err := r.ParseForm(); err != nil {
writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, err)
return
}
if r.Form.Get(stsVersion) != stsAPIVersion {
writeSTSErrorResponse(ctx, w, true, ErrSTSMissingParameter,
fmt.Errorf("Invalid STS API version %s, expecting %s", r.Form.Get("Version"), stsAPIVersion))
return
}
ldapUsername := r.Form.Get(stsLDAPUsername)
ldapPassword := r.Form.Get(stsLDAPPassword)
if ldapUsername == "" || ldapPassword == "" {
writeSTSErrorResponse(ctx, w, true, ErrSTSMissingParameter, fmt.Errorf("LDAPUsername and LDAPPassword cannot be empty"))
return
}
action := r.Form.Get(stsAction)
switch action {
case ldapIdentity:
default:
writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, fmt.Errorf("Unsupported action %s", action))
return
}
sessionPolicyStr := r.Form.Get(stsPolicy)
// https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html
// The plain text that you use for both inline and managed session
// policies shouldn't exceed 2048 characters.
if len(sessionPolicyStr) > 2048 {
writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, fmt.Errorf("Session policy should not exceed 2048 characters"))
return
}
if len(sessionPolicyStr) > 0 {
sessionPolicy, err := iampolicy.ParseConfig(bytes.NewReader([]byte(sessionPolicyStr)))
if err != nil {
writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, err)
return
}
// Version in policy must not be empty
if sessionPolicy.Version == "" {
writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, fmt.Errorf("Version needs to be specified in session policy"))
return
}
}
ldapUserDN, groupDistNames, err := globalLDAPConfig.Bind(ldapUsername, ldapPassword)
if err != nil {
err = fmt.Errorf("LDAP server error: %w", err)
writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue, err)
return
}
// Check if this user or their groups have a policy applied.
ldapPolicies, _ := globalIAMSys.PolicyDBGet(ldapUserDN, false, groupDistNames...)
if len(ldapPolicies) == 0 {
writeSTSErrorResponse(ctx, w, true, ErrSTSInvalidParameterValue,
fmt.Errorf("expecting a policy to be set for user `%s` or one of their groups: `%s` - rejecting this request",
ldapUserDN, strings.Join(groupDistNames, "`,`")))
return
}
expiryDur := globalLDAPConfig.GetExpiryDuration()
m := map[string]interface{}{
expClaim: UTCNow().Add(expiryDur).Unix(),
ldapUser: ldapUserDN,
}
if len(sessionPolicyStr) > 0 {
m[iampolicy.SessionPolicyName] = base64.StdEncoding.EncodeToString([]byte(sessionPolicyStr))
}
secret := globalActiveCred.SecretKey
cred, err := auth.GetNewCredentialsWithMetadata(m, secret)
if err != nil {
writeSTSErrorResponse(ctx, w, true, ErrSTSInternalError, err)
return
}
// Set the parent of the temporary access key, this is useful
// in obtaining service accounts by this cred.
cred.ParentUser = ldapUserDN
// Set this value to LDAP groups, LDAP user can be part
// of large number of groups
cred.Groups = groupDistNames
// Set the newly generated credentials, policyName is empty on purpose
// LDAP policies are applied automatically using their ldapUser, ldapGroups
// mapping.
if err = globalIAMSys.SetTempUser(cred.AccessKey, cred, ""); err != nil {
writeSTSErrorResponse(ctx, w, true, ErrSTSInternalError, err)
return
}
// Notify all other MinIO peers to reload temp users
for _, nerr := range globalNotificationSys.LoadUser(cred.AccessKey, true) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
ldapIdentityResponse := &AssumeRoleWithLDAPResponse{
Result: LDAPIdentityResult{
Credentials: cred,
},
}
ldapIdentityResponse.ResponseMetadata.RequestID = w.Header().Get(xhttp.AmzRequestID)
encodedSuccessResponse := encodeResponse(ldapIdentityResponse)
writeSuccessResponseXML(w, encodedSuccessResponse)
}
| cmd/sts-handlers.go | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.0033849505707621574,
0.0002488298632670194,
0.00016188873269129544,
0.00017021576059050858,
0.00043153492151759565
] |
{
"id": 2,
"code_window": [
"\treturn FileInfo{}, errErasureReadQuorum\n",
"}\n",
"\n",
"// pickValidFileInfo - picks one valid FileInfo content and returns from a\n",
"// slice of FileInfo.\n",
"func pickValidFileInfo(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (xmv FileInfo, e error) {\n",
"\treturn findFileInfoInQuorum(ctx, metaArr, modTime, dataDir, quorum)\n",
"}\n",
"\n",
"// writeUniqueFileInfo - writes unique `xl.meta` content for each disk concurrently.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func pickValidFileInfo(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (FileInfo, error) {\n"
],
"file_path": "cmd/erasure-metadata.go",
"type": "replace",
"edit_start_line_idx": 289
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"strconv"
"testing"
"time"
humanize "github.com/dustin/go-humanize"
)
const ActualSize = 1000
// Test FileInfo.AddObjectPart()
func TestAddObjectPart(t *testing.T) {
testCases := []struct {
partNum int
expectedIndex int
}{
{1, 0},
{2, 1},
{4, 2},
{5, 3},
{7, 4},
// Insert part.
{3, 2},
// Replace existing part.
{4, 3},
// Missing part.
{6, -1},
}
// Setup.
fi := newFileInfo("test-object", 8, 8)
fi.Erasure.Index = 1
if !fi.IsValid() {
t.Fatalf("unable to get xl meta")
}
// Test them.
for _, testCase := range testCases {
if testCase.expectedIndex > -1 {
partNumString := strconv.Itoa(testCase.partNum)
fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize)
}
if index := objectPartIndex(fi.Parts, testCase.partNum); index != testCase.expectedIndex {
t.Fatalf("%+v: expected = %d, got: %d", testCase, testCase.expectedIndex, index)
}
}
}
// Test objectPartIndex(). Generates sample FileInfo data and asserts
// the output of objectPartIndex() against the expected value.
func TestObjectPartIndex(t *testing.T) {
testCases := []struct {
partNum int
expectedIndex int
}{
{2, 1},
{1, 0},
{5, 3},
{4, 2},
{7, 4},
}
// Setup.
fi := newFileInfo("test-object", 8, 8)
fi.Erasure.Index = 1
if !fi.IsValid() {
t.Fatalf("unable to get xl meta")
}
// Add some parts for testing.
for _, testCase := range testCases {
partNumString := strconv.Itoa(testCase.partNum)
fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize)
}
// Add failure test case.
testCases = append(testCases, struct {
partNum int
expectedIndex int
}{6, -1})
// Test them.
for _, testCase := range testCases {
if index := objectPartIndex(fi.Parts, testCase.partNum); index != testCase.expectedIndex {
t.Fatalf("%+v: expected = %d, got: %d", testCase, testCase.expectedIndex, index)
}
}
}
// Test FileInfo.ObjectToPartOffset().
func TestObjectToPartOffset(t *testing.T) {
// Setup.
fi := newFileInfo("test-object", 8, 8)
fi.Erasure.Index = 1
if !fi.IsValid() {
t.Fatalf("unable to get xl meta")
}
// Add some parts for testing.
// Total size of all parts is 5,242,899 bytes.
for _, partNum := range []int{1, 2, 4, 5, 7} {
partNumString := strconv.Itoa(partNum)
fi.AddObjectPart(partNum, "etag."+partNumString, int64(partNum+humanize.MiByte), ActualSize)
}
testCases := []struct {
offset int64
expectedIndex int
expectedOffset int64
expectedErr error
}{
{0, 0, 0, nil},
{1 * humanize.MiByte, 0, 1 * humanize.MiByte, nil},
{1 + humanize.MiByte, 1, 0, nil},
{2 + humanize.MiByte, 1, 1, nil},
		// It's valid for a zero-sized object.
{-1, 0, -1, nil},
		// Max offset is always (size - 1).
{(1 + 2 + 4 + 5 + 7) + (5 * humanize.MiByte) - 1, 4, 1048582, nil},
// Error if offset is size.
{(1 + 2 + 4 + 5 + 7) + (5 * humanize.MiByte), 0, 0, InvalidRange{}},
}
// Test them.
for _, testCase := range testCases {
index, offset, err := fi.ObjectToPartOffset(context.Background(), testCase.offset)
if err != testCase.expectedErr {
t.Fatalf("%+v: expected = %s, got: %s", testCase, testCase.expectedErr, err)
}
if index != testCase.expectedIndex {
t.Fatalf("%+v: index: expected = %d, got: %d", testCase, testCase.expectedIndex, index)
}
if offset != testCase.expectedOffset {
t.Fatalf("%+v: offset: expected = %d, got: %d", testCase, testCase.expectedOffset, offset)
}
}
}
func TestFindFileInfoInQuorum(t *testing.T) {
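	// getNFInfo returns a slice of n FileInfo entries where only the first 'quorum' entries are populated; the
	// remaining entries stay zero-valued (invalid), letting the test exercise read-quorum failures.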
getNFInfo := func(n int, quorum int, t int64, dataDir string) []FileInfo {
fi := newFileInfo("test", 8, 8)
fi.AddObjectPart(1, "etag", 100, 100)
fi.ModTime = time.Unix(t, 0)
fi.DataDir = dataDir
fis := make([]FileInfo, n)
for i := range fis {
fis[i] = fi
fis[i].Erasure.Index = i + 1
quorum--
if quorum == 0 {
break
}
}
return fis
}
tests := []struct {
fis []FileInfo
modTime time.Time
dataDir string
expectedErr error
}{
{
fis: getNFInfo(16, 16, 1603863445, "36a21454-a2ca-11eb-bbaa-93a81c686f21"),
modTime: time.Unix(1603863445, 0),
dataDir: "36a21454-a2ca-11eb-bbaa-93a81c686f21",
expectedErr: nil,
},
{
fis: getNFInfo(16, 7, 1603863445, "36a21454-a2ca-11eb-bbaa-93a81c686f21"),
modTime: time.Unix(1603863445, 0),
dataDir: "36a21454-a2ca-11eb-bbaa-93a81c686f21",
expectedErr: errErasureReadQuorum,
},
}
for _, test := range tests {
test := test
t.Run("", func(t *testing.T) {
_, err := findFileInfoInQuorum(context.Background(), test.fis, test.modTime, test.dataDir, 8)
if err != test.expectedErr {
t.Errorf("Expected %s, got %s", test.expectedErr, err)
}
})
}
}
| cmd/erasure-metadata_test.go | 1 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.030926525592803955,
0.004217963665723801,
0.0001642530842218548,
0.0002497703826520592,
0.008225332014262676
] |
{
"id": 2,
"code_window": [
"\treturn FileInfo{}, errErasureReadQuorum\n",
"}\n",
"\n",
"// pickValidFileInfo - picks one valid FileInfo content and returns from a\n",
"// slice of FileInfo.\n",
"func pickValidFileInfo(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (xmv FileInfo, e error) {\n",
"\treturn findFileInfoInQuorum(ctx, metaArr, modTime, dataDir, quorum)\n",
"}\n",
"\n",
"// writeUniqueFileInfo - writes unique `xl.meta` content for each disk concurrently.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func pickValidFileInfo(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (FileInfo, error) {\n"
],
"file_path": "cmd/erasure-metadata.go",
"type": "replace",
"edit_start_line_idx": 289
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"net/http"
"strconv"
"strings"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/sync/errgroup"
)
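// concurrentDecryptETag computes the actual (decrypted) size and ETag for each listed object,
// processing up to 500 objects concurrently.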
func concurrentDecryptETag(ctx context.Context, objects []ObjectInfo) {
g := errgroup.WithNErrs(len(objects)).WithConcurrency(500)
_, cancel := g.WithCancelOnError(ctx)
defer cancel()
for index := range objects {
index := index
g.Go(func() error {
size, err := objects[index].GetActualSize()
if err == nil {
objects[index].Size = size
}
objects[index].ETag = objects[index].GetActualETag(nil)
return nil
}, index)
}
g.WaitErr()
}
// Validate all the ListObjects query arguments, returns an APIErrorCode
// if one of the args do not meet the required conditions.
// Special conditions required by MinIO server are as below
// - delimiter if set should be equal to '/', otherwise the request is rejected.
// - marker if set should have a common prefix with 'prefix' param, otherwise
// the request is rejected.
func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int) APIErrorCode {
// Max keys cannot be negative.
if maxKeys < 0 {
return ErrInvalidMaxKeys
}
if encodingType != "" {
// Only url encoding type is supported
if strings.ToLower(encodingType) != "url" {
return ErrInvalidEncodingMethod
}
}
return ErrNone
}
// ListObjectVersions - GET Bucket Object versions
// You can use the versions subresource to list metadata about all
// of the versions of objects in a bucket.
func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListObjectVersions")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketVersionsAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
urlValues := r.URL.Query()
// Extract all the listBucketVersions query params to their native values.
prefix, marker, delimiter, maxkeys, encodingType, versionIDMarker, errCode := getListBucketObjectVersionsArgs(urlValues)
if errCode != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
return
}
// Validate the query params before beginning to serve the request.
if s3Error := validateListObjectsArgs(marker, delimiter, encodingType, maxkeys); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
listObjectVersions := objectAPI.ListObjectVersions
	// Initiate a list object versions operation based on the input params.
	// On success, returns a ListObjectVersionsInfo object to be
	// marshaled into an S3 compatible XML response.
listObjectVersionsInfo, err := listObjectVersions(ctx, bucket, prefix, marker, versionIDMarker, delimiter, maxkeys)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
concurrentDecryptETag(ctx, listObjectVersionsInfo.Objects)
response := generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType, maxkeys, listObjectVersionsInfo)
// Write success response.
writeSuccessResponseXML(w, encodeResponse(response))
}
// ListObjectsV2MHandler - GET Bucket (List Objects) Version 2 with metadata.
// --------------------------
// This implementation of the GET operation returns some or all (up to 10000)
// of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket.
//
// NOTE: It is recommended that this API be used for application development.
// MinIO continues to support ListObjectsV1 and V2 for legacy tools.
func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListObjectsV2M")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
urlValues := r.URL.Query()
// Extract all the listObjectsV2 query params to their native values.
prefix, token, startAfter, delimiter, fetchOwner, maxKeys, encodingType, errCode := getListObjectsV2Args(urlValues)
if errCode != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
return
}
// Validate the query params before beginning to serve the request.
// fetch-owner is not validated since it is a boolean
if s3Error := validateListObjectsArgs(token, delimiter, encodingType, maxKeys); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
listObjectsV2 := objectAPI.ListObjectsV2
	// Initiate a list objects operation based on the input params.
	// On success, returns a ListObjectsV2Info object to be
	// marshaled into an S3 compatible XML response.
listObjectsV2Info, err := listObjectsV2(ctx, bucket, prefix, token, delimiter, maxKeys, fetchOwner, startAfter)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
concurrentDecryptETag(ctx, listObjectsV2Info.Objects)
// The next continuation token has id@node_index format to optimize paginated listing
nextContinuationToken := listObjectsV2Info.NextContinuationToken
response := generateListObjectsV2Response(bucket, prefix, token, nextContinuationToken, startAfter,
delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated,
maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes, true)
// Write success response.
writeSuccessResponseXML(w, encodeResponse(response))
}
// ListObjectsV2Handler - GET Bucket (List Objects) Version 2.
// --------------------------
// This implementation of the GET operation returns some or all (up to 10000)
// of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket.
//
// NOTE: It is recommended that this API be used for application development.
// MinIO continues to support ListObjectsV1 for legacy tools.
func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListObjectsV2")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
urlValues := r.URL.Query()
// Extract all the listObjectsV2 query params to their native values.
prefix, token, startAfter, delimiter, fetchOwner, maxKeys, encodingType, errCode := getListObjectsV2Args(urlValues)
if errCode != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
return
}
// Validate the query params before beginning to serve the request.
// fetch-owner is not validated since it is a boolean
if s3Error := validateListObjectsArgs(token, delimiter, encodingType, maxKeys); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
listObjectsV2 := objectAPI.ListObjectsV2
	// Initiate a list objects operation based on the input params.
	// On success, returns a ListObjectsV2Info object to be
	// marshaled into an S3 compatible XML response.
listObjectsV2Info, err := listObjectsV2(ctx, bucket, prefix, token, delimiter, maxKeys, fetchOwner, startAfter)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
concurrentDecryptETag(ctx, listObjectsV2Info.Objects)
response := generateListObjectsV2Response(bucket, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter,
delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated,
maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes, false)
// Write success response.
writeSuccessResponseXML(w, encodeResponse(response))
}
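// parseRequestToken - splits a continuation token of the form "token@nodeIndex"
// into its parts; a node index of -1 means no proxy target is encoded.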
func parseRequestToken(token string) (subToken string, nodeIndex int) {
if token == "" {
return token, -1
}
i := strings.Index(token, "@")
if i < 0 {
return token, -1
}
nodeIndex, err := strconv.Atoi(token[i+1:])
if err != nil {
return token, -1
}
subToken = token[:i]
return subToken, nodeIndex
}
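// proxyRequestByToken - proxies the request to the node encoded in the
// continuation token, when one is present; returns the bare token and
// whether the request was proxied.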
func proxyRequestByToken(ctx context.Context, w http.ResponseWriter, r *http.Request, token string) (string, bool) {
subToken, nodeIndex := parseRequestToken(token)
if nodeIndex > 0 {
return subToken, proxyRequestByNodeIndex(ctx, w, r, nodeIndex)
}
return subToken, false
}
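// proxyRequestByNodeIndex - proxies the request to the proxy endpoint at the
// given index, skipping out-of-range indices and the local endpoint.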
func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http.Request, index int) (success bool) {
if len(globalProxyEndpoints) == 0 {
return false
}
if index < 0 || index >= len(globalProxyEndpoints) {
return false
}
ep := globalProxyEndpoints[index]
if ep.IsLocal {
return false
}
return proxyRequest(ctx, w, r, ep)
}
// ListObjectsV1Handler - GET Bucket (List Objects) Version 1.
// --------------------------
// This implementation of the GET operation returns some or all (up to 10000)
// of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket.
//
func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListObjectsV1")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
	// Extract all the listObjectsV1 query params to their native values.
prefix, marker, delimiter, maxKeys, encodingType, s3Error := getListObjectsV1Args(r.URL.Query())
if s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Validate all the query params before beginning to serve the request.
if s3Error := validateListObjectsArgs(marker, delimiter, encodingType, maxKeys); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
listObjects := objectAPI.ListObjects
	// Initiate a list objects operation based on the input params.
	// On success, returns a ListObjectsInfo object to be
	// marshaled into an S3 compatible XML response.
listObjectsInfo, err := listObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
concurrentDecryptETag(ctx, listObjectsInfo.Objects)
response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo)
// Write success response.
writeSuccessResponseXML(w, encodeResponse(response))
}
| cmd/bucket-listobjects-handlers.go | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.00023132441856432706,
0.00017027284775394946,
0.00016226489969994873,
0.00016734257224015892,
0.000011798249033745378
] |
{
"id": 2,
"code_window": [
"\treturn FileInfo{}, errErasureReadQuorum\n",
"}\n",
"\n",
"// pickValidFileInfo - picks one valid FileInfo content and returns from a\n",
"// slice of FileInfo.\n",
"func pickValidFileInfo(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (xmv FileInfo, e error) {\n",
"\treturn findFileInfoInQuorum(ctx, metaArr, modTime, dataDir, quorum)\n",
"}\n",
"\n",
"// writeUniqueFileInfo - writes unique `xl.meta` content for each disk concurrently.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func pickValidFileInfo(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (FileInfo, error) {\n"
],
"file_path": "cmd/erasure-metadata.go",
"type": "replace",
"edit_start_line_idx": 289
} | // +build openbsd
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package sys
import "syscall"
// GetMaxMemoryLimit - returns the maximum size of the process's virtual memory (address space) in bytes.
func GetMaxMemoryLimit() (curLimit, maxLimit uint64, err error) {
var rlimit syscall.Rlimit
if err = syscall.Getrlimit(syscall.RLIMIT_DATA, &rlimit); err == nil {
curLimit = rlimit.Cur
maxLimit = rlimit.Max
}
return curLimit, maxLimit, err
}
// SetMaxMemoryLimit - sets the maximum size of the process's virtual memory (address space) in bytes.
func SetMaxMemoryLimit(curLimit, maxLimit uint64) error {
rlimit := syscall.Rlimit{Cur: curLimit, Max: maxLimit}
return syscall.Setrlimit(syscall.RLIMIT_DATA, &rlimit)
}
| pkg/sys/rlimit-memory_openbsd.go | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.0005167459021322429,
0.00027092592790722847,
0.0001730871881591156,
0.0001969353179447353,
0.00014315555745270103
] |
{
"id": 2,
"code_window": [
"\treturn FileInfo{}, errErasureReadQuorum\n",
"}\n",
"\n",
"// pickValidFileInfo - picks one valid FileInfo content and returns from a\n",
"// slice of FileInfo.\n",
"func pickValidFileInfo(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (xmv FileInfo, e error) {\n",
"\treturn findFileInfoInQuorum(ctx, metaArr, modTime, dataDir, quorum)\n",
"}\n",
"\n",
"// writeUniqueFileInfo - writes unique `xl.meta` content for each disk concurrently.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func pickValidFileInfo(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (FileInfo, error) {\n"
],
"file_path": "cmd/erasure-metadata.go",
"type": "replace",
"edit_start_line_idx": 289
} | /*
* MinIO Object Storage (c) 2021 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import React from "react"
import { connect } from "react-redux"
import humanize from "humanize"
import * as actionsCommon from "./actions"
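// StorageInfo - fetches storage info on mount and displays the amount of
// storage currently used; renders nothing while usage is zero or unknown.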
export class StorageInfo extends React.Component {
componentWillMount() {
const { fetchStorageInfo } = this.props
fetchStorageInfo()
}
render() {
const { used } = this.props.storageInfo
if (!used || used == 0) {
return <noscript />
}
return (
<div className="feh-used">
<div className="fehu-chart">
<div style={{ width: 0 }} />
</div>
<ul>
<li>
<span>Used: </span>
{humanize.filesize(used)}
</li>
</ul>
</div>
)
}
}
const mapStateToProps = state => {
return {
storageInfo: state.browser.storageInfo
}
}
const mapDispatchToProps = dispatch => {
return {
fetchStorageInfo: () => dispatch(actionsCommon.fetchStorageInfo())
}
}
export default connect(
mapStateToProps,
mapDispatchToProps
)(StorageInfo)
| browser/app/js/browser/StorageInfo.js | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.00017481249233242124,
0.00016844793572090566,
0.0001612767664482817,
0.00016918442270252854,
0.0000038080102058302145
] |
{
"id": 3,
"code_window": [
"\t\treturn 0, 0, err\n",
"\t}\n",
"\n",
"\tdataBlocks := latestFileInfo.Erasure.DataBlocks\n",
"\tparityBlocks := globalStorageClass.GetParityForSC(latestFileInfo.Metadata[xhttp.AmzStorageClass])\n",
"\tif parityBlocks <= 0 {\n",
"\t\tparityBlocks = defaultParityCount\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif !latestFileInfo.IsValid() {\n",
"\t\treturn 0, 0, errErasureReadQuorum\n",
"\t}\n",
"\n"
],
"file_path": "cmd/erasure-metadata.go",
"type": "replace",
"edit_start_line_idx": 331
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"strconv"
"testing"
"time"
humanize "github.com/dustin/go-humanize"
)
const ActualSize = 1000
// Test FileInfo.AddObjectPart()
func TestAddObjectPart(t *testing.T) {
testCases := []struct {
partNum int
expectedIndex int
}{
{1, 0},
{2, 1},
{4, 2},
{5, 3},
{7, 4},
// Insert part.
{3, 2},
// Replace existing part.
{4, 3},
// Missing part.
{6, -1},
}
// Setup.
fi := newFileInfo("test-object", 8, 8)
fi.Erasure.Index = 1
if !fi.IsValid() {
t.Fatalf("unable to get xl meta")
}
// Test them.
for _, testCase := range testCases {
if testCase.expectedIndex > -1 {
partNumString := strconv.Itoa(testCase.partNum)
fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize)
}
if index := objectPartIndex(fi.Parts, testCase.partNum); index != testCase.expectedIndex {
t.Fatalf("%+v: expected = %d, got: %d", testCase, testCase.expectedIndex, index)
}
}
}
// Test objectPartIndex(). Generates sample FileInfo data and asserts
// the output of objectPartIndex() against the expected value.
func TestObjectPartIndex(t *testing.T) {
testCases := []struct {
partNum int
expectedIndex int
}{
{2, 1},
{1, 0},
{5, 3},
{4, 2},
{7, 4},
}
// Setup.
fi := newFileInfo("test-object", 8, 8)
fi.Erasure.Index = 1
if !fi.IsValid() {
t.Fatalf("unable to get xl meta")
}
// Add some parts for testing.
for _, testCase := range testCases {
partNumString := strconv.Itoa(testCase.partNum)
fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize)
}
// Add failure test case.
testCases = append(testCases, struct {
partNum int
expectedIndex int
}{6, -1})
// Test them.
for _, testCase := range testCases {
if index := objectPartIndex(fi.Parts, testCase.partNum); index != testCase.expectedIndex {
t.Fatalf("%+v: expected = %d, got: %d", testCase, testCase.expectedIndex, index)
}
}
}
// Test FileInfo.ObjectToPartOffset().
func TestObjectToPartOffset(t *testing.T) {
// Setup.
fi := newFileInfo("test-object", 8, 8)
fi.Erasure.Index = 1
if !fi.IsValid() {
t.Fatalf("unable to get xl meta")
}
// Add some parts for testing.
// Total size of all parts is 5,242,899 bytes.
for _, partNum := range []int{1, 2, 4, 5, 7} {
partNumString := strconv.Itoa(partNum)
fi.AddObjectPart(partNum, "etag."+partNumString, int64(partNum+humanize.MiByte), ActualSize)
}
testCases := []struct {
offset int64
expectedIndex int
expectedOffset int64
expectedErr error
}{
{0, 0, 0, nil},
{1 * humanize.MiByte, 0, 1 * humanize.MiByte, nil},
{1 + humanize.MiByte, 1, 0, nil},
{2 + humanize.MiByte, 1, 1, nil},
		// It's valid for a zero-sized object.
{-1, 0, -1, nil},
		// Max offset is always (size - 1).
{(1 + 2 + 4 + 5 + 7) + (5 * humanize.MiByte) - 1, 4, 1048582, nil},
// Error if offset is size.
{(1 + 2 + 4 + 5 + 7) + (5 * humanize.MiByte), 0, 0, InvalidRange{}},
}
// Test them.
for _, testCase := range testCases {
index, offset, err := fi.ObjectToPartOffset(context.Background(), testCase.offset)
if err != testCase.expectedErr {
t.Fatalf("%+v: expected = %s, got: %s", testCase, testCase.expectedErr, err)
}
if index != testCase.expectedIndex {
t.Fatalf("%+v: index: expected = %d, got: %d", testCase, testCase.expectedIndex, index)
}
if offset != testCase.expectedOffset {
t.Fatalf("%+v: offset: expected = %d, got: %d", testCase, testCase.expectedOffset, offset)
}
}
}
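// Test findFileInfoInQuorum() with quorums that can and cannot be satisfied.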
func TestFindFileInfoInQuorum(t *testing.T) {
getNFInfo := func(n int, quorum int, t int64, dataDir string) []FileInfo {
fi := newFileInfo("test", 8, 8)
fi.AddObjectPart(1, "etag", 100, 100)
fi.ModTime = time.Unix(t, 0)
fi.DataDir = dataDir
fis := make([]FileInfo, n)
for i := range fis {
fis[i] = fi
fis[i].Erasure.Index = i + 1
quorum--
if quorum == 0 {
break
}
}
return fis
}
tests := []struct {
fis []FileInfo
modTime time.Time
dataDir string
expectedErr error
}{
{
fis: getNFInfo(16, 16, 1603863445, "36a21454-a2ca-11eb-bbaa-93a81c686f21"),
modTime: time.Unix(1603863445, 0),
dataDir: "36a21454-a2ca-11eb-bbaa-93a81c686f21",
expectedErr: nil,
},
{
fis: getNFInfo(16, 7, 1603863445, "36a21454-a2ca-11eb-bbaa-93a81c686f21"),
modTime: time.Unix(1603863445, 0),
dataDir: "36a21454-a2ca-11eb-bbaa-93a81c686f21",
expectedErr: errErasureReadQuorum,
},
}
for _, test := range tests {
test := test
t.Run("", func(t *testing.T) {
_, err := findFileInfoInQuorum(context.Background(), test.fis, test.modTime, test.dataDir, 8)
if err != test.expectedErr {
t.Errorf("Expected %s, got %s", test.expectedErr, err)
}
})
}
}
| cmd/erasure-metadata_test.go | 1 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.0077851759269833565,
0.0005784177919849753,
0.0001662214199313894,
0.00017503148410469294,
0.0016190764727070928
] |
{
"id": 3,
"code_window": [
"\t\treturn 0, 0, err\n",
"\t}\n",
"\n",
"\tdataBlocks := latestFileInfo.Erasure.DataBlocks\n",
"\tparityBlocks := globalStorageClass.GetParityForSC(latestFileInfo.Metadata[xhttp.AmzStorageClass])\n",
"\tif parityBlocks <= 0 {\n",
"\t\tparityBlocks = defaultParityCount\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif !latestFileInfo.IsValid() {\n",
"\t\treturn 0, 0, errErasureReadQuorum\n",
"\t}\n",
"\n"
],
"file_path": "cmd/erasure-metadata.go",
"type": "replace",
"edit_start_line_idx": 331
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"errors"
"os"
"path/filepath"
"testing"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/pkg/hash"
)
var toAPIErrorTests = []struct {
err error
errCode APIErrorCode
}{
{err: hash.BadDigest{}, errCode: ErrBadDigest},
{err: hash.SHA256Mismatch{}, errCode: ErrContentSHA256Mismatch},
{err: IncompleteBody{}, errCode: ErrIncompleteBody},
{err: ObjectExistsAsDirectory{}, errCode: ErrObjectExistsAsDirectory},
{err: BucketNameInvalid{}, errCode: ErrInvalidBucketName},
{err: BucketExists{}, errCode: ErrBucketAlreadyOwnedByYou},
{err: ObjectNotFound{}, errCode: ErrNoSuchKey},
{err: ObjectNameInvalid{}, errCode: ErrInvalidObjectName},
{err: InvalidUploadID{}, errCode: ErrNoSuchUpload},
{err: InvalidPart{}, errCode: ErrInvalidPart},
{err: InsufficientReadQuorum{}, errCode: ErrSlowDown},
{err: InsufficientWriteQuorum{}, errCode: ErrSlowDown},
{err: InvalidMarkerPrefixCombination{}, errCode: ErrNotImplemented},
{err: InvalidUploadIDKeyCombination{}, errCode: ErrNotImplemented},
{err: MalformedUploadID{}, errCode: ErrNoSuchUpload},
{err: PartTooSmall{}, errCode: ErrEntityTooSmall},
{err: BucketNotEmpty{}, errCode: ErrBucketNotEmpty},
{err: BucketNotFound{}, errCode: ErrNoSuchBucket},
{err: StorageFull{}, errCode: ErrStorageFull},
{err: NotImplemented{}, errCode: ErrNotImplemented},
{err: errSignatureMismatch, errCode: ErrSignatureDoesNotMatch},
// SSE-C errors
{err: crypto.ErrInvalidCustomerAlgorithm, errCode: ErrInvalidSSECustomerAlgorithm},
{err: crypto.ErrMissingCustomerKey, errCode: ErrMissingSSECustomerKey},
{err: crypto.ErrInvalidCustomerKey, errCode: ErrAccessDenied},
{err: crypto.ErrMissingCustomerKeyMD5, errCode: ErrMissingSSECustomerKeyMD5},
{err: crypto.ErrCustomerKeyMD5Mismatch, errCode: ErrSSECustomerKeyMD5Mismatch},
{err: errObjectTampered, errCode: ErrObjectTampered},
{err: nil, errCode: ErrNone},
{err: errors.New("Custom error"), errCode: ErrInternalError}, // Case where err type is unknown.
}
func TestAPIErrCode(t *testing.T) {
disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
defer os.RemoveAll(disk)
initFSObjects(disk, t)
ctx := context.Background()
for i, testCase := range toAPIErrorTests {
errCode := toAPIErrorCode(ctx, testCase.err)
if errCode != testCase.errCode {
t.Errorf("Test %d: Expected error code %d, got %d", i+1, testCase.errCode, errCode)
}
}
}
| cmd/api-errors_test.go | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.0004895597812719643,
0.00020855356706306338,
0.00016812772082630545,
0.00017410135478712618,
0.0000994074871414341
] |
{
"id": 3,
"code_window": [
"\t\treturn 0, 0, err\n",
"\t}\n",
"\n",
"\tdataBlocks := latestFileInfo.Erasure.DataBlocks\n",
"\tparityBlocks := globalStorageClass.GetParityForSC(latestFileInfo.Metadata[xhttp.AmzStorageClass])\n",
"\tif parityBlocks <= 0 {\n",
"\t\tparityBlocks = defaultParityCount\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif !latestFileInfo.IsValid() {\n",
"\t\treturn 0, 0, errErasureReadQuorum\n",
"\t}\n",
"\n"
],
"file_path": "cmd/erasure-metadata.go",
"type": "replace",
"edit_start_line_idx": 331
} | /*
* MinIO Object Storage (c) 2021 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import React from "react"
import { shallow, mount } from "enzyme"
import { ChangePasswordModal } from "../ChangePasswordModal"
import jwtDecode from "jwt-decode"
jest.mock("jwt-decode")
jwtDecode.mockImplementation(() => ({ sub: "minio" }))
jest.mock("../../web", () => ({
SetAuth: jest.fn(
({ currentAccessKey, currentSecretKey, newAccessKey, newSecretKey }) => {
if (
currentAccessKey == "minio" &&
currentSecretKey == "minio123" &&
newAccessKey == "test" &&
newSecretKey == "test1234"
) {
return Promise.resolve({})
} else {
return Promise.reject({
message: "Error"
})
}
}
),
GetToken: jest.fn(() => "")
}))
jest.mock("../../utils", () => ({
getRandomAccessKey: () => "raccesskey",
getRandomSecretKey: () => "rsecretkey"
}))
describe("ChangePasswordModal", () => {
const serverInfo = {
version: "test",
platform: "test",
runtime: "test",
info: {},
userInfo: { isIAMUser: true }
}
it("should render without crashing", () => {
shallow(<ChangePasswordModal serverInfo={serverInfo} />)
})
it("should not allow changing password when not IAM user", () => {
const newServerInfo = {
...serverInfo,
userInfo: { isIAMUser: false }
}
const wrapper = shallow(<ChangePasswordModal serverInfo={newServerInfo} />)
expect(
wrapper
.find("ModalBody")
.childAt(0)
.text()
).toBe("Credentials of this user cannot be updated through MinIO Browser.")
})
it("should not allow changing password for STS user", () => {
const newServerInfo = {
...serverInfo,
userInfo: { isTempUser: true }
}
const wrapper = shallow(<ChangePasswordModal serverInfo={newServerInfo} />)
expect(
wrapper
.find("ModalBody")
.childAt(0)
.text()
).toBe("Credentials of this user cannot be updated through MinIO Browser.")
})
it("should not generate accessKey for IAM User", () => {
const wrapper = shallow(<ChangePasswordModal serverInfo={serverInfo} />)
wrapper.find("#generate-keys").simulate("click")
setImmediate(() => {
expect(wrapper.state("newAccessKey")).toBe("minio")
expect(wrapper.state("newSecretKey")).toBe("rsecretkey")
})
})
it("should not show new accessKey field for IAM User", () => {
const wrapper = shallow(<ChangePasswordModal serverInfo={serverInfo} />)
expect(wrapper.find("#newAccesskey").exists()).toBeFalsy()
})
it("should disable Update button for secretKey", () => {
const showAlert = jest.fn()
const wrapper = shallow(
<ChangePasswordModal serverInfo={serverInfo} showAlert={showAlert} />
)
wrapper
.find("#currentSecretKey")
.simulate("change", { target: { value: "minio123" } })
wrapper
.find("#newSecretKey")
.simulate("change", { target: { value: "t1" } })
expect(wrapper.find("#update-keys").prop("disabled")).toBeTruthy()
})
it("should call hideChangePassword when Cancel button is clicked", () => {
const hideChangePassword = jest.fn()
const wrapper = shallow(
<ChangePasswordModal
serverInfo={serverInfo}
hideChangePassword={hideChangePassword}
/>
)
wrapper.find("#cancel-change-password").simulate("click")
expect(hideChangePassword).toHaveBeenCalled()
})
})
| browser/app/js/browser/__tests__/ChangePasswordModal.test.js | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.00017829034186434,
0.00017493710038252175,
0.00017064384883269668,
0.00017543206922709942,
0.0000025327519779239083
] |
{
"id": 3,
"code_window": [
"\t\treturn 0, 0, err\n",
"\t}\n",
"\n",
"\tdataBlocks := latestFileInfo.Erasure.DataBlocks\n",
"\tparityBlocks := globalStorageClass.GetParityForSC(latestFileInfo.Metadata[xhttp.AmzStorageClass])\n",
"\tif parityBlocks <= 0 {\n",
"\t\tparityBlocks = defaultParityCount\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif !latestFileInfo.IsValid() {\n",
"\t\treturn 0, 0, errErasureReadQuorum\n",
"\t}\n",
"\n"
],
"file_path": "cmd/erasure-metadata.go",
"type": "replace",
"edit_start_line_idx": 331
} | // Code generated by "stringer -type=storageMetric -trimprefix=storageMetric xl-storage-disk-id-check.go"; DO NOT EDIT.
package cmd
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[storageMetricMakeVolBulk-0]
_ = x[storageMetricMakeVol-1]
_ = x[storageMetricListVols-2]
_ = x[storageMetricStatVol-3]
_ = x[storageMetricDeleteVol-4]
_ = x[storageMetricWalkDir-5]
_ = x[storageMetricListDir-6]
_ = x[storageMetricReadFile-7]
_ = x[storageMetricAppendFile-8]
_ = x[storageMetricCreateFile-9]
_ = x[storageMetricReadFileStream-10]
_ = x[storageMetricRenameFile-11]
_ = x[storageMetricRenameData-12]
_ = x[storageMetricCheckParts-13]
_ = x[storageMetricCheckFile-14]
_ = x[storageMetricDelete-15]
_ = x[storageMetricDeleteVersions-16]
_ = x[storageMetricVerifyFile-17]
_ = x[storageMetricWriteAll-18]
_ = x[storageMetricDeleteVersion-19]
_ = x[storageMetricWriteMetadata-20]
_ = x[storageMetricUpdateMetadata-21]
_ = x[storageMetricReadVersion-22]
_ = x[storageMetricReadAll-23]
_ = x[storageMetricLast-24]
}
const _storageMetric_name = "MakeVolBulkMakeVolListVolsStatVolDeleteVolWalkDirListDirReadFileAppendFileCreateFileReadFileStreamRenameFileRenameDataCheckPartsCheckFileDeleteDeleteVersionsVerifyFileWriteAllDeleteVersionWriteMetadataUpdateMetadataReadVersionReadAllLast"
var _storageMetric_index = [...]uint8{0, 11, 18, 26, 33, 42, 49, 56, 64, 74, 84, 98, 108, 118, 128, 137, 143, 157, 167, 175, 188, 201, 215, 226, 233, 237}
func (i storageMetric) String() string {
if i >= storageMetric(len(_storageMetric_index)-1) {
return "storageMetric(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _storageMetric_name[_storageMetric_index[i]:_storageMetric_index[i+1]]
}
| cmd/storagemetric_string.go | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.00017285167996305972,
0.0001685953902779147,
0.0001646347518544644,
0.00016672589117661119,
0.000003370489366716356
] |
{
"id": 4,
"code_window": [
"\t\tparityBlocks = defaultParityCount\n",
"\t}\n",
"\n",
"\twriteQuorum := dataBlocks\n",
"\tif dataBlocks == parityBlocks {\n",
"\t\twriteQuorum++\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tdataBlocks := len(partsMetaData) - parityBlocks\n"
],
"file_path": "cmd/erasure-metadata.go",
"type": "add",
"edit_start_line_idx": 337
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"net/http"
"sort"
"time"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/replication"
"github.com/minio/minio/pkg/sync/errgroup"
)
const erasureAlgorithm = "rs-vandermonde"
// byObjectPartNumber is a collection satisfying sort.Interface.
type byObjectPartNumber []ObjectPartInfo
func (t byObjectPartNumber) Len() int { return len(t) }
func (t byObjectPartNumber) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t byObjectPartNumber) Less(i, j int) bool { return t[i].Number < t[j].Number }
// AddChecksumInfo adds a checksum of a part.
func (e *ErasureInfo) AddChecksumInfo(ckSumInfo ChecksumInfo) {
for i, sum := range e.Checksums {
if sum.PartNumber == ckSumInfo.PartNumber {
e.Checksums[i] = ckSumInfo
return
}
}
e.Checksums = append(e.Checksums, ckSumInfo)
}
// GetChecksumInfo - get checksum of a part.
func (e ErasureInfo) GetChecksumInfo(partNumber int) (ckSum ChecksumInfo) {
for _, sum := range e.Checksums {
if sum.PartNumber == partNumber {
// Return the checksum
return sum
}
}
return ChecksumInfo{}
}
// ShardFileSize - returns final erasure size from original size.
func (e ErasureInfo) ShardFileSize(totalLength int64) int64 {
if totalLength == 0 {
return 0
}
if totalLength == -1 {
return -1
}
numShards := totalLength / e.BlockSize
lastBlockSize := totalLength % e.BlockSize
lastShardSize := ceilFrac(lastBlockSize, int64(e.DataBlocks))
return numShards*e.ShardSize() + lastShardSize
}
// ShardSize - returns the actual shard size from the erasure blockSize.
func (e ErasureInfo) ShardSize() int64 {
return ceilFrac(e.BlockSize, int64(e.DataBlocks))
}
// IsValid - tells if erasure info fields are valid.
func (fi FileInfo) IsValid() bool {
if fi.Deleted {
// Delete marker has no data, no need to check
// for erasure coding information
return true
}
dataBlocks := fi.Erasure.DataBlocks
parityBlocks := fi.Erasure.ParityBlocks
correctIndexes := (fi.Erasure.Index > 0 &&
fi.Erasure.Index <= dataBlocks+parityBlocks &&
len(fi.Erasure.Distribution) == (dataBlocks+parityBlocks))
return ((dataBlocks >= parityBlocks) &&
(dataBlocks != 0) && (parityBlocks != 0) &&
correctIndexes)
}
// ToObjectInfo - Converts metadata to object info.
func (fi FileInfo) ToObjectInfo(bucket, object string) ObjectInfo {
object = decodeDirObject(object)
versionID := fi.VersionID
if (globalBucketVersioningSys.Enabled(bucket) || globalBucketVersioningSys.Suspended(bucket)) && versionID == "" {
versionID = nullVersionID
}
objInfo := ObjectInfo{
IsDir: HasSuffix(object, SlashSeparator),
Bucket: bucket,
Name: object,
VersionID: versionID,
IsLatest: fi.IsLatest,
DeleteMarker: fi.Deleted,
Size: fi.Size,
ModTime: fi.ModTime,
Legacy: fi.XLV1,
ContentType: fi.Metadata["content-type"],
ContentEncoding: fi.Metadata["content-encoding"],
NumVersions: fi.NumVersions,
SuccessorModTime: fi.SuccessorModTime,
}
// Update expires
var (
t time.Time
e error
)
if exp, ok := fi.Metadata["expires"]; ok {
if t, e = time.Parse(http.TimeFormat, exp); e == nil {
objInfo.Expires = t.UTC()
}
}
objInfo.backendType = BackendErasure
// Extract etag from metadata.
objInfo.ETag = extractETag(fi.Metadata)
// Add user tags to the object info
tags := fi.Metadata[xhttp.AmzObjectTagging]
if len(tags) != 0 {
objInfo.UserTags = tags
}
// Add replication status to the object info
objInfo.ReplicationStatus = replication.StatusType(fi.Metadata[xhttp.AmzBucketReplicationStatus])
if fi.Deleted {
objInfo.ReplicationStatus = replication.StatusType(fi.DeleteMarkerReplicationStatus)
}
objInfo.TransitionStatus = fi.TransitionStatus
objInfo.transitionedObjName = fi.TransitionedObjName
objInfo.TransitionTier = fi.TransitionTier
// etag/md5Sum has already been extracted. We need to
// remove to avoid it from appearing as part of
// response headers. e.g, X-Minio-* or X-Amz-*.
// Tags have also been extracted, we remove that as well.
objInfo.UserDefined = cleanMetadata(fi.Metadata)
// All the parts per object.
objInfo.Parts = fi.Parts
// Update storage class
if sc, ok := fi.Metadata[xhttp.AmzStorageClass]; ok {
objInfo.StorageClass = sc
} else {
objInfo.StorageClass = globalMinioDefaultStorageClass
}
objInfo.VersionPurgeStatus = fi.VersionPurgeStatus
// set restore status for transitioned object
restoreHdr, ok := fi.Metadata[xhttp.AmzRestore]
if ok {
if restoreStatus, err := parseRestoreObjStatus(restoreHdr); err == nil {
objInfo.RestoreOngoing = restoreStatus.Ongoing()
objInfo.RestoreExpires, _ = restoreStatus.Expiry()
}
}
// Success.
return objInfo
}
// objectPartIndex - returns the index of matching object part number.
func objectPartIndex(parts []ObjectPartInfo, partNumber int) int {
for i, part := range parts {
if partNumber == part.Number {
return i
}
}
return -1
}
// AddObjectPart - add a new object part in order.
func (fi *FileInfo) AddObjectPart(partNumber int, partETag string, partSize int64, actualSize int64) {
partInfo := ObjectPartInfo{
Number: partNumber,
ETag: partETag,
Size: partSize,
ActualSize: actualSize,
}
// Update part info if it already exists.
for i, part := range fi.Parts {
if partNumber == part.Number {
fi.Parts[i] = partInfo
return
}
}
// Proceed to include new part info.
fi.Parts = append(fi.Parts, partInfo)
// Parts in FileInfo should be in sorted order by part number.
sort.Sort(byObjectPartNumber(fi.Parts))
}
// ObjectToPartOffset - translate offset of an object to offset of its individual part.
func (fi FileInfo) ObjectToPartOffset(ctx context.Context, offset int64) (partIndex int, partOffset int64, err error) {
if offset == 0 {
// Special case - if offset is 0, then partIndex and partOffset are always 0.
return 0, 0, nil
}
partOffset = offset
// Seek until object offset maps to a particular part offset.
for i, part := range fi.Parts {
partIndex = i
		// Offset is smaller than the part size, so we have reached the proper part offset.
if partOffset < part.Size {
return partIndex, partOffset, nil
}
		// Continue towards the next part.
partOffset -= part.Size
}
logger.LogIf(ctx, InvalidRange{})
	// Offset is beyond the size of the object, return InvalidRange.
return 0, 0, InvalidRange{}
}
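// findFileInfoInQuorum - returns one FileInfo agreed upon by at least
// `quorum` of the given FileInfo entries, or errErasureReadQuorum otherwise.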
func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (xmv FileInfo, e error) {
metaHashes := make([]string, len(metaArr))
h := sha256.New()
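	// Hash the part numbers, erasure distribution and inline data length of
	// every FileInfo matching the requested modTime and dataDir, so that
	// equivalent metadata across disks produces the same digest.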
for i, meta := range metaArr {
if meta.IsValid() && meta.ModTime.Equal(modTime) && meta.DataDir == dataDir {
for _, part := range meta.Parts {
h.Write([]byte(fmt.Sprintf("part.%d", part.Number)))
}
h.Write([]byte(fmt.Sprintf("%v", meta.Erasure.Distribution)))
// make sure that length of Data is same
h.Write([]byte(fmt.Sprintf("%v", len(meta.Data))))
metaHashes[i] = hex.EncodeToString(h.Sum(nil))
h.Reset()
}
}
metaHashCountMap := make(map[string]int)
for _, hash := range metaHashes {
if hash == "" {
continue
}
metaHashCountMap[hash]++
}
maxHash := ""
maxCount := 0
for hash, count := range metaHashCountMap {
if count > maxCount {
maxCount = count
maxHash = hash
}
}
if maxCount < quorum {
return FileInfo{}, errErasureReadQuorum
}
for i, hash := range metaHashes {
if hash == maxHash {
return metaArr[i], nil
}
}
return FileInfo{}, errErasureReadQuorum
}
// pickValidFileInfo - picks one valid FileInfo content and returns from a
// slice of FileInfo.
func pickValidFileInfo(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (xmv FileInfo, e error) {
return findFileInfoInQuorum(ctx, metaArr, modTime, dataDir, quorum)
}
// writeUniqueFileInfo - writes unique `xl.meta` content for each disk concurrently.
func writeUniqueFileInfo(ctx context.Context, disks []StorageAPI, bucket, prefix string, files []FileInfo, quorum int) ([]StorageAPI, error) {
g := errgroup.WithNErrs(len(disks))
// Start writing `xl.meta` to all disks in parallel.
for index := range disks {
index := index
g.Go(func() error {
if disks[index] == nil {
return errDiskNotFound
}
// Pick one FileInfo for a disk at index.
fi := files[index]
fi.Erasure.Index = index + 1
if fi.IsValid() {
return disks[index].WriteMetadata(ctx, bucket, prefix, fi)
}
return errCorruptedFormat
}, index)
}
// Wait for all the routines.
mErrs := g.Wait()
err := reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, quorum)
return evalDisks(disks, mErrs), err
}
// Returns per object readQuorum and writeQuorum
// readQuorum is the min required disks to read data.
// writeQuorum is the min required disks to write data.
func objectQuorumFromMeta(ctx context.Context, partsMetaData []FileInfo, errs []error, defaultParityCount int) (objectReadQuorum, objectWriteQuorum int, err error) {
// get the latest updated Metadata and a count of all the latest updated FileInfo(s)
latestFileInfo, err := getLatestFileInfo(ctx, partsMetaData, errs)
if err != nil {
return 0, 0, err
}
dataBlocks := latestFileInfo.Erasure.DataBlocks
parityBlocks := globalStorageClass.GetParityForSC(latestFileInfo.Metadata[xhttp.AmzStorageClass])
if parityBlocks <= 0 {
parityBlocks = defaultParityCount
}
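	// Write quorum is the number of data blocks; when data and parity block
	// counts are equal, one additional disk is required to break the tie.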
writeQuorum := dataBlocks
if dataBlocks == parityBlocks {
writeQuorum++
}
	// Since all the valid erasure-coded metadata updated at the same time is
	// equivalent, pass dataBlocks from latestFileInfo to get the quorum.
return dataBlocks, writeQuorum, nil
}
| cmd/erasure-metadata.go | 1 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.998737633228302,
0.088861383497715,
0.00016628102457616478,
0.00017288663366343826,
0.27450084686279297
] |
{
"id": 4,
"code_window": [
"\t\tparityBlocks = defaultParityCount\n",
"\t}\n",
"\n",
"\twriteQuorum := dataBlocks\n",
"\tif dataBlocks == parityBlocks {\n",
"\t\twriteQuorum++\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tdataBlocks := len(partsMetaData) - parityBlocks\n"
],
"file_path": "cmd/erasure-metadata.go",
"type": "add",
"edit_start_line_idx": 337
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"io"
"math/rand"
"strconv"
"testing"
"github.com/dustin/go-humanize"
"github.com/minio/minio/pkg/kms"
)
// Return pointer to testOneByteReadEOF{}
func newTestReaderEOF(data []byte) io.Reader {
return &testOneByteReadEOF{false, data}
}
// OneByteReadEOF - implements io.Reader which returns 1 byte along with io.EOF error.
type testOneByteReadEOF struct {
eof bool
data []byte
}
func (r *testOneByteReadEOF) Read(p []byte) (n int, err error) {
if r.eof {
return 0, io.EOF
}
n = copy(p, r.data)
r.eof = true
return n, io.EOF
}
// Return pointer to testOneByteReadNoEOF{}
func newTestReaderNoEOF(data []byte) io.Reader {
return &testOneByteReadNoEOF{false, data}
}
// testOneByteReadNoEOF - implements io.Reader which returns 1 byte and nil error, but
// returns io.EOF on the next Read().
type testOneByteReadNoEOF struct {
eof bool
data []byte
}
func (r *testOneByteReadNoEOF) Read(p []byte) (n int, err error) {
if r.eof {
return 0, io.EOF
}
n = copy(p, r.data)
r.eof = true
return n, nil
}
// Wrapper for calling testMakeBucket for both Erasure and FS.
func TestMakeBucket(t *testing.T) {
ExecObjectLayerTest(t, testMakeBucket)
}
// Tests validate bucket creation.
func testMakeBucket(obj ObjectLayer, instanceType string, t TestErrHandler) {
err := obj.MakeBucketWithLocation(context.Background(), "bucket-unknown", BucketOptions{})
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
}
// Wrapper for calling testMultipartObjectCreation for both Erasure and FS.
func TestMultipartObjectCreation(t *testing.T) {
ExecExtendedObjectLayerTest(t, testMultipartObjectCreation)
}
// Tests validate creation of part files during Multipart operation.
func testMultipartObjectCreation(obj ObjectLayer, instanceType string, t TestErrHandler) {
var opts ObjectOptions
err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{})
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
uploadID, err := obj.NewMultipartUpload(context.Background(), "bucket", "key", opts)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
// Create a byte array of 5MiB.
data := bytes.Repeat([]byte("0123456789abcdef"), 5*humanize.MiByte/16)
completedParts := CompleteMultipartUpload{}
for i := 1; i <= 10; i++ {
expectedETaghex := getMD5Hash(data)
var calcPartInfo PartInfo
calcPartInfo, err = obj.PutObjectPart(context.Background(), "bucket", "key", uploadID, i, mustGetPutObjReader(t, bytes.NewBuffer(data), int64(len(data)), expectedETaghex, ""), opts)
if err != nil {
t.Errorf("%s: <ERROR> %s", instanceType, err)
}
if calcPartInfo.ETag != expectedETaghex {
t.Errorf("MD5 Mismatch")
}
completedParts.Parts = append(completedParts.Parts, CompletePart{
PartNumber: i,
ETag: calcPartInfo.ETag,
})
}
objInfo, err := obj.CompleteMultipartUpload(context.Background(), "bucket", "key", uploadID, completedParts.Parts, ObjectOptions{})
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
if objInfo.ETag != "7d364cb728ce42a74a96d22949beefb2-10" {
t.Errorf("Md5 mismtch")
}
}
// Wrapper for calling testMultipartObjectAbort for both Erasure and FS.
func TestMultipartObjectAbort(t *testing.T) {
ExecObjectLayerTest(t, testMultipartObjectAbort)
}
// Tests validate abortion of Multipart operation.
func testMultipartObjectAbort(obj ObjectLayer, instanceType string, t TestErrHandler) {
var opts ObjectOptions
err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{})
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
uploadID, err := obj.NewMultipartUpload(context.Background(), "bucket", "key", opts)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
parts := make(map[int]string)
metadata := make(map[string]string)
for i := 1; i <= 10; i++ {
randomPerm := rand.Perm(10)
randomString := ""
for _, num := range randomPerm {
randomString = randomString + strconv.Itoa(num)
}
expectedETaghex := getMD5Hash([]byte(randomString))
metadata["md5"] = expectedETaghex
var calcPartInfo PartInfo
calcPartInfo, err = obj.PutObjectPart(context.Background(), "bucket", "key", uploadID, i, mustGetPutObjReader(t, bytes.NewBufferString(randomString), int64(len(randomString)), expectedETaghex, ""), opts)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
if calcPartInfo.ETag != expectedETaghex {
t.Errorf("Md5 Mismatch")
}
parts[i] = expectedETaghex
}
err = obj.AbortMultipartUpload(context.Background(), "bucket", "key", uploadID, ObjectOptions{})
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
}
// Wrapper for calling testMultipleObjectCreation for both Erasure and FS.
func TestMultipleObjectCreation(t *testing.T) {
ExecExtendedObjectLayerTest(t, testMultipleObjectCreation)
}
// Tests validate object creation.
func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrHandler) {
objects := make(map[string][]byte)
var opts ObjectOptions
err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{})
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
for i := 0; i < 10; i++ {
randomPerm := rand.Perm(100)
randomString := ""
for _, num := range randomPerm {
randomString = randomString + strconv.Itoa(num)
}
expectedETaghex := getMD5Hash([]byte(randomString))
key := "obj" + strconv.Itoa(i)
objects[key] = []byte(randomString)
metadata := make(map[string]string)
metadata["etag"] = expectedETaghex
var objInfo ObjectInfo
objInfo, err = obj.PutObject(context.Background(), "bucket", key, mustGetPutObjReader(t, bytes.NewBufferString(randomString), int64(len(randomString)), metadata["etag"], ""), ObjectOptions{UserDefined: metadata})
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
if objInfo.ETag != expectedETaghex {
t.Errorf("Md5 Mismatch")
}
}
for key, value := range objects {
var byteBuffer bytes.Buffer
err = GetObject(context.Background(), obj, "bucket", key, 0, int64(len(value)), &byteBuffer, "", opts)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
if !bytes.Equal(byteBuffer.Bytes(), value) {
t.Errorf("%s: Mismatch of GetObject data with the expected one.", instanceType)
}
objInfo, err := obj.GetObjectInfo(context.Background(), "bucket", key, opts)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
if objInfo.Size != int64(len(value)) {
t.Errorf("%s: Size mismatch of the GetObject data.", instanceType)
}
}
}
// Wrapper for calling TestPaging for both Erasure and FS.
func TestPaging(t *testing.T) {
ExecObjectLayerTest(t, testPaging)
}
// Tests validate creation of objects and the order of listing using various filters for ListObjects operation.
func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{})
result, err := obj.ListObjects(context.Background(), "bucket", "", "", "", 0)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
if len(result.Objects) != 0 {
t.Errorf("%s: Number of objects in the result different from expected value.", instanceType)
}
if result.IsTruncated {
t.Errorf("%s: Expected IsTruncated to be `false`, but instead found it to be `%v`", instanceType, result.IsTruncated)
}
uploadContent := "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed."
var opts ObjectOptions
// check before paging occurs.
for i := 0; i < 5; i++ {
key := "obj" + strconv.Itoa(i)
_, err = obj.PutObject(context.Background(), "bucket", key, mustGetPutObjReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), opts)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
result, err = obj.ListObjects(context.Background(), "bucket", "", "", "", 5)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
if len(result.Objects) != i+1 {
t.Errorf("%s: Expected length of objects to be %d, instead found to be %d", instanceType, len(result.Objects), i+1)
}
if result.IsTruncated {
t.Errorf("%s: Expected IsTruncated to be `false`, but instead found it to be `%v`", instanceType, result.IsTruncated)
}
}
	// check pages work after paging occurs.
for i := 6; i <= 10; i++ {
key := "obj" + strconv.Itoa(i)
_, err = obj.PutObject(context.Background(), "bucket", key, mustGetPutObjReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), opts)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
result, err = obj.ListObjects(context.Background(), "bucket", "obj", "", "", 5)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
if len(result.Objects) != 5 {
t.Errorf("%s: Expected length of objects to be %d, instead found to be %d", instanceType, 5, len(result.Objects))
}
if !result.IsTruncated {
t.Errorf("%s: Expected IsTruncated to be `true`, but instead found it to be `%v`", instanceType, result.IsTruncated)
}
}
	// check paging with prefix at end returns fewer objects.
{
_, err = obj.PutObject(context.Background(), "bucket", "newPrefix", mustGetPutObjReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), opts)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
_, err = obj.PutObject(context.Background(), "bucket", "newPrefix2", mustGetPutObjReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), opts)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
result, err = obj.ListObjects(context.Background(), "bucket", "new", "", "", 5)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
if len(result.Objects) != 2 {
t.Errorf("%s: Expected length of objects to be %d, instead found to be %d", instanceType, 2, len(result.Objects))
}
}
// check ordering of pages.
{
result, err = obj.ListObjects(context.Background(), "bucket", "", "", "", 1000)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
if result.Objects[0].Name != "newPrefix" {
t.Errorf("%s: Expected the object name to be `%s`, but instead found `%s`", instanceType, "newPrefix", result.Objects[0].Name)
}
if result.Objects[1].Name != "newPrefix2" {
t.Errorf("%s: Expected the object name to be `%s`, but instead found `%s`", instanceType, "newPrefix", result.Objects[1].Name)
}
if result.Objects[2].Name != "obj0" {
t.Errorf("%s: Expected the object name to be `%s`, but instead found `%s`", instanceType, "newPrefix", result.Objects[2].Name)
}
if result.Objects[3].Name != "obj1" {
t.Errorf("%s: Expected the object name to be `%s`, but instead found `%s`", instanceType, "newPrefix", result.Objects[3].Name)
}
if result.Objects[4].Name != "obj10" {
t.Errorf("%s: Expected the object name to be `%s`, but instead found `%s`", instanceType, "newPrefix", result.Objects[4].Name)
}
}
// check delimited results with delimiter and prefix.
{
_, err = obj.PutObject(context.Background(), "bucket", "this/is/delimited", mustGetPutObjReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), opts)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
_, err = obj.PutObject(context.Background(), "bucket", "this/is/also/a/delimited/file", mustGetPutObjReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), opts)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
result, err = obj.ListObjects(context.Background(), "bucket", "this/is/", "", SlashSeparator, 10)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
if len(result.Objects) != 1 {
t.Errorf("%s: Expected the number of objects in the result to be %d, but instead found %d", instanceType, 1, len(result.Objects))
}
if result.Prefixes[0] != "this/is/also/" {
t.Errorf("%s: Expected prefix to be `%s`, but instead found `%s`", instanceType, "this/is/also/", result.Prefixes[0])
}
}
// check delimited results with delimiter without prefix.
{
result, err = obj.ListObjects(context.Background(), "bucket", "", "", SlashSeparator, 1000)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
if result.Objects[0].Name != "newPrefix" {
t.Errorf("%s: Expected the object name to be `%s`, but instead found `%s`", instanceType, "newPrefix", result.Objects[0].Name)
}
if result.Objects[1].Name != "newPrefix2" {
t.Errorf("%s: Expected the object name to be `%s`, but instead found `%s`", instanceType, "newPrefix", result.Objects[1].Name)
}
if result.Objects[2].Name != "obj0" {
t.Errorf("%s: Expected the object name to be `%s`, but instead found `%s`", instanceType, "newPrefix", result.Objects[2].Name)
}
if result.Objects[3].Name != "obj1" {
t.Errorf("%s: Expected the object name to be `%s`, but instead found `%s`", instanceType, "newPrefix", result.Objects[3].Name)
}
if result.Objects[4].Name != "obj10" {
t.Errorf("%s: Expected the object name to be `%s`, but instead found `%s`", instanceType, "newPrefix", result.Objects[4].Name)
}
if result.Prefixes[0] != "this/" {
t.Errorf("%s: Expected the prefix to be `%s`, but instead found `%s`", instanceType, "this/", result.Prefixes[0])
}
}
// check results with Marker.
{
result, err = obj.ListObjects(context.Background(), "bucket", "", "newPrefix", "", 3)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
if result.Objects[0].Name != "newPrefix2" {
t.Errorf("%s: Expected the object name to be `%s`, but instead found `%s`", instanceType, "newPrefix2", result.Objects[0].Name)
}
if result.Objects[1].Name != "obj0" {
t.Errorf("%s: Expected the object name to be `%s`, but instead found `%s`", instanceType, "obj0", result.Objects[1].Name)
}
if result.Objects[2].Name != "obj1" {
t.Errorf("%s: Expected the object name to be `%s`, but instead found `%s`", instanceType, "obj1", result.Objects[2].Name)
}
}
// check ordering of results with prefix.
{
result, err = obj.ListObjects(context.Background(), "bucket", "obj", "", "", 1000)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
if result.Objects[0].Name != "obj0" {
t.Errorf("%s: Expected the object name to be `%s`, but instead found `%s`", instanceType, "obj0", result.Objects[0].Name)
}
if result.Objects[1].Name != "obj1" {
t.Errorf("%s: Expected the object name to be `%s`, but instead found `%s`", instanceType, "obj1", result.Objects[1].Name)
}
if result.Objects[2].Name != "obj10" {
t.Errorf("%s: Expected the object name to be `%s`, but instead found `%s`", instanceType, "obj10", result.Objects[2].Name)
}
if result.Objects[3].Name != "obj2" {
t.Errorf("%s: Expected the object name to be `%s`, but instead found `%s`", instanceType, "obj2", result.Objects[3].Name)
}
if result.Objects[4].Name != "obj3" {
t.Errorf("%s: Expected the object name to be `%s`, but instead found `%s`", instanceType, "obj3", result.Objects[4].Name)
}
}
// check ordering of results with prefix and no paging.
{
result, err = obj.ListObjects(context.Background(), "bucket", "new", "", "", 5)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
if result.Objects[0].Name != "newPrefix" {
t.Errorf("%s: Expected the object name to be `%s`, but instead found `%s`", instanceType, "newPrefix", result.Objects[0].Name)
}
if result.Objects[1].Name != "newPrefix2" {
t.Errorf("%s: Expected the object name to be `%s`, but instead found `%s`", instanceType, "newPrefix2", result.Objects[0].Name)
}
}
}
// Wrapper for calling testObjectOverwriteWorks for both Erasure and FS.
func TestObjectOverwriteWorks(t *testing.T) {
ExecObjectLayerTest(t, testObjectOverwriteWorks)
}
// Tests validate overwriting of an existing object.
func testObjectOverwriteWorks(obj ObjectLayer, instanceType string, t TestErrHandler) {
err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{})
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
var opts ObjectOptions
uploadContent := "The list of parts was not in ascending order. The parts list must be specified in order by part number."
length := int64(len(uploadContent))
_, err = obj.PutObject(context.Background(), "bucket", "object", mustGetPutObjReader(t, bytes.NewBufferString(uploadContent), length, "", ""), opts)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
uploadContent = "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed."
length = int64(len(uploadContent))
_, err = obj.PutObject(context.Background(), "bucket", "object", mustGetPutObjReader(t, bytes.NewBufferString(uploadContent), length, "", ""), opts)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
var bytesBuffer bytes.Buffer
err = GetObject(context.Background(), obj, "bucket", "object", 0, length, &bytesBuffer, "", opts)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
if bytesBuffer.String() != "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed." {
t.Errorf("%s: Invalid upload ID error mismatch.", instanceType)
}
}
// Wrapper for calling testNonExistantBucketOperations for both Erasure and FS.
func TestNonExistantBucketOperations(t *testing.T) {
ExecObjectLayerTest(t, testNonExistantBucketOperations)
}
// Tests validate that bucket operation on non-existent bucket fails.
func testNonExistantBucketOperations(obj ObjectLayer, instanceType string, t TestErrHandler) {
var opts ObjectOptions
_, err := obj.PutObject(context.Background(), "bucket1", "object", mustGetPutObjReader(t, bytes.NewBufferString("one"), int64(len("one")), "", ""), opts)
if err == nil {
t.Fatal("Expected error but found nil")
}
if err.Error() != "Bucket not found: bucket1" {
t.Errorf("%s: Expected the error msg to be `%s`, but instead found `%s`", instanceType, "Bucket not found: bucket1", err.Error())
}
}
// Wrapper for calling testBucketRecreateFails for both Erasure and FS.
func TestBucketRecreateFails(t *testing.T) {
ExecObjectLayerTest(t, testBucketRecreateFails)
}
// Tests validate that recreation of the bucket fails.
func testBucketRecreateFails(obj ObjectLayer, instanceType string, t TestErrHandler) {
err := obj.MakeBucketWithLocation(context.Background(), "string", BucketOptions{})
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
err = obj.MakeBucketWithLocation(context.Background(), "string", BucketOptions{})
if err == nil {
t.Fatalf("%s: Expected error but found nil.", instanceType)
}
if err.Error() != "Bucket exists: string" {
t.Errorf("%s: Expected the error message to be `%s`, but instead found `%s`", instanceType, "Bucket exists: string", err.Error())
}
}
func enableCompression(t *testing.T, encrypt bool) {
// Enable compression and exec...
globalCompressConfigMu.Lock()
globalCompressConfig.Enabled = true
globalCompressConfig.MimeTypes = nil
globalCompressConfig.Extensions = nil
globalCompressConfig.AllowEncrypted = encrypt
globalCompressConfigMu.Unlock()
if encrypt {
globalAutoEncryption = encrypt
var err error
GlobalKMS, err = kms.Parse("my-minio-key:5lF+0pJM0OWwlQrvK2S/I7W9mO4a6rJJI7wzj7v09cw=")
if err != nil {
t.Fatal(err)
}
}
}
func enableEncryption(t *testing.T) {
// Exec with default settings...
globalCompressConfigMu.Lock()
globalCompressConfig.Enabled = false
globalCompressConfigMu.Unlock()
globalAutoEncryption = true
var err error
GlobalKMS, err = kms.Parse("my-minio-key:5lF+0pJM0OWwlQrvK2S/I7W9mO4a6rJJI7wzj7v09cw=")
if err != nil {
t.Fatal(err)
}
}
func resetCompressEncryption() {
// Reset...
globalCompressConfigMu.Lock()
globalCompressConfig.Enabled = false
globalCompressConfig.AllowEncrypted = false
globalCompressConfigMu.Unlock()
globalAutoEncryption = false
GlobalKMS = nil
}
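// execExtended runs fn once with the default settings and, unless -short is set,
// again with compression enabled, with encryption enabled, and with both enabled.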
func execExtended(t *testing.T, fn func(t *testing.T)) {
// Exec with default settings...
resetCompressEncryption()
t.Run("default", func(t *testing.T) {
fn(t)
})
if testing.Short() {
return
}
t.Run("compressed", func(t *testing.T) {
resetCompressEncryption()
enableCompression(t, false)
fn(t)
})
t.Run("encrypted", func(t *testing.T) {
resetCompressEncryption()
enableEncryption(t)
fn(t)
})
t.Run("compressed+encrypted", func(t *testing.T) {
resetCompressEncryption()
enableCompression(t, true)
fn(t)
})
}
// ExecExtendedObjectLayerTest will execute the tests with combinations of encrypted & compressed.
// This can be used to test functionality when reading and writing data.
func ExecExtendedObjectLayerTest(t *testing.T, objTest objTestType) {
execExtended(t, func(t *testing.T) {
ExecObjectLayerTest(t, objTest)
})
}
// Wrapper for calling testPutObject for both Erasure and FS.
func TestPutObject(t *testing.T) {
ExecExtendedObjectLayerTest(t, testPutObject)
}
// Tests validate PutObject without prefix.
func testPutObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
content := []byte("testcontent")
length := int64(len(content))
readerEOF := newTestReaderEOF(content)
readerNoEOF := newTestReaderNoEOF(content)
err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{})
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
var bytesBuffer1 bytes.Buffer
var opts ObjectOptions
_, err = obj.PutObject(context.Background(), "bucket", "object", mustGetPutObjReader(t, readerEOF, length, "", ""), opts)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
err = GetObject(context.Background(), obj, "bucket", "object", 0, length, &bytesBuffer1, "", opts)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
if len(bytesBuffer1.Bytes()) != len(content) {
t.Errorf("%s: Expected content length to be `%d`, but instead found `%d`", instanceType, len(content), len(bytesBuffer1.Bytes()))
}
var bytesBuffer2 bytes.Buffer
_, err = obj.PutObject(context.Background(), "bucket", "object", mustGetPutObjReader(t, readerNoEOF, length, "", ""), opts)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
err = GetObject(context.Background(), obj, "bucket", "object", 0, length, &bytesBuffer2, "", opts)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
if len(bytesBuffer2.Bytes()) != len(content) {
t.Errorf("%s: Expected content length to be `%d`, but instead found `%d`", instanceType, len(content), len(bytesBuffer2.Bytes()))
}
}
// Wrapper for calling testPutObjectInSubdir for both Erasure and FS.
func TestPutObjectInSubdir(t *testing.T) {
ExecExtendedObjectLayerTest(t, testPutObjectInSubdir)
}
// Tests validate PutObject with subdirectory prefix.
func testPutObjectInSubdir(obj ObjectLayer, instanceType string, t TestErrHandler) {
err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{})
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
var opts ObjectOptions
uploadContent := `The specified multipart upload does not exist. The upload ID might be invalid, or the multipart
upload might have been aborted or completed.`
length := int64(len(uploadContent))
_, err = obj.PutObject(context.Background(), "bucket", "dir1/dir2/object", mustGetPutObjReader(t, bytes.NewBufferString(uploadContent), length, "", ""), opts)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
var bytesBuffer bytes.Buffer
err = GetObject(context.Background(), obj, "bucket", "dir1/dir2/object", 0, length, &bytesBuffer, "", opts)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
if len(bytesBuffer.Bytes()) != len(uploadContent) {
t.Errorf("%s: Expected length of downloaded data to be `%d`, but instead found `%d`",
instanceType, len(uploadContent), len(bytesBuffer.Bytes()))
}
}
// Wrapper for calling testListBuckets for both Erasure and FS.
func TestListBuckets(t *testing.T) {
ExecObjectLayerTest(t, testListBuckets)
}
// Tests validate ListBuckets.
func testListBuckets(obj ObjectLayer, instanceType string, t TestErrHandler) {
// test empty list.
buckets, err := obj.ListBuckets(context.Background())
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
if len(buckets) != 0 {
t.Errorf("%s: Expected number of bucket to be `%d`, but instead found `%d`", instanceType, 0, len(buckets))
}
// add one and test exists.
err = obj.MakeBucketWithLocation(context.Background(), "bucket1", BucketOptions{})
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
buckets, err = obj.ListBuckets(context.Background())
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
if len(buckets) != 1 {
t.Errorf("%s: Expected number of bucket to be `%d`, but instead found `%d`", instanceType, 1, len(buckets))
}
// add two and test exists.
err = obj.MakeBucketWithLocation(context.Background(), "bucket2", BucketOptions{})
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
buckets, err = obj.ListBuckets(context.Background())
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
if len(buckets) != 2 {
t.Errorf("%s: Expected number of bucket to be `%d`, but instead found `%d`", instanceType, 2, len(buckets))
}
// add three and test exists + prefix.
err = obj.MakeBucketWithLocation(context.Background(), "bucket22", BucketOptions{})
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
buckets, err = obj.ListBuckets(context.Background())
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
if len(buckets) != 3 {
t.Errorf("%s: Expected number of bucket to be `%d`, but instead found `%d`", instanceType, 3, len(buckets))
}
}
// Wrapper for calling testListBucketsOrder for both Erasure and FS.
func TestListBucketsOrder(t *testing.T) {
ExecObjectLayerTest(t, testListBucketsOrder)
}
// Tests validate the order of result of ListBuckets.
func testListBucketsOrder(obj ObjectLayer, instanceType string, t TestErrHandler) {
// If the implementation uses a map, the order of map keys will vary.
// This ensures buckets are returned in the same order each time.
// add one and test exists.
err := obj.MakeBucketWithLocation(context.Background(), "bucket1", BucketOptions{})
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
err = obj.MakeBucketWithLocation(context.Background(), "bucket2", BucketOptions{})
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
buckets, err := obj.ListBuckets(context.Background())
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
if len(buckets) != 2 {
t.Errorf("%s: Expected number of bucket to be `%d`, but instead found `%d`", instanceType, 2, len(buckets))
}
if buckets[0].Name != "bucket1" {
t.Errorf("%s: Expected bucket name to be `%s`, but instead found `%s`", instanceType, "bucket1", buckets[0].Name)
}
if buckets[1].Name != "bucket2" {
t.Errorf("%s: Expected bucket name to be `%s`, but instead found `%s`", instanceType, "bucket2", buckets[1].Name)
}
}
// Wrapper for calling testListObjectsTestsForNonExistantBucket for both Erasure and FS.
func TestListObjectsTestsForNonExistantBucket(t *testing.T) {
ExecObjectLayerTest(t, testListObjectsTestsForNonExistantBucket)
}
// Tests validate that ListObjects operation on a non-existent bucket fails as expected.
func testListObjectsTestsForNonExistantBucket(obj ObjectLayer, instanceType string, t TestErrHandler) {
result, err := obj.ListObjects(context.Background(), "bucket", "", "", "", 1000)
if err == nil {
t.Fatalf("%s: Expected error but found nil.", instanceType)
}
if len(result.Objects) != 0 {
t.Fatalf("%s: Expected number of objects in the result to be `%d`, but instead found `%d`", instanceType, 0, len(result.Objects))
}
if result.IsTruncated {
t.Fatalf("%s: Expected IsTruncated to be `false`, but instead found it to be `%v`", instanceType, result.IsTruncated)
}
if err.Error() != "Bucket not found: bucket" {
t.Errorf("%s: Expected the error msg to be `%s`, but instead found `%s`", instanceType, "Bucket not found: bucket", err.Error())
}
}
// Wrapper for calling testNonExistantObjectInBucket for both Erasure and FS.
func TestNonExistantObjectInBucket(t *testing.T) {
ExecObjectLayerTest(t, testNonExistantObjectInBucket)
}
// Tests validate that GetObjectInfo fails for a non-existent object in an existing bucket.
func testNonExistantObjectInBucket(obj ObjectLayer, instanceType string, t TestErrHandler) {
err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{})
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
_, err = obj.GetObjectInfo(context.Background(), "bucket", "dir1", ObjectOptions{})
if err == nil {
t.Fatalf("%s: Expected error but found nil", instanceType)
}
if isErrObjectNotFound(err) {
if err.Error() != "Object not found: bucket/dir1" {
t.Errorf("%s: Expected the Error message to be `%s`, but instead found `%s`", instanceType, "Object not found: bucket/dir1", err.Error())
}
} else {
if err.Error() != "fails" {
t.Errorf("%s: Expected the Error message to be `%s`, but instead found it to be `%s`", instanceType, "fails", err.Error())
}
}
}
// Wrapper for calling testGetDirectoryReturnsObjectNotFound for both Erasure and FS.
func TestGetDirectoryReturnsObjectNotFound(t *testing.T) {
ExecObjectLayerTest(t, testGetDirectoryReturnsObjectNotFound)
}
// Tests validate that GetObject on an existing directory fails as expected.
func testGetDirectoryReturnsObjectNotFound(obj ObjectLayer, instanceType string, t TestErrHandler) {
bucketName := "bucket"
err := obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{})
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
content := "One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag."
length := int64(len(content))
var opts ObjectOptions
_, err = obj.PutObject(context.Background(), bucketName, "dir1/dir3/object", mustGetPutObjReader(t, bytes.NewBufferString(content), length, "", ""), opts)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
testCases := []struct {
dir string
err error
}{
{
dir: "dir1/",
err: ObjectNotFound{Bucket: bucketName, Object: "dir1/"},
},
{
dir: "dir1/dir3/",
err: ObjectNotFound{Bucket: bucketName, Object: "dir1/dir3/"},
},
}
for i, testCase := range testCases {
_, gotErr := obj.GetObjectInfo(context.Background(), bucketName, testCase.dir, opts)
if gotErr != nil && gotErr.Error() != testCase.err.Error() {
t.Errorf("Test %d, %s: Expected error %s, got %s", i+1, instanceType, testCase.err, gotErr)
}
}
}
// Wrapper for calling testContentType for both Erasure and FS.
func TestContentType(t *testing.T) {
ExecObjectLayerTest(t, testContentType)
}
// Test content-type.
func testContentType(obj ObjectLayer, instanceType string, t TestErrHandler) {
err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{})
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
var opts ObjectOptions
uploadContent := "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed."
// Test empty content-type; it should be inferred from the file extension.
_, err = obj.PutObject(context.Background(), "bucket", "minio.png", mustGetPutObjReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), opts)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
objInfo, err := obj.GetObjectInfo(context.Background(), "bucket", "minio.png", opts)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
if objInfo.ContentType != "image/png" {
t.Errorf("%s: Expected Content type to be `%s`, but instead found `%s`", instanceType, "image/png", objInfo.ContentType)
}
}
| cmd/object_api_suite_test.go | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.0005251636612229049,
0.00017947834567166865,
0.00016389452503062785,
0.00017099046090152115,
0.000052857361879432574
] |
{
"id": 4,
"code_window": [
"\t\tparityBlocks = defaultParityCount\n",
"\t}\n",
"\n",
"\twriteQuorum := dataBlocks\n",
"\tif dataBlocks == parityBlocks {\n",
"\t\twriteQuorum++\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tdataBlocks := len(partsMetaData) - parityBlocks\n"
],
"file_path": "cmd/erasure-metadata.go",
"type": "add",
"edit_start_line_idx": 337
} | // +build freebsd openbsd netbsd
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import "syscall"
func direntInode(dirent *syscall.Dirent) uint64 {
return uint64(dirent.Fileno)
}
| cmd/os-dirent_fileino.go | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.0001776714198058471,
0.00017460191156715155,
0.00016954987950157374,
0.00017658442084211856,
0.00000359978002961725
] |
{
"id": 4,
"code_window": [
"\t\tparityBlocks = defaultParityCount\n",
"\t}\n",
"\n",
"\twriteQuorum := dataBlocks\n",
"\tif dataBlocks == parityBlocks {\n",
"\t\twriteQuorum++\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tdataBlocks := len(partsMetaData) - parityBlocks\n"
],
"file_path": "cmd/erasure-metadata.go",
"type": "add",
"edit_start_line_idx": 337
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package crypto
import (
"encoding/base64"
"net/http"
"sort"
"testing"
xhttp "github.com/minio/minio/cmd/http"
)
func TestIsRequested(t *testing.T) {
for i, test := range kmsIsRequestedTests {
_, got := IsRequested(test.Header)
got = got && S3KMS.IsRequested(test.Header)
if got != test.Expected {
t.Errorf("SSE-KMS: Test %d: Wanted %v but got %v", i, test.Expected, got)
}
}
for i, test := range s3IsRequestedTests {
_, got := IsRequested(test.Header)
got = got && S3.IsRequested(test.Header)
if got != test.Expected {
t.Errorf("SSE-S3: Test %d: Wanted %v but got %v", i, test.Expected, got)
}
}
for i, test := range ssecIsRequestedTests {
_, got := IsRequested(test.Header)
got = got && SSEC.IsRequested(test.Header)
if got != test.Expected {
t.Errorf("SSE-C: Test %d: Wanted %v but got %v", i, test.Expected, got)
}
}
}
var kmsIsRequestedTests = []struct {
Header http.Header
Expected bool
}{
{Header: http.Header{}, Expected: false}, // 0
{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"aws:kms"}}, Expected: true}, // 1
{Header: http.Header{"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": []string{"0839-9047947-844842874-481"}}, Expected: true}, // 2
{Header: http.Header{"X-Amz-Server-Side-Encryption-Context": []string{"7PpPLAK26ONlVUGOWlusfg=="}}, Expected: true}, // 3
{
Header: http.Header{
"X-Amz-Server-Side-Encryption": []string{""},
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": []string{""},
"X-Amz-Server-Side-Encryption-Context": []string{""},
},
Expected: true,
}, // 4
{
Header: http.Header{
"X-Amz-Server-Side-Encryption": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": []string{""},
},
Expected: true,
}, // 5
{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"AES256"}}, Expected: false}, // 6
}
func TestKMSIsRequested(t *testing.T) {
for i, test := range kmsIsRequestedTests {
if got := S3KMS.IsRequested(test.Header); got != test.Expected {
t.Errorf("Test %d: Wanted %v but got %v", i, test.Expected, got)
}
}
}
var kmsParseHTTPTests = []struct {
Header http.Header
ShouldFail bool
}{
{Header: http.Header{}, ShouldFail: true}, // 0
{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"aws:kms"}}, ShouldFail: false}, // 1
{Header: http.Header{
"X-Amz-Server-Side-Encryption": []string{"aws:kms"},
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": []string{"s3-007-293847485-724784"},
}, ShouldFail: false}, // 2
{Header: http.Header{
"X-Amz-Server-Side-Encryption": []string{"aws:kms"},
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": []string{"s3-007-293847485-724784"},
"X-Amz-Server-Side-Encryption-Context": []string{base64.StdEncoding.EncodeToString([]byte("{}"))},
}, ShouldFail: false}, // 3
{Header: http.Header{
"X-Amz-Server-Side-Encryption": []string{"aws:kms"},
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": []string{"s3-007-293847485-724784"},
"X-Amz-Server-Side-Encryption-Context": []string{base64.StdEncoding.EncodeToString([]byte(`{"bucket": "some-bucket"}`))},
}, ShouldFail: false}, // 4
{Header: http.Header{
"X-Amz-Server-Side-Encryption": []string{"aws:kms"},
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": []string{"s3-007-293847485-724784"},
"X-Amz-Server-Side-Encryption-Context": []string{base64.StdEncoding.EncodeToString([]byte(`{"bucket": "some-bucket"}`))},
}, ShouldFail: false}, // 5
{Header: http.Header{
"X-Amz-Server-Side-Encryption": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": []string{"s3-007-293847485-724784"},
"X-Amz-Server-Side-Encryption-Context": []string{base64.StdEncoding.EncodeToString([]byte(`{"bucket": "some-bucket"}`))},
}, ShouldFail: true}, // 6
{Header: http.Header{
"X-Amz-Server-Side-Encryption": []string{"aws:kms"},
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": []string{"s3-007-293847485-724784"},
"X-Amz-Server-Side-Encryption-Context": []string{base64.StdEncoding.EncodeToString([]byte(`{"bucket": "some-bucket"`))}, // invalid JSON
}, ShouldFail: true}, // 7
}
func TestKMSParseHTTP(t *testing.T) {
for i, test := range kmsParseHTTPTests {
_, _, err := S3KMS.ParseHTTP(test.Header)
if err == nil && test.ShouldFail {
t.Errorf("Test %d: should fail but succeeded", i)
}
if err != nil && !test.ShouldFail {
t.Errorf("Test %d: should pass but failed with: %v", i, err)
}
}
}
var s3IsRequestedTests = []struct {
Header http.Header
Expected bool
}{
{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"AES256"}}, Expected: true}, // 0
{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"AES-256"}}, Expected: true}, // 1
{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{""}}, Expected: true}, // 2
{Header: http.Header{"X-Amz-Server-Side-Encryptio": []string{"AES256"}}, Expected: false}, // 3
{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{xhttp.AmzEncryptionKMS}}, Expected: false}, // 4
}
func TestS3IsRequested(t *testing.T) {
for i, test := range s3IsRequestedTests {
if got := S3.IsRequested(test.Header); got != test.Expected {
t.Errorf("Test %d: Wanted %v but got %v", i, test.Expected, got)
}
}
}
var s3ParseTests = []struct {
Header http.Header
ExpectedErr error
}{
{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"AES256"}}, ExpectedErr: nil}, // 0
{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"AES-256"}}, ExpectedErr: ErrInvalidEncryptionMethod}, // 1
{Header: http.Header{"X-Amz-Server-Side-Encryption": []string{""}}, ExpectedErr: ErrInvalidEncryptionMethod}, // 2
{Header: http.Header{"X-Amz-Server-Side-Encryptio": []string{"AES256"}}, ExpectedErr: ErrInvalidEncryptionMethod}, // 3
}
func TestS3Parse(t *testing.T) {
for i, test := range s3ParseTests {
if err := S3.ParseHTTP(test.Header); err != test.ExpectedErr {
t.Errorf("Test %d: Wanted '%v' but got '%v'", i, test.ExpectedErr, err)
}
}
}
var ssecIsRequestedTests = []struct {
Header http.Header
Expected bool
}{
{Header: http.Header{}, Expected: false}, // 0
{Header: http.Header{"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"}}, Expected: true}, // 1
{Header: http.Header{"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="}}, Expected: true}, // 2
{Header: http.Header{"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="}}, Expected: true}, // 3
{
Header: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{""},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{""},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{""},
},
Expected: true,
}, // 4
{
Header: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
Expected: true,
}, // 5
{
Header: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
Expected: false,
}, // 6
}
func TestSSECIsRequested(t *testing.T) {
for i, test := range ssecIsRequestedTests {
if got := SSEC.IsRequested(test.Header); got != test.Expected {
t.Errorf("Test %d: Wanted %v but got %v", i, test.Expected, got)
}
}
}
var ssecCopyIsRequestedTests = []struct {
Header http.Header
Expected bool
}{
{Header: http.Header{}, Expected: false}, // 0
{Header: http.Header{"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"}}, Expected: true}, // 1
{Header: http.Header{"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="}}, Expected: true}, // 2
{Header: http.Header{"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="}}, Expected: true}, // 3
{
Header: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{""},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{""},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{""},
},
Expected: true,
}, // 4
{
Header: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
Expected: true,
}, // 5
{
Header: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
Expected: false,
}, // 6
}
func TestSSECopyIsRequested(t *testing.T) {
for i, test := range ssecCopyIsRequestedTests {
if got := SSECopy.IsRequested(test.Header); got != test.Expected {
t.Errorf("Test %d: Wanted %v but got %v", i, test.Expected, got)
}
}
}
var ssecParseTests = []struct {
Header http.Header
ExpectedErr error
}{
{
Header: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
ExpectedErr: nil, // 0
},
{
Header: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES-256"}, // invalid algorithm
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
ExpectedErr: ErrInvalidCustomerAlgorithm, // 1
},
{
Header: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{""}, // no client key
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
ExpectedErr: ErrMissingCustomerKey, // 2
},
{
Header: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRr.ZXltdXN0cHJvdmlkZWQ="}, // invalid key
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
ExpectedErr: ErrInvalidCustomerKey, // 3
},
{
Header: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{""}, // no key MD5
},
ExpectedErr: ErrMissingCustomerKeyMD5, // 4
},
{
Header: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"DzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="}, // wrong client key
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
ExpectedErr: ErrCustomerKeyMD5Mismatch, // 5
},
{
Header: http.Header{
"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": []string{".7PpPLAK26ONlVUGOWlusfg=="}, // wrong key MD5
},
ExpectedErr: ErrCustomerKeyMD5Mismatch, // 6
},
}
func TestSSECParse(t *testing.T) {
var zeroKey [32]byte
for i, test := range ssecParseTests {
key, err := SSEC.ParseHTTP(test.Header)
if err != test.ExpectedErr {
t.Errorf("Test %d: want error '%v' but got '%v'", i, test.ExpectedErr, err)
}
if err != nil && key != zeroKey {
t.Errorf("Test %d: parsing failed and client key is not zero key", i)
}
if err == nil && key == zeroKey {
t.Errorf("Test %d: parsed client key is zero key", i)
}
}
}
var ssecCopyParseTests = []struct {
Header http.Header
ExpectedErr error
}{
{
Header: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
ExpectedErr: nil, // 0
},
{
Header: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES-256"}, // invalid algorithm
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
ExpectedErr: ErrInvalidCustomerAlgorithm, // 1
},
{
Header: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{""}, // no client key
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
ExpectedErr: ErrMissingCustomerKey, // 2
},
{
Header: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRr.ZXltdXN0cHJvdmlkZWQ="}, // invalid key
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
ExpectedErr: ErrInvalidCustomerKey, // 3
},
{
Header: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{""}, // no key MD5
},
ExpectedErr: ErrMissingCustomerKeyMD5, // 4
},
{
Header: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"DzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="}, // wrong client key
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
ExpectedErr: ErrCustomerKeyMD5Mismatch, // 5
},
{
Header: http.Header{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": []string{".7PpPLAK26ONlVUGOWlusfg=="}, // wrong key MD5
},
ExpectedErr: ErrCustomerKeyMD5Mismatch, // 6
},
}
func TestSSECopyParse(t *testing.T) {
var zeroKey [32]byte
for i, test := range ssecCopyParseTests {
key, err := SSECopy.ParseHTTP(test.Header)
if err != test.ExpectedErr {
t.Errorf("Test %d: want error '%v' but got '%v'", i, test.ExpectedErr, err)
}
if err != nil && key != zeroKey {
t.Errorf("Test %d: parsing failed and client key is not zero key", i)
}
if err == nil && key == zeroKey {
t.Errorf("Test %d: parsed client key is zero key", i)
}
if _, ok := test.Header[xhttp.AmzServerSideEncryptionCustomerKey]; ok {
t.Errorf("Test %d: client key is not removed from HTTP headers after parsing", i)
}
}
}
var removeSensitiveHeadersTests = []struct {
Header, ExpectedHeader http.Header
}{
{
Header: http.Header{
xhttp.AmzServerSideEncryptionCustomerKey: []string{""},
xhttp.AmzServerSideEncryptionCopyCustomerKey: []string{""},
},
ExpectedHeader: http.Header{},
},
{ // Standard SSE-C request headers
Header: http.Header{
xhttp.AmzServerSideEncryptionCustomerAlgorithm: []string{xhttp.AmzEncryptionAES},
xhttp.AmzServerSideEncryptionCustomerKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
xhttp.AmzServerSideEncryptionCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
ExpectedHeader: http.Header{
xhttp.AmzServerSideEncryptionCustomerAlgorithm: []string{xhttp.AmzEncryptionAES},
xhttp.AmzServerSideEncryptionCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
},
{ // Standard SSE-C + SSE-C-copy request headers
Header: http.Header{
xhttp.AmzServerSideEncryptionCustomerAlgorithm: []string{xhttp.AmzEncryptionAES},
xhttp.AmzServerSideEncryptionCustomerKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
xhttp.AmzServerSideEncryptionCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
xhttp.AmzServerSideEncryptionCopyCustomerKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
xhttp.AmzServerSideEncryptionCopyCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
ExpectedHeader: http.Header{
xhttp.AmzServerSideEncryptionCustomerAlgorithm: []string{xhttp.AmzEncryptionAES},
xhttp.AmzServerSideEncryptionCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
xhttp.AmzServerSideEncryptionCopyCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
},
},
{ // Standard SSE-C + metadata request headers
Header: http.Header{
xhttp.AmzServerSideEncryptionCustomerAlgorithm: []string{xhttp.AmzEncryptionAES},
xhttp.AmzServerSideEncryptionCustomerKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
xhttp.AmzServerSideEncryptionCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
"X-Amz-Meta-Test-1": []string{"Test-1"},
},
ExpectedHeader: http.Header{
xhttp.AmzServerSideEncryptionCustomerAlgorithm: []string{xhttp.AmzEncryptionAES},
xhttp.AmzServerSideEncryptionCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
"X-Amz-Meta-Test-1": []string{"Test-1"},
},
},
{ // https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
Header: http.Header{
"X-Amz-Meta-X-Amz-Unencrypted-Content-Md5": []string{"value"},
"X-Amz-Meta-X-Amz-Unencrypted-Content-Length": []string{"value"},
"X-Amz-Meta-Test-1": []string{"Test-1"},
},
ExpectedHeader: http.Header{
"X-Amz-Meta-Test-1": []string{"Test-1"},
},
},
}
func TestRemoveSensitiveHeaders(t *testing.T) {
isEqual := func(x, y http.Header) bool {
if len(x) != len(y) {
return false
}
for k, v := range x {
u, ok := y[k]
if !ok || len(v) != len(u) {
return false
}
sort.Strings(v)
sort.Strings(u)
for j := range v {
if v[j] != u[j] {
return false
}
}
}
return true
}
areKeysEqual := func(h http.Header, metadata map[string]string) bool {
if len(h) != len(metadata) {
return false
}
for k := range h {
if _, ok := metadata[k]; !ok {
return false
}
}
return true
}
for i, test := range removeSensitiveHeadersTests {
metadata := make(map[string]string, len(test.Header))
for k := range test.Header {
metadata[k] = "" // set metadata key - we don't care about the value
}
RemoveSensitiveHeaders(test.Header)
if !isEqual(test.ExpectedHeader, test.Header) {
t.Errorf("Test %d: filtered headers do not match expected headers - got: %v , want: %v", i, test.Header, test.ExpectedHeader)
}
RemoveSensitiveEntries(metadata)
if !areKeysEqual(test.ExpectedHeader, metadata) {
t.Errorf("Test %d: filtered headers do not match expected headers - got: %v , want: %v", i, test.Header, test.ExpectedHeader)
}
}
}
| cmd/crypto/header_test.go | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.0003132300917059183,
0.00017369186389259994,
0.0001643019641051069,
0.00016914636944420636,
0.00002110187415382825
] |
{
"id": 5,
"code_window": [
"\t\treturn fis\n",
"\t}\n",
"\n",
"\ttests := []struct {\n",
"\t\tfis []FileInfo\n",
"\t\tmodTime time.Time\n",
"\t\tdataDir string\n",
"\t\texpectedErr error\n",
"\t}{\n",
"\t\t{\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\tfis []FileInfo\n",
"\t\tmodTime time.Time\n",
"\t\tdataDir string\n",
"\t\texpectedErr error\n",
"\t\texpectedQuorum int\n"
],
"file_path": "cmd/erasure-metadata_test.go",
"type": "replace",
"edit_start_line_idx": 178
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"strconv"
"testing"
"time"
humanize "github.com/dustin/go-humanize"
)
const ActualSize = 1000
// Test FileInfo.AddObjectPart()
func TestAddObjectPart(t *testing.T) {
testCases := []struct {
partNum int
expectedIndex int
}{
{1, 0},
{2, 1},
{4, 2},
{5, 3},
{7, 4},
// Insert part.
{3, 2},
// Replace existing part.
{4, 3},
// Missing part.
{6, -1},
}
// Setup.
fi := newFileInfo("test-object", 8, 8)
fi.Erasure.Index = 1
if !fi.IsValid() {
t.Fatalf("unable to get xl meta")
}
// Test them.
for _, testCase := range testCases {
if testCase.expectedIndex > -1 {
partNumString := strconv.Itoa(testCase.partNum)
fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize)
}
if index := objectPartIndex(fi.Parts, testCase.partNum); index != testCase.expectedIndex {
t.Fatalf("%+v: expected = %d, got: %d", testCase, testCase.expectedIndex, index)
}
}
}
// Test objectPartIndex(). It generates sample FileInfo data and asserts
// the output of objectPartIndex() against the expected value.
func TestObjectPartIndex(t *testing.T) {
testCases := []struct {
partNum int
expectedIndex int
}{
{2, 1},
{1, 0},
{5, 3},
{4, 2},
{7, 4},
}
// Setup.
fi := newFileInfo("test-object", 8, 8)
fi.Erasure.Index = 1
if !fi.IsValid() {
t.Fatalf("unable to get xl meta")
}
// Add some parts for testing.
for _, testCase := range testCases {
partNumString := strconv.Itoa(testCase.partNum)
fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize)
}
// Add failure test case.
testCases = append(testCases, struct {
partNum int
expectedIndex int
}{6, -1})
// Test them.
for _, testCase := range testCases {
if index := objectPartIndex(fi.Parts, testCase.partNum); index != testCase.expectedIndex {
t.Fatalf("%+v: expected = %d, got: %d", testCase, testCase.expectedIndex, index)
}
}
}
// Test FileInfo.ObjectToPartOffset().
func TestObjectToPartOffset(t *testing.T) {
// Setup.
fi := newFileInfo("test-object", 8, 8)
fi.Erasure.Index = 1
if !fi.IsValid() {
t.Fatalf("unable to get xl meta")
}
// Add some parts for testing.
// Total size of all parts is 5,242,899 bytes.
for _, partNum := range []int{1, 2, 4, 5, 7} {
partNumString := strconv.Itoa(partNum)
fi.AddObjectPart(partNum, "etag."+partNumString, int64(partNum+humanize.MiByte), ActualSize)
}
testCases := []struct {
offset int64
expectedIndex int
expectedOffset int64
expectedErr error
}{
{0, 0, 0, nil},
{1 * humanize.MiByte, 0, 1 * humanize.MiByte, nil},
{1 + humanize.MiByte, 1, 0, nil},
{2 + humanize.MiByte, 1, 1, nil},
// Its valid for zero sized object.
{-1, 0, -1, nil},
// Max fffset is always (size - 1).
{(1 + 2 + 4 + 5 + 7) + (5 * humanize.MiByte) - 1, 4, 1048582, nil},
// Error if offset is size.
{(1 + 2 + 4 + 5 + 7) + (5 * humanize.MiByte), 0, 0, InvalidRange{}},
}
// Test them.
for _, testCase := range testCases {
index, offset, err := fi.ObjectToPartOffset(context.Background(), testCase.offset)
if err != testCase.expectedErr {
t.Fatalf("%+v: expected = %s, got: %s", testCase, testCase.expectedErr, err)
}
if index != testCase.expectedIndex {
t.Fatalf("%+v: index: expected = %d, got: %d", testCase, testCase.expectedIndex, index)
}
if offset != testCase.expectedOffset {
t.Fatalf("%+v: offset: expected = %d, got: %d", testCase, testCase.expectedOffset, offset)
}
}
}
func TestFindFileInfoInQuorum(t *testing.T) {
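// getNFInfo builds a slice of n FileInfo values in which only the first `quorum`
// entries are populated copies of the same FileInfo; the remaining entries are left as zero values.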
getNFInfo := func(n int, quorum int, t int64, dataDir string) []FileInfo {
fi := newFileInfo("test", 8, 8)
fi.AddObjectPart(1, "etag", 100, 100)
fi.ModTime = time.Unix(t, 0)
fi.DataDir = dataDir
fis := make([]FileInfo, n)
for i := range fis {
fis[i] = fi
fis[i].Erasure.Index = i + 1
quorum--
if quorum == 0 {
break
}
}
return fis
}
tests := []struct {
fis []FileInfo
modTime time.Time
dataDir string
expectedErr error
}{
{
fis: getNFInfo(16, 16, 1603863445, "36a21454-a2ca-11eb-bbaa-93a81c686f21"),
modTime: time.Unix(1603863445, 0),
dataDir: "36a21454-a2ca-11eb-bbaa-93a81c686f21",
expectedErr: nil,
},
{
fis: getNFInfo(16, 7, 1603863445, "36a21454-a2ca-11eb-bbaa-93a81c686f21"),
modTime: time.Unix(1603863445, 0),
dataDir: "36a21454-a2ca-11eb-bbaa-93a81c686f21",
expectedErr: errErasureReadQuorum,
},
}
for _, test := range tests {
test := test
t.Run("", func(t *testing.T) {
_, err := findFileInfoInQuorum(context.Background(), test.fis, test.modTime, test.dataDir, 8)
if err != test.expectedErr {
t.Errorf("Expected %s, got %s", test.expectedErr, err)
}
})
}
}
| cmd/erasure-metadata_test.go | 1 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.9983097314834595,
0.14347423613071442,
0.00016481945931445807,
0.00023091719776857644,
0.3471440076828003
] |
{
"id": 5,
"code_window": [
"\t\treturn fis\n",
"\t}\n",
"\n",
"\ttests := []struct {\n",
"\t\tfis []FileInfo\n",
"\t\tmodTime time.Time\n",
"\t\tdataDir string\n",
"\t\texpectedErr error\n",
"\t}{\n",
"\t\t{\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\tfis []FileInfo\n",
"\t\tmodTime time.Time\n",
"\t\tdataDir string\n",
"\t\texpectedErr error\n",
"\t\texpectedQuorum int\n"
],
"file_path": "cmd/erasure-metadata_test.go",
"type": "replace",
"edit_start_line_idx": 178
} | # MinIO Server Debugging Guide [](https://slack.min.io) [](https://hub.docker.com/r/minio/minio/)
### HTTP Trace
HTTP tracing can be enabled by using [`mc admin trace`](https://github.com/minio/mc/blob/master/docs/minio-admin-complete-guide.md#command-trace---display-minio-server-http-trace) command.
Example:
```sh
minio server /data
```
Default trace is succinct only to indicate the API operations being called and the HTTP response status.
```sh
mc admin trace myminio
```
To trace the entire HTTP request
```sh
mc admin trace --verbose myminio
```
To trace the entire HTTP request and also internode communication
```sh
mc admin trace --all --verbose myminio
```
### Subnet Health
Subnet Health diagnostics help ensure that the underlying infrastructure that runs MinIO is configured correctly and is functioning properly. This is a one-shot, long-running test that is recommended to be run as soon as the cluster is first provisioned, and each time a failure scenario is encountered. Note that the test consumes the majority of the available resources on the system, so care must be taken when using it to debug a failure scenario in order to prevent larger outages. Health tests can be triggered using the `mc admin subnet health` command.
Example:
```sh
minio server /data
```
The command takes no flags
```sh
mc admin subnet health myminio
```
The output printed will be of the form
```sh
● Admin Info ... ✔
● CPU ... ✔
● Disk Hardware ... ✔
● Os Info ... ✔
● Mem Info ... ✔
● Process Info ... ✔
● Config ... ✔
● Drive ... ✔
● Net ... ✔
*********************************************************************************
WARNING!!
** THIS FILE MAY CONTAIN SENSITIVE INFORMATION ABOUT YOUR ENVIRONMENT **
** PLEASE INSPECT CONTENTS BEFORE SHARING IT ON ANY PUBLIC FORUM **
*********************************************************************************
mc: Health data saved to dc-11-health_20200321053323.json.gz
```
The gzipped output contains debugging information for your system
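As a rough sketch for inspecting the report locally, assuming standard tools such as `gunzip` and `jq` are available, something like the following works (substitute the filename printed by `mc`):
```sh
gunzip -k dc-11-health_20200321053323.json.gz
jq '.' dc-11-health_20200321053323.json | less
```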
| docs/debugging/README.md | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.0001709720236249268,
0.0001681713038124144,
0.00016570537991356105,
0.00016828885418362916,
0.0000018635274727785145
] |
{
"id": 5,
"code_window": [
"\t\treturn fis\n",
"\t}\n",
"\n",
"\ttests := []struct {\n",
"\t\tfis []FileInfo\n",
"\t\tmodTime time.Time\n",
"\t\tdataDir string\n",
"\t\texpectedErr error\n",
"\t}{\n",
"\t\t{\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\tfis []FileInfo\n",
"\t\tmodTime time.Time\n",
"\t\tdataDir string\n",
"\t\texpectedErr error\n",
"\t\texpectedQuorum int\n"
],
"file_path": "cmd/erasure-metadata_test.go",
"type": "replace",
"edit_start_line_idx": 178
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package lock
import (
"context"
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"
"strings"
"time"
"github.com/beevik/ntp"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/env"
)
// RetMode - object retention mode.
type RetMode string
const (
// RetGovernance - governance mode.
RetGovernance RetMode = "GOVERNANCE"
// RetCompliance - compliance mode.
RetCompliance RetMode = "COMPLIANCE"
)
// Valid - returns if retention mode is valid
func (r RetMode) Valid() bool {
switch r {
case RetGovernance, RetCompliance:
return true
}
return false
}
func parseRetMode(modeStr string) (mode RetMode) {
switch strings.ToUpper(modeStr) {
case "GOVERNANCE":
mode = RetGovernance
case "COMPLIANCE":
mode = RetCompliance
}
return mode
}
// LegalHoldStatus - object legal hold status.
type LegalHoldStatus string
const (
// LegalHoldOn - legal hold is on.
LegalHoldOn LegalHoldStatus = "ON"
// LegalHoldOff - legal hold is off.
LegalHoldOff LegalHoldStatus = "OFF"
)
// Valid - returns true if legal hold status has valid values
func (l LegalHoldStatus) Valid() bool {
switch l {
case LegalHoldOn, LegalHoldOff:
return true
}
return false
}
func parseLegalHoldStatus(holdStr string) (st LegalHoldStatus) {
switch strings.ToUpper(holdStr) {
case "ON":
st = LegalHoldOn
case "OFF":
st = LegalHoldOff
}
return st
}
// Bypass retention governance header.
const (
AmzObjectLockBypassRetGovernance = "X-Amz-Bypass-Governance-Retention"
AmzObjectLockRetainUntilDate = "X-Amz-Object-Lock-Retain-Until-Date"
AmzObjectLockMode = "X-Amz-Object-Lock-Mode"
AmzObjectLockLegalHold = "X-Amz-Object-Lock-Legal-Hold"
)
var (
// ErrMalformedBucketObjectConfig -indicates that the bucket object lock config is malformed
ErrMalformedBucketObjectConfig = errors.New("invalid bucket object lock config")
// ErrInvalidRetentionDate - indicates that retention date needs to be in ISO 8601 format
ErrInvalidRetentionDate = errors.New("date must be provided in ISO 8601 format")
// ErrPastObjectLockRetainDate - indicates that retention date must be in the future
ErrPastObjectLockRetainDate = errors.New("the retain until date must be in the future")
// ErrUnknownWORMModeDirective - indicates that the retention mode is invalid
ErrUnknownWORMModeDirective = errors.New("unknown WORM mode directive")
// ErrObjectLockMissingContentMD5 - indicates missing Content-MD5 header for put object requests with locking
ErrObjectLockMissingContentMD5 = errors.New("content-MD5 HTTP header is required for Put Object requests with Object Lock parameters")
// ErrObjectLockInvalidHeaders indicates that object lock headers are missing
ErrObjectLockInvalidHeaders = errors.New("x-amz-object-lock-retain-until-date and x-amz-object-lock-mode must both be supplied")
// ErrMalformedXML - generic error indicating malformed XML
ErrMalformedXML = errors.New("the XML you provided was not well-formed or did not validate against our published schema")
)
const (
ntpServerEnv = "MINIO_NTP_SERVER"
)
var (
ntpServer = env.Get(ntpServerEnv, "")
)
// UTCNowNTP - is similar in functionality to UTCNow()
// but only used when we do not wish to rely on system
// time.
func UTCNowNTP() (time.Time, error) {
// ntp server is disabled
if ntpServer == "" {
return time.Now().UTC(), nil
}
return ntp.Time(ntpServer)
}
// Retention - bucket level retention configuration.
type Retention struct {
Mode RetMode
Validity time.Duration
LockEnabled bool
}
// Retain - check whether given date is retainable by validity time.
func (r Retention) Retain(created time.Time) bool {
t, err := UTCNowNTP()
if err != nil {
logger.LogIf(context.Background(), err)
// Retain
return true
}
return created.Add(r.Validity).After(t)
}
// DefaultRetention - default retention configuration.
type DefaultRetention struct {
XMLName xml.Name `xml:"DefaultRetention"`
Mode RetMode `xml:"Mode"`
Days *uint64 `xml:"Days"`
Years *uint64 `xml:"Years"`
}
// Maximum retention period, in days and years, supported by AWS S3.
const (
// This is tested by using the `mc lock` command
maximumRetentionDays = 36500
maximumRetentionYears = 100
)
// UnmarshalXML - decodes XML data.
func (dr *DefaultRetention) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
// Make subtype to avoid recursive UnmarshalXML().
type defaultRetention DefaultRetention
retention := defaultRetention{}
if err := d.DecodeElement(&retention, &start); err != nil {
return err
}
switch retention.Mode {
case RetGovernance, RetCompliance:
default:
return fmt.Errorf("unknown retention mode %v", retention.Mode)
}
if retention.Days == nil && retention.Years == nil {
return fmt.Errorf("either Days or Years must be specified")
}
if retention.Days != nil && retention.Years != nil {
return fmt.Errorf("either Days or Years must be specified, not both")
}
if retention.Days != nil {
if *retention.Days == 0 {
return fmt.Errorf("Default retention period must be a positive integer value for 'Days'")
}
if *retention.Days > maximumRetentionDays {
return fmt.Errorf("Default retention period too large for 'Days' %d", *retention.Days)
}
} else if *retention.Years == 0 {
return fmt.Errorf("Default retention period must be a positive integer value for 'Years'")
} else if *retention.Years > maximumRetentionYears {
return fmt.Errorf("Default retention period too large for 'Years' %d", *retention.Years)
}
*dr = DefaultRetention(retention)
return nil
}
// Config - object lock configuration specified in
// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html
type Config struct {
XMLNS string `xml:"xmlns,attr,omitempty"`
XMLName xml.Name `xml:"ObjectLockConfiguration"`
ObjectLockEnabled string `xml:"ObjectLockEnabled"`
Rule *struct {
DefaultRetention DefaultRetention `xml:"DefaultRetention"`
} `xml:"Rule,omitempty"`
}
// UnmarshalXML - decodes XML data.
func (config *Config) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
// Make subtype to avoid recursive UnmarshalXML().
type objectLockConfig Config
parsedConfig := objectLockConfig{}
if err := d.DecodeElement(&parsedConfig, &start); err != nil {
return err
}
if parsedConfig.ObjectLockEnabled != "Enabled" {
return fmt.Errorf("only 'Enabled' value is allowed to ObjectLockEnabled element")
}
*config = Config(parsedConfig)
return nil
}
// ToRetention - convert to Retention type.
func (config *Config) ToRetention() Retention {
r := Retention{
LockEnabled: config.ObjectLockEnabled == "Enabled",
}
if config.Rule != nil {
r.Mode = config.Rule.DefaultRetention.Mode
t, err := UTCNowNTP()
if err != nil {
logger.LogIf(context.Background(), err)
// Do not change any configuration
// upon NTP failure.
return r
}
if config.Rule.DefaultRetention.Days != nil {
r.Validity = t.AddDate(0, 0, int(*config.Rule.DefaultRetention.Days)).Sub(t)
} else {
r.Validity = t.AddDate(int(*config.Rule.DefaultRetention.Years), 0, 0).Sub(t)
}
}
return r
}
// Maximum 4KiB size per object lock config.
const maxObjectLockConfigSize = 1 << 12
// ParseObjectLockConfig parses ObjectLockConfig from xml
func ParseObjectLockConfig(reader io.Reader) (*Config, error) {
config := Config{}
if err := xml.NewDecoder(io.LimitReader(reader, maxObjectLockConfigSize)).Decode(&config); err != nil {
return nil, err
}
return &config, nil
}
// NewObjectLockConfig returns an initialized lock.Config struct
func NewObjectLockConfig() *Config {
return &Config{
ObjectLockEnabled: "Enabled",
}
}
// RetentionDate is an embedded type containing time.Time; it is used to
// unmarshal the Date field in Retention
type RetentionDate struct {
time.Time
}
// UnmarshalXML parses date from Retention and validates date format
func (rDate *RetentionDate) UnmarshalXML(d *xml.Decoder, startElement xml.StartElement) error {
var dateStr string
err := d.DecodeElement(&dateStr, &startElement)
if err != nil {
return err
}
// While AWS documentation mentions that the date specified
// must be present in ISO 8601 format, in reality they allow
// users to provide RFC 3339 compliant dates.
retDate, err := time.Parse(time.RFC3339, dateStr)
if err != nil {
return ErrInvalidRetentionDate
}
*rDate = RetentionDate{retDate}
return nil
}
// MarshalXML encodes the retention date when it is non-zero and emits
// nothing otherwise
func (rDate *RetentionDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
if *rDate == (RetentionDate{time.Time{}}) {
return nil
}
return e.EncodeElement(rDate.Format(time.RFC3339), startElement)
}
// ObjectRetention specified in
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectRetention.html
type ObjectRetention struct {
XMLNS string `xml:"xmlns,attr,omitempty"`
XMLName xml.Name `xml:"Retention"`
Mode RetMode `xml:"Mode,omitempty"`
RetainUntilDate RetentionDate `xml:"RetainUntilDate,omitempty"`
}
// Maximum 4KiB size per object retention config.
const maxObjectRetentionSize = 1 << 12
// ParseObjectRetention constructs ObjectRetention struct from xml input
func ParseObjectRetention(reader io.Reader) (*ObjectRetention, error) {
ret := ObjectRetention{}
if err := xml.NewDecoder(io.LimitReader(reader, maxObjectRetentionSize)).Decode(&ret); err != nil {
return nil, err
}
if ret.Mode != "" && !ret.Mode.Valid() {
return &ret, ErrUnknownWORMModeDirective
}
if ret.Mode.Valid() && ret.RetainUntilDate.IsZero() {
return &ret, ErrMalformedXML
}
if !ret.Mode.Valid() && !ret.RetainUntilDate.IsZero() {
return &ret, ErrMalformedXML
}
t, err := UTCNowNTP()
if err != nil {
logger.LogIf(context.Background(), err)
return &ret, ErrPastObjectLockRetainDate
}
if !ret.RetainUntilDate.IsZero() && ret.RetainUntilDate.Before(t) {
return &ret, ErrPastObjectLockRetainDate
}
return &ret, nil
}
// IsObjectLockRetentionRequested returns true if object lock retention headers are set.
func IsObjectLockRetentionRequested(h http.Header) bool {
if _, ok := h[AmzObjectLockMode]; ok {
return true
}
if _, ok := h[AmzObjectLockRetainUntilDate]; ok {
return true
}
return false
}
// IsObjectLockLegalHoldRequested returns true if object lock legal hold header is set.
func IsObjectLockLegalHoldRequested(h http.Header) bool {
_, ok := h[AmzObjectLockLegalHold]
return ok
}
// IsObjectLockGovernanceBypassSet returns true if object lock governance bypass header is set.
func IsObjectLockGovernanceBypassSet(h http.Header) bool {
return strings.ToLower(h.Get(AmzObjectLockBypassRetGovernance)) == "true"
}
// IsObjectLockRequested returns true if legal hold or object lock retention headers are requested.
func IsObjectLockRequested(h http.Header) bool {
return IsObjectLockLegalHoldRequested(h) || IsObjectLockRetentionRequested(h)
}
// ParseObjectLockRetentionHeaders parses http headers to extract retention mode and retention date
func ParseObjectLockRetentionHeaders(h http.Header) (rmode RetMode, r RetentionDate, err error) {
retMode := h.Get(AmzObjectLockMode)
dateStr := h.Get(AmzObjectLockRetainUntilDate)
if len(retMode) == 0 || len(dateStr) == 0 {
return rmode, r, ErrObjectLockInvalidHeaders
}
rmode = parseRetMode(retMode)
if !rmode.Valid() {
return rmode, r, ErrUnknownWORMModeDirective
}
var retDate time.Time
// While AWS documentation mentions that the date specified
// must be present in ISO 8601 format, in reality they allow
// users to provide RFC 3339 compliant dates.
retDate, err = time.Parse(time.RFC3339, dateStr)
if err != nil {
return rmode, r, ErrInvalidRetentionDate
}
t, err := UTCNowNTP()
if err != nil {
logger.LogIf(context.Background(), err)
return rmode, r, ErrPastObjectLockRetainDate
}
if retDate.Before(t) {
return rmode, r, ErrPastObjectLockRetainDate
}
return rmode, RetentionDate{retDate}, nil
}
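// exampleRetentionHeaders is an illustrative sketch added for clarity and not
// part of the original MinIO source: both headers must be present, the mode
// must parse to a known retention mode ("GOVERNANCE" here is an assumption
// about the spelling accepted by parseRetMode), and the date must be
// RFC 3339 and lie in the future relative to the NTP clock.
func exampleRetentionHeaders() {
	h := http.Header{}
	h.Set(AmzObjectLockMode, "GOVERNANCE")
	h.Set(AmzObjectLockRetainUntilDate, time.Now().UTC().Add(24*time.Hour).Format(time.RFC3339))
	mode, until, err := ParseObjectLockRetentionHeaders(h)
	fmt.Println(mode, until.Time, err)
}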
// GetObjectRetentionMeta constructs ObjectRetention from metadata
func GetObjectRetentionMeta(meta map[string]string) ObjectRetention {
var mode RetMode
var retainTill RetentionDate
var modeStr, tillStr string
ok := false
modeStr, ok = meta[strings.ToLower(AmzObjectLockMode)]
if !ok {
modeStr, ok = meta[AmzObjectLockMode]
}
if ok {
mode = parseRetMode(modeStr)
} else {
return ObjectRetention{}
}
tillStr, ok = meta[strings.ToLower(AmzObjectLockRetainUntilDate)]
if !ok {
tillStr, ok = meta[AmzObjectLockRetainUntilDate]
}
if ok {
if t, e := time.Parse(time.RFC3339, tillStr); e == nil {
retainTill = RetentionDate{t.UTC()}
}
}
return ObjectRetention{XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/", Mode: mode, RetainUntilDate: retainTill}
}
// GetObjectLegalHoldMeta constructs ObjectLegalHold from metadata
func GetObjectLegalHoldMeta(meta map[string]string) ObjectLegalHold {
holdStr, ok := meta[strings.ToLower(AmzObjectLockLegalHold)]
if !ok {
holdStr, ok = meta[AmzObjectLockLegalHold]
}
if ok {
return ObjectLegalHold{XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/", Status: parseLegalHoldStatus(holdStr)}
}
return ObjectLegalHold{}
}
// ParseObjectLockLegalHoldHeaders parses request headers to construct ObjectLegalHold
func ParseObjectLockLegalHoldHeaders(h http.Header) (lhold ObjectLegalHold, err error) {
holdStatus, ok := h[AmzObjectLockLegalHold]
if ok {
lh := parseLegalHoldStatus(holdStatus[0])
if !lh.Valid() {
return lhold, ErrUnknownWORMModeDirective
}
lhold = ObjectLegalHold{XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/", Status: lh}
}
return lhold, nil
}
// ObjectLegalHold specified in
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectLegalHold.html
type ObjectLegalHold struct {
XMLNS string `xml:"xmlns,attr,omitempty"`
XMLName xml.Name `xml:"LegalHold"`
Status LegalHoldStatus `xml:"Status,omitempty"`
}
// UnmarshalXML - decodes XML data.
func (l *ObjectLegalHold) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {
switch start.Name.Local {
case "LegalHold", "ObjectLockLegalHold":
default:
return xml.UnmarshalError(fmt.Sprintf("expected element type <LegalHold>/<ObjectLockLegalHold> but have <%s>",
start.Name.Local))
}
for {
// Read tokens from the XML document in a stream.
t, err := d.Token()
if err != nil {
if err == io.EOF {
break
}
return err
}
switch se := t.(type) {
case xml.StartElement:
switch se.Name.Local {
case "Status":
var st LegalHoldStatus
if err = d.DecodeElement(&st, &se); err != nil {
return err
}
l.Status = st
default:
return xml.UnmarshalError(fmt.Sprintf("expected element type <Status> but have <%s>", se.Name.Local))
}
}
}
return nil
}
// IsEmpty returns true if struct is empty
func (l *ObjectLegalHold) IsEmpty() bool {
return !l.Status.Valid()
}
// ParseObjectLegalHold decodes the XML into ObjectLegalHold
func ParseObjectLegalHold(reader io.Reader) (hold *ObjectLegalHold, err error) {
hold = &ObjectLegalHold{}
if err = xml.NewDecoder(reader).Decode(hold); err != nil {
return
}
if !hold.Status.Valid() {
return nil, ErrMalformedXML
}
return
}
// FilterObjectLockMetadata filters object lock metadata, e.g. when the s3:GetObjectRetention permission is denied or when the isCopy flag is set.
func FilterObjectLockMetadata(metadata map[string]string, filterRetention, filterLegalHold bool) map[string]string {
// Copy on write
dst := metadata
var copied bool
delKey := func(key string) {
if _, ok := metadata[key]; !ok {
return
}
if !copied {
dst = make(map[string]string, len(metadata))
for k, v := range metadata {
dst[k] = v
}
copied = true
}
delete(dst, key)
}
legalHold := GetObjectLegalHoldMeta(metadata)
if !legalHold.Status.Valid() || filterLegalHold {
delKey(AmzObjectLockLegalHold)
}
ret := GetObjectRetentionMeta(metadata)
if !ret.Mode.Valid() || filterRetention {
delKey(AmzObjectLockMode)
delKey(AmzObjectLockRetainUntilDate)
return dst
}
return dst
}
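// exampleFilterObjectLockMetadata is an illustrative sketch added for clarity
// and not part of the original MinIO source: with filterRetention set, the
// retention mode and retain-until-date keys are dropped from a copied map
// (copy-on-write), so the caller's map is left untouched.
func exampleFilterObjectLockMetadata(meta map[string]string) map[string]string {
	filtered := FilterObjectLockMetadata(meta, true, false)
	// `meta` still holds whatever keys it had; `filtered` no longer carries
	// AmzObjectLockMode or AmzObjectLockRetainUntilDate.
	return filtered
}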
| pkg/bucket/object/lock/lock.go | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.002599740633741021,
0.0003199958009645343,
0.0001617370726307854,
0.00017281090549658984,
0.000424233207013458
] |
{
"id": 5,
"code_window": [
"\t\treturn fis\n",
"\t}\n",
"\n",
"\ttests := []struct {\n",
"\t\tfis []FileInfo\n",
"\t\tmodTime time.Time\n",
"\t\tdataDir string\n",
"\t\texpectedErr error\n",
"\t}{\n",
"\t\t{\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\tfis []FileInfo\n",
"\t\tmodTime time.Time\n",
"\t\tdataDir string\n",
"\t\texpectedErr error\n",
"\t\texpectedQuorum int\n"
],
"file_path": "cmd/erasure-metadata_test.go",
"type": "replace",
"edit_start_line_idx": 178
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"fmt"
"io"
"github.com/klauspost/reedsolomon"
"github.com/minio/minio/cmd/logger"
)
// getDataBlockLen - get length of data blocks from encoded blocks.
func getDataBlockLen(enBlocks [][]byte, dataBlocks int) int {
size := 0
// Figure out the data block length.
for _, block := range enBlocks[:dataBlocks] {
size += len(block)
}
return size
}
// Writes all the data blocks from the encoded blocks up to the requested
// length. Provides a way to skip bytes until the offset.
func writeDataBlocks(ctx context.Context, dst io.Writer, enBlocks [][]byte, dataBlocks int, offset int64, length int64) (int64, error) {
// Offset and out size cannot be negative.
if offset < 0 || length < 0 {
logger.LogIf(ctx, errUnexpected)
return 0, errUnexpected
}
// Do we have enough blocks?
if len(enBlocks) < dataBlocks {
logger.LogIf(ctx, fmt.Errorf("diskBlocks(%d)/dataBlocks(%d) - %w", len(enBlocks), dataBlocks, reedsolomon.ErrTooFewShards))
return 0, reedsolomon.ErrTooFewShards
}
// Do we have enough data?
if int64(getDataBlockLen(enBlocks, dataBlocks)) < length {
logger.LogIf(ctx, fmt.Errorf("getDataBlockLen(enBlocks, dataBlocks)(%d)/length(%d) - %w", getDataBlockLen(enBlocks, dataBlocks), length, reedsolomon.ErrShortData))
return 0, reedsolomon.ErrShortData
}
// Counter to decrement total left to write.
write := length
// Counter to increment total written.
var totalWritten int64
// Write all data blocks to dst.
for _, block := range enBlocks[:dataBlocks] {
// Skip blocks until we have reached our offset.
if offset >= int64(len(block)) {
// Decrement offset.
offset -= int64(len(block))
continue
} else {
// Skip until offset.
block = block[offset:]
// Reset the offset for next iteration to read everything
// from subsequent blocks.
offset = 0
}
		// The remaining length fits within this block; write the final partial block.
if write < int64(len(block)) {
n, err := io.Copy(dst, bytes.NewReader(block[:write]))
if err != nil {
				// The writer will be closed in case of range queries, which will emit ErrClosedPipe.
				// The reader pipe might be closed at ListObjects; ignore io.EOF in that case.
if err != io.ErrClosedPipe && err != io.EOF {
logger.LogIf(ctx, err)
}
return 0, err
}
totalWritten += n
break
}
// Copy the block.
n, err := io.Copy(dst, bytes.NewReader(block))
if err != nil {
			// The writer will be closed in case of range queries, which will emit ErrClosedPipe.
			// The reader pipe might be closed at ListObjects; ignore io.EOF in that case.
if err != io.ErrClosedPipe && err != io.EOF {
logger.LogIf(ctx, err)
}
return 0, err
}
// Decrement output size.
write -= n
// Increment written.
totalWritten += n
}
// Success.
return totalWritten, nil
}
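// exampleWriteDataBlocks is an illustrative sketch added for clarity and not
// part of the original MinIO source: with two 4-byte data blocks ("abcd" and
// "efgh"), an offset of 3 and a length of 4 skips the first three bytes and
// copies "defg" into the destination; trailing parity blocks are ignored.
func exampleWriteDataBlocks(ctx context.Context) {
	blocks := [][]byte{[]byte("abcd"), []byte("efgh"), []byte("pppp")}
	var dst bytes.Buffer
	n, err := writeDataBlocks(ctx, &dst, blocks, 2, 3, 4)
	fmt.Println(n, err, dst.String()) // 4 <nil> defg
}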
| cmd/erasure-utils.go | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.000245329225435853,
0.00017605070024728775,
0.0001657319226069376,
0.00016983679961413145,
0.000021198813556111418
] |
{
"id": 6,
"code_window": [
"\t}{\n",
"\t\t{\n",
"\t\t\tfis: getNFInfo(16, 16, 1603863445, \"36a21454-a2ca-11eb-bbaa-93a81c686f21\"),\n",
"\t\t\tmodTime: time.Unix(1603863445, 0),\n",
"\t\t\tdataDir: \"36a21454-a2ca-11eb-bbaa-93a81c686f21\",\n",
"\t\t\texpectedErr: nil,\n",
"\t\t},\n",
"\t\t{\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\t\tfis: getNFInfo(16, 16, 1603863445, \"36a21454-a2ca-11eb-bbaa-93a81c686f21\"),\n",
"\t\t\tmodTime: time.Unix(1603863445, 0),\n",
"\t\t\tdataDir: \"36a21454-a2ca-11eb-bbaa-93a81c686f21\",\n",
"\t\t\texpectedErr: nil,\n",
"\t\t\texpectedQuorum: 8,\n",
"\t\t},\n",
"\t\t{\n",
"\t\t\tfis: getNFInfo(16, 7, 1603863445, \"36a21454-a2ca-11eb-bbaa-93a81c686f21\"),\n",
"\t\t\tmodTime: time.Unix(1603863445, 0),\n",
"\t\t\tdataDir: \"36a21454-a2ca-11eb-bbaa-93a81c686f21\",\n",
"\t\t\texpectedErr: errErasureReadQuorum,\n",
"\t\t\texpectedQuorum: 8,\n"
],
"file_path": "cmd/erasure-metadata_test.go",
"type": "replace",
"edit_start_line_idx": 184
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"net/http"
"sort"
"time"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/replication"
"github.com/minio/minio/pkg/sync/errgroup"
)
const erasureAlgorithm = "rs-vandermonde"
// byObjectPartNumber is a collection satisfying sort.Interface.
type byObjectPartNumber []ObjectPartInfo
func (t byObjectPartNumber) Len() int { return len(t) }
func (t byObjectPartNumber) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t byObjectPartNumber) Less(i, j int) bool { return t[i].Number < t[j].Number }
// AddChecksumInfo adds a checksum of a part.
func (e *ErasureInfo) AddChecksumInfo(ckSumInfo ChecksumInfo) {
for i, sum := range e.Checksums {
if sum.PartNumber == ckSumInfo.PartNumber {
e.Checksums[i] = ckSumInfo
return
}
}
e.Checksums = append(e.Checksums, ckSumInfo)
}
// GetChecksumInfo - get checksum of a part.
func (e ErasureInfo) GetChecksumInfo(partNumber int) (ckSum ChecksumInfo) {
for _, sum := range e.Checksums {
if sum.PartNumber == partNumber {
// Return the checksum
return sum
}
}
return ChecksumInfo{}
}
// ShardFileSize - returns final erasure size from original size.
func (e ErasureInfo) ShardFileSize(totalLength int64) int64 {
if totalLength == 0 {
return 0
}
if totalLength == -1 {
return -1
}
numShards := totalLength / e.BlockSize
lastBlockSize := totalLength % e.BlockSize
lastShardSize := ceilFrac(lastBlockSize, int64(e.DataBlocks))
return numShards*e.ShardSize() + lastShardSize
}
// ShardSize - returns the actual shard size from the erasure blockSize.
func (e ErasureInfo) ShardSize() int64 {
return ceilFrac(e.BlockSize, int64(e.DataBlocks))
}
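// exampleShardSizes is an illustrative sketch added for clarity and not part
// of the original MinIO source: with 4 data blocks and a 1 MiB block size,
// each full block contributes ceil(1MiB/4) = 256 KiB per shard, and a
// trailing 100-byte block adds ceil(100/4) = 25 bytes to the shard file.
func exampleShardSizes() {
	e := ErasureInfo{DataBlocks: 4, ParityBlocks: 2, BlockSize: 1 << 20}
	fmt.Println(e.ShardSize())                    // 262144
	fmt.Println(e.ShardFileSize(3*(1<<20) + 100)) // 3*262144 + 25 = 786457
}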
// IsValid - tells if erasure info fields are valid.
func (fi FileInfo) IsValid() bool {
if fi.Deleted {
// Delete marker has no data, no need to check
// for erasure coding information
return true
}
dataBlocks := fi.Erasure.DataBlocks
parityBlocks := fi.Erasure.ParityBlocks
correctIndexes := (fi.Erasure.Index > 0 &&
fi.Erasure.Index <= dataBlocks+parityBlocks &&
len(fi.Erasure.Distribution) == (dataBlocks+parityBlocks))
return ((dataBlocks >= parityBlocks) &&
(dataBlocks != 0) && (parityBlocks != 0) &&
correctIndexes)
}
// ToObjectInfo - Converts metadata to object info.
func (fi FileInfo) ToObjectInfo(bucket, object string) ObjectInfo {
object = decodeDirObject(object)
versionID := fi.VersionID
if (globalBucketVersioningSys.Enabled(bucket) || globalBucketVersioningSys.Suspended(bucket)) && versionID == "" {
versionID = nullVersionID
}
objInfo := ObjectInfo{
IsDir: HasSuffix(object, SlashSeparator),
Bucket: bucket,
Name: object,
VersionID: versionID,
IsLatest: fi.IsLatest,
DeleteMarker: fi.Deleted,
Size: fi.Size,
ModTime: fi.ModTime,
Legacy: fi.XLV1,
ContentType: fi.Metadata["content-type"],
ContentEncoding: fi.Metadata["content-encoding"],
NumVersions: fi.NumVersions,
SuccessorModTime: fi.SuccessorModTime,
}
// Update expires
var (
t time.Time
e error
)
if exp, ok := fi.Metadata["expires"]; ok {
if t, e = time.Parse(http.TimeFormat, exp); e == nil {
objInfo.Expires = t.UTC()
}
}
objInfo.backendType = BackendErasure
// Extract etag from metadata.
objInfo.ETag = extractETag(fi.Metadata)
// Add user tags to the object info
tags := fi.Metadata[xhttp.AmzObjectTagging]
if len(tags) != 0 {
objInfo.UserTags = tags
}
// Add replication status to the object info
objInfo.ReplicationStatus = replication.StatusType(fi.Metadata[xhttp.AmzBucketReplicationStatus])
if fi.Deleted {
objInfo.ReplicationStatus = replication.StatusType(fi.DeleteMarkerReplicationStatus)
}
objInfo.TransitionStatus = fi.TransitionStatus
objInfo.transitionedObjName = fi.TransitionedObjName
objInfo.TransitionTier = fi.TransitionTier
// etag/md5Sum has already been extracted. We need to
// remove to avoid it from appearing as part of
// response headers. e.g, X-Minio-* or X-Amz-*.
// Tags have also been extracted, we remove that as well.
objInfo.UserDefined = cleanMetadata(fi.Metadata)
// All the parts per object.
objInfo.Parts = fi.Parts
// Update storage class
if sc, ok := fi.Metadata[xhttp.AmzStorageClass]; ok {
objInfo.StorageClass = sc
} else {
objInfo.StorageClass = globalMinioDefaultStorageClass
}
objInfo.VersionPurgeStatus = fi.VersionPurgeStatus
// set restore status for transitioned object
restoreHdr, ok := fi.Metadata[xhttp.AmzRestore]
if ok {
if restoreStatus, err := parseRestoreObjStatus(restoreHdr); err == nil {
objInfo.RestoreOngoing = restoreStatus.Ongoing()
objInfo.RestoreExpires, _ = restoreStatus.Expiry()
}
}
// Success.
return objInfo
}
// objectPartIndex - returns the index of matching object part number.
func objectPartIndex(parts []ObjectPartInfo, partNumber int) int {
for i, part := range parts {
if partNumber == part.Number {
return i
}
}
return -1
}
// AddObjectPart - add a new object part in order.
func (fi *FileInfo) AddObjectPart(partNumber int, partETag string, partSize int64, actualSize int64) {
partInfo := ObjectPartInfo{
Number: partNumber,
ETag: partETag,
Size: partSize,
ActualSize: actualSize,
}
// Update part info if it already exists.
for i, part := range fi.Parts {
if partNumber == part.Number {
fi.Parts[i] = partInfo
return
}
}
// Proceed to include new part info.
fi.Parts = append(fi.Parts, partInfo)
// Parts in FileInfo should be in sorted order by part number.
sort.Sort(byObjectPartNumber(fi.Parts))
}
// ObjectToPartOffset - translate offset of an object to offset of its individual part.
func (fi FileInfo) ObjectToPartOffset(ctx context.Context, offset int64) (partIndex int, partOffset int64, err error) {
if offset == 0 {
// Special case - if offset is 0, then partIndex and partOffset are always 0.
return 0, 0, nil
}
partOffset = offset
// Seek until object offset maps to a particular part offset.
for i, part := range fi.Parts {
partIndex = i
		// Offset is smaller than the part size; we have reached the proper part offset.
if partOffset < part.Size {
return partIndex, partOffset, nil
}
		// Continue towards the next part.
partOffset -= part.Size
}
logger.LogIf(ctx, InvalidRange{})
// Offset beyond the size of the object return InvalidRange.
return 0, 0, InvalidRange{}
}
func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (xmv FileInfo, e error) {
metaHashes := make([]string, len(metaArr))
h := sha256.New()
for i, meta := range metaArr {
if meta.IsValid() && meta.ModTime.Equal(modTime) && meta.DataDir == dataDir {
for _, part := range meta.Parts {
h.Write([]byte(fmt.Sprintf("part.%d", part.Number)))
}
h.Write([]byte(fmt.Sprintf("%v", meta.Erasure.Distribution)))
// make sure that length of Data is same
h.Write([]byte(fmt.Sprintf("%v", len(meta.Data))))
metaHashes[i] = hex.EncodeToString(h.Sum(nil))
h.Reset()
}
}
metaHashCountMap := make(map[string]int)
for _, hash := range metaHashes {
if hash == "" {
continue
}
metaHashCountMap[hash]++
}
maxHash := ""
maxCount := 0
for hash, count := range metaHashCountMap {
if count > maxCount {
maxCount = count
maxHash = hash
}
}
if maxCount < quorum {
return FileInfo{}, errErasureReadQuorum
}
for i, hash := range metaHashes {
if hash == maxHash {
return metaArr[i], nil
}
}
return FileInfo{}, errErasureReadQuorum
}
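// exampleReadQuorum is an illustrative sketch added for clarity and not part
// of the original MinIO source: findFileInfoInQuorum hashes each drive's
// metadata (part numbers, erasure distribution, inline-data length) and only
// returns a FileInfo when at least `quorum` drives produced the same hash.
// Callers normally pass the object's read quorum (its data-block count);
// half the drives is used here purely for illustration.
func exampleReadQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string) (FileInfo, error) {
	return findFileInfoInQuorum(ctx, metaArr, modTime, dataDir, len(metaArr)/2)
}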
// pickValidFileInfo - picks one valid FileInfo from the given slice of
// FileInfo and returns it.
func pickValidFileInfo(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (xmv FileInfo, e error) {
return findFileInfoInQuorum(ctx, metaArr, modTime, dataDir, quorum)
}
// writeUniqueFileInfo - writes unique `xl.meta` content for each disk concurrently.
func writeUniqueFileInfo(ctx context.Context, disks []StorageAPI, bucket, prefix string, files []FileInfo, quorum int) ([]StorageAPI, error) {
g := errgroup.WithNErrs(len(disks))
// Start writing `xl.meta` to all disks in parallel.
for index := range disks {
index := index
g.Go(func() error {
if disks[index] == nil {
return errDiskNotFound
}
// Pick one FileInfo for a disk at index.
fi := files[index]
fi.Erasure.Index = index + 1
if fi.IsValid() {
return disks[index].WriteMetadata(ctx, bucket, prefix, fi)
}
return errCorruptedFormat
}, index)
}
// Wait for all the routines.
mErrs := g.Wait()
err := reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, quorum)
return evalDisks(disks, mErrs), err
}
// Returns per object readQuorum and writeQuorum
// readQuorum is the min required disks to read data.
// writeQuorum is the min required disks to write data.
func objectQuorumFromMeta(ctx context.Context, partsMetaData []FileInfo, errs []error, defaultParityCount int) (objectReadQuorum, objectWriteQuorum int, err error) {
// get the latest updated Metadata and a count of all the latest updated FileInfo(s)
latestFileInfo, err := getLatestFileInfo(ctx, partsMetaData, errs)
if err != nil {
return 0, 0, err
}
dataBlocks := latestFileInfo.Erasure.DataBlocks
parityBlocks := globalStorageClass.GetParityForSC(latestFileInfo.Metadata[xhttp.AmzStorageClass])
if parityBlocks <= 0 {
parityBlocks = defaultParityCount
}
writeQuorum := dataBlocks
if dataBlocks == parityBlocks {
writeQuorum++
}
// Since all the valid erasure code meta updated at the same time are equivalent, pass dataBlocks
// from latestFileInfo to get the quorum
return dataBlocks, writeQuorum, nil
}
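// exampleObjectQuorum is an illustrative sketch added for clarity and not
// part of the original MinIO source: for an object written with 12 data and
// 4 parity blocks (and no storage-class override in its metadata), a
// defaultParityCount of 4 yields a read quorum of 12 and a write quorum of
// 12; the write quorum is bumped by one only when data and parity counts
// are equal.
func exampleObjectQuorum(ctx context.Context, partsMetadata []FileInfo, errs []error) {
	readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, partsMetadata, errs, 4)
	fmt.Println(readQuorum, writeQuorum, err)
}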
| cmd/erasure-metadata.go | 1 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.00043765699956566095,
0.00018839535186998546,
0.0001612623018445447,
0.00017071334877982736,
0.000054988246120046824
] |
{
"id": 6,
"code_window": [
"\t}{\n",
"\t\t{\n",
"\t\t\tfis: getNFInfo(16, 16, 1603863445, \"36a21454-a2ca-11eb-bbaa-93a81c686f21\"),\n",
"\t\t\tmodTime: time.Unix(1603863445, 0),\n",
"\t\t\tdataDir: \"36a21454-a2ca-11eb-bbaa-93a81c686f21\",\n",
"\t\t\texpectedErr: nil,\n",
"\t\t},\n",
"\t\t{\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\t\tfis: getNFInfo(16, 16, 1603863445, \"36a21454-a2ca-11eb-bbaa-93a81c686f21\"),\n",
"\t\t\tmodTime: time.Unix(1603863445, 0),\n",
"\t\t\tdataDir: \"36a21454-a2ca-11eb-bbaa-93a81c686f21\",\n",
"\t\t\texpectedErr: nil,\n",
"\t\t\texpectedQuorum: 8,\n",
"\t\t},\n",
"\t\t{\n",
"\t\t\tfis: getNFInfo(16, 7, 1603863445, \"36a21454-a2ca-11eb-bbaa-93a81c686f21\"),\n",
"\t\t\tmodTime: time.Unix(1603863445, 0),\n",
"\t\t\tdataDir: \"36a21454-a2ca-11eb-bbaa-93a81c686f21\",\n",
"\t\t\texpectedErr: errErasureReadQuorum,\n",
"\t\t\texpectedQuorum: 8,\n"
],
"file_path": "cmd/erasure-metadata_test.go",
"type": "replace",
"edit_start_line_idx": 184
} | checks = ["all", "-ST1005", "-ST1000", "-SA4000", "-SA9004", "-SA1019", "-SA1008", "-U1000", "-ST1003", "-ST1018"]
| browser/staticcheck.conf | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.00024350016610696912,
0.00024350016610696912,
0.00024350016610696912,
0.00024350016610696912,
0
] |
{
"id": 6,
"code_window": [
"\t}{\n",
"\t\t{\n",
"\t\t\tfis: getNFInfo(16, 16, 1603863445, \"36a21454-a2ca-11eb-bbaa-93a81c686f21\"),\n",
"\t\t\tmodTime: time.Unix(1603863445, 0),\n",
"\t\t\tdataDir: \"36a21454-a2ca-11eb-bbaa-93a81c686f21\",\n",
"\t\t\texpectedErr: nil,\n",
"\t\t},\n",
"\t\t{\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\t\tfis: getNFInfo(16, 16, 1603863445, \"36a21454-a2ca-11eb-bbaa-93a81c686f21\"),\n",
"\t\t\tmodTime: time.Unix(1603863445, 0),\n",
"\t\t\tdataDir: \"36a21454-a2ca-11eb-bbaa-93a81c686f21\",\n",
"\t\t\texpectedErr: nil,\n",
"\t\t\texpectedQuorum: 8,\n",
"\t\t},\n",
"\t\t{\n",
"\t\t\tfis: getNFInfo(16, 7, 1603863445, \"36a21454-a2ca-11eb-bbaa-93a81c686f21\"),\n",
"\t\t\tmodTime: time.Unix(1603863445, 0),\n",
"\t\t\tdataDir: \"36a21454-a2ca-11eb-bbaa-93a81c686f21\",\n",
"\t\t\texpectedErr: errErasureReadQuorum,\n",
"\t\t\texpectedQuorum: 8,\n"
],
"file_path": "cmd/erasure-metadata_test.go",
"type": "replace",
"edit_start_line_idx": 184
} | /*
* MinIO Object Storage (c) 2021 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import React from "react"
import { connect } from "react-redux"
import InfiniteScroll from "react-infinite-scroller"
import ObjectsList from "./ObjectsList"
import { getFilteredObjects } from "./selectors"
export class ObjectsListContainer extends React.Component {
constructor(props) {
super(props)
this.state = {
page: 1
}
this.loadNextPage = this.loadNextPage.bind(this)
}
componentWillReceiveProps(nextProps) {
if (
nextProps.currentBucket !== this.props.currentBucket ||
nextProps.currentPrefix !== this.props.currentPrefix ||
nextProps.sortBy !== this.props.sortBy ||
nextProps.sortOrder !== this.props.sortOrder
) {
this.setState({
page: 1
})
}
}
componentDidUpdate(prevProps) {
if (this.props.filter !== prevProps.filter) {
this.setState({
page: 1
})
}
}
loadNextPage() {
this.setState(state => {
return { page: state.page + 1 }
})
}
render() {
const { filteredObjects, listLoading } = this.props
const visibleObjects = filteredObjects.slice(0, this.state.page * 100)
return (
<div style={{ position: "relative" }}>
<InfiniteScroll
pageStart={0}
loadMore={this.loadNextPage}
hasMore={filteredObjects.length > visibleObjects.length}
useWindow={true}
initialLoad={false}
>
<ObjectsList objects={visibleObjects} />
</InfiniteScroll>
{listLoading && <div className="loading" />}
</div>
)
}
}
const mapStateToProps = state => {
return {
currentBucket: state.buckets.currentBucket,
currentPrefix: state.objects.currentPrefix,
filteredObjects: getFilteredObjects(state),
filter: state.objects.filter,
sortBy: state.objects.sortBy,
sortOrder: state.objects.sortOrder,
listLoading: state.objects.listLoading
}
}
export default connect(mapStateToProps)(ObjectsListContainer)
| browser/app/js/objects/ObjectsListContainer.js | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.0001760741724865511,
0.00017120043048635125,
0.00016150096780620515,
0.0001725481852190569,
0.00000445284149463987
] |
{
"id": 6,
"code_window": [
"\t}{\n",
"\t\t{\n",
"\t\t\tfis: getNFInfo(16, 16, 1603863445, \"36a21454-a2ca-11eb-bbaa-93a81c686f21\"),\n",
"\t\t\tmodTime: time.Unix(1603863445, 0),\n",
"\t\t\tdataDir: \"36a21454-a2ca-11eb-bbaa-93a81c686f21\",\n",
"\t\t\texpectedErr: nil,\n",
"\t\t},\n",
"\t\t{\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\t\tfis: getNFInfo(16, 16, 1603863445, \"36a21454-a2ca-11eb-bbaa-93a81c686f21\"),\n",
"\t\t\tmodTime: time.Unix(1603863445, 0),\n",
"\t\t\tdataDir: \"36a21454-a2ca-11eb-bbaa-93a81c686f21\",\n",
"\t\t\texpectedErr: nil,\n",
"\t\t\texpectedQuorum: 8,\n",
"\t\t},\n",
"\t\t{\n",
"\t\t\tfis: getNFInfo(16, 7, 1603863445, \"36a21454-a2ca-11eb-bbaa-93a81c686f21\"),\n",
"\t\t\tmodTime: time.Unix(1603863445, 0),\n",
"\t\t\tdataDir: \"36a21454-a2ca-11eb-bbaa-93a81c686f21\",\n",
"\t\t\texpectedErr: errErasureReadQuorum,\n",
"\t\t\texpectedQuorum: 8,\n"
],
"file_path": "cmd/erasure-metadata_test.go",
"type": "replace",
"edit_start_line_idx": 184
} | /*
* MinIO Object Storage (c) 2021 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import React from "react"
import { connect } from "react-redux"
import humanize from "humanize"
import * as actionsCommon from "./actions"
export class StorageInfo extends React.Component {
componentWillMount() {
const { fetchStorageInfo } = this.props
fetchStorageInfo()
}
render() {
const { used } = this.props.storageInfo
if (!used || used == 0) {
return <noscript />
}
return (
<div className="feh-used">
<div className="fehu-chart">
<div style={{ width: 0 }} />
</div>
<ul>
<li>
<span>Used: </span>
{humanize.filesize(used)}
</li>
</ul>
</div>
)
}
}
const mapStateToProps = state => {
return {
storageInfo: state.browser.storageInfo
}
}
const mapDispatchToProps = dispatch => {
return {
fetchStorageInfo: () => dispatch(actionsCommon.fetchStorageInfo())
}
}
export default connect(
mapStateToProps,
mapDispatchToProps
)(StorageInfo)
| browser/app/js/browser/StorageInfo.js | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.00017609665519557893,
0.00017019544611684978,
0.00016119988868013024,
0.00017269107047468424,
0.000004828635610465426
] |
{
"id": 7,
"code_window": [
"\t\t},\n",
"\t\t{\n",
"\t\t\tfis: getNFInfo(16, 7, 1603863445, \"36a21454-a2ca-11eb-bbaa-93a81c686f21\"),\n",
"\t\t\tmodTime: time.Unix(1603863445, 0),\n",
"\t\t\tdataDir: \"36a21454-a2ca-11eb-bbaa-93a81c686f21\",\n",
"\t\t\texpectedErr: errErasureReadQuorum,\n",
"\t\t},\n",
"\t}\n",
"\n",
"\tfor _, test := range tests {\n",
"\t\ttest := test\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tfis: getNFInfo(16, 16, 1603863445, \"36a21454-a2ca-11eb-bbaa-93a81c686f21\"),\n",
"\t\t\tmodTime: time.Unix(1603863445, 0),\n",
"\t\t\tdataDir: \"36a21454-a2ca-11eb-bbaa-93a81c686f21\",\n",
"\t\t\texpectedErr: errErasureReadQuorum,\n",
"\t\t\texpectedQuorum: 0,\n"
],
"file_path": "cmd/erasure-metadata_test.go",
"type": "replace",
"edit_start_line_idx": 190
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"net/http"
"sort"
"time"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/replication"
"github.com/minio/minio/pkg/sync/errgroup"
)
const erasureAlgorithm = "rs-vandermonde"
// byObjectPartNumber is a collection satisfying sort.Interface.
type byObjectPartNumber []ObjectPartInfo
func (t byObjectPartNumber) Len() int { return len(t) }
func (t byObjectPartNumber) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t byObjectPartNumber) Less(i, j int) bool { return t[i].Number < t[j].Number }
// AddChecksumInfo adds a checksum of a part.
func (e *ErasureInfo) AddChecksumInfo(ckSumInfo ChecksumInfo) {
for i, sum := range e.Checksums {
if sum.PartNumber == ckSumInfo.PartNumber {
e.Checksums[i] = ckSumInfo
return
}
}
e.Checksums = append(e.Checksums, ckSumInfo)
}
// GetChecksumInfo - get checksum of a part.
func (e ErasureInfo) GetChecksumInfo(partNumber int) (ckSum ChecksumInfo) {
for _, sum := range e.Checksums {
if sum.PartNumber == partNumber {
// Return the checksum
return sum
}
}
return ChecksumInfo{}
}
// ShardFileSize - returns final erasure size from original size.
func (e ErasureInfo) ShardFileSize(totalLength int64) int64 {
if totalLength == 0 {
return 0
}
if totalLength == -1 {
return -1
}
numShards := totalLength / e.BlockSize
lastBlockSize := totalLength % e.BlockSize
lastShardSize := ceilFrac(lastBlockSize, int64(e.DataBlocks))
return numShards*e.ShardSize() + lastShardSize
}
// ShardSize - returns the actual shard size from the erasure blockSize.
func (e ErasureInfo) ShardSize() int64 {
return ceilFrac(e.BlockSize, int64(e.DataBlocks))
}
// IsValid - tells if erasure info fields are valid.
func (fi FileInfo) IsValid() bool {
if fi.Deleted {
// Delete marker has no data, no need to check
// for erasure coding information
return true
}
dataBlocks := fi.Erasure.DataBlocks
parityBlocks := fi.Erasure.ParityBlocks
correctIndexes := (fi.Erasure.Index > 0 &&
fi.Erasure.Index <= dataBlocks+parityBlocks &&
len(fi.Erasure.Distribution) == (dataBlocks+parityBlocks))
return ((dataBlocks >= parityBlocks) &&
(dataBlocks != 0) && (parityBlocks != 0) &&
correctIndexes)
}
// ToObjectInfo - Converts metadata to object info.
func (fi FileInfo) ToObjectInfo(bucket, object string) ObjectInfo {
object = decodeDirObject(object)
versionID := fi.VersionID
if (globalBucketVersioningSys.Enabled(bucket) || globalBucketVersioningSys.Suspended(bucket)) && versionID == "" {
versionID = nullVersionID
}
objInfo := ObjectInfo{
IsDir: HasSuffix(object, SlashSeparator),
Bucket: bucket,
Name: object,
VersionID: versionID,
IsLatest: fi.IsLatest,
DeleteMarker: fi.Deleted,
Size: fi.Size,
ModTime: fi.ModTime,
Legacy: fi.XLV1,
ContentType: fi.Metadata["content-type"],
ContentEncoding: fi.Metadata["content-encoding"],
NumVersions: fi.NumVersions,
SuccessorModTime: fi.SuccessorModTime,
}
// Update expires
var (
t time.Time
e error
)
if exp, ok := fi.Metadata["expires"]; ok {
if t, e = time.Parse(http.TimeFormat, exp); e == nil {
objInfo.Expires = t.UTC()
}
}
objInfo.backendType = BackendErasure
// Extract etag from metadata.
objInfo.ETag = extractETag(fi.Metadata)
// Add user tags to the object info
tags := fi.Metadata[xhttp.AmzObjectTagging]
if len(tags) != 0 {
objInfo.UserTags = tags
}
// Add replication status to the object info
objInfo.ReplicationStatus = replication.StatusType(fi.Metadata[xhttp.AmzBucketReplicationStatus])
if fi.Deleted {
objInfo.ReplicationStatus = replication.StatusType(fi.DeleteMarkerReplicationStatus)
}
objInfo.TransitionStatus = fi.TransitionStatus
objInfo.transitionedObjName = fi.TransitionedObjName
objInfo.TransitionTier = fi.TransitionTier
// etag/md5Sum has already been extracted. We need to
// remove to avoid it from appearing as part of
// response headers. e.g, X-Minio-* or X-Amz-*.
// Tags have also been extracted, we remove that as well.
objInfo.UserDefined = cleanMetadata(fi.Metadata)
// All the parts per object.
objInfo.Parts = fi.Parts
// Update storage class
if sc, ok := fi.Metadata[xhttp.AmzStorageClass]; ok {
objInfo.StorageClass = sc
} else {
objInfo.StorageClass = globalMinioDefaultStorageClass
}
objInfo.VersionPurgeStatus = fi.VersionPurgeStatus
// set restore status for transitioned object
restoreHdr, ok := fi.Metadata[xhttp.AmzRestore]
if ok {
if restoreStatus, err := parseRestoreObjStatus(restoreHdr); err == nil {
objInfo.RestoreOngoing = restoreStatus.Ongoing()
objInfo.RestoreExpires, _ = restoreStatus.Expiry()
}
}
// Success.
return objInfo
}
// objectPartIndex - returns the index of matching object part number.
func objectPartIndex(parts []ObjectPartInfo, partNumber int) int {
for i, part := range parts {
if partNumber == part.Number {
return i
}
}
return -1
}
// AddObjectPart - add a new object part in order.
func (fi *FileInfo) AddObjectPart(partNumber int, partETag string, partSize int64, actualSize int64) {
partInfo := ObjectPartInfo{
Number: partNumber,
ETag: partETag,
Size: partSize,
ActualSize: actualSize,
}
// Update part info if it already exists.
for i, part := range fi.Parts {
if partNumber == part.Number {
fi.Parts[i] = partInfo
return
}
}
// Proceed to include new part info.
fi.Parts = append(fi.Parts, partInfo)
// Parts in FileInfo should be in sorted order by part number.
sort.Sort(byObjectPartNumber(fi.Parts))
}
// ObjectToPartOffset - translate offset of an object to offset of its individual part.
func (fi FileInfo) ObjectToPartOffset(ctx context.Context, offset int64) (partIndex int, partOffset int64, err error) {
if offset == 0 {
// Special case - if offset is 0, then partIndex and partOffset are always 0.
return 0, 0, nil
}
partOffset = offset
// Seek until object offset maps to a particular part offset.
for i, part := range fi.Parts {
partIndex = i
		// Offset is smaller than the part size; we have reached the proper part offset.
if partOffset < part.Size {
return partIndex, partOffset, nil
}
		// Continue towards the next part.
partOffset -= part.Size
}
logger.LogIf(ctx, InvalidRange{})
// Offset beyond the size of the object return InvalidRange.
return 0, 0, InvalidRange{}
}
func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (xmv FileInfo, e error) {
metaHashes := make([]string, len(metaArr))
h := sha256.New()
for i, meta := range metaArr {
if meta.IsValid() && meta.ModTime.Equal(modTime) && meta.DataDir == dataDir {
for _, part := range meta.Parts {
h.Write([]byte(fmt.Sprintf("part.%d", part.Number)))
}
h.Write([]byte(fmt.Sprintf("%v", meta.Erasure.Distribution)))
// make sure that length of Data is same
h.Write([]byte(fmt.Sprintf("%v", len(meta.Data))))
metaHashes[i] = hex.EncodeToString(h.Sum(nil))
h.Reset()
}
}
metaHashCountMap := make(map[string]int)
for _, hash := range metaHashes {
if hash == "" {
continue
}
metaHashCountMap[hash]++
}
maxHash := ""
maxCount := 0
for hash, count := range metaHashCountMap {
if count > maxCount {
maxCount = count
maxHash = hash
}
}
if maxCount < quorum {
return FileInfo{}, errErasureReadQuorum
}
for i, hash := range metaHashes {
if hash == maxHash {
return metaArr[i], nil
}
}
return FileInfo{}, errErasureReadQuorum
}
// pickValidFileInfo - picks one valid FileInfo from the given slice of
// FileInfo and returns it.
func pickValidFileInfo(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (xmv FileInfo, e error) {
return findFileInfoInQuorum(ctx, metaArr, modTime, dataDir, quorum)
}
// writeUniqueFileInfo - writes unique `xl.meta` content for each disk concurrently.
func writeUniqueFileInfo(ctx context.Context, disks []StorageAPI, bucket, prefix string, files []FileInfo, quorum int) ([]StorageAPI, error) {
g := errgroup.WithNErrs(len(disks))
// Start writing `xl.meta` to all disks in parallel.
for index := range disks {
index := index
g.Go(func() error {
if disks[index] == nil {
return errDiskNotFound
}
// Pick one FileInfo for a disk at index.
fi := files[index]
fi.Erasure.Index = index + 1
if fi.IsValid() {
return disks[index].WriteMetadata(ctx, bucket, prefix, fi)
}
return errCorruptedFormat
}, index)
}
// Wait for all the routines.
mErrs := g.Wait()
err := reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, quorum)
return evalDisks(disks, mErrs), err
}
// Returns per object readQuorum and writeQuorum
// readQuorum is the min required disks to read data.
// writeQuorum is the min required disks to write data.
func objectQuorumFromMeta(ctx context.Context, partsMetaData []FileInfo, errs []error, defaultParityCount int) (objectReadQuorum, objectWriteQuorum int, err error) {
// get the latest updated Metadata and a count of all the latest updated FileInfo(s)
latestFileInfo, err := getLatestFileInfo(ctx, partsMetaData, errs)
if err != nil {
return 0, 0, err
}
dataBlocks := latestFileInfo.Erasure.DataBlocks
parityBlocks := globalStorageClass.GetParityForSC(latestFileInfo.Metadata[xhttp.AmzStorageClass])
if parityBlocks <= 0 {
parityBlocks = defaultParityCount
}
writeQuorum := dataBlocks
if dataBlocks == parityBlocks {
writeQuorum++
}
// Since all the valid erasure code meta updated at the same time are equivalent, pass dataBlocks
// from latestFileInfo to get the quorum
return dataBlocks, writeQuorum, nil
}
| cmd/erasure-metadata.go | 1 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.0032175942324101925,
0.0003418404667172581,
0.0001617831876501441,
0.00017101979756262153,
0.0005746027454733849
] |
{
"id": 7,
"code_window": [
"\t\t},\n",
"\t\t{\n",
"\t\t\tfis: getNFInfo(16, 7, 1603863445, \"36a21454-a2ca-11eb-bbaa-93a81c686f21\"),\n",
"\t\t\tmodTime: time.Unix(1603863445, 0),\n",
"\t\t\tdataDir: \"36a21454-a2ca-11eb-bbaa-93a81c686f21\",\n",
"\t\t\texpectedErr: errErasureReadQuorum,\n",
"\t\t},\n",
"\t}\n",
"\n",
"\tfor _, test := range tests {\n",
"\t\ttest := test\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tfis: getNFInfo(16, 16, 1603863445, \"36a21454-a2ca-11eb-bbaa-93a81c686f21\"),\n",
"\t\t\tmodTime: time.Unix(1603863445, 0),\n",
"\t\t\tdataDir: \"36a21454-a2ca-11eb-bbaa-93a81c686f21\",\n",
"\t\t\texpectedErr: errErasureReadQuorum,\n",
"\t\t\texpectedQuorum: 0,\n"
],
"file_path": "cmd/erasure-metadata_test.go",
"type": "replace",
"edit_start_line_idx": 190
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package condition
import (
"fmt"
"net/http"
"strconv"
)
func toNumericLessThanFuncString(n name, key Key, value int) string {
return fmt.Sprintf("%v:%v:%v", n, key, value)
}
// numericLessThanFunc - numeric less-than function. It checks whether the
// integer value for Key in the given values map is strictly less than the
// condition value.
// For example,
//   - if value = 100, at evaluate() it returns whether the integer in the
//     value map for Key is less than 100.
type numericLessThanFunc struct {
k Key
value int
}
// evaluate() - checks whether the integer value for Key in the given values
// map is strictly less than the condition value.
func (f numericLessThanFunc) evaluate(values map[string][]string) bool {
requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())]
if !ok {
requestValue = values[f.k.Name()]
}
if len(requestValue) == 0 {
return false
}
rvInt, err := strconv.Atoi(requestValue[0])
if err != nil {
return false
}
return rvInt < f.value
}
// key() - returns condition key which is used by this condition function.
func (f numericLessThanFunc) key() Key {
return f.k
}
// name() - returns "NumericLessThan" condition name.
func (f numericLessThanFunc) name() name {
return numericLessThan
}
func (f numericLessThanFunc) String() string {
return toNumericLessThanFuncString(numericLessThan, f.k, f.value)
}
// toMap - returns map representation of this function.
func (f numericLessThanFunc) toMap() map[Key]ValueSet {
if !f.k.IsValid() {
return nil
}
values := NewValueSet()
values.Add(NewIntValue(f.value))
return map[Key]ValueSet{
f.k: values,
}
}
// numericLessThanEqualsFunc - numeric less-than-or-equals function. It checks
// whether the integer value for Key in the given values map is less than or
// equal to the condition value.
// For example,
//   - if value = 100, at evaluate() it returns whether the integer in the
//     value map for Key is at most 100.
type numericLessThanEqualsFunc struct {
numericLessThanFunc
}
// evaluate() - checks whether the integer value for Key in the given values
// map is less than or equal to the condition value.
func (f numericLessThanEqualsFunc) evaluate(values map[string][]string) bool {
requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())]
if !ok {
requestValue = values[f.k.Name()]
}
if len(requestValue) == 0 {
return false
}
rvInt, err := strconv.Atoi(requestValue[0])
if err != nil {
return false
}
return rvInt <= f.value
}
// name() - returns "NumericLessThanEquals" condition name.
func (f numericLessThanEqualsFunc) name() name {
return numericLessThanEquals
}
func (f numericLessThanEqualsFunc) String() string {
return toNumericLessThanFuncString(numericLessThanEquals, f.numericLessThanFunc.k, f.numericLessThanFunc.value)
}
// newNumericLessThanFunc - returns new NumericLessThan function.
func newNumericLessThanFunc(key Key, values ValueSet) (Function, error) {
v, err := valueToInt(numericLessThan, values)
if err != nil {
return nil, err
}
return NewNumericLessThanFunc(key, v)
}
// NewNumericLessThanFunc - returns new NumericLessThan function.
func NewNumericLessThanFunc(key Key, value int) (Function, error) {
return &numericLessThanFunc{key, value}, nil
}
// newNumericLessThanEqualsFunc - returns new NumericLessThanEquals function.
func newNumericLessThanEqualsFunc(key Key, values ValueSet) (Function, error) {
v, err := valueToInt(numericLessThanEquals, values)
if err != nil {
return nil, err
}
return NewNumericLessThanEqualsFunc(key, v)
}
// NewNumericLessThanEqualsFunc - returns new NumericLessThanEquals function.
func NewNumericLessThanEqualsFunc(key Key, value int) (Function, error) {
return &numericLessThanEqualsFunc{numericLessThanFunc{key, value}}, nil
}
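// exampleNumericLessThan is an illustrative sketch added for clarity and not
// part of the original MinIO source: given a valid condition key supplied by
// the caller, the function matches when the request value parses as an
// integer strictly below the configured bound.
func exampleNumericLessThan(key Key) {
	f, _ := NewNumericLessThanFunc(key, 100)
	fmt.Println(f.evaluate(map[string][]string{key.Name(): {"42"}}))  // true
	fmt.Println(f.evaluate(map[string][]string{key.Name(): {"150"}})) // false
}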
| pkg/bucket/policy/condition/numericlessfunc.go | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.0003650226863101125,
0.00018170686962548643,
0.0001610385806998238,
0.00016706192400306463,
0.000048174551920965314
] |
{
"id": 7,
"code_window": [
"\t\t},\n",
"\t\t{\n",
"\t\t\tfis: getNFInfo(16, 7, 1603863445, \"36a21454-a2ca-11eb-bbaa-93a81c686f21\"),\n",
"\t\t\tmodTime: time.Unix(1603863445, 0),\n",
"\t\t\tdataDir: \"36a21454-a2ca-11eb-bbaa-93a81c686f21\",\n",
"\t\t\texpectedErr: errErasureReadQuorum,\n",
"\t\t},\n",
"\t}\n",
"\n",
"\tfor _, test := range tests {\n",
"\t\ttest := test\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tfis: getNFInfo(16, 16, 1603863445, \"36a21454-a2ca-11eb-bbaa-93a81c686f21\"),\n",
"\t\t\tmodTime: time.Unix(1603863445, 0),\n",
"\t\t\tdataDir: \"36a21454-a2ca-11eb-bbaa-93a81c686f21\",\n",
"\t\t\texpectedErr: errErasureReadQuorum,\n",
"\t\t\texpectedQuorum: 0,\n"
],
"file_path": "cmd/erasure-metadata_test.go",
"type": "replace",
"edit_start_line_idx": 190
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package handlers
import (
"context"
"net"
"net/http"
"net/http/httputil"
"net/url"
"strings"
"time"
)
const defaultFlushInterval = time.Duration(100) * time.Millisecond
// Forwarder forwards all incoming HTTP requests to configured transport.
type Forwarder struct {
RoundTripper http.RoundTripper
PassHost bool
Logger func(error)
ErrorHandler func(http.ResponseWriter, *http.Request, error)
// internal variables
rewriter *headerRewriter
}
// NewForwarder creates an instance of Forwarder based on the provided list of configuration options
func NewForwarder(f *Forwarder) *Forwarder {
f.rewriter = &headerRewriter{}
if f.RoundTripper == nil {
f.RoundTripper = http.DefaultTransport
}
return f
}
// ServeHTTP forwards HTTP traffic using the configured transport
func (f *Forwarder) ServeHTTP(w http.ResponseWriter, inReq *http.Request) {
outReq := new(http.Request)
*outReq = *inReq // includes shallow copies of maps, but we handle this in Director
revproxy := httputil.ReverseProxy{
Director: func(req *http.Request) {
f.modifyRequest(req, inReq.URL)
},
Transport: f.RoundTripper,
FlushInterval: defaultFlushInterval,
ErrorHandler: f.customErrHandler,
}
if f.ErrorHandler != nil {
revproxy.ErrorHandler = f.ErrorHandler
}
revproxy.ServeHTTP(w, outReq)
}
// customErrHandler is implemented to avoid the default
// `http: proxy error: context canceled` message being printed by Go.
func (f *Forwarder) customErrHandler(w http.ResponseWriter, r *http.Request, err error) {
if f.Logger != nil && err != context.Canceled {
f.Logger(err)
}
w.WriteHeader(http.StatusBadGateway)
}
func (f *Forwarder) getURLFromRequest(req *http.Request) *url.URL {
// If the Request was created by Go via a real HTTP request, RequestURI will
// contain the original query string. If the Request was created in code, RequestURI
// will be empty, and we will use the URL object instead
u := req.URL
if req.RequestURI != "" {
parsedURL, err := url.ParseRequestURI(req.RequestURI)
if err == nil {
u = parsedURL
}
}
return u
}
// copyURL provides update safe copy by avoiding shallow copying User field
func copyURL(i *url.URL) *url.URL {
out := *i
if i.User != nil {
u := *i.User
out.User = &u
}
return &out
}
// Modify the request to handle the target URL
func (f *Forwarder) modifyRequest(outReq *http.Request, target *url.URL) {
outReq.URL = copyURL(outReq.URL)
outReq.URL.Scheme = target.Scheme
outReq.URL.Host = target.Host
u := f.getURLFromRequest(outReq)
outReq.URL.Path = u.Path
outReq.URL.RawPath = u.RawPath
outReq.URL.RawQuery = u.RawQuery
outReq.RequestURI = "" // Outgoing request should not have RequestURI
// Do not pass client Host header unless requested.
if !f.PassHost {
outReq.Host = target.Host
}
// TODO: only supports HTTP 1.1 for now.
outReq.Proto = "HTTP/1.1"
outReq.ProtoMajor = 1
outReq.ProtoMinor = 1
f.rewriter.Rewrite(outReq)
// Disable closeNotify when method GET for http pipelining
if outReq.Method == http.MethodGet {
quietReq := outReq.WithContext(context.Background())
*outReq = *quietReq
}
}
// headerRewriter is responsible for removing hop-by-hop headers and setting forwarding headers
type headerRewriter struct{}
// Clean up the IP in case it is an IPv6 address carrying {zone} information, like
// "[fe80::d806:a55d:eb1b:49cc%vEthernet (vmxnet3 Ethernet Adapter - Virtual Switch)]:64692"
func ipv6fix(clientIP string) string {
return strings.Split(clientIP, "%")[0]
}
func (rw *headerRewriter) Rewrite(req *http.Request) {
if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {
clientIP = ipv6fix(clientIP)
if req.Header.Get(xRealIP) == "" {
req.Header.Set(xRealIP, clientIP)
}
}
xfProto := req.Header.Get(xForwardedProto)
if xfProto == "" {
if req.TLS != nil {
req.Header.Set(xForwardedProto, "https")
} else {
req.Header.Set(xForwardedProto, "http")
}
}
if xfPort := req.Header.Get(xForwardedPort); xfPort == "" {
req.Header.Set(xForwardedPort, forwardedPort(req))
}
if xfHost := req.Header.Get(xForwardedHost); xfHost == "" && req.Host != "" {
req.Header.Set(xForwardedHost, req.Host)
}
}
func forwardedPort(req *http.Request) string {
if req == nil {
return ""
}
if _, port, err := net.SplitHostPort(req.Host); err == nil && port != "" {
return port
}
if req.TLS != nil {
return "443"
}
return "80"
}
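// exampleForward is an illustrative sketch added for clarity and not part of
// the original MinIO source: the forwarder reads the destination scheme and
// host from the request URL, so a caller points the URL at the target before
// handing the request over; the target itself is a placeholder argument.
func exampleForward(w http.ResponseWriter, r *http.Request, target *url.URL) {
	fwd := NewForwarder(&Forwarder{
		RoundTripper: http.DefaultTransport,
		PassHost:     true,
	})
	r.URL.Scheme = target.Scheme
	r.URL.Host = target.Host
	fwd.ServeHTTP(w, r)
}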
| pkg/handlers/forwarder.go | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.0006095320568419993,
0.00019606004934757948,
0.0001625946315471083,
0.000169144754181616,
0.00009813692304305732
] |
{
"id": 7,
"code_window": [
"\t\t},\n",
"\t\t{\n",
"\t\t\tfis: getNFInfo(16, 7, 1603863445, \"36a21454-a2ca-11eb-bbaa-93a81c686f21\"),\n",
"\t\t\tmodTime: time.Unix(1603863445, 0),\n",
"\t\t\tdataDir: \"36a21454-a2ca-11eb-bbaa-93a81c686f21\",\n",
"\t\t\texpectedErr: errErasureReadQuorum,\n",
"\t\t},\n",
"\t}\n",
"\n",
"\tfor _, test := range tests {\n",
"\t\ttest := test\n"
],
"labels": [
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tfis: getNFInfo(16, 16, 1603863445, \"36a21454-a2ca-11eb-bbaa-93a81c686f21\"),\n",
"\t\t\tmodTime: time.Unix(1603863445, 0),\n",
"\t\t\tdataDir: \"36a21454-a2ca-11eb-bbaa-93a81c686f21\",\n",
"\t\t\texpectedErr: errErasureReadQuorum,\n",
"\t\t\texpectedQuorum: 0,\n"
],
"file_path": "cmd/erasure-metadata_test.go",
"type": "replace",
"edit_start_line_idx": 190
} | // +build !windows,!plan9,!solaris
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package lock
import (
"os"
"syscall"
)
// Internal function implementing support for both
// blocking and non-blocking lock types.
func lockedOpenFile(path string, flag int, perm os.FileMode, lockType int) (*LockedFile, error) {
switch flag {
case syscall.O_RDONLY:
lockType |= syscall.LOCK_SH
case syscall.O_WRONLY:
fallthrough
case syscall.O_RDWR:
fallthrough
case syscall.O_WRONLY | syscall.O_CREAT:
fallthrough
case syscall.O_RDWR | syscall.O_CREAT:
lockType |= syscall.LOCK_EX
default:
return nil, &os.PathError{
Op: "open",
Path: path,
Err: syscall.EINVAL,
}
}
f, err := os.OpenFile(path, flag|syscall.O_SYNC, perm)
if err != nil {
return nil, err
}
if err = syscall.Flock(int(f.Fd()), lockType); err != nil {
f.Close()
if err == syscall.EWOULDBLOCK {
err = ErrAlreadyLocked
}
return nil, err
}
st, err := os.Stat(path)
if err != nil {
f.Close()
return nil, err
}
if st.IsDir() {
f.Close()
return nil, &os.PathError{
Op: "open",
Path: path,
Err: syscall.EISDIR,
}
}
return &LockedFile{File: f}, nil
}
// TryLockedOpenFile - tries a new write lock; functionally
// it is similar to LockedOpenFile with syscall.LOCK_EX
// mode but along with syscall.LOCK_NB, so that the function
// doesn't wait forever and instead returns if it cannot
// acquire a write lock.
func TryLockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
return lockedOpenFile(path, flag, perm, syscall.LOCK_NB)
}
// LockedOpenFile - initializes a new lock and protects
// the file from concurrent access across mount points.
// This implementation doesn't support all the open
// flags and shouldn't be considered as replacement
// for os.OpenFile().
func LockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
return lockedOpenFile(path, flag, perm, 0)
}
// Open - Call os.OpenFile
func Open(path string, flag int, perm os.FileMode) (*os.File, error) {
return os.OpenFile(path, flag, perm)
}
| pkg/lock/lock_nix.go | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.00017655985720921308,
0.00016816896095406264,
0.00016116576443891972,
0.0001672371436143294,
0.0000046138684410834685
] |
{
"id": 8,
"code_window": [
"\n",
"\tfor _, test := range tests {\n",
"\t\ttest := test\n",
"\t\tt.Run(\"\", func(t *testing.T) {\n",
"\t\t\t_, err := findFileInfoInQuorum(context.Background(), test.fis, test.modTime, test.dataDir, 8)\n",
"\t\t\tif err != test.expectedErr {\n",
"\t\t\t\tt.Errorf(\"Expected %s, got %s\", test.expectedErr, err)\n",
"\t\t\t}\n",
"\t\t})\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t_, err := findFileInfoInQuorum(context.Background(), test.fis, test.modTime, test.dataDir, test.expectedQuorum)\n"
],
"file_path": "cmd/erasure-metadata_test.go",
"type": "replace",
"edit_start_line_idx": 200
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"net/http"
"sort"
"time"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/replication"
"github.com/minio/minio/pkg/sync/errgroup"
)
const erasureAlgorithm = "rs-vandermonde"
// byObjectPartNumber is a collection satisfying sort.Interface.
type byObjectPartNumber []ObjectPartInfo
func (t byObjectPartNumber) Len() int { return len(t) }
func (t byObjectPartNumber) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t byObjectPartNumber) Less(i, j int) bool { return t[i].Number < t[j].Number }
// AddChecksumInfo adds a checksum of a part.
func (e *ErasureInfo) AddChecksumInfo(ckSumInfo ChecksumInfo) {
for i, sum := range e.Checksums {
if sum.PartNumber == ckSumInfo.PartNumber {
e.Checksums[i] = ckSumInfo
return
}
}
e.Checksums = append(e.Checksums, ckSumInfo)
}
// GetChecksumInfo - get checksum of a part.
func (e ErasureInfo) GetChecksumInfo(partNumber int) (ckSum ChecksumInfo) {
for _, sum := range e.Checksums {
if sum.PartNumber == partNumber {
// Return the checksum
return sum
}
}
return ChecksumInfo{}
}
// ShardFileSize - returns final erasure size from original size.
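// Worked example (taking ceilFrac as ceiling division): with BlockSize = 1 MiB and
// DataBlocks = 4, a 2.5 MiB object (totalLength = 2621440) gives numShards = 2 and
// lastShardSize = ceil(524288/4) = 131072, so the shard file size is
// 2*ceil(1048576/4) + 131072 = 655360 bytes.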
func (e ErasureInfo) ShardFileSize(totalLength int64) int64 {
if totalLength == 0 {
return 0
}
if totalLength == -1 {
return -1
}
numShards := totalLength / e.BlockSize
lastBlockSize := totalLength % e.BlockSize
lastShardSize := ceilFrac(lastBlockSize, int64(e.DataBlocks))
return numShards*e.ShardSize() + lastShardSize
}
// ShardSize - returns the actual shard size from the erasure blockSize.
func (e ErasureInfo) ShardSize() int64 {
return ceilFrac(e.BlockSize, int64(e.DataBlocks))
}
// IsValid - tells if erasure info fields are valid.
func (fi FileInfo) IsValid() bool {
if fi.Deleted {
// Delete marker has no data, no need to check
// for erasure coding information
return true
}
dataBlocks := fi.Erasure.DataBlocks
parityBlocks := fi.Erasure.ParityBlocks
correctIndexes := (fi.Erasure.Index > 0 &&
fi.Erasure.Index <= dataBlocks+parityBlocks &&
len(fi.Erasure.Distribution) == (dataBlocks+parityBlocks))
return ((dataBlocks >= parityBlocks) &&
(dataBlocks != 0) && (parityBlocks != 0) &&
correctIndexes)
}
// ToObjectInfo - Converts metadata to object info.
func (fi FileInfo) ToObjectInfo(bucket, object string) ObjectInfo {
object = decodeDirObject(object)
versionID := fi.VersionID
if (globalBucketVersioningSys.Enabled(bucket) || globalBucketVersioningSys.Suspended(bucket)) && versionID == "" {
versionID = nullVersionID
}
objInfo := ObjectInfo{
IsDir: HasSuffix(object, SlashSeparator),
Bucket: bucket,
Name: object,
VersionID: versionID,
IsLatest: fi.IsLatest,
DeleteMarker: fi.Deleted,
Size: fi.Size,
ModTime: fi.ModTime,
Legacy: fi.XLV1,
ContentType: fi.Metadata["content-type"],
ContentEncoding: fi.Metadata["content-encoding"],
NumVersions: fi.NumVersions,
SuccessorModTime: fi.SuccessorModTime,
}
// Update expires
var (
t time.Time
e error
)
if exp, ok := fi.Metadata["expires"]; ok {
if t, e = time.Parse(http.TimeFormat, exp); e == nil {
objInfo.Expires = t.UTC()
}
}
objInfo.backendType = BackendErasure
// Extract etag from metadata.
objInfo.ETag = extractETag(fi.Metadata)
// Add user tags to the object info
tags := fi.Metadata[xhttp.AmzObjectTagging]
if len(tags) != 0 {
objInfo.UserTags = tags
}
// Add replication status to the object info
objInfo.ReplicationStatus = replication.StatusType(fi.Metadata[xhttp.AmzBucketReplicationStatus])
if fi.Deleted {
objInfo.ReplicationStatus = replication.StatusType(fi.DeleteMarkerReplicationStatus)
}
objInfo.TransitionStatus = fi.TransitionStatus
objInfo.transitionedObjName = fi.TransitionedObjName
objInfo.TransitionTier = fi.TransitionTier
// etag/md5Sum has already been extracted. We need to
// remove to avoid it from appearing as part of
// response headers. e.g, X-Minio-* or X-Amz-*.
// Tags have also been extracted, we remove that as well.
objInfo.UserDefined = cleanMetadata(fi.Metadata)
// All the parts per object.
objInfo.Parts = fi.Parts
// Update storage class
if sc, ok := fi.Metadata[xhttp.AmzStorageClass]; ok {
objInfo.StorageClass = sc
} else {
objInfo.StorageClass = globalMinioDefaultStorageClass
}
objInfo.VersionPurgeStatus = fi.VersionPurgeStatus
// set restore status for transitioned object
restoreHdr, ok := fi.Metadata[xhttp.AmzRestore]
if ok {
if restoreStatus, err := parseRestoreObjStatus(restoreHdr); err == nil {
objInfo.RestoreOngoing = restoreStatus.Ongoing()
objInfo.RestoreExpires, _ = restoreStatus.Expiry()
}
}
// Success.
return objInfo
}
// objectPartIndex - returns the index of matching object part number.
func objectPartIndex(parts []ObjectPartInfo, partNumber int) int {
for i, part := range parts {
if partNumber == part.Number {
return i
}
}
return -1
}
// AddObjectPart - add a new object part in order.
func (fi *FileInfo) AddObjectPart(partNumber int, partETag string, partSize int64, actualSize int64) {
partInfo := ObjectPartInfo{
Number: partNumber,
ETag: partETag,
Size: partSize,
ActualSize: actualSize,
}
// Update part info if it already exists.
for i, part := range fi.Parts {
if partNumber == part.Number {
fi.Parts[i] = partInfo
return
}
}
// Proceed to include new part info.
fi.Parts = append(fi.Parts, partInfo)
// Parts in FileInfo should be in sorted order by part number.
sort.Sort(byObjectPartNumber(fi.Parts))
}
// ObjectToPartOffset - translate offset of an object to offset of its individual part.
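// For example, for an object with two parts of 5 MiB and 3 MiB, offset 6 MiB maps to
// part index 1 with a part offset of 1 MiB.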
func (fi FileInfo) ObjectToPartOffset(ctx context.Context, offset int64) (partIndex int, partOffset int64, err error) {
if offset == 0 {
// Special case - if offset is 0, then partIndex and partOffset are always 0.
return 0, 0, nil
}
partOffset = offset
// Seek until object offset maps to a particular part offset.
for i, part := range fi.Parts {
partIndex = i
		// Offset is smaller than the part size; we have reached the proper part offset.
if partOffset < part.Size {
return partIndex, partOffset, nil
}
		// Continue towards the next part.
partOffset -= part.Size
}
logger.LogIf(ctx, InvalidRange{})
	// Offset is beyond the size of the object; return InvalidRange.
return 0, 0, InvalidRange{}
}
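// findFileInfoInQuorum returns the first FileInfo whose ModTime and DataDir match and
// whose metadata hash (computed over part numbers, erasure distribution and inline
// data length) is shared by at least `quorum` of the given entries. For example, if
// 10 of 16 entries hash identically, any quorum up to 10 succeeds while a quorum of
// 11 fails with errErasureReadQuorum.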
func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (xmv FileInfo, e error) {
metaHashes := make([]string, len(metaArr))
h := sha256.New()
for i, meta := range metaArr {
if meta.IsValid() && meta.ModTime.Equal(modTime) && meta.DataDir == dataDir {
for _, part := range meta.Parts {
h.Write([]byte(fmt.Sprintf("part.%d", part.Number)))
}
h.Write([]byte(fmt.Sprintf("%v", meta.Erasure.Distribution)))
// make sure that length of Data is same
h.Write([]byte(fmt.Sprintf("%v", len(meta.Data))))
metaHashes[i] = hex.EncodeToString(h.Sum(nil))
h.Reset()
}
}
metaHashCountMap := make(map[string]int)
for _, hash := range metaHashes {
if hash == "" {
continue
}
metaHashCountMap[hash]++
}
maxHash := ""
maxCount := 0
for hash, count := range metaHashCountMap {
if count > maxCount {
maxCount = count
maxHash = hash
}
}
if maxCount < quorum {
return FileInfo{}, errErasureReadQuorum
}
for i, hash := range metaHashes {
if hash == maxHash {
return metaArr[i], nil
}
}
return FileInfo{}, errErasureReadQuorum
}
// pickValidFileInfo - picks one valid FileInfo content and returns from a
// slice of FileInfo.
func pickValidFileInfo(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (xmv FileInfo, e error) {
return findFileInfoInQuorum(ctx, metaArr, modTime, dataDir, quorum)
}
// writeUniqueFileInfo - writes unique `xl.meta` content for each disk concurrently.
func writeUniqueFileInfo(ctx context.Context, disks []StorageAPI, bucket, prefix string, files []FileInfo, quorum int) ([]StorageAPI, error) {
g := errgroup.WithNErrs(len(disks))
// Start writing `xl.meta` to all disks in parallel.
for index := range disks {
index := index
g.Go(func() error {
if disks[index] == nil {
return errDiskNotFound
}
// Pick one FileInfo for a disk at index.
fi := files[index]
fi.Erasure.Index = index + 1
if fi.IsValid() {
return disks[index].WriteMetadata(ctx, bucket, prefix, fi)
}
return errCorruptedFormat
}, index)
}
// Wait for all the routines.
mErrs := g.Wait()
err := reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, quorum)
return evalDisks(disks, mErrs), err
}
// Returns per object readQuorum and writeQuorum
// readQuorum is the min required disks to read data.
// writeQuorum is the min required disks to write data.
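// For example, an object with 12 data and 4 parity blocks gets a read quorum of 12
// and a write quorum of 12; when data and parity blocks are both 8, the write quorum
// is bumped to 9 (dataBlocks+1).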
func objectQuorumFromMeta(ctx context.Context, partsMetaData []FileInfo, errs []error, defaultParityCount int) (objectReadQuorum, objectWriteQuorum int, err error) {
// get the latest updated Metadata and a count of all the latest updated FileInfo(s)
latestFileInfo, err := getLatestFileInfo(ctx, partsMetaData, errs)
if err != nil {
return 0, 0, err
}
dataBlocks := latestFileInfo.Erasure.DataBlocks
parityBlocks := globalStorageClass.GetParityForSC(latestFileInfo.Metadata[xhttp.AmzStorageClass])
if parityBlocks <= 0 {
parityBlocks = defaultParityCount
}
writeQuorum := dataBlocks
if dataBlocks == parityBlocks {
writeQuorum++
}
// Since all the valid erasure code meta updated at the same time are equivalent, pass dataBlocks
// from latestFileInfo to get the quorum
return dataBlocks, writeQuorum, nil
}
| cmd/erasure-metadata.go | 1 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.0017269374802708626,
0.00023711893300060183,
0.00015921416343189776,
0.0001741126470733434,
0.00026516124489717185
] |
{
"id": 8,
"code_window": [
"\n",
"\tfor _, test := range tests {\n",
"\t\ttest := test\n",
"\t\tt.Run(\"\", func(t *testing.T) {\n",
"\t\t\t_, err := findFileInfoInQuorum(context.Background(), test.fis, test.modTime, test.dataDir, 8)\n",
"\t\t\tif err != test.expectedErr {\n",
"\t\t\t\tt.Errorf(\"Expected %s, got %s\", test.expectedErr, err)\n",
"\t\t\t}\n",
"\t\t})\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t_, err := findFileInfoInQuorum(context.Background(), test.fis, test.modTime, test.dataDir, test.expectedQuorum)\n"
],
"file_path": "cmd/erasure-metadata_test.go",
"type": "replace",
"edit_start_line_idx": 200
} | /*
* MinIO Object Storage (c) 2021 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var webpack = require('webpack')
var path = require('path')
var glob = require('glob-all')
var CopyWebpackPlugin = require('copy-webpack-plugin')
var PurgecssPlugin = require('purgecss-webpack-plugin')
var exports = {
context: __dirname,
mode: 'development',
entry: [
path.resolve(__dirname, 'app/index.js')
],
output: {
path: path.resolve(__dirname, 'dev'),
filename: 'index_bundle.js',
publicPath: '/minio/'
},
module: {
rules: [{
test: /\.js$/,
exclude: /(node_modules|bower_components)/,
use: [{
loader: 'babel-loader',
options: {
presets: ['react', 'es2015']
}
}]
}, {
test: /\.less$/,
use: [{
loader: 'style-loader'
}, {
loader: 'css-loader'
}, {
loader: 'less-loader'
}]
}, {
test: /\.css$/,
use: [{
loader: 'style-loader'
}, {
loader: 'css-loader'
}]
}, {
test: /\.(eot|woff|woff2|ttf|svg|png)/,
use: [{
loader: 'url-loader'
}]
}]
},
node:{
fs:'empty'
},
devServer: {
historyApiFallback: {
index: '/minio/'
},
proxy: {
'/minio/webrpc': {
target: 'http://localhost:9000',
secure: false,
headers: {'Host': "localhost:9000"}
},
'/minio/upload/*': {
target: 'http://localhost:9000',
secure: false
},
'/minio/download/*': {
target: 'http://localhost:9000',
secure: false
},
'/minio/zip': {
target: 'http://localhost:9000',
secure: false
}
}
},
plugins: [
new CopyWebpackPlugin({patterns: [
{from: 'app/css/loader.css'},
{from: 'app/img/browsers/chrome.png'},
{from: 'app/img/browsers/firefox.png'},
{from: 'app/img/browsers/safari.png'},
{from: 'app/img/logo.svg'},
{from: 'app/img/favicon/favicon-16x16.png'},
{from: 'app/img/favicon/favicon-32x32.png'},
{from: 'app/img/favicon/favicon-96x96.png'},
{from: 'app/index.html'}
]}),
new webpack.ContextReplacementPlugin(/moment[\\\/]locale$/, /^\.\/(en)$/),
new PurgecssPlugin({
paths: glob.sync([
path.join(__dirname, 'app/index.html'),
path.join(__dirname, 'app/js/*.js')
])
})
]
}
if (process.env.NODE_ENV === 'dev') {
exports.entry = [
'webpack-dev-server/client?http://localhost:8080',
path.resolve(__dirname, 'app/index.js')
]
}
module.exports = exports
| browser/webpack.config.js | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.00017682519683148712,
0.0001724684698274359,
0.00016476133896503597,
0.00017255971033591777,
0.0000030810954285698244
] |
{
"id": 8,
"code_window": [
"\n",
"\tfor _, test := range tests {\n",
"\t\ttest := test\n",
"\t\tt.Run(\"\", func(t *testing.T) {\n",
"\t\t\t_, err := findFileInfoInQuorum(context.Background(), test.fis, test.modTime, test.dataDir, 8)\n",
"\t\t\tif err != test.expectedErr {\n",
"\t\t\t\tt.Errorf(\"Expected %s, got %s\", test.expectedErr, err)\n",
"\t\t\t}\n",
"\t\t})\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t_, err := findFileInfoInQuorum(context.Background(), test.fis, test.modTime, test.dataDir, test.expectedQuorum)\n"
],
"file_path": "cmd/erasure-metadata_test.go",
"type": "replace",
"edit_start_line_idx": 200
} | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package net
import (
"context"
"encoding/json"
"errors"
"fmt"
"net"
"net/url"
"path"
"strings"
)
// URL - improved JSON friendly url.URL.
type URL url.URL
// IsEmpty - checks URL is empty or not.
func (u URL) IsEmpty() bool {
return u.String() == ""
}
// String - returns string representation of URL.
func (u URL) String() string {
	// if the port number is 80 or 443, remove it for the http and https schemes respectively
if u.Host != "" {
host, err := ParseHost(u.Host)
if err != nil {
panic(err)
}
switch {
case u.Scheme == "http" && host.Port == 80:
fallthrough
case u.Scheme == "https" && host.Port == 443:
u.Host = host.Name
}
}
uu := url.URL(u)
return uu.String()
}
// MarshalJSON - converts to JSON string data.
func (u URL) MarshalJSON() ([]byte, error) {
return json.Marshal(u.String())
}
// UnmarshalJSON - parses given data into URL.
func (u *URL) UnmarshalJSON(data []byte) (err error) {
var s string
if err = json.Unmarshal(data, &s); err != nil {
return err
}
// Allow empty string
if s == "" {
*u = URL{}
return nil
}
var ru *URL
if ru, err = ParseURL(s); err != nil {
return err
}
*u = *ru
return nil
}
// ParseHTTPURL - parses a string into an HTTP URL; the string is
// expected to be of the form http:// or https://
func ParseHTTPURL(s string) (u *URL, err error) {
u, err = ParseURL(s)
if err != nil {
return nil, err
}
switch u.Scheme {
default:
return nil, fmt.Errorf("unexpected scheme found %s", u.Scheme)
case "http", "https":
return u, nil
}
}
// ParseURL - parses string into URL.
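// For example, ParseURL("http://server/a//b/") returns a URL whose Path has been
// cleaned to "/a/b/": the double slash is collapsed while the trailing slash is kept.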
func ParseURL(s string) (u *URL, err error) {
var uu *url.URL
if uu, err = url.Parse(s); err != nil {
return nil, err
}
if uu.Hostname() == "" {
if uu.Scheme != "" {
return nil, errors.New("scheme appears with empty host")
}
} else {
portStr := uu.Port()
if portStr == "" {
switch uu.Scheme {
case "http":
portStr = "80"
case "https":
portStr = "443"
}
}
if _, err = ParseHost(net.JoinHostPort(uu.Hostname(), portStr)); err != nil {
return nil, err
}
}
// Clean path in the URL.
// Note: path.Clean() is used on purpose because in MS Windows filepath.Clean() converts
// `/` into `\` ie `/foo` becomes `\foo`
if uu.Path != "" {
uu.Path = path.Clean(uu.Path)
}
// path.Clean removes the trailing '/' and converts '//' to '/'.
if strings.HasSuffix(s, "/") && !strings.HasSuffix(uu.Path, "/") {
uu.Path += "/"
}
v := URL(*uu)
u = &v
return u, nil
}
// IsNetworkOrHostDown - if there was a network error or if the host is down.
// expectTimeouts indicates that *context* timeouts are expected and does not
// indicate a downed host. Other timeouts still return down.
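// For example, a *net.DNSError wrapped inside a *url.Error is reported as down, while
// a request cancelled via context.Canceled is not.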
func IsNetworkOrHostDown(err error, expectTimeouts bool) bool {
if err == nil {
return false
}
if errors.Is(err, context.Canceled) {
return false
}
if expectTimeouts && errors.Is(err, context.DeadlineExceeded) {
return false
}
	// We need to figure out if the error is either a timeout
	// or a non-temporary error.
urlErr := &url.Error{}
if errors.As(err, &urlErr) {
switch urlErr.Err.(type) {
case *net.DNSError, *net.OpError, net.UnknownNetworkError:
return true
}
}
var e net.Error
if errors.As(err, &e) {
if e.Timeout() {
return true
}
}
// Fallback to other mechanisms.
switch {
case strings.Contains(err.Error(), "Connection closed by foreign host"):
return true
case strings.Contains(err.Error(), "TLS handshake timeout"):
// If error is - tlsHandshakeTimeoutError.
return true
case strings.Contains(err.Error(), "i/o timeout"):
// If error is - tcp timeoutError.
return true
case strings.Contains(err.Error(), "connection timed out"):
// If err is a net.Dial timeout.
return true
case strings.Contains(err.Error(), "connection reset by peer"):
		// If err is a peer reset on a socket.
return true
case strings.Contains(err.Error(), "broken pipe"):
		// If err is a broken pipe on a socket.
return true
case strings.Contains(strings.ToLower(err.Error()), "503 service unavailable"):
// Denial errors
return true
}
return false
}
| pkg/net/url.go | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.0049600750207901,
0.0004207949968986213,
0.00016148018767125905,
0.00017237306747119874,
0.001019789488054812
] |
{
"id": 8,
"code_window": [
"\n",
"\tfor _, test := range tests {\n",
"\t\ttest := test\n",
"\t\tt.Run(\"\", func(t *testing.T) {\n",
"\t\t\t_, err := findFileInfoInQuorum(context.Background(), test.fis, test.modTime, test.dataDir, 8)\n",
"\t\t\tif err != test.expectedErr {\n",
"\t\t\t\tt.Errorf(\"Expected %s, got %s\", test.expectedErr, err)\n",
"\t\t\t}\n",
"\t\t})\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t_, err := findFileInfoInQuorum(context.Background(), test.fis, test.modTime, test.dataDir, test.expectedQuorum)\n"
],
"file_path": "cmd/erasure-metadata_test.go",
"type": "replace",
"edit_start_line_idx": 200
} | Attribution 4.0 International
=======================================================================
Creative Commons Corporation ("Creative Commons") is not a law firm and
does not provide legal services or legal advice. Distribution of
Creative Commons public licenses does not create a lawyer-client or
other relationship. Creative Commons makes its licenses and related
information available on an "as-is" basis. Creative Commons gives no
warranties regarding its licenses, any material licensed under their
terms and conditions, or any related information. Creative Commons
disclaims all liability for damages resulting from their use to the
fullest extent possible.
Using Creative Commons Public Licenses
Creative Commons public licenses provide a standard set of terms and
conditions that creators and other rights holders may use to share
original works of authorship and other material subject to copyright
and certain other rights specified in the public license below. The
following considerations are for informational purposes only, are not
exhaustive, and do not form part of our licenses.
Considerations for licensors: Our public licenses are
intended for use by those authorized to give the public
permission to use material in ways otherwise restricted by
copyright and certain other rights. Our licenses are
irrevocable. Licensors should read and understand the terms
and conditions of the license they choose before applying it.
Licensors should also secure all rights necessary before
applying our licenses so that the public can reuse the
material as expected. Licensors should clearly mark any
material not subject to the license. This includes other CC-
licensed material, or material used under an exception or
limitation to copyright. More considerations for licensors:
wiki.creativecommons.org/Considerations_for_licensors
Considerations for the public: By using one of our public
licenses, a licensor grants the public permission to use the
licensed material under specified terms and conditions. If
the licensor's permission is not necessary for any reason--for
example, because of any applicable exception or limitation to
copyright--then that use is not regulated by the license. Our
licenses grant only permissions under copyright and certain
other rights that a licensor has authority to grant. Use of
the licensed material may still be restricted for other
reasons, including because others have copyright or other
rights in the material. A licensor may make special requests,
such as asking that all changes be marked or described.
Although not required by our licenses, you are encouraged to
respect those requests where reasonable. More_considerations
for the public:
wiki.creativecommons.org/Considerations_for_licensees
=======================================================================
Creative Commons Attribution 4.0 International Public License
By exercising the Licensed Rights (defined below), You accept and agree
to be bound by the terms and conditions of this Creative Commons
Attribution 4.0 International Public License ("Public License"). To the
extent this Public License may be interpreted as a contract, You are
granted the Licensed Rights in consideration of Your acceptance of
these terms and conditions, and the Licensor grants You such rights in
consideration of benefits the Licensor receives from making the
Licensed Material available under these terms and conditions.
Section 1 -- Definitions.
a. Adapted Material means material subject to Copyright and Similar
Rights that is derived from or based upon the Licensed Material
and in which the Licensed Material is translated, altered,
arranged, transformed, or otherwise modified in a manner requiring
permission under the Copyright and Similar Rights held by the
Licensor. For purposes of this Public License, where the Licensed
Material is a musical work, performance, or sound recording,
Adapted Material is always produced where the Licensed Material is
synched in timed relation with a moving image.
b. Adapter's License means the license You apply to Your Copyright
and Similar Rights in Your contributions to Adapted Material in
accordance with the terms and conditions of this Public License.
c. Copyright and Similar Rights means copyright and/or similar rights
closely related to copyright including, without limitation,
performance, broadcast, sound recording, and Sui Generis Database
Rights, without regard to how the rights are labeled or
categorized. For purposes of this Public License, the rights
specified in Section 2(b)(1)-(2) are not Copyright and Similar
Rights.
d. Effective Technological Measures means those measures that, in the
absence of proper authority, may not be circumvented under laws
fulfilling obligations under Article 11 of the WIPO Copyright
Treaty adopted on December 20, 1996, and/or similar international
agreements.
e. Exceptions and Limitations means fair use, fair dealing, and/or
any other exception or limitation to Copyright and Similar Rights
that applies to Your use of the Licensed Material.
f. Licensed Material means the artistic or literary work, database,
or other material to which the Licensor applied this Public
License.
g. Licensed Rights means the rights granted to You subject to the
terms and conditions of this Public License, which are limited to
all Copyright and Similar Rights that apply to Your use of the
Licensed Material and that the Licensor has authority to license.
h. Licensor means the individual(s) or entity(ies) granting rights
under this Public License.
i. Share means to provide material to the public by any means or
process that requires permission under the Licensed Rights, such
as reproduction, public display, public performance, distribution,
dissemination, communication, or importation, and to make material
available to the public including in ways that members of the
public may access the material from a place and at a time
individually chosen by them.
j. Sui Generis Database Rights means rights other than copyright
resulting from Directive 96/9/EC of the European Parliament and of
the Council of 11 March 1996 on the legal protection of databases,
as amended and/or succeeded, as well as other essentially
equivalent rights anywhere in the world.
k. You means the individual or entity exercising the Licensed Rights
under this Public License. Your has a corresponding meaning.
Section 2 -- Scope.
a. License grant.
1. Subject to the terms and conditions of this Public License,
the Licensor hereby grants You a worldwide, royalty-free,
non-sublicensable, non-exclusive, irrevocable license to
exercise the Licensed Rights in the Licensed Material to:
a. reproduce and Share the Licensed Material, in whole or
in part; and
b. produce, reproduce, and Share Adapted Material.
2. Exceptions and Limitations. For the avoidance of doubt, where
Exceptions and Limitations apply to Your use, this Public
License does not apply, and You do not need to comply with
its terms and conditions.
3. Term. The term of this Public License is specified in Section
6(a).
4. Media and formats; technical modifications allowed. The
Licensor authorizes You to exercise the Licensed Rights in
all media and formats whether now known or hereafter created,
and to make technical modifications necessary to do so. The
Licensor waives and/or agrees not to assert any right or
authority to forbid You from making technical modifications
necessary to exercise the Licensed Rights, including
technical modifications necessary to circumvent Effective
Technological Measures. For purposes of this Public License,
simply making modifications authorized by this Section 2(a)
(4) never produces Adapted Material.
5. Downstream recipients.
a. Offer from the Licensor -- Licensed Material. Every
recipient of the Licensed Material automatically
receives an offer from the Licensor to exercise the
Licensed Rights under the terms and conditions of this
Public License.
b. No downstream restrictions. You may not offer or impose
any additional or different terms or conditions on, or
apply any Effective Technological Measures to, the
Licensed Material if doing so restricts exercise of the
Licensed Rights by any recipient of the Licensed
Material.
6. No endorsement. Nothing in this Public License constitutes or
may be construed as permission to assert or imply that You
are, or that Your use of the Licensed Material is, connected
with, or sponsored, endorsed, or granted official status by,
the Licensor or others designated to receive attribution as
provided in Section 3(a)(1)(A)(i).
b. Other rights.
1. Moral rights, such as the right of integrity, are not
licensed under this Public License, nor are publicity,
privacy, and/or other similar personality rights; however, to
the extent possible, the Licensor waives and/or agrees not to
assert any such rights held by the Licensor to the limited
extent necessary to allow You to exercise the Licensed
Rights, but not otherwise.
2. Patent and trademark rights are not licensed under this
Public License.
3. To the extent possible, the Licensor waives any right to
collect royalties from You for the exercise of the Licensed
Rights, whether directly or through a collecting society
under any voluntary or waivable statutory or compulsory
licensing scheme. In all other cases the Licensor expressly
reserves any right to collect such royalties.
Section 3 -- License Conditions.
Your exercise of the Licensed Rights is expressly made subject to the
following conditions.
a. Attribution.
1. If You Share the Licensed Material (including in modified
form), You must:
a. retain the following if it is supplied by the Licensor
with the Licensed Material:
i. identification of the creator(s) of the Licensed
Material and any others designated to receive
attribution, in any reasonable manner requested by
the Licensor (including by pseudonym if
designated);
ii. a copyright notice;
iii. a notice that refers to this Public License;
iv. a notice that refers to the disclaimer of
warranties;
v. a URI or hyperlink to the Licensed Material to the
extent reasonably practicable;
b. indicate if You modified the Licensed Material and
retain an indication of any previous modifications; and
c. indicate the Licensed Material is licensed under this
Public License, and include the text of, or the URI or
hyperlink to, this Public License.
2. You may satisfy the conditions in Section 3(a)(1) in any
reasonable manner based on the medium, means, and context in
which You Share the Licensed Material. For example, it may be
reasonable to satisfy the conditions by providing a URI or
hyperlink to a resource that includes the required
information.
3. If requested by the Licensor, You must remove any of the
information required by Section 3(a)(1)(A) to the extent
reasonably practicable.
4. If You Share Adapted Material You produce, the Adapter's
License You apply must not prevent recipients of the Adapted
Material from complying with this Public License.
Section 4 -- Sui Generis Database Rights.
Where the Licensed Rights include Sui Generis Database Rights that
apply to Your use of the Licensed Material:
a. for the avoidance of doubt, Section 2(a)(1) grants You the right
to extract, reuse, reproduce, and Share all or a substantial
portion of the contents of the database;
b. if You include all or a substantial portion of the database
contents in a database in which You have Sui Generis Database
Rights, then the database in which You have Sui Generis Database
Rights (but not its individual contents) is Adapted Material; and
c. You must comply with the conditions in Section 3(a) if You Share
all or a substantial portion of the contents of the database.
For the avoidance of doubt, this Section 4 supplements and does not
replace Your obligations under this Public License where the Licensed
Rights include other Copyright and Similar Rights.
Section 5 -- Disclaimer of Warranties and Limitation of Liability.
a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
c. The disclaimer of warranties and limitation of liability provided
above shall be interpreted in a manner that, to the extent
possible, most closely approximates an absolute disclaimer and
waiver of all liability.
Section 6 -- Term and Termination.
a. This Public License applies for the term of the Copyright and
Similar Rights licensed here. However, if You fail to comply with
this Public License, then Your rights under this Public License
terminate automatically.
b. Where Your right to use the Licensed Material has terminated under
Section 6(a), it reinstates:
1. automatically as of the date the violation is cured, provided
it is cured within 30 days of Your discovery of the
violation; or
2. upon express reinstatement by the Licensor.
For the avoidance of doubt, this Section 6(b) does not affect any
right the Licensor may have to seek remedies for Your violations
of this Public License.
c. For the avoidance of doubt, the Licensor may also offer the
Licensed Material under separate terms or conditions or stop
distributing the Licensed Material at any time; however, doing so
will not terminate this Public License.
d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
License.
Section 7 -- Other Terms and Conditions.
a. The Licensor shall not be bound by any additional or different
terms or conditions communicated by You unless expressly agreed.
b. Any arrangements, understandings, or agreements regarding the
Licensed Material not stated herein are separate from and
independent of the terms and conditions of this Public License.
Section 8 -- Interpretation.
a. For the avoidance of doubt, this Public License does not, and
shall not be interpreted to, reduce, limit, restrict, or impose
conditions on any use of the Licensed Material that could lawfully
be made without permission under this Public License.
b. To the extent possible, if any provision of this Public License is
deemed unenforceable, it shall be automatically reformed to the
minimum extent necessary to make it enforceable. If the provision
cannot be reformed, it shall be severed from this Public License
without affecting the enforceability of the remaining terms and
conditions.
c. No term or condition of this Public License will be waived and no
failure to comply consented to unless expressly agreed to by the
Licensor.
d. Nothing in this Public License constitutes or may be interpreted
as a limitation upon, or waiver of, any privileges and immunities
that apply to the Licensor or You, including from the legal
processes of any jurisdiction or authority.
=======================================================================
Creative Commons is not a party to its public
licenses. Notwithstanding, Creative Commons may elect to apply one of
its public licenses to material it publishes and in those instances
will be considered the “Licensor.” The text of the Creative Commons
public licenses is dedicated to the public domain under the CC0 Public
Domain Dedication. Except for the limited purpose of indicating that
material is shared under a Creative Commons public license or as
otherwise permitted by the Creative Commons policies published at
creativecommons.org/policies, Creative Commons does not authorize the
use of the trademark "Creative Commons" or any other trademark or logo
of Creative Commons without its prior written consent including,
without limitation, in connection with any unauthorized modifications
to any of its public licenses or any other arrangements,
understandings, or agreements concerning use of licensed material. For
the avoidance of doubt, this paragraph does not form part of the
public licenses.
Creative Commons may be contacted at creativecommons.org.
| docs/LICENSE | 0 | https://github.com/minio/minio/commit/cacdeca8cc8357ecee3d8245707526011df781f3 | [
0.00018073161481879652,
0.00017720511823426932,
0.00016913328727241606,
0.00017757725436240435,
0.000002262369207528536
] |
{
"id": 0,
"code_window": [
" \"wrap.go\",\n",
" ],\n",
" importmap = \"k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters\",\n",
" importpath = \"k8s.io/apiserver/pkg/server/filters\",\n",
" deps = [\n",
" \"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
" \"//staging/src/k8s.io/api/core/v1:go_default_library\",\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/BUILD",
"type": "add",
"edit_start_line_idx": 45
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package filters
import (
"errors"
"net/http"
utilwaitgroup "k8s.io/apimachinery/pkg/util/waitgroup"
"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
apirequest "k8s.io/apiserver/pkg/endpoints/request"
)
// WithWaitGroup adds all non long-running requests to wait group, which is used for graceful shutdown.
func WithWaitGroup(handler http.Handler, longRunning apirequest.LongRunningRequestCheck, wg *utilwaitgroup.SafeWaitGroup) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
ctx := req.Context()
requestInfo, ok := apirequest.RequestInfoFrom(ctx)
if !ok {
// if this happens, the handler chain isn't setup correctly because there is no request info
responsewriters.InternalError(w, req, errors.New("no RequestInfo found in the context"))
return
}
if !longRunning(req, requestInfo) {
if err := wg.Add(1); err != nil {
http.Error(w, "apiserver is shutting down.", http.StatusInternalServerError)
return
}
defer wg.Done()
}
handler.ServeHTTP(w, req)
})
}
| staging/src/k8s.io/apiserver/pkg/server/filters/waitgroup.go | 1 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.023279234766960144,
0.0047929296270012856,
0.00016739747661631554,
0.00017463239782955498,
0.009243153035640717
] |
{
"id": 0,
"code_window": [
" \"wrap.go\",\n",
" ],\n",
" importmap = \"k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters\",\n",
" importpath = \"k8s.io/apiserver/pkg/server/filters\",\n",
" deps = [\n",
" \"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
" \"//staging/src/k8s.io/api/core/v1:go_default_library\",\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/BUILD",
"type": "add",
"edit_start_line_idx": 45
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"fake_example_client.go",
"fake_testtype.go",
],
importmap = "k8s.io/kubernetes/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake",
importpath = "k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake",
visibility = ["//visibility:public"],
deps = [
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/code-generator/_examples/apiserver/apis/example:go_default_library",
"//staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion:go_default_library",
],
)
| staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.009535896591842175,
0.0054425764828920364,
0.0031156970653682947,
0.0036761360242962837,
0.002903443295508623
] |
{
"id": 0,
"code_window": [
" \"wrap.go\",\n",
" ],\n",
" importmap = \"k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters\",\n",
" importpath = \"k8s.io/apiserver/pkg/server/filters\",\n",
" deps = [\n",
" \"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
" \"//staging/src/k8s.io/api/core/v1:go_default_library\",\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/BUILD",
"type": "add",
"edit_start_line_idx": 45
} | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// resourcequota contains a controller that makes resource quota usage observations
package resourcequota // import "k8s.io/kubernetes/pkg/controller/resourcequota"
| pkg/controller/resourcequota/doc.go | 0 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.00017432511958759278,
0.00017031870083883405,
0.00016631226753816009,
0.00017031870083883405,
0.0000040064260247163475
] |
{
"id": 0,
"code_window": [
" \"wrap.go\",\n",
" ],\n",
" importmap = \"k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters\",\n",
" importpath = \"k8s.io/apiserver/pkg/server/filters\",\n",
" deps = [\n",
" \"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
" \"//staging/src/k8s.io/api/core/v1:go_default_library\",\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/BUILD",
"type": "add",
"edit_start_line_idx": 45
} | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gcp
import (
"fmt"
"strings"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
var _ = SIGDescribe("[Disruptive]NodeLease", func() {
f := framework.NewDefaultFramework("node-lease-test")
var systemPodsNo int32
var c clientset.Interface
var ns string
var group string
ginkgo.BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
systemPods, err := e2epod.GetPodsInNamespace(c, ns, map[string]string{})
gomega.Expect(err).To(gomega.BeNil())
systemPodsNo = int32(len(systemPods))
if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
} else {
group = framework.TestContext.CloudConfig.NodeInstanceGroup
}
})
ginkgo.Describe("NodeLease deletion", func() {
var skipped bool
ginkgo.BeforeEach(func() {
skipped = true
framework.SkipUnlessProviderIs("gce", "gke", "aws")
framework.SkipUnlessNodeCountIsAtLeast(2)
skipped = false
})
ginkgo.AfterEach(func() {
if skipped {
return
}
ginkgo.By("restoring the original node instance group size")
if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
framework.Failf("Couldn't restore the original node instance group size: %v", err)
}
// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.
// Most tests make use of some proxy feature to verify functionality. So, if a reboot test runs
// right before a test that tries to get logs, for example, we may get unlucky and try to use a
// closed tunnel to a node that was recently rebooted. There's no good way to framework.Poll for proxies
// being closed, so we sleep.
//
// TODO(cjcullen) reduce this sleep (#19314)
if framework.ProviderIs("gke") {
ginkgo.By("waiting 5 minutes for all dead tunnels to be dropped")
time.Sleep(5 * time.Minute)
}
if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
framework.Failf("Couldn't restore the original node instance group size: %v", err)
}
if err := e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil {
framework.Failf("Couldn't restore the original cluster size: %v", err)
}
// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
// the cluster is restored to health.
ginkgo.By("waiting for system pods to successfully restart")
err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
gomega.Expect(err).To(gomega.BeNil())
})
ginkgo.It("node lease should be deleted when corresponding node is deleted", func() {
leaseClient := c.CoordinationV1().Leases(v1.NamespaceNodeLease)
err := e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute)
gomega.Expect(err).To(gomega.BeNil())
ginkgo.By("verify node lease exists for every nodes")
originalNodes, err := e2enode.GetReadySchedulableNodes(c)
framework.ExpectNoError(err)
framework.ExpectEqual(len(originalNodes.Items), framework.TestContext.CloudConfig.NumNodes)
gomega.Eventually(func() error {
pass := true
for _, node := range originalNodes.Items {
if _, err := leaseClient.Get(node.ObjectMeta.Name, metav1.GetOptions{}); err != nil {
framework.Logf("Try to get lease of node %s, but got error: %v", node.ObjectMeta.Name, err)
pass = false
}
}
if pass {
return nil
}
return fmt.Errorf("some node lease is not ready")
}, 1*time.Minute, 5*time.Second).Should(gomega.BeNil())
targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes - 1)
ginkgo.By(fmt.Sprintf("decreasing cluster size to %d", targetNumNodes))
err = framework.ResizeGroup(group, targetNumNodes)
gomega.Expect(err).To(gomega.BeNil())
err = framework.WaitForGroupSize(group, targetNumNodes)
gomega.Expect(err).To(gomega.BeNil())
err = e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes-1, 10*time.Minute)
gomega.Expect(err).To(gomega.BeNil())
targetNodes, err := e2enode.GetReadySchedulableNodes(c)
framework.ExpectNoError(err)
framework.ExpectEqual(len(targetNodes.Items), int(targetNumNodes))
ginkgo.By("verify node lease is deleted for the deleted node")
var deletedNodeName string
for _, originalNode := range originalNodes.Items {
originalNodeName := originalNode.ObjectMeta.Name
for _, targetNode := range targetNodes.Items {
if originalNodeName == targetNode.ObjectMeta.Name {
continue
}
}
deletedNodeName = originalNodeName
break
}
framework.ExpectNotEqual(deletedNodeName, "")
gomega.Eventually(func() error {
if _, err := leaseClient.Get(deletedNodeName, metav1.GetOptions{}); err == nil {
return fmt.Errorf("node lease is not deleted yet for node %q", deletedNodeName)
}
return nil
}, 1*time.Minute, 5*time.Second).Should(gomega.BeNil())
ginkgo.By("verify node leases still exist for remaining nodes")
gomega.Eventually(func() error {
for _, node := range targetNodes.Items {
if _, err := leaseClient.Get(node.ObjectMeta.Name, metav1.GetOptions{}); err != nil {
return err
}
}
return nil
}, 1*time.Minute, 5*time.Second).Should(gomega.BeNil())
})
})
})
| test/e2e/cloud/gcp/node_lease.go | 0 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.0019801906310021877,
0.0002780191134661436,
0.0001646425953367725,
0.00017220484733115882,
0.0004255498934071511
] |
{
"id": 1,
"code_window": [
" \"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library\",\n",
" \"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library\",\n",
" \"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library\",\n",
" \"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library\",\n",
" \"//staging/src/k8s.io/apimachinery/pkg/util/waitgroup:go_default_library\",\n",
" \"//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library\",\n"
],
"labels": [
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library\",\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/BUILD",
"type": "add",
"edit_start_line_idx": 46
} | package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = [
"content_type_test.go",
"cors_test.go",
"maxinflight_test.go",
"timeout_test.go",
],
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/filters:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"content_type.go",
"cors.go",
"doc.go",
"longrunning.go",
"maxinflight.go",
"timeout.go",
"waitgroup.go",
"wrap.go",
],
importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters",
importpath = "k8s.io/apiserver/pkg/server/filters",
deps = [
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/waitgroup:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/metrics:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/httplog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
| staging/src/k8s.io/apiserver/pkg/server/filters/BUILD | 1 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.9737652540206909,
0.18949101865291595,
0.00016807098290883005,
0.010451342910528183,
0.3160490095615387
] |
{
"id": 1,
"code_window": [
" \"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library\",\n",
" \"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library\",\n",
" \"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library\",\n",
" \"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library\",\n",
" \"//staging/src/k8s.io/apimachinery/pkg/util/waitgroup:go_default_library\",\n",
" \"//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library\",\n"
],
"labels": [
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library\",\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/BUILD",
"type": "add",
"edit_start_line_idx": 46
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package webhook
import (
"fmt"
apierrors "k8s.io/apimachinery/pkg/api/errors"
)
// ErrCallingWebhook is returned for transport-layer errors calling webhooks. It
// represents a failure to talk to the webhook, not the webhook rejecting a
// request.
type ErrCallingWebhook struct {
WebhookName string
Reason error
}
func (e *ErrCallingWebhook) Error() string {
if e.Reason != nil {
return fmt.Sprintf("failed calling webhook %q: %v", e.WebhookName, e.Reason)
}
return fmt.Sprintf("failed calling webhook %q; no further details available", e.WebhookName)
}
// ErrWebhookRejection represents a webhook properly rejecting a request.
type ErrWebhookRejection struct {
Status *apierrors.StatusError
}
func (e *ErrWebhookRejection) Error() string {
return e.Status.Error()
}
| staging/src/k8s.io/apiserver/pkg/util/webhook/error.go | 0 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.0008347532711923122,
0.00042334868339821696,
0.00016127155686262995,
0.00017703810590319335,
0.0003093634149990976
] |
{
"id": 1,
"code_window": [
" \"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library\",\n",
" \"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library\",\n",
" \"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library\",\n",
" \"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library\",\n",
" \"//staging/src/k8s.io/apimachinery/pkg/util/waitgroup:go_default_library\",\n",
" \"//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library\",\n"
],
"labels": [
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library\",\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/BUILD",
"type": "add",
"edit_start_line_idx": 46
} | // +build linux
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bandwidth
import (
"bufio"
"bytes"
"encoding/hex"
"fmt"
"net"
"strings"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/exec"
"k8s.io/klog"
)
// tcShaper provides an implementation of the Shaper interface on Linux using the 'tc' tool.
// In general, using this requires that the caller possess the NET_CAP_ADMIN capability, though if you
// do this within a container, it only requires the NS_CAPABLE capability for manipulations to that
// container's network namespace.
// It uses the hierarchical token bucket (htb) queuing discipline, which requires Linux 2.4.20 or newer
// or a custom kernel with that queuing discipline backported.
type tcShaper struct {
e exec.Interface
iface string
}
// NewTCShaper makes a new tcShaper for the given interface
func NewTCShaper(iface string) Shaper {
shaper := &tcShaper{
e: exec.New(),
iface: iface,
}
return shaper
}
func (t *tcShaper) execAndLog(cmdStr string, args ...string) error {
klog.V(6).Infof("Running: %s %s", cmdStr, strings.Join(args, " "))
cmd := t.e.Command(cmdStr, args...)
out, err := cmd.CombinedOutput()
klog.V(6).Infof("Output from tc: %s", string(out))
return err
}
func (t *tcShaper) nextClassID() (int, error) {
data, err := t.e.Command("tc", "class", "show", "dev", t.iface).CombinedOutput()
if err != nil {
return -1, err
}
scanner := bufio.NewScanner(bytes.NewBuffer(data))
classes := sets.String{}
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
// skip empty lines
if len(line) == 0 {
continue
}
parts := strings.Split(line, " ")
// expected tc line:
// class htb 1:1 root prio 0 rate 1000Kbit ceil 1000Kbit burst 1600b cburst 1600b
if len(parts) != 14 {
return -1, fmt.Errorf("unexpected output from tc: %s (%v)", scanner.Text(), parts)
}
classes.Insert(parts[2])
}
// Make sure it doesn't go forever
for nextClass := 1; nextClass < 10000; nextClass++ {
if !classes.Has(fmt.Sprintf("1:%d", nextClass)) {
return nextClass, nil
}
}
// This should really never happen
return -1, fmt.Errorf("exhausted class space, please try again")
}
// Convert a CIDR from text to a hex representation
// Strips any masked parts of the IP, so 1.2.3.4/16 becomes hex(1.2.0.0)/ffffffff
func hexCIDR(cidr string) (string, error) {
ip, ipnet, err := net.ParseCIDR(cidr)
if err != nil {
return "", err
}
ip = ip.Mask(ipnet.Mask)
hexIP := hex.EncodeToString([]byte(ip))
hexMask := ipnet.Mask.String()
return hexIP + "/" + hexMask, nil
}
// Convert a CIDR from hex representation to text, opposite of the above.
func asciiCIDR(cidr string) (string, error) {
parts := strings.Split(cidr, "/")
if len(parts) != 2 {
return "", fmt.Errorf("unexpected CIDR format: %s", cidr)
}
ipData, err := hex.DecodeString(parts[0])
if err != nil {
return "", err
}
ip := net.IP(ipData)
maskData, err := hex.DecodeString(parts[1])
if err != nil {
return "", err
}
mask := net.IPMask(maskData)
size, _ := mask.Size()
return fmt.Sprintf("%s/%d", ip.String(), size), nil
}
func (t *tcShaper) findCIDRClass(cidr string) (classAndHandleList [][]string, found bool, err error) {
data, err := t.e.Command("tc", "filter", "show", "dev", t.iface).CombinedOutput()
if err != nil {
return classAndHandleList, false, err
}
hex, err := hexCIDR(cidr)
if err != nil {
return classAndHandleList, false, err
}
spec := fmt.Sprintf("match %s", hex)
scanner := bufio.NewScanner(bytes.NewBuffer(data))
filter := ""
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
if len(line) == 0 {
continue
}
if strings.HasPrefix(line, "filter") {
filter = line
continue
}
if strings.Contains(line, spec) {
parts := strings.Split(filter, " ")
// expected tc line:
// filter parent 1: protocol ip pref 1 u32 fh 800::800 order 2048 key ht 800 bkt 0 flowid 1:1
if len(parts) != 19 {
return classAndHandleList, false, fmt.Errorf("unexpected output from tc: %s %d (%v)", filter, len(parts), parts)
}
resultTmp := []string{parts[18], parts[9]}
classAndHandleList = append(classAndHandleList, resultTmp)
}
}
if len(classAndHandleList) > 0 {
return classAndHandleList, true, nil
}
return classAndHandleList, false, nil
}
func makeKBitString(rsrc *resource.Quantity) string {
return fmt.Sprintf("%dkbit", (rsrc.Value() / 1000))
}
func (t *tcShaper) makeNewClass(rate string) (int, error) {
class, err := t.nextClassID()
if err != nil {
return -1, err
}
if err := t.execAndLog("tc", "class", "add",
"dev", t.iface,
"parent", "1:",
"classid", fmt.Sprintf("1:%d", class),
"htb", "rate", rate); err != nil {
return -1, err
}
return class, nil
}
func (t *tcShaper) Limit(cidr string, upload, download *resource.Quantity) (err error) {
var downloadClass, uploadClass int
if download != nil {
if downloadClass, err = t.makeNewClass(makeKBitString(download)); err != nil {
return err
}
if err := t.execAndLog("tc", "filter", "add",
"dev", t.iface,
"protocol", "ip",
"parent", "1:0",
"prio", "1", "u32",
"match", "ip", "dst", cidr,
"flowid", fmt.Sprintf("1:%d", downloadClass)); err != nil {
return err
}
}
if upload != nil {
if uploadClass, err = t.makeNewClass(makeKBitString(upload)); err != nil {
return err
}
if err := t.execAndLog("tc", "filter", "add",
"dev", t.iface,
"protocol", "ip",
"parent", "1:0",
"prio", "1", "u32",
"match", "ip", "src", cidr,
"flowid", fmt.Sprintf("1:%d", uploadClass)); err != nil {
return err
}
}
return nil
}
// interfaceExists tests whether the interface exists; if it does, it returns true and the status line for the interface.
// It returns false, "", <err> if an error occurs.
func (t *tcShaper) interfaceExists() (bool, string, error) {
data, err := t.e.Command("tc", "qdisc", "show", "dev", t.iface).CombinedOutput()
if err != nil {
return false, "", err
}
value := strings.TrimSpace(string(data))
if len(value) == 0 {
return false, "", nil
}
// Newer versions of tc and/or the kernel return the following instead of nothing:
// qdisc noqueue 0: root refcnt 2
fields := strings.Fields(value)
if len(fields) > 1 && fields[1] == "noqueue" {
return false, "", nil
}
return true, value, nil
}
func (t *tcShaper) ReconcileCIDR(cidr string, upload, download *resource.Quantity) error {
_, found, err := t.findCIDRClass(cidr)
if err != nil {
return err
}
if !found {
return t.Limit(cidr, upload, download)
}
// TODO: actually check bandwidth limits here
return nil
}
func (t *tcShaper) ReconcileInterface() error {
exists, output, err := t.interfaceExists()
if err != nil {
return err
}
if !exists {
klog.V(4).Info("Didn't find bandwidth interface, creating")
return t.initializeInterface()
}
fields := strings.Split(output, " ")
if len(fields) < 12 || fields[1] != "htb" || fields[2] != "1:" {
if err := t.deleteInterface(fields[2]); err != nil {
return err
}
return t.initializeInterface()
}
return nil
}
func (t *tcShaper) initializeInterface() error {
return t.execAndLog("tc", "qdisc", "add", "dev", t.iface, "root", "handle", "1:", "htb", "default", "30")
}
func (t *tcShaper) Reset(cidr string) error {
classAndHandle, found, err := t.findCIDRClass(cidr)
if err != nil {
return err
}
if !found {
return fmt.Errorf("Failed to find cidr: %s on interface: %s", cidr, t.iface)
}
for i := 0; i < len(classAndHandle); i++ {
if err := t.execAndLog("tc", "filter", "del",
"dev", t.iface,
"parent", "1:",
"proto", "ip",
"prio", "1",
"handle", classAndHandle[i][1], "u32"); err != nil {
return err
}
if err := t.execAndLog("tc", "class", "del",
"dev", t.iface,
"parent", "1:",
"classid", classAndHandle[i][0]); err != nil {
return err
}
}
return nil
}
func (t *tcShaper) deleteInterface(class string) error {
return t.execAndLog("tc", "qdisc", "delete", "dev", t.iface, "root", "handle", class)
}
func (t *tcShaper) GetCIDRs() ([]string, error) {
data, err := t.e.Command("tc", "filter", "show", "dev", t.iface).CombinedOutput()
if err != nil {
return nil, err
}
result := []string{}
scanner := bufio.NewScanner(bytes.NewBuffer(data))
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
if len(line) == 0 {
continue
}
if strings.Contains(line, "match") {
parts := strings.Split(line, " ")
// expected tc line:
// match <cidr> at <number>
if len(parts) != 4 {
return nil, fmt.Errorf("unexpected output: %v", parts)
}
cidr, err := asciiCIDR(parts[1])
if err != nil {
return nil, err
}
result = append(result, cidr)
}
}
return result, nil
}
| pkg/util/bandwidth/linux.go | 0 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.0009320288081653416,
0.00019974903261754662,
0.000162226366228424,
0.00017140948330052197,
0.0001339067966910079
] |
{
"id": 1,
"code_window": [
" \"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library\",\n",
" \"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library\",\n",
" \"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library\",\n",
" \"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library\",\n",
" \"//staging/src/k8s.io/apimachinery/pkg/util/waitgroup:go_default_library\",\n",
" \"//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library\",\n"
],
"labels": [
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library\",\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/BUILD",
"type": "add",
"edit_start_line_idx": 46
} | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"io"
"os/exec"
"strings"
)
// getCmd uses the given environment to form the ginkgo command to run tests. It will
// set the stdout/stderr to the given writer.
func getCmd(env Getenver, w io.Writer) *exec.Cmd {
ginkgoArgs := []string{}
// The logic of the parallel env var impacting the skip value necessitates it
// being placed before the rest of the flag resolution.
skip := env.Getenv(skipEnvKey)
switch env.Getenv(parallelEnvKey) {
case "y", "Y", "true":
ginkgoArgs = append(ginkgoArgs, "--p")
if len(skip) == 0 {
skip = serialTestsRegexp
}
}
ginkgoArgs = append(ginkgoArgs, []string{
"--focus=" + env.Getenv(focusEnvKey),
"--skip=" + skip,
"--noColor=true",
}...)
extraArgs := []string{
"--disable-log-dump",
"--repo-root=/kubernetes",
"--provider=" + env.Getenv(providerEnvKey),
"--report-dir=" + env.Getenv(resultsDirEnvKey),
"--kubeconfig=" + env.Getenv(kubeconfigEnvKey),
}
// Extra args handling
sep := " "
if len(env.Getenv(extraArgsSeparaterEnvKey)) > 0 {
sep = env.Getenv(extraArgsSeparaterEnvKey)
}
if len(env.Getenv(extraGinkgoArgsEnvKey)) > 0 {
ginkgoArgs = append(ginkgoArgs, strings.Split(env.Getenv(extraGinkgoArgsEnvKey), sep)...)
}
if len(env.Getenv(extraArgsEnvKey)) > 0 {
fmt.Printf("sep is %q args are %q", sep, env.Getenv(extraArgsEnvKey))
fmt.Println("split", strings.Split(env.Getenv(extraArgsEnvKey), sep))
extraArgs = append(extraArgs, strings.Split(env.Getenv(extraArgsEnvKey), sep)...)
}
if len(env.Getenv(dryRunEnvKey)) > 0 {
ginkgoArgs = append(ginkgoArgs, "--dryRun=true")
}
args := []string{}
args = append(args, ginkgoArgs...)
args = append(args, env.Getenv(testBinEnvKey))
args = append(args, "--")
args = append(args, extraArgs...)
cmd := exec.Command(env.Getenv(ginkgoEnvKey), args...)
cmd.Stdout = w
cmd.Stderr = w
return cmd
}
// cmdInfo generates a human-readable summary of the command for printing and debugging.
func cmdInfo(cmd *exec.Cmd) string {
return fmt.Sprintf(
`Command env: %v
Run from directory: %v
Executable path: %v
Args (comma-delimited): %v`, cmd.Env, cmd.Dir, cmd.Path, strings.Join(cmd.Args, ","),
)
}
| cluster/images/conformance/go-runner/cmd.go | 0 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.000177176741999574,
0.00017154417582787573,
0.00016717688413336873,
0.0001712993544060737,
0.0000032144200758921215
] |
{
"id": 2,
"code_window": [
" \"//staging/src/k8s.io/apiserver/pkg/endpoints/metrics:go_default_library\",\n",
" \"//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library\",\n",
" \"//staging/src/k8s.io/apiserver/pkg/server/httplog:go_default_library\",\n",
" \"//vendor/k8s.io/klog:go_default_library\",\n",
" ],\n",
")\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library\",\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/BUILD",
"type": "add",
"edit_start_line_idx": 55
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package filters
import (
"errors"
"net/http"
utilwaitgroup "k8s.io/apimachinery/pkg/util/waitgroup"
"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
apirequest "k8s.io/apiserver/pkg/endpoints/request"
)
// WithWaitGroup adds all non-long-running requests to the wait group, which is used for graceful shutdown.
func WithWaitGroup(handler http.Handler, longRunning apirequest.LongRunningRequestCheck, wg *utilwaitgroup.SafeWaitGroup) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
ctx := req.Context()
requestInfo, ok := apirequest.RequestInfoFrom(ctx)
if !ok {
// if this happens, the handler chain isn't setup correctly because there is no request info
responsewriters.InternalError(w, req, errors.New("no RequestInfo found in the context"))
return
}
if !longRunning(req, requestInfo) {
if err := wg.Add(1); err != nil {
http.Error(w, "apiserver is shutting down.", http.StatusInternalServerError)
return
}
defer wg.Done()
}
handler.ServeHTTP(w, req)
})
}
| staging/src/k8s.io/apiserver/pkg/server/filters/waitgroup.go | 1 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.013487129472196102,
0.002837822772562504,
0.00016326688637491316,
0.00017720817413646728,
0.005324658006429672
] |
{
"id": 2,
"code_window": [
" \"//staging/src/k8s.io/apiserver/pkg/endpoints/metrics:go_default_library\",\n",
" \"//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library\",\n",
" \"//staging/src/k8s.io/apiserver/pkg/server/httplog:go_default_library\",\n",
" \"//vendor/k8s.io/klog:go_default_library\",\n",
" ],\n",
")\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library\",\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/BUILD",
"type": "add",
"edit_start_line_idx": 55
} | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"testing"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
func TestLookupContainerPortNumberByServicePort(t *testing.T) {
tests := []struct {
name string
svc v1.Service
pod v1.Pod
port int32
containerPort int32
err bool
}{
{
name: "test success 1 (int port)",
svc: v1.Service{
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{
{
Port: 80,
TargetPort: intstr.FromInt(8080),
},
},
},
},
pod: v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Ports: []v1.ContainerPort{
{
Name: "http",
ContainerPort: int32(8080)},
},
},
},
},
},
port: 80,
containerPort: 8080,
err: false,
},
{
name: "test success 2 (clusterIP: None)",
svc: v1.Service{
Spec: v1.ServiceSpec{
ClusterIP: v1.ClusterIPNone,
Ports: []v1.ServicePort{
{
Port: 80,
TargetPort: intstr.FromInt(8080),
},
},
},
},
pod: v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Ports: []v1.ContainerPort{
{
Name: "http",
ContainerPort: int32(8080)},
},
},
},
},
},
port: 80,
containerPort: 80,
err: false,
},
{
name: "test success 3 (named port)",
svc: v1.Service{
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{
{
Port: 80,
TargetPort: intstr.FromString("http"),
},
},
},
},
pod: v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Ports: []v1.ContainerPort{
{
Name: "http",
ContainerPort: int32(8080)},
},
},
},
},
},
port: 80,
containerPort: 8080,
err: false,
},
{
name: "test success (targetPort omitted)",
svc: v1.Service{
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{
{
Port: 80,
},
},
},
},
pod: v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Ports: []v1.ContainerPort{
{
Name: "http",
ContainerPort: int32(80)},
},
},
},
},
},
port: 80,
containerPort: 80,
err: false,
},
{
name: "test failure 1 (cannot find a matching named port)",
svc: v1.Service{
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{
{
Port: 80,
TargetPort: intstr.FromString("http"),
},
},
},
},
pod: v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Ports: []v1.ContainerPort{
{
Name: "https",
ContainerPort: int32(443)},
},
},
},
},
},
port: 80,
containerPort: -1,
err: true,
},
{
name: "test failure 2 (cannot find a matching service port)",
svc: v1.Service{
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{
{
Port: 80,
TargetPort: intstr.FromString("http"),
},
},
},
},
pod: v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Ports: []v1.ContainerPort{
{
Name: "https",
ContainerPort: int32(443)},
},
},
},
},
},
port: 443,
containerPort: 443,
err: true,
},
{
name: "test failure 2 (cannot find a matching service port, but ClusterIP: None)",
svc: v1.Service{
Spec: v1.ServiceSpec{
ClusterIP: v1.ClusterIPNone,
Ports: []v1.ServicePort{
{
Port: 80,
TargetPort: intstr.FromString("http"),
},
},
},
},
pod: v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Ports: []v1.ContainerPort{
{
Name: "http",
ContainerPort: int32(80)},
},
},
},
},
},
port: 443,
containerPort: 443,
err: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
containerPort, err := LookupContainerPortNumberByServicePort(tt.svc, tt.pod, tt.port)
if err != nil {
if tt.err {
if containerPort != tt.containerPort {
t.Errorf("%v: expected port %v; got %v", tt.name, tt.containerPort, containerPort)
}
return
}
t.Errorf("%v: unexpected error: %v", tt.name, err)
return
}
if tt.err {
t.Errorf("%v: unexpected success", tt.name)
return
}
if containerPort != tt.containerPort {
t.Errorf("%v: expected port %v; got %v", tt.name, tt.containerPort, containerPort)
}
})
}
}
| staging/src/k8s.io/kubectl/pkg/util/service_port_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.0002144204918295145,
0.00017434312030673027,
0.00017019326332956553,
0.0001721458975225687,
0.000008065688234637491
] |
{
"id": 2,
"code_window": [
" \"//staging/src/k8s.io/apiserver/pkg/endpoints/metrics:go_default_library\",\n",
" \"//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library\",\n",
" \"//staging/src/k8s.io/apiserver/pkg/server/httplog:go_default_library\",\n",
" \"//vendor/k8s.io/klog:go_default_library\",\n",
" ],\n",
")\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library\",\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/BUILD",
"type": "add",
"edit_start_line_idx": 55
} | package bbolt
import "errors"
// These errors can be returned when opening or calling methods on a DB.
var (
// ErrDatabaseNotOpen is returned when a DB instance is accessed before it
// is opened or after it is closed.
ErrDatabaseNotOpen = errors.New("database not open")
// ErrDatabaseOpen is returned when opening a database that is
// already open.
ErrDatabaseOpen = errors.New("database already open")
// ErrInvalid is returned when both meta pages on a database are invalid.
// This typically occurs when a file is not a bolt database.
ErrInvalid = errors.New("invalid database")
// ErrVersionMismatch is returned when the data file was created with a
// different version of Bolt.
ErrVersionMismatch = errors.New("version mismatch")
// ErrChecksum is returned when either meta page checksum does not match.
ErrChecksum = errors.New("checksum error")
// ErrTimeout is returned when a database cannot obtain an exclusive lock
// on the data file after the timeout passed to Open().
ErrTimeout = errors.New("timeout")
)
// These errors can occur when beginning or committing a Tx.
var (
// ErrTxNotWritable is returned when performing a write operation on a
// read-only transaction.
ErrTxNotWritable = errors.New("tx not writable")
// ErrTxClosed is returned when committing or rolling back a transaction
// that has already been committed or rolled back.
ErrTxClosed = errors.New("tx closed")
// ErrDatabaseReadOnly is returned when a mutating transaction is started on a
// read-only database.
ErrDatabaseReadOnly = errors.New("database is in read-only mode")
)
// These errors can occur when putting or deleting a value or a bucket.
var (
// ErrBucketNotFound is returned when trying to access a bucket that has
// not been created yet.
ErrBucketNotFound = errors.New("bucket not found")
// ErrBucketExists is returned when creating a bucket that already exists.
ErrBucketExists = errors.New("bucket already exists")
// ErrBucketNameRequired is returned when creating a bucket with a blank name.
ErrBucketNameRequired = errors.New("bucket name required")
// ErrKeyRequired is returned when inserting a zero-length key.
ErrKeyRequired = errors.New("key required")
// ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
ErrKeyTooLarge = errors.New("key too large")
// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
ErrValueTooLarge = errors.New("value too large")
	// ErrIncompatibleValue is returned when trying to create or delete a bucket
// on an existing non-bucket key or when trying to create or delete a
// non-bucket key on an existing bucket key.
ErrIncompatibleValue = errors.New("incompatible value")
)
| vendor/go.etcd.io/bbolt/errors.go | 0 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.00029533388442359865,
0.00018634367734193802,
0.0001635839871596545,
0.00017097179079428315,
0.0000416873226640746
] |
{
"id": 2,
"code_window": [
" \"//staging/src/k8s.io/apiserver/pkg/endpoints/metrics:go_default_library\",\n",
" \"//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library\",\n",
" \"//staging/src/k8s.io/apiserver/pkg/server/httplog:go_default_library\",\n",
" \"//vendor/k8s.io/klog:go_default_library\",\n",
" ],\n",
")\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library\",\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/BUILD",
"type": "add",
"edit_start_line_idx": 55
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["mock.go"],
importmap = "k8s.io/kubernetes/vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/mock",
importpath = "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/mock",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud:go_default_library",
"//vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/filter:go_default_library",
"//vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta:go_default_library",
"//vendor/google.golang.org/api/compute/v0.alpha:go_default_library",
"//vendor/google.golang.org/api/compute/v0.beta:go_default_library",
"//vendor/google.golang.org/api/compute/v1:go_default_library",
"//vendor/google.golang.org/api/googleapi:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
| vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/mock/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.0028442873153835535,
0.0009538393351249397,
0.00017069658497348428,
0.0004001867491751909,
0.001107101677916944
] |
{
"id": 3,
"code_window": [
"\n",
"import (\n",
"\t\"errors\"\n",
"\t\"net/http\"\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"\t\"fmt\"\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/waitgroup.go",
"type": "add",
"edit_start_line_idx": 20
} | package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = [
"content_type_test.go",
"cors_test.go",
"maxinflight_test.go",
"timeout_test.go",
],
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/filters:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"content_type.go",
"cors.go",
"doc.go",
"longrunning.go",
"maxinflight.go",
"timeout.go",
"waitgroup.go",
"wrap.go",
],
importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters",
importpath = "k8s.io/apiserver/pkg/server/filters",
deps = [
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/waitgroup:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/metrics:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/httplog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
| staging/src/k8s.io/apiserver/pkg/server/filters/BUILD | 1 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.00023837233311496675,
0.0001779792655725032,
0.00016704987501725554,
0.0001689366763457656,
0.000022961270587984473
] |
{
"id": 3,
"code_window": [
"\n",
"import (\n",
"\t\"errors\"\n",
"\t\"net/http\"\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"\t\"fmt\"\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/waitgroup.go",
"type": "add",
"edit_start_line_idx": 20
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reconciliation
import (
"fmt"
"reflect"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/registry/rbac/validation"
)
type ReconcileOperation string
var (
ReconcileCreate ReconcileOperation = "create"
ReconcileUpdate ReconcileOperation = "update"
ReconcileRecreate ReconcileOperation = "recreate"
ReconcileNone ReconcileOperation = "none"
)
type RuleOwnerModifier interface {
Get(namespace, name string) (RuleOwner, error)
Create(RuleOwner) (RuleOwner, error)
Update(RuleOwner) (RuleOwner, error)
}
type RuleOwner interface {
GetObject() runtime.Object
GetNamespace() string
GetName() string
GetLabels() map[string]string
SetLabels(map[string]string)
GetAnnotations() map[string]string
SetAnnotations(map[string]string)
GetRules() []rbacv1.PolicyRule
SetRules([]rbacv1.PolicyRule)
GetAggregationRule() *rbacv1.AggregationRule
SetAggregationRule(*rbacv1.AggregationRule)
DeepCopyRuleOwner() RuleOwner
}
type ReconcileRoleOptions struct {
// Role is the expected role that will be reconciled
Role RuleOwner
// Confirm indicates writes should be performed. When false, results are returned as a dry-run.
Confirm bool
// RemoveExtraPermissions indicates reconciliation should remove extra permissions from an existing role
RemoveExtraPermissions bool
// Client is used to look up existing roles, and create/update the role when Confirm=true
Client RuleOwnerModifier
}
type ReconcileClusterRoleResult struct {
// Role is the reconciled role from the reconciliation operation.
// If the reconcile was performed as a dry-run, or the existing role was protected, the reconciled role is not persisted.
Role RuleOwner
// MissingRules contains expected rules that were missing from the currently persisted role
MissingRules []rbacv1.PolicyRule
// ExtraRules contains extra permissions the currently persisted role had
ExtraRules []rbacv1.PolicyRule
// MissingAggregationRuleSelectors contains expected selectors that were missing from the currently persisted role
MissingAggregationRuleSelectors []metav1.LabelSelector
// ExtraAggregationRuleSelectors contains extra selectors the currently persisted role had
ExtraAggregationRuleSelectors []metav1.LabelSelector
// Operation is the API operation required to reconcile.
// If no reconciliation was needed, it is set to ReconcileNone.
// If options.Confirm == false, the reconcile was in dry-run mode, so the operation was not performed.
// If result.Protected == true, the role opted out of reconciliation, so the operation was not performed.
// Otherwise, the operation was performed.
Operation ReconcileOperation
// Protected indicates an existing role prevented reconciliation
Protected bool
}
func (o *ReconcileRoleOptions) Run() (*ReconcileClusterRoleResult, error) {
return o.run(0)
}
func (o *ReconcileRoleOptions) run(attempts int) (*ReconcileClusterRoleResult, error) {
// This keeps us from retrying forever if a role keeps appearing and disappearing as we reconcile.
// Conflict errors on update are handled at a higher level.
if attempts > 2 {
return nil, fmt.Errorf("exceeded maximum attempts")
}
var result *ReconcileClusterRoleResult
existing, err := o.Client.Get(o.Role.GetNamespace(), o.Role.GetName())
switch {
case errors.IsNotFound(err):
aggregationRule := o.Role.GetAggregationRule()
if aggregationRule == nil {
aggregationRule = &rbacv1.AggregationRule{}
}
result = &ReconcileClusterRoleResult{
Role: o.Role,
MissingRules: o.Role.GetRules(),
MissingAggregationRuleSelectors: aggregationRule.ClusterRoleSelectors,
Operation: ReconcileCreate,
}
case err != nil:
return nil, err
default:
result, err = computeReconciledRole(existing, o.Role, o.RemoveExtraPermissions)
if err != nil {
return nil, err
}
}
// If reconcile-protected, short-circuit
if result.Protected {
return result, nil
}
// If we're in dry-run mode, short-circuit
if !o.Confirm {
return result, nil
}
switch result.Operation {
case ReconcileCreate:
created, err := o.Client.Create(result.Role)
// If created since we started this reconcile, re-run
if errors.IsAlreadyExists(err) {
return o.run(attempts + 1)
}
if err != nil {
return nil, err
}
result.Role = created
case ReconcileUpdate:
updated, err := o.Client.Update(result.Role)
// If deleted since we started this reconcile, re-run
if errors.IsNotFound(err) {
return o.run(attempts + 1)
}
if err != nil {
return nil, err
}
result.Role = updated
case ReconcileNone:
// no-op
default:
return nil, fmt.Errorf("invalid operation: %v", result.Operation)
}
return result, nil
}
// computeReconciledRole returns the role that must be created and/or updated to make the
// existing role's permissions match the expected role's permissions
func computeReconciledRole(existing, expected RuleOwner, removeExtraPermissions bool) (*ReconcileClusterRoleResult, error) {
result := &ReconcileClusterRoleResult{Operation: ReconcileNone}
result.Protected = (existing.GetAnnotations()[rbacv1.AutoUpdateAnnotationKey] == "false")
// Start with a copy of the existing object
result.Role = existing.DeepCopyRuleOwner()
// Merge expected annotations and labels
result.Role.SetAnnotations(merge(expected.GetAnnotations(), result.Role.GetAnnotations()))
if !reflect.DeepEqual(result.Role.GetAnnotations(), existing.GetAnnotations()) {
result.Operation = ReconcileUpdate
}
result.Role.SetLabels(merge(expected.GetLabels(), result.Role.GetLabels()))
if !reflect.DeepEqual(result.Role.GetLabels(), existing.GetLabels()) {
result.Operation = ReconcileUpdate
}
// Compute extra and missing rules
// Don't compute extra permissions if expected and existing roles are both aggregated
if expected.GetAggregationRule() == nil || existing.GetAggregationRule() == nil {
_, result.ExtraRules = validation.Covers(expected.GetRules(), existing.GetRules())
}
_, result.MissingRules = validation.Covers(existing.GetRules(), expected.GetRules())
switch {
case !removeExtraPermissions && len(result.MissingRules) > 0:
// add missing rules in the union case
result.Role.SetRules(append(result.Role.GetRules(), result.MissingRules...))
result.Operation = ReconcileUpdate
case removeExtraPermissions && (len(result.MissingRules) > 0 || len(result.ExtraRules) > 0):
// stomp to expected rules in the non-union case
result.Role.SetRules(expected.GetRules())
result.Operation = ReconcileUpdate
}
	// Compute extra and missing aggregation rule selectors
_, result.ExtraAggregationRuleSelectors = aggregationRuleCovers(expected.GetAggregationRule(), existing.GetAggregationRule())
_, result.MissingAggregationRuleSelectors = aggregationRuleCovers(existing.GetAggregationRule(), expected.GetAggregationRule())
switch {
case expected.GetAggregationRule() == nil && existing.GetAggregationRule() != nil:
// we didn't expect this to be an aggregated role at all, remove the existing aggregation
result.Role.SetAggregationRule(nil)
result.Operation = ReconcileUpdate
case !removeExtraPermissions && len(result.MissingAggregationRuleSelectors) > 0:
// add missing rules in the union case
aggregationRule := result.Role.GetAggregationRule()
if aggregationRule == nil {
aggregationRule = &rbacv1.AggregationRule{}
}
aggregationRule.ClusterRoleSelectors = append(aggregationRule.ClusterRoleSelectors, result.MissingAggregationRuleSelectors...)
result.Role.SetAggregationRule(aggregationRule)
result.Operation = ReconcileUpdate
case removeExtraPermissions && (len(result.MissingAggregationRuleSelectors) > 0 || len(result.ExtraAggregationRuleSelectors) > 0):
result.Role.SetAggregationRule(expected.GetAggregationRule())
result.Operation = ReconcileUpdate
}
return result, nil
}
// merge combines the given maps with the later annotations having higher precedence
func merge(maps ...map[string]string) map[string]string {
var output map[string]string = nil
for _, m := range maps {
if m != nil && output == nil {
output = map[string]string{}
}
for k, v := range m {
output[k] = v
}
}
return output
}
// aggregationRuleCovers determines whether or not the ownerSelectors cover the servantSelectors in terms of semantically
// equal label selectors.
// It returns whether or not the ownerSelectors cover and a list of the rules that the ownerSelectors do not cover.
func aggregationRuleCovers(ownerRule, servantRule *rbacv1.AggregationRule) (bool, []metav1.LabelSelector) {
switch {
case ownerRule == nil && servantRule == nil:
return true, []metav1.LabelSelector{}
case ownerRule == nil && servantRule != nil:
return false, servantRule.ClusterRoleSelectors
case ownerRule != nil && servantRule == nil:
return true, []metav1.LabelSelector{}
}
ownerSelectors := ownerRule.ClusterRoleSelectors
servantSelectors := servantRule.ClusterRoleSelectors
uncoveredSelectors := []metav1.LabelSelector{}
for _, servantSelector := range servantSelectors {
covered := false
for _, ownerSelector := range ownerSelectors {
if equality.Semantic.DeepEqual(ownerSelector, servantSelector) {
covered = true
break
}
}
if !covered {
uncoveredSelectors = append(uncoveredSelectors, servantSelector)
}
}
return (len(uncoveredSelectors) == 0), uncoveredSelectors
}
| pkg/registry/rbac/reconciliation/reconcile_role.go | 0 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.00047768239164724946,
0.00019021829939447343,
0.00016368305659852922,
0.00016883504576981068,
0.000059831421822309494
] |
{
"id": 3,
"code_window": [
"\n",
"import (\n",
"\t\"errors\"\n",
"\t\"net/http\"\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"\t\"fmt\"\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/waitgroup.go",
"type": "add",
"edit_start_line_idx": 20
} | package pflag
import "strconv"
// -- int8 Value
type int8Value int8
func newInt8Value(val int8, p *int8) *int8Value {
*p = val
return (*int8Value)(p)
}
func (i *int8Value) Set(s string) error {
v, err := strconv.ParseInt(s, 0, 8)
*i = int8Value(v)
return err
}
func (i *int8Value) Type() string {
return "int8"
}
func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) }
func int8Conv(sval string) (interface{}, error) {
v, err := strconv.ParseInt(sval, 0, 8)
if err != nil {
return 0, err
}
return int8(v), nil
}
// GetInt8 return the int8 value of a flag with the given name
func (f *FlagSet) GetInt8(name string) (int8, error) {
val, err := f.getFlagType(name, "int8", int8Conv)
if err != nil {
return 0, err
}
return val.(int8), nil
}
// Int8Var defines an int8 flag with specified name, default value, and usage string.
// The argument p points to an int8 variable in which to store the value of the flag.
func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) {
f.VarP(newInt8Value(value, p), name, "", usage)
}
// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
f.VarP(newInt8Value(value, p), name, shorthand, usage)
}
// Int8Var defines an int8 flag with specified name, default value, and usage string.
// The argument p points to an int8 variable in which to store the value of the flag.
func Int8Var(p *int8, name string, value int8, usage string) {
CommandLine.VarP(newInt8Value(value, p), name, "", usage)
}
// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
func Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage)
}
// Int8 defines an int8 flag with specified name, default value, and usage string.
// The return value is the address of an int8 variable that stores the value of the flag.
func (f *FlagSet) Int8(name string, value int8, usage string) *int8 {
p := new(int8)
f.Int8VarP(p, name, "", value, usage)
return p
}
// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 {
p := new(int8)
f.Int8VarP(p, name, shorthand, value, usage)
return p
}
// Int8 defines an int8 flag with specified name, default value, and usage string.
// The return value is the address of an int8 variable that stores the value of the flag.
func Int8(name string, value int8, usage string) *int8 {
return CommandLine.Int8P(name, "", value, usage)
}
// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
func Int8P(name, shorthand string, value int8, usage string) *int8 {
return CommandLine.Int8P(name, shorthand, value, usage)
}
| vendor/github.com/spf13/pflag/int8.go | 0 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.003454831661656499,
0.000584667781367898,
0.00016729519120417535,
0.00018049689242616296,
0.001017781556583941
] |
{
"id": 3,
"code_window": [
"\n",
"import (\n",
"\t\"errors\"\n",
"\t\"net/http\"\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"\t\"fmt\"\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/waitgroup.go",
"type": "add",
"edit_start_line_idx": 20
} | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE | 0 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.00017867596761789173,
0.00017042874242179096,
0.00016719919221941382,
0.00017041020328179002,
0.0000025931960863090353
] |
{
"id": 4,
"code_window": [
"\t\"net/http\"\n",
"\n",
"\tutilwaitgroup \"k8s.io/apimachinery/pkg/util/waitgroup\"\n",
"\t\"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters\"\n",
"\tapirequest \"k8s.io/apiserver/pkg/endpoints/request\"\n"
],
"labels": [
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"k8s.io/api/core/v1\"\n",
"\tapierrors \"k8s.io/apimachinery/pkg/api/errors\"\n",
"\t\"k8s.io/apimachinery/pkg/runtime\"\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/waitgroup.go",
"type": "add",
"edit_start_line_idx": 22
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package filters
import (
"errors"
"net/http"
utilwaitgroup "k8s.io/apimachinery/pkg/util/waitgroup"
"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
apirequest "k8s.io/apiserver/pkg/endpoints/request"
)
// WithWaitGroup adds all non-long-running requests to the wait group, which is used for graceful shutdown.
func WithWaitGroup(handler http.Handler, longRunning apirequest.LongRunningRequestCheck, wg *utilwaitgroup.SafeWaitGroup) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
ctx := req.Context()
requestInfo, ok := apirequest.RequestInfoFrom(ctx)
if !ok {
// if this happens, the handler chain isn't setup correctly because there is no request info
responsewriters.InternalError(w, req, errors.New("no RequestInfo found in the context"))
return
}
if !longRunning(req, requestInfo) {
if err := wg.Add(1); err != nil {
http.Error(w, "apiserver is shutting down.", http.StatusInternalServerError)
return
}
defer wg.Done()
}
handler.ServeHTTP(w, req)
})
}
| staging/src/k8s.io/apiserver/pkg/server/filters/waitgroup.go | 1 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.19635596871376038,
0.04098694026470184,
0.00017526162264402956,
0.00026343466015532613,
0.07774259895086288
] |
{
"id": 4,
"code_window": [
"\t\"net/http\"\n",
"\n",
"\tutilwaitgroup \"k8s.io/apimachinery/pkg/util/waitgroup\"\n",
"\t\"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters\"\n",
"\tapirequest \"k8s.io/apiserver/pkg/endpoints/request\"\n"
],
"labels": [
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"k8s.io/api/core/v1\"\n",
"\tapierrors \"k8s.io/apimachinery/pkg/api/errors\"\n",
"\t\"k8s.io/apimachinery/pkg/runtime\"\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/waitgroup.go",
"type": "add",
"edit_start_line_idx": 22
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package queueset
import (
"time"
"k8s.io/apiserver/pkg/util/promise"
)
// request is a temporary container for "requests" with additional tracking fields
// required for the functionality FQScheduler
type request struct {
Queue *queue
// StartTime is the clock time when the request began executing
StartTime time.Time
// Decision gets set to the decision about what to do with this request
Decision promise.LockingMutable
// ArrivalTime is when the request entered this system
ArrivalTime time.Time
// IsWaiting indicates whether the request is presently waiting in a queue
IsWaiting bool
// descr1 and descr2 are not used in any logic but they appear in
// log messages
descr1, descr2 interface{}
}
// queue is an array of requests with additional metadata required for
// the FQScheduler
type queue struct {
Requests []*request
// VirtualStart is the virtual time when the oldest request in the
// queue (if there is any) started virtually executing
VirtualStart float64
RequestsExecuting int
Index int
}
// Enqueue enqueues a request into the queue
func (q *queue) Enqueue(request *request) {
request.IsWaiting = true
q.Requests = append(q.Requests, request)
}
// Dequeue dequeues a request from the queue
func (q *queue) Dequeue() (*request, bool) {
if len(q.Requests) == 0 {
return nil, false
}
request := q.Requests[0]
q.Requests = q.Requests[1:]
request.IsWaiting = false
return request, true
}
// GetVirtualFinish returns the expected virtual finish time of the request at
// index J in the queue with estimated finish time G
func (q *queue) GetVirtualFinish(J int, G float64) float64 {
// The virtual finish time of request number J in the queue
// (counting from J=1 for the head) is J * G + (virtual start time).
// counting from J=1 for the head (eg: queue.Requests[0] -> J=1) - J+1
jg := float64(J+1) * float64(G)
return jg + q.VirtualStart
}
| staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/types.go | 0 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.013881991617381573,
0.0017240230226889253,
0.00016531304572708905,
0.00017694529378786683,
0.0042989798821508884
] |
{
"id": 4,
"code_window": [
"\t\"net/http\"\n",
"\n",
"\tutilwaitgroup \"k8s.io/apimachinery/pkg/util/waitgroup\"\n",
"\t\"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters\"\n",
"\tapirequest \"k8s.io/apiserver/pkg/endpoints/request\"\n"
],
"labels": [
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"k8s.io/api/core/v1\"\n",
"\tapierrors \"k8s.io/apimachinery/pkg/api/errors\"\n",
"\t\"k8s.io/apimachinery/pkg/runtime\"\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/waitgroup.go",
"type": "add",
"edit_start_line_idx": 22
} | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1"
rest "k8s.io/client-go/rest"
testing "k8s.io/client-go/testing"
)
type FakeAutoscalingV1 struct {
*testing.Fake
}
func (c *FakeAutoscalingV1) HorizontalPodAutoscalers(namespace string) v1.HorizontalPodAutoscalerInterface {
return &FakeHorizontalPodAutoscalers{c, namespace}
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeAutoscalingV1) RESTClient() rest.Interface {
var ret *rest.RESTClient
return ret
}
| staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go | 0 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.0032267053611576557,
0.0010591379832476377,
0.0001743239554343745,
0.00017649857909418643,
0.0012062548194080591
] |
{
"id": 4,
"code_window": [
"\t\"net/http\"\n",
"\n",
"\tutilwaitgroup \"k8s.io/apimachinery/pkg/util/waitgroup\"\n",
"\t\"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters\"\n",
"\tapirequest \"k8s.io/apiserver/pkg/endpoints/request\"\n"
],
"labels": [
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"k8s.io/api/core/v1\"\n",
"\tapierrors \"k8s.io/apimachinery/pkg/api/errors\"\n",
"\t\"k8s.io/apimachinery/pkg/runtime\"\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/waitgroup.go",
"type": "add",
"edit_start_line_idx": 22
} | #!/bin/bash
set -e
go version
go test -v google.golang.org/appengine/...
go test -v -race google.golang.org/appengine/...
if [[ $GOAPP == "true" ]]; then
export PATH="$PATH:/tmp/sdk/go_appengine"
export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py
goapp version
goapp test -v google.golang.org/appengine/...
fi
| vendor/google.golang.org/appengine/travis_test.sh | 0 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.00016830656386446208,
0.0001676016254350543,
0.00016689670155756176,
0.0001676016254350543,
7.049311534501612e-7
] |
{
"id": 5,
"code_window": [
"\tutilwaitgroup \"k8s.io/apimachinery/pkg/util/waitgroup\"\n",
"\t\"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters\"\n",
"\tapirequest \"k8s.io/apiserver/pkg/endpoints/request\"\n",
")\n",
"\n",
"// WithWaitGroup adds all non long-running requests to wait group, which is used for graceful shutdown.\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"k8s.io/client-go/kubernetes/scheme\"\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/waitgroup.go",
"type": "add",
"edit_start_line_idx": 25
} | package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = [
"content_type_test.go",
"cors_test.go",
"maxinflight_test.go",
"timeout_test.go",
],
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/filters:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"content_type.go",
"cors.go",
"doc.go",
"longrunning.go",
"maxinflight.go",
"timeout.go",
"waitgroup.go",
"wrap.go",
],
importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/filters",
importpath = "k8s.io/apiserver/pkg/server/filters",
deps = [
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/waitgroup:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/metrics:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/httplog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
| staging/src/k8s.io/apiserver/pkg/server/filters/BUILD | 1 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.00803136732429266,
0.0016755206743255258,
0.0001687299518380314,
0.0004406847874633968,
0.0025754219386726618
] |
{
"id": 5,
"code_window": [
"\tutilwaitgroup \"k8s.io/apimachinery/pkg/util/waitgroup\"\n",
"\t\"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters\"\n",
"\tapirequest \"k8s.io/apiserver/pkg/endpoints/request\"\n",
")\n",
"\n",
"// WithWaitGroup adds all non long-running requests to wait group, which is used for graceful shutdown.\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"k8s.io/client-go/kubernetes/scheme\"\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/waitgroup.go",
"type": "add",
"edit_start_line_idx": 25
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
authorizationapi "k8s.io/api/authorization/v1beta1"
)
// The SubjectAccessReviewExpansion interface allows manually adding extra methods to the AuthorizationInterface.
type SubjectAccessReviewExpansion interface {
Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error)
CreateContext(ctx context.Context, sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error)
}
func (c *subjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) {
return c.CreateContext(context.Background(), sar)
}
func (c *subjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) {
result = &authorizationapi.SubjectAccessReview{}
err = c.client.Post().
Context(ctx).
Resource("subjectaccessreviews").
Body(sar).
Do().
Into(result)
return
}
| staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go | 0 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.0001772404502844438,
0.00017309674876742065,
0.00016969189164228737,
0.00017100854893215,
0.000003207184590792167
] |
{
"id": 5,
"code_window": [
"\tutilwaitgroup \"k8s.io/apimachinery/pkg/util/waitgroup\"\n",
"\t\"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters\"\n",
"\tapirequest \"k8s.io/apiserver/pkg/endpoints/request\"\n",
")\n",
"\n",
"// WithWaitGroup adds all non long-running requests to wait group, which is used for graceful shutdown.\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"k8s.io/client-go/kubernetes/scheme\"\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/waitgroup.go",
"type": "add",
"edit_start_line_idx": 25
} | // go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build arm64,freebsd
package unix
const (
// SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int
SYS_EXIT = 1 // { void sys_exit(int rval); } exit sys_exit_args void
SYS_FORK = 2 // { int fork(void); }
SYS_READ = 3 // { ssize_t read(int fd, void *buf, size_t nbyte); }
SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, size_t nbyte); }
SYS_OPEN = 5 // { int open(char *path, int flags, int mode); }
SYS_CLOSE = 6 // { int close(int fd); }
SYS_WAIT4 = 7 // { int wait4(int pid, int *status, int options, struct rusage *rusage); }
SYS_LINK = 9 // { int link(char *path, char *link); }
SYS_UNLINK = 10 // { int unlink(char *path); }
SYS_CHDIR = 12 // { int chdir(char *path); }
SYS_FCHDIR = 13 // { int fchdir(int fd); }
SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); }
SYS_CHMOD = 15 // { int chmod(char *path, int mode); }
SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); }
SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int
SYS_GETPID = 20 // { pid_t getpid(void); }
SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); }
SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); }
SYS_SETUID = 23 // { int setuid(uid_t uid); }
SYS_GETUID = 24 // { uid_t getuid(void); }
SYS_GETEUID = 25 // { uid_t geteuid(void); }
SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, caddr_t addr, int data); }
SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, int flags); }
SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, int flags); }
SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, size_t len, int flags, struct sockaddr * __restrict from, __socklen_t * __restrict fromlenaddr); }
SYS_ACCEPT = 30 // { int accept(int s, struct sockaddr * __restrict name, __socklen_t * __restrict anamelen); }
SYS_GETPEERNAME = 31 // { int getpeername(int fdes, struct sockaddr * __restrict asa, __socklen_t * __restrict alen); }
SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, struct sockaddr * __restrict asa, __socklen_t * __restrict alen); }
SYS_ACCESS = 33 // { int access(char *path, int amode); }
SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); }
SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); }
SYS_SYNC = 36 // { int sync(void); }
SYS_KILL = 37 // { int kill(int pid, int signum); }
SYS_GETPPID = 39 // { pid_t getppid(void); }
SYS_DUP = 41 // { int dup(u_int fd); }
SYS_PIPE = 42 // { int pipe(void); }
SYS_GETEGID = 43 // { gid_t getegid(void); }
SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); }
SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); }
SYS_GETGID = 47 // { gid_t getgid(void); }
SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int namelen); }
SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); }
SYS_ACCT = 51 // { int acct(char *path); }
SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, stack_t *oss); }
SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, caddr_t data); }
SYS_REBOOT = 55 // { int reboot(int opt); }
SYS_REVOKE = 56 // { int revoke(char *path); }
SYS_SYMLINK = 57 // { int symlink(char *path, char *link); }
SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); }
SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); }
SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int
SYS_CHROOT = 61 // { int chroot(char *path); }
SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); }
SYS_VFORK = 66 // { int vfork(void); }
SYS_SBRK = 69 // { int sbrk(int incr); }
SYS_SSTK = 70 // { int sstk(int incr); }
SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int
SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); }
SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); }
SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); }
SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); }
SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); }
SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, gid_t *gidset); }
SYS_GETPGRP = 81 // { int getpgrp(void); }
SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); }
SYS_SETITIMER = 83 // { int setitimer(u_int which, struct itimerval *itv, struct itimerval *oitv); }
SYS_SWAPON = 85 // { int swapon(char *name); }
SYS_GETITIMER = 86 // { int getitimer(u_int which, struct itimerval *itv); }
SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); }
SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); }
SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); }
SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); }
SYS_FSYNC = 95 // { int fsync(int fd); }
SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, int prio); }
SYS_SOCKET = 97 // { int socket(int domain, int type, int protocol); }
SYS_CONNECT = 98 // { int connect(int s, caddr_t name, int namelen); }
SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); }
SYS_BIND = 104 // { int bind(int s, caddr_t name, int namelen); }
SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, caddr_t val, int valsize); }
SYS_LISTEN = 106 // { int listen(int s, int backlog); }
SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, struct timezone *tzp); }
SYS_GETRUSAGE = 117 // { int getrusage(int who, struct rusage *rusage); }
SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, caddr_t val, int *avalsize); }
SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, u_int iovcnt); }
SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, u_int iovcnt); }
SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, struct timezone *tzp); }
SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); }
SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); }
SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); }
SYS_SETREGID = 127 // { int setregid(int rgid, int egid); }
SYS_RENAME = 128 // { int rename(char *from, char *to); }
SYS_FLOCK = 131 // { int flock(int fd, int how); }
SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); }
SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen); }
SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); }
SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, int protocol, int *rsv); }
SYS_MKDIR = 136 // { int mkdir(char *path, int mode); }
SYS_RMDIR = 137 // { int rmdir(char *path); }
SYS_UTIMES = 138 // { int utimes(char *path, struct timeval *tptr); }
SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, struct timeval *olddelta); }
SYS_SETSID = 147 // { int setsid(void); }
SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, caddr_t arg); }
SYS_NLM_SYSCALL = 154 // { int nlm_syscall(int debug_level, int grace_period, int addr_count, char **addrs); }
SYS_NFSSVC = 155 // { int nfssvc(int flag, caddr_t argp); }
SYS_LGETFH = 160 // { int lgetfh(char *fname, struct fhandle *fhp); }
SYS_GETFH = 161 // { int getfh(char *fname, struct fhandle *fhp); }
SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); }
SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, struct rtprio *rtp); }
SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); }
SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); }
SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); }
SYS_SETFIB = 175 // { int setfib(int fibnum); }
SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); }
SYS_SETGID = 181 // { int setgid(gid_t gid); }
SYS_SETEGID = 182 // { int setegid(gid_t egid); }
SYS_SETEUID = 183 // { int seteuid(uid_t euid); }
SYS_STAT = 188 // { int stat(char *path, struct stat *ub); }
SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); }
SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); }
SYS_PATHCONF = 191 // { int pathconf(char *path, int name); }
SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); }
SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int
SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int
SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); }
SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int
SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); }
SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); }
SYS_UNDELETE = 205 // { int undelete(char *path); }
SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); }
SYS_GETPGID = 207 // { int getpgid(pid_t pid); }
SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, int timeout); }
SYS_SEMGET = 221 // { int semget(key_t key, int nsems, int semflg); }
SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); }
SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); }
SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); }
SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); }
SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); }
SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); }
SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); }
SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); }
SYS_CLOCK_SETTIME = 233 // { int clock_settime( clockid_t clock_id, const struct timespec *tp); }
SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); }
SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); }
SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); }
SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, const struct itimerspec *value, struct itimerspec *ovalue); }
SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct itimerspec *value); }
SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); }
SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); }
SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); }
SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); }
SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); }
SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); }
SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); }
SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); }
SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); }
SYS_RFORK = 251 // { int rfork(int flags); }
SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); }
SYS_ISSETUGID = 253 // { int issetugid(void); }
SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); }
SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); }
SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); }
SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); }
SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); }
SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); }
SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); }
SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); }
SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); }
SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); }
SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); }
SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); }
SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); }
SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); }
SYS_MODNEXT = 300 // { int modnext(int modid); }
SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat *stat); }
SYS_MODFNEXT = 302 // { int modfnext(int modid); }
SYS_MODFIND = 303 // { int modfind(const char *name); }
SYS_KLDLOAD = 304 // { int kldload(const char *file); }
SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); }
SYS_KLDFIND = 306 // { int kldfind(const char *file); }
SYS_KLDNEXT = 307 // { int kldnext(int fileid); }
SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); }
SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); }
SYS_GETSID = 310 // { int getsid(pid_t pid); }
SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); }
SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); }
SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); }
SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); }
SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); }
SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); }
SYS_YIELD = 321 // { int yield(void); }
SYS_MLOCKALL = 324 // { int mlockall(int how); }
SYS_MUNLOCKALL = 325 // { int munlockall(void); }
SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); }
SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); }
SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); }
SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); }
SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); }
SYS_SCHED_YIELD = 331 // { int sched_yield (void); }
SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); }
SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); }
SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, struct timespec *interval); }
SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); }
SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, void *data); }
SYS_JAIL = 338 // { int jail(struct jail *jail); }
SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, const sigset_t *set, sigset_t *oset); }
SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); }
SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); }
SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, siginfo_t *info, const struct timespec *timeout); }
SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, siginfo_t *info); }
SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, acl_type_t type, struct acl *aclp); }
SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, acl_type_t type, struct acl *aclp); }
SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, acl_type_t type, struct acl *aclp); }
SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, acl_type_t type, struct acl *aclp); }
SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, acl_type_t type); }
SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, acl_type_t type); }
SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); }
SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); }
SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); }
SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); }
SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); }
SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); }
SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); }
SYS_KQUEUE = 362 // { int kqueue(void); }
SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); }
SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); }
SYS___SETUGID = 374 // { int __setugid(int flag); }
SYS_EACCESS = 376 // { int eaccess(char *path, int amode); }
SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, unsigned int iovcnt, int flags); }
SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); }
SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); }
SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, struct mac *mac_p); }
SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, struct mac *mac_p); }
SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, struct mac *mac_p); }
SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, struct mac *mac_p); }
SYS_KENV = 390 // { int kenv(int what, const char *name, char *value, int len); }
SYS_LCHFLAGS = 391 // { int lchflags(const char *path, u_long flags); }
SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); }
SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); }
SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); }
SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); }
SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); }
SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); }
SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); }
SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); }
SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); }
SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); }
SYS_KSEM_TRYWAIT = 403 // { int ksem_trywait(semid_t id); }
SYS_KSEM_INIT = 404 // { int ksem_init(semid_t *idp, unsigned int value); }
SYS_KSEM_OPEN = 405 // { int ksem_open(semid_t *idp, const char *name, int oflag, mode_t mode, unsigned int value); }
SYS_KSEM_UNLINK = 406 // { int ksem_unlink(const char *name); }
SYS_KSEM_GETVALUE = 407 // { int ksem_getvalue(semid_t id, int *val); }
SYS_KSEM_DESTROY = 408 // { int ksem_destroy(semid_t id); }
SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); }
SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); }
SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); }
SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); }
SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); }
SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); }
SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); }
SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); }
SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); }
SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); }
SYS_SWAPOFF = 424 // { int swapoff(const char *name); }
SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); }
SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, acl_type_t type, struct acl *aclp); }
SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, acl_type_t type); }
SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, acl_type_t type, struct acl *aclp); }
SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, int *sig); }
SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, int flags); }
SYS_THR_EXIT = 431 // { void thr_exit(long *state); }
SYS_THR_SELF = 432 // { int thr_self(long *id); }
SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); }
SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); }
SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); }
SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); }
SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, size_t nbytes); }
SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); }
SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); }
SYS_THR_WAKE = 443 // { int thr_wake(long id); }
SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); }
SYS_AUDIT = 445 // { int audit(const void *record, u_int length); }
SYS_AUDITON = 446 // { int auditon(int cmd, void *data, u_int length); }
SYS_GETAUID = 447 // { int getauid(uid_t *auid); }
SYS_SETAUID = 448 // { int setauid(uid_t *auid); }
SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); }
SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); }
SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); }
SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); }
SYS_AUDITCTL = 453 // { int auditctl(char *path); }
SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); }
SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); }
SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); }
SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); }
SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); }
SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); }
SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);}
SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); }
SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); }
SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); }
SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); }
SYS_AIO_FSYNC = 465 // { int aio_fsync(int op, struct aiocb *aiocbp); }
SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, lwpid_t lwpid, struct rtprio *rtp); }
SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); }
SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); }
SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); }
SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); }
SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); }
SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); }
SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); }
SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, int whence); }
SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); }
SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); }
SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); }
SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, mode_t mode); }
SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); }
SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); }
SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, cpusetid_t setid); }
SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, cpuwhich_t which, id_t id, cpusetid_t *setid); }
SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, cpuset_t *mask); }
SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, const cpuset_t *mask); }
SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, int flag); }
SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); }
SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); }
SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); }
SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); }
SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); }
SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); }
SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); }
SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); }
SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); }
SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); }
SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); }
SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); }
SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); }
SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); }
SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); }
SYS_GSSD_SYSCALL = 505 // { int gssd_syscall(char *path); }
SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, unsigned int iovcnt, int flags); }
SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, unsigned int iovcnt, int flags); }
SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); }
SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); }
SYS___SEMCTL = 510 // { int __semctl(int semid, int semnum, int cmd, union semun *arg); }
SYS_MSGCTL = 511 // { int msgctl(int msqid, int cmd, struct msqid_ds *buf); }
SYS_SHMCTL = 512 // { int shmctl(int shmid, int cmd, struct shmid_ds *buf); }
SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); }
SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, int fd, cap_rights_t *rightsp); }
SYS_CAP_ENTER = 516 // { int cap_enter(void); }
SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); }
SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); }
SYS_PDKILL = 519 // { int pdkill(int fd, int signum); }
SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); }
SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *sm); }
SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, size_t namelen); }
SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); }
SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); }
SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); }
SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); }
SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); }
SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); }
SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, off_t offset, off_t len); }
SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, off_t len, int advice); }
SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, int *status, int options, struct __wrusage *wrusage, siginfo_t *info); }
SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, cap_rights_t *rightsp); }
SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, const u_long *cmds, size_t ncmds); }
SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, u_long *cmds, size_t maxcmds); }
SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, uint32_t fcntlrights); }
SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, uint32_t *fcntlrightsp); }
SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, int namelen); }
SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, int namelen); }
SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, u_long flags, int atflag); }
SYS_ACCEPT4 = 541 // { int accept4(int s, struct sockaddr * __restrict name, __socklen_t * __restrict anamelen, int flags); }
SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); }
SYS_AIO_MLOCK = 543 // { int aio_mlock(struct aiocb *aiocbp); }
SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, int com, void *data); }
SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); }
SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); }
SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); }
SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); }
SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); }
SYS_FDATASYNC = 550 // { int fdatasync(int fd); }
)
| vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go | 0 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.007656634785234928,
0.0013867930974811316,
0.00016345130279660225,
0.000873384065926075,
0.0014404058456420898
] |
{
"id": 5,
"code_window": [
"\tutilwaitgroup \"k8s.io/apimachinery/pkg/util/waitgroup\"\n",
"\t\"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters\"\n",
"\tapirequest \"k8s.io/apiserver/pkg/endpoints/request\"\n",
")\n",
"\n",
"// WithWaitGroup adds all non long-running requests to wait group, which is used for graceful shutdown.\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"k8s.io/client-go/kubernetes/scheme\"\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/waitgroup.go",
"type": "add",
"edit_start_line_idx": 25
} | 0.23.2
| .bazelversion | 0 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.00016943061200436205,
0.00016943061200436205,
0.00016943061200436205,
0.00016943061200436205,
0
] |
{
"id": 6,
"code_window": [
"\t\t}\n",
"\n",
"\t\tif !longRunning(req, requestInfo) {\n",
"\t\t\tif err := wg.Add(1); err != nil {\n",
"\t\t\t\thttp.Error(w, \"apiserver is shutting down.\", http.StatusInternalServerError)\n",
"\t\t\t\treturn\n",
"\t\t\t}\n",
"\t\t\tdefer wg.Done()\n",
"\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t// When apiserver is shutting down, signal clients to retry\n",
"\t\t\t\t// There is a good chance the client hit a different server, so a tight retry is good for client responsiveness.\n",
"\t\t\t\tw.Header().Add(\"Retry-After\", \"1\")\n",
"\t\t\t\tw.Header().Set(\"Content-Type\", runtime.ContentTypeJSON)\n",
"\t\t\t\tw.Header().Set(\"X-Content-Type-Options\", \"nosniff\")\n",
"\t\t\t\tstatusErr := apierrors.NewServiceUnavailable(\"apiserver is shutting down\").Status()\n",
"\t\t\t\tw.WriteHeader(int(statusErr.Code))\n",
"\t\t\t\tfmt.Fprintln(w, runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &statusErr))\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/waitgroup.go",
"type": "replace",
"edit_start_line_idx": 40
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package filters
import (
"errors"
"net/http"
utilwaitgroup "k8s.io/apimachinery/pkg/util/waitgroup"
"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
apirequest "k8s.io/apiserver/pkg/endpoints/request"
)
// WithWaitGroup adds all non long-running requests to wait group, which is used for graceful shutdown.
func WithWaitGroup(handler http.Handler, longRunning apirequest.LongRunningRequestCheck, wg *utilwaitgroup.SafeWaitGroup) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
ctx := req.Context()
requestInfo, ok := apirequest.RequestInfoFrom(ctx)
if !ok {
// if this happens, the handler chain isn't setup correctly because there is no request info
responsewriters.InternalError(w, req, errors.New("no RequestInfo found in the context"))
return
}
if !longRunning(req, requestInfo) {
if err := wg.Add(1); err != nil {
http.Error(w, "apiserver is shutting down.", http.StatusInternalServerError)
return
}
defer wg.Done()
}
handler.ServeHTTP(w, req)
})
}
| staging/src/k8s.io/apiserver/pkg/server/filters/waitgroup.go | 1 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.9908921122550964,
0.25931698083877563,
0.00017841090448200703,
0.02617614157497883,
0.38059309124946594
] |
{
"id": 6,
"code_window": [
"\t\t}\n",
"\n",
"\t\tif !longRunning(req, requestInfo) {\n",
"\t\t\tif err := wg.Add(1); err != nil {\n",
"\t\t\t\thttp.Error(w, \"apiserver is shutting down.\", http.StatusInternalServerError)\n",
"\t\t\t\treturn\n",
"\t\t\t}\n",
"\t\t\tdefer wg.Done()\n",
"\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t// When apiserver is shutting down, signal clients to retry\n",
"\t\t\t\t// There is a good chance the client hit a different server, so a tight retry is good for client responsiveness.\n",
"\t\t\t\tw.Header().Add(\"Retry-After\", \"1\")\n",
"\t\t\t\tw.Header().Set(\"Content-Type\", runtime.ContentTypeJSON)\n",
"\t\t\t\tw.Header().Set(\"X-Content-Type-Options\", \"nosniff\")\n",
"\t\t\t\tstatusErr := apierrors.NewServiceUnavailable(\"apiserver is shutting down\").Status()\n",
"\t\t\t\tw.WriteHeader(int(statusErr.Code))\n",
"\t\t\t\tfmt.Fprintln(w, runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &statusErr))\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/waitgroup.go",
"type": "replace",
"edit_start_line_idx": 40
} | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package authenticator contains implementations for pkg/auth/authenticator interfaces
package authenticator // import "k8s.io/apiserver/plugin/pkg/authenticator"
| staging/src/k8s.io/apiserver/plugin/pkg/authenticator/doc.go | 0 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.0001794695999706164,
0.0001766069617588073,
0.00017374430899508297,
0.0001766069617588073,
0.000002862645487766713
] |
{
"id": 6,
"code_window": [
"\t\t}\n",
"\n",
"\t\tif !longRunning(req, requestInfo) {\n",
"\t\t\tif err := wg.Add(1); err != nil {\n",
"\t\t\t\thttp.Error(w, \"apiserver is shutting down.\", http.StatusInternalServerError)\n",
"\t\t\t\treturn\n",
"\t\t\t}\n",
"\t\t\tdefer wg.Done()\n",
"\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t// When apiserver is shutting down, signal clients to retry\n",
"\t\t\t\t// There is a good chance the client hit a different server, so a tight retry is good for client responsiveness.\n",
"\t\t\t\tw.Header().Add(\"Retry-After\", \"1\")\n",
"\t\t\t\tw.Header().Set(\"Content-Type\", runtime.ContentTypeJSON)\n",
"\t\t\t\tw.Header().Set(\"X-Content-Type-Options\", \"nosniff\")\n",
"\t\t\t\tstatusErr := apierrors.NewServiceUnavailable(\"apiserver is shutting down\").Status()\n",
"\t\t\t\tw.WriteHeader(int(statusErr.Code))\n",
"\t\t\t\tfmt.Fprintln(w, runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &statusErr))\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/waitgroup.go",
"type": "replace",
"edit_start_line_idx": 40
} | version: '2'
env:
GOFLAGS: -mod=vendor
tasks:
default:
deps: [test]
lint:
desc: Checks code style
cmds:
- gofmt -d -s *.go
- go vet ./...
silent: true
lint-fix:
desc: Fixes code style
cmds:
- gofmt -w -s *.go
test:
desc: Runs go tests
cmds:
- go test -race ./...
test-coverage:
desc: Runs go tests and calucates test coverage
cmds:
- go test -race -coverprofile=c.out ./...
| vendor/github.com/stretchr/objx/Taskfile.yml | 0 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.00017371299327351153,
0.0001731825468596071,
0.00017277324513997883,
0.00017312195268459618,
3.964607628859085e-7
] |
{
"id": 6,
"code_window": [
"\t\t}\n",
"\n",
"\t\tif !longRunning(req, requestInfo) {\n",
"\t\t\tif err := wg.Add(1); err != nil {\n",
"\t\t\t\thttp.Error(w, \"apiserver is shutting down.\", http.StatusInternalServerError)\n",
"\t\t\t\treturn\n",
"\t\t\t}\n",
"\t\t\tdefer wg.Done()\n",
"\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t// When apiserver is shutting down, signal clients to retry\n",
"\t\t\t\t// There is a good chance the client hit a different server, so a tight retry is good for client responsiveness.\n",
"\t\t\t\tw.Header().Add(\"Retry-After\", \"1\")\n",
"\t\t\t\tw.Header().Set(\"Content-Type\", runtime.ContentTypeJSON)\n",
"\t\t\t\tw.Header().Set(\"X-Content-Type-Options\", \"nosniff\")\n",
"\t\t\t\tstatusErr := apierrors.NewServiceUnavailable(\"apiserver is shutting down\").Status()\n",
"\t\t\t\tw.WriteHeader(int(statusErr.Code))\n",
"\t\t\t\tfmt.Fprintln(w, runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &statusErr))\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/server/filters/waitgroup.go",
"type": "replace",
"edit_start_line_idx": 40
} | // +build windows
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mount
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"k8s.io/klog"
utilexec "k8s.io/utils/exec"
"k8s.io/utils/keymutex"
utilpath "k8s.io/utils/path"
)
// Mounter provides the default implementation of mount.Interface
// for the windows platform. This implementation assumes that the
// kubelet is running in the host's root mount namespace.
type Mounter struct {
mounterPath string
}
// New returns a mount.Interface for the current system.
// It provides options to override the default mounter behavior.
// mounterPath allows using an alternative to `/bin/mount` for mounting.
func New(mounterPath string) Interface {
return &Mounter{
mounterPath: mounterPath,
}
}
// acquire lock for smb mount
var getSMBMountMutex = keymutex.NewHashed(0)
// Mount : mounts source to target with given options.
// currently only supports cifs(smb), bind mount(for disk)
func (mounter *Mounter) Mount(source string, target string, fstype string, options []string) error {
target = NormalizeWindowsPath(target)
if source == "tmpfs" {
klog.V(3).Infof("mounting source (%q), target (%q), with options (%q)", source, target, options)
return os.MkdirAll(target, 0755)
}
parentDir := filepath.Dir(target)
if err := os.MkdirAll(parentDir, 0755); err != nil {
return err
}
klog.V(4).Infof("mount options(%q) source:%q, target:%q, fstype:%q, begin to mount",
options, source, target, fstype)
bindSource := source
// tell it's going to mount azure disk or azure file according to options
if bind, _, _ := MakeBindOpts(options); bind {
// mount azure disk
bindSource = NormalizeWindowsPath(source)
} else {
if len(options) < 2 {
klog.Warningf("mount options(%q) command number(%d) less than 2, source:%q, target:%q, skip mounting",
options, len(options), source, target)
return nil
}
// currently only cifs mount is supported
if strings.ToLower(fstype) != "cifs" {
return fmt.Errorf("only cifs mount is supported now, fstype: %q, mounting source (%q), target (%q), with options (%q)", fstype, source, target, options)
}
// lock smb mount for the same source
getSMBMountMutex.LockKey(source)
defer getSMBMountMutex.UnlockKey(source)
if output, err := newSMBMapping(options[0], options[1], source); err != nil {
if isSMBMappingExist(source) {
klog.V(2).Infof("SMB Mapping(%s) already exists, now begin to remove and remount", source)
if output, err := removeSMBMapping(source); err != nil {
return fmt.Errorf("Remove-SmbGlobalMapping failed: %v, output: %q", err, output)
}
if output, err := newSMBMapping(options[0], options[1], source); err != nil {
return fmt.Errorf("New-SmbGlobalMapping remount failed: %v, output: %q", err, output)
}
} else {
return fmt.Errorf("New-SmbGlobalMapping failed: %v, output: %q", err, output)
}
}
}
if output, err := exec.Command("cmd", "/c", "mklink", "/D", target, bindSource).CombinedOutput(); err != nil {
klog.Errorf("mklink failed: %v, source(%q) target(%q) output: %q", err, bindSource, target, string(output))
return err
}
return nil
}
// newSMBMapping creates an SMB global mapping for remotepath with the given
// username and password, returning the command output and any error.
func newSMBMapping(username, password, remotepath string) (string, error) {
if username == "" || password == "" || remotepath == "" {
return "", fmt.Errorf("invalid parameter(username: %s, password: %s, remoteapth: %s)", username, password, remotepath)
}
// use PowerShell Environment Variables to store user input string to prevent command line injection
// https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_environment_variables?view=powershell-5.1
cmdLine := `$PWord = ConvertTo-SecureString -String $Env:smbpassword -AsPlainText -Force` +
`;$Credential = New-Object -TypeName System.Management.Automation.PSCredential -ArgumentList $Env:smbuser, $PWord` +
`;New-SmbGlobalMapping -RemotePath $Env:smbremotepath -Credential $Credential`
cmd := exec.Command("powershell", "/c", cmdLine)
cmd.Env = append(os.Environ(),
fmt.Sprintf("smbuser=%s", username),
fmt.Sprintf("smbpassword=%s", password),
fmt.Sprintf("smbremotepath=%s", remotepath))
output, err := cmd.CombinedOutput()
return string(output), err
}
// isSMBMappingExist reports whether remotepath already has an SMB global mapping;
// any error from Get-SmbGlobalMapping is treated as no mapping.
func isSMBMappingExist(remotepath string) bool {
cmd := exec.Command("powershell", "/c", `Get-SmbGlobalMapping -RemotePath $Env:smbremotepath`)
cmd.Env = append(os.Environ(), fmt.Sprintf("smbremotepath=%s", remotepath))
_, err := cmd.CombinedOutput()
return err == nil
}
// removeSMBMapping removes the SMB global mapping for remotepath.
func removeSMBMapping(remotepath string) (string, error) {
cmd := exec.Command("powershell", "/c", `Remove-SmbGlobalMapping -RemotePath $Env:smbremotepath -Force`)
cmd.Env = append(os.Environ(), fmt.Sprintf("smbremotepath=%s", remotepath))
output, err := cmd.CombinedOutput()
return string(output), err
}
// Unmount unmounts the target by removing the directory symbolic link created during Mount.
func (mounter *Mounter) Unmount(target string) error {
klog.V(4).Infof("azureMount: Unmount target (%q)", target)
target = NormalizeWindowsPath(target)
if output, err := exec.Command("cmd", "/c", "rmdir", target).CombinedOutput(); err != nil {
klog.Errorf("rmdir failed: %v, output: %q", err, string(output))
return err
}
return nil
}
// List returns a list of all mounted filesystems. TODO: not yet implemented on Windows; it currently returns an empty list.
func (mounter *Mounter) List() ([]MountPoint, error) {
return []MountPoint{}, nil
}
// IsLikelyNotMountPoint determines if a directory is not a mountpoint.
func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) {
stat, err := os.Lstat(file)
if err != nil {
return true, err
}
// If current file is a symlink, then it is a mountpoint.
if stat.Mode()&os.ModeSymlink != 0 {
target, err := os.Readlink(file)
if err != nil {
return true, fmt.Errorf("readlink error: %v", err)
}
exists, err := utilpath.Exists(utilpath.CheckFollowSymlink, target)
if err != nil {
return true, err
}
return !exists, nil
}
return true, nil
}
// GetMountRefs checks the given path and returns at most the path itself; there is no way to enumerate all mount points on Windows.
func (mounter *Mounter) GetMountRefs(pathname string) ([]string, error) {
windowsPath := NormalizeWindowsPath(pathname)
pathExists, pathErr := PathExists(windowsPath)
if !pathExists {
return []string{}, nil
} else if IsCorruptedMnt(pathErr) {
klog.Warningf("GetMountRefs found corrupted mount at %s, treating as unmounted path", windowsPath)
return []string{}, nil
} else if pathErr != nil {
return nil, fmt.Errorf("error checking path %s: %v", windowsPath, pathErr)
}
return []string{pathname}, nil
}
func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, fstype string, options []string) error {
// Try to mount the disk
klog.V(4).Infof("Attempting to formatAndMount disk: %s %s %s", fstype, source, target)
if err := ValidateDiskNumber(source); err != nil {
klog.Errorf("diskMount: formatAndMount failed, err: %v", err)
return err
}
if len(fstype) == 0 {
// Use 'NTFS' as the default
fstype = "NTFS"
}
// format disk if it is unformatted(raw)
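// The pipeline selects the disk by number, initializes it with an MBR
// partition table only while its partition style is still 'raw', creates a
// single maximum-size partition with an auto-assigned drive letter, and
// formats that partition with the requested filesystem.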
cmd := fmt.Sprintf("Get-Disk -Number %s | Where partitionstyle -eq 'raw' | Initialize-Disk -PartitionStyle MBR -PassThru"+
" | New-Partition -AssignDriveLetter -UseMaximumSize | Format-Volume -FileSystem %s -Confirm:$false", source, fstype)
if output, err := mounter.Exec.Command("powershell", "/c", cmd).CombinedOutput(); err != nil {
return fmt.Errorf("diskMount: format disk failed, error: %v, output: %q", err, string(output))
}
klog.V(4).Infof("diskMount: Disk successfully formatted, disk: %q, fstype: %q", source, fstype)
driveLetter, err := getDriveLetterByDiskNumber(source, mounter.Exec)
if err != nil {
return err
}
driverPath := driveLetter + ":"
target = NormalizeWindowsPath(target)
klog.V(4).Infof("Attempting to formatAndMount disk: %s %s %s", fstype, driverPath, target)
if output, err := mounter.Exec.Command("cmd", "/c", "mklink", "/D", target, driverPath).CombinedOutput(); err != nil {
klog.Errorf("mklink failed: %v, output: %q", err, string(output))
return err
}
return nil
}
// getDriveLetterByDiskNumber returns the drive letter assigned to the given Windows disk number.
func getDriveLetterByDiskNumber(diskNum string, exec utilexec.Interface) (string, error) {
cmd := fmt.Sprintf("(Get-Partition -DiskNumber %s).DriveLetter", diskNum)
output, err := exec.Command("powershell", "/c", cmd).CombinedOutput()
if err != nil {
return "", fmt.Errorf("azureMount: Get Drive Letter failed: %v, output: %q", err, string(output))
}
if len(string(output)) < 1 {
return "", fmt.Errorf("azureMount: Get Drive Letter failed, output is empty")
}
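// Get-Partition can emit more than one drive letter when the disk has several
// partitions; only the first character of the output is used here, which
// assumes a single data partition.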
return string(output)[:1], nil
}
// getAllParentLinks walks all symbolic links and returns all the parent targets recursively
func getAllParentLinks(path string) ([]string, error) {
const maxIter = 255
links := []string{}
for {
links = append(links, path)
if len(links) > maxIter {
return links, fmt.Errorf("unexpected length of parent links: %v", links)
}
fi, err := os.Lstat(path)
if err != nil {
return links, fmt.Errorf("Lstat: %v", err)
}
if fi.Mode()&os.ModeSymlink == 0 {
break
}
path, err = os.Readlink(path)
if err != nil {
return links, fmt.Errorf("Readlink error: %v", err)
}
}
return links, nil
}
| pkg/util/mount/mount_windows.go | 0 | https://github.com/kubernetes/kubernetes/commit/a3c82e8ae313a6ed97a63a4435438aa067225527 | [
0.00020623616001103073,
0.00017056391516234726,
0.0001616235967958346,
0.00016845281061250716,
0.000007929398634587415
] |