hunk
dict | file
stringlengths 0
11.8M
| file_path
stringlengths 2
234
| label
int64 0
1
| commit_url
stringlengths 74
103
| dependency_score
sequencelengths 5
5
|
---|---|---|---|---|---|
{
"id": 5,
"code_window": [
"\tname := data.Get(\"name\").(string)\n",
"\tsql := data.Get(\"sql\").(string)\n",
"\tusername_length := data.Get(\"username_length\").(int)\n",
"\n",
"\t// Get our connection\n",
"\tdb, err := b.DB(req.Storage)\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tdisplayname_length := data.Get(\"displayname_length\").(int)\n"
],
"file_path": "builtin/logical/mysql/path_roles.go",
"type": "add",
"edit_start_line_idx": 113
} | package mysql
import (
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
func pathListRoles(b *backend) *framework.Path {
return &framework.Path{
Pattern: "roles/?$",
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ListOperation: b.pathRoleList,
},
HelpSynopsis: pathRoleHelpSyn,
HelpDescription: pathRoleHelpDesc,
}
}
func pathRoles(b *backend) *framework.Path {
return &framework.Path{
Pattern: "roles/" + framework.GenericNameRegex("name"),
Fields: map[string]*framework.FieldSchema{
"name": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Name of the role.",
},
"sql": &framework.FieldSchema{
Type: framework.TypeString,
Description: "SQL string to create a user. See help for more info.",
},
"username_length": &framework.FieldSchema{
Type: framework.TypeInt,
Description: "number of characters to truncate generated mysql usernames to (default 10)",
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ReadOperation: b.pathRoleRead,
logical.UpdateOperation: b.pathRoleCreate,
logical.DeleteOperation: b.pathRoleDelete,
},
HelpSynopsis: pathRoleHelpSyn,
HelpDescription: pathRoleHelpDesc,
}
}
func (b *backend) Role(s logical.Storage, n string) (*roleEntry, error) {
entry, err := s.Get("role/" + n)
if err != nil {
return nil, err
}
if entry == nil {
return nil, nil
}
var result roleEntry
if err := entry.DecodeJSON(&result); err != nil {
return nil, err
}
return &result, nil
}
func (b *backend) pathRoleDelete(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
err := req.Storage.Delete("role/" + data.Get("name").(string))
if err != nil {
return nil, err
}
return nil, nil
}
func (b *backend) pathRoleRead(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
role, err := b.Role(req.Storage, data.Get("name").(string))
if err != nil {
return nil, err
}
if role == nil {
return nil, nil
}
return &logical.Response{
Data: map[string]interface{}{
"sql": role.SQL,
},
}, nil
}
func (b *backend) pathRoleList(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
entries, err := req.Storage.List("role/")
if err != nil {
return nil, err
}
return logical.ListResponse(entries), nil
}
func (b *backend) pathRoleCreate(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
name := data.Get("name").(string)
sql := data.Get("sql").(string)
username_length := data.Get("username_length").(int)
// Get our connection
db, err := b.DB(req.Storage)
if err != nil {
return nil, err
}
// Test the query by trying to prepare it
for _, query := range SplitSQL(sql) {
stmt, err := db.Prepare(Query(query, map[string]string{
"name": "foo",
"password": "bar",
}))
if err != nil {
return logical.ErrorResponse(fmt.Sprintf(
"Error testing query: %s", err)), nil
}
stmt.Close()
}
// Store it
entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{
SQL: sql,
USERNAME_LENGTH: username_length,
})
if err != nil {
return nil, err
}
if err := req.Storage.Put(entry); err != nil {
return nil, err
}
return nil, nil
}
type roleEntry struct {
SQL string `json:"sql"`
USERNAME_LENGTH int `json:"username_length"`
}
const pathRoleHelpSyn = `
Manage the roles that can be created with this backend.
`
const pathRoleHelpDesc = `
This path lets you manage the roles that can be created with this backend.
The "sql" parameter customizes the SQL string used to create the role.
This can be a sequence of SQL queries, each semi-colon seperated. Some
substitution will be done to the SQL string for certain keys.
The names of the variables must be surrounded by "{{" and "}}" to be replaced.
* "name" - The random username generated for the DB user.
* "password" - The random password generated for the DB user.
Example of a decent SQL query to use:
CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';
GRANT ALL ON db1.* TO '{{name}}'@'%';
Note the above user would be able to access anything in db1. Please see the MySQL
manual on the GRANT command to learn how to do more fine grained access.
The "username_length" parameter determines how many characters of the
role name will be used in creating the generated mysql username; the
default is 10. Note that mysql versions prior to 5.8 have a 16 character
total limit on usernames.
`
| builtin/logical/mysql/path_roles.go | 1 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.9988846182823181,
0.17964065074920654,
0.00016412002150900662,
0.0003446686314418912,
0.36526814103126526
] |
{
"id": 5,
"code_window": [
"\tname := data.Get(\"name\").(string)\n",
"\tsql := data.Get(\"sql\").(string)\n",
"\tusername_length := data.Get(\"username_length\").(int)\n",
"\n",
"\t// Get our connection\n",
"\tdb, err := b.DB(req.Storage)\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tdisplayname_length := data.Get(\"displayname_length\").(int)\n"
],
"file_path": "builtin/logical/mysql/path_roles.go",
"type": "add",
"edit_start_line_idx": 113
} | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build 386,freebsd
package unix
import (
"syscall"
"unsafe"
)
func Getpagesize() int { return 4096 }
func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
func NsecToTimespec(nsec int64) (ts Timespec) {
ts.Sec = int32(nsec / 1e9)
ts.Nsec = int32(nsec % 1e9)
return
}
func NsecToTimeval(nsec int64) (tv Timeval) {
nsec += 999 // round up to microsecond
tv.Usec = int32(nsec % 1e9 / 1e3)
tv.Sec = int32(nsec / 1e9)
return
}
func SetKevent(k *Kevent_t, fd, mode, flags int) {
k.Ident = uint32(fd)
k.Filter = int16(mode)
k.Flags = uint16(flags)
}
func (iov *Iovec) SetLen(length int) {
iov.Len = uint32(length)
}
func (msghdr *Msghdr) SetControllen(length int) {
msghdr.Controllen = uint32(length)
}
func (cmsg *Cmsghdr) SetLen(length int) {
cmsg.Len = uint32(length)
}
func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
var writtenOut uint64 = 0
_, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0)
written = int(writtenOut)
if e1 != 0 {
err = e1
}
return
}
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
| vendor/golang.org/x/sys/unix/syscall_freebsd_386.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00022913698921911418,
0.000180175993591547,
0.00016335486725438386,
0.00017505601863376796,
0.000020651012164307758
] |
{
"id": 5,
"code_window": [
"\tname := data.Get(\"name\").(string)\n",
"\tsql := data.Get(\"sql\").(string)\n",
"\tusername_length := data.Get(\"username_length\").(int)\n",
"\n",
"\t// Get our connection\n",
"\tdb, err := b.DB(req.Storage)\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tdisplayname_length := data.Get(\"displayname_length\").(int)\n"
],
"file_path": "builtin/logical/mysql/path_roles.go",
"type": "add",
"edit_start_line_idx": 113
} | // Copyright 2013 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package github
import (
"fmt"
"time"
)
// IssueComment represents a comment left on an issue.
type IssueComment struct {
ID *int `json:"id,omitempty"`
Body *string `json:"body,omitempty"`
User *User `json:"user,omitempty"`
Reactions *Reactions `json:"reactions,omitempty"`
CreatedAt *time.Time `json:"created_at,omitempty"`
UpdatedAt *time.Time `json:"updated_at,omitempty"`
URL *string `json:"url,omitempty"`
HTMLURL *string `json:"html_url,omitempty"`
IssueURL *string `json:"issue_url,omitempty"`
}
func (i IssueComment) String() string {
return Stringify(i)
}
// IssueListCommentsOptions specifies the optional parameters to the
// IssuesService.ListComments method.
type IssueListCommentsOptions struct {
// Sort specifies how to sort comments. Possible values are: created, updated.
Sort string `url:"sort,omitempty"`
// Direction in which to sort comments. Possible values are: asc, desc.
Direction string `url:"direction,omitempty"`
// Since filters comments by time.
Since time.Time `url:"since,omitempty"`
ListOptions
}
// ListComments lists all comments on the specified issue. Specifying an issue
// number of 0 will return all comments on all issues for the repository.
//
// GitHub API docs: http://developer.github.com/v3/issues/comments/#list-comments-on-an-issue
func (s *IssuesService) ListComments(owner string, repo string, number int, opt *IssueListCommentsOptions) ([]*IssueComment, *Response, error) {
var u string
if number == 0 {
u = fmt.Sprintf("repos/%v/%v/issues/comments", owner, repo)
} else {
u = fmt.Sprintf("repos/%v/%v/issues/%d/comments", owner, repo, number)
}
u, err := addOptions(u, opt)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeReactionsPreview)
comments := new([]*IssueComment)
resp, err := s.client.Do(req, comments)
if err != nil {
return nil, resp, err
}
return *comments, resp, err
}
// GetComment fetches the specified issue comment.
//
// GitHub API docs: http://developer.github.com/v3/issues/comments/#get-a-single-comment
func (s *IssuesService) GetComment(owner string, repo string, id int) (*IssueComment, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, id)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeReactionsPreview)
comment := new(IssueComment)
resp, err := s.client.Do(req, comment)
if err != nil {
return nil, resp, err
}
return comment, resp, err
}
// CreateComment creates a new comment on the specified issue.
//
// GitHub API docs: http://developer.github.com/v3/issues/comments/#create-a-comment
func (s *IssuesService) CreateComment(owner string, repo string, number int, comment *IssueComment) (*IssueComment, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/%d/comments", owner, repo, number)
req, err := s.client.NewRequest("POST", u, comment)
if err != nil {
return nil, nil, err
}
c := new(IssueComment)
resp, err := s.client.Do(req, c)
if err != nil {
return nil, resp, err
}
return c, resp, err
}
// EditComment updates an issue comment.
//
// GitHub API docs: http://developer.github.com/v3/issues/comments/#edit-a-comment
func (s *IssuesService) EditComment(owner string, repo string, id int, comment *IssueComment) (*IssueComment, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, id)
req, err := s.client.NewRequest("PATCH", u, comment)
if err != nil {
return nil, nil, err
}
c := new(IssueComment)
resp, err := s.client.Do(req, c)
if err != nil {
return nil, resp, err
}
return c, resp, err
}
// DeleteComment deletes an issue comment.
//
// GitHub API docs: http://developer.github.com/v3/issues/comments/#delete-a-comment
func (s *IssuesService) DeleteComment(owner string, repo string, id int) (*Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, id)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
return s.client.Do(req, nil)
}
| vendor/github.com/google/go-github/github/issues_comments.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.0030849191825836897,
0.0006644417298957705,
0.00016635651991236955,
0.00017917243530973792,
0.0009229461429640651
] |
{
"id": 5,
"code_window": [
"\tname := data.Get(\"name\").(string)\n",
"\tsql := data.Get(\"sql\").(string)\n",
"\tusername_length := data.Get(\"username_length\").(int)\n",
"\n",
"\t// Get our connection\n",
"\tdb, err := b.DB(req.Storage)\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tdisplayname_length := data.Get(\"displayname_length\").(int)\n"
],
"file_path": "builtin/logical/mysql/path_roles.go",
"type": "add",
"edit_start_line_idx": 113
} | package authapi
import (
"encoding/json"
"net/url"
"strconv"
"github.com/duosecurity/duo_api_golang"
)
type AuthApi struct {
duoapi.DuoApi
}
// Build a new Duo Auth API object.
// api is a duoapi.DuoApi object used to make the Duo Rest API calls.
// Example: authapi.NewAuthApi(*duoapi.NewDuoApi(ikey,skey,host,userAgent,duoapi.SetTimeout(10*time.Second)))
func NewAuthApi(api duoapi.DuoApi) *AuthApi {
return &AuthApi{api}
}
// API calls will return a StatResult object. On success, Stat is 'OK'.
// On error, Stat is 'FAIL', and Code, Message, and Message_Detail
// contain error information.
type StatResult struct {
Stat string
Code *int32
Message *string
Message_Detail *string
}
// Return object for the 'Ping' API call.
type PingResult struct {
StatResult
Response struct {
Time int64
}
}
// Duo's Ping method. https://www.duosecurity.com/docs/authapi#/ping
// This is an unsigned Duo Rest API call which returns the Duo system's time.
// Use this method to determine whether your system time is in sync with Duo's.
func (api *AuthApi) Ping() (*PingResult, error) {
_, body, err := api.Call("GET", "/auth/v2/ping", nil, duoapi.UseTimeout)
if err != nil {
return nil, err
}
ret := &PingResult{}
if err = json.Unmarshal(body, ret); err != nil {
return nil, err
}
return ret, nil
}
// Return object for the 'Check' API call.
type CheckResult struct {
StatResult
Response struct {
Time int64
}
}
// Call Duo's Check method. https://www.duosecurity.com/docs/authapi#/check
// Check is a signed Duo API call, which returns the Duo system's time.
// Use this method to determine whether your ikey, skey and host are correct,
// and whether your system time is in sync with Duo's.
func (api *AuthApi) Check() (*CheckResult, error) {
_, body, err := api.SignedCall("GET", "/auth/v2/check", nil, duoapi.UseTimeout)
if err != nil {
return nil, err
}
ret := &CheckResult{}
if err = json.Unmarshal(body, ret); err != nil {
return nil, err
}
return ret, nil
}
// Return object for the 'Logo' API call.
type LogoResult struct {
StatResult
png *[]byte
}
// Duo's Logo method. https://www.duosecurity.com/docs/authapi#/logo
// If the API call is successful, the configured logo png is returned. Othwerwise,
// error information is returned in the LogoResult return value.
func (api *AuthApi) Logo() (*LogoResult, error) {
resp, body, err := api.SignedCall("GET", "/auth/v2/logo", nil, duoapi.UseTimeout)
if err != nil {
return nil, err
}
if resp.StatusCode == 200 {
ret := &LogoResult{StatResult: StatResult{Stat: "OK"},
png: &body}
return ret, nil
}
ret := &LogoResult{}
if err = json.Unmarshal(body, ret); err != nil {
return nil, err
}
return ret, nil
}
// Optional parameter for the Enroll method.
func EnrollUsername(username string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("username", username)
}
}
// Optional parameter for the Enroll method.
func EnrollValidSeconds(secs uint64) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("valid_secs", strconv.FormatUint(secs, 10))
}
}
// Enroll return type.
type EnrollResult struct {
StatResult
Response struct {
Activation_Barcode string
Activation_Code string
Expiration int64
User_Id string
Username string
}
}
// Duo's Enroll method. https://www.duosecurity.com/docs/authapi#/enroll
// Use EnrollUsername() to include the optional username parameter.
// Use EnrollValidSeconds() to change the default validation time limit that the
// user has to complete enrollment.
func (api *AuthApi) Enroll(options ...func(*url.Values)) (*EnrollResult, error) {
opts := url.Values{}
for _, o := range options {
o(&opts)
}
_, body, err := api.SignedCall("POST", "/auth/v2/enroll", opts, duoapi.UseTimeout)
if err != nil {
return nil, err
}
ret := &EnrollResult{}
if err = json.Unmarshal(body, ret); err != nil {
return nil, err
}
return ret, nil
}
// Response is "success", "invalid" or "waiting".
type EnrollStatusResult struct {
StatResult
Response string
}
// Duo's EnrollStatus method. https://www.duosecurity.com/docs/authapi#/enroll_status
// Return the status of an outstanding Enrollment.
func (api *AuthApi) EnrollStatus(userid string,
activationCode string) (*EnrollStatusResult, error) {
queryArgs := url.Values{}
queryArgs.Set("user_id", userid)
queryArgs.Set("activation_code", activationCode)
_, body, err := api.SignedCall("POST",
"/auth/v2/enroll_status",
queryArgs,
duoapi.UseTimeout)
if err != nil {
return nil, err
}
ret := &EnrollStatusResult{}
if err = json.Unmarshal(body, ret); err != nil {
return nil, err
}
return ret, nil
}
// Preauth return type.
type PreauthResult struct {
StatResult
Response struct {
Result string
Status_Msg string
Enroll_Portal_Url string
Devices []struct {
Device string
Type string
Name string
Number string
Capabilities []string
}
}
}
func PreauthUserId(userid string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("user_id", userid)
}
}
func PreauthUsername(username string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("username", username)
}
}
func PreauthIpAddr(ip string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("ipaddr", ip)
}
}
func PreauthTrustedToken(trustedtoken string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("trusted_device_token", trustedtoken)
}
}
// Duo's Preauth method. https://www.duosecurity.com/docs/authapi#/preauth
// options Optional values to include in the preauth call.
// Use PreauthUserId to specify the user_id parameter.
// Use PreauthUsername to specify the username parameter. You must
// specify PreauthUserId or PreauthUsername, but not both.
// Use PreauthIpAddr to include the ipaddr parameter, the ip address
// of the client attempting authroization.
// Use PreauthTrustedToken to specify the trusted_device_token parameter.
func (api *AuthApi) Preauth(options ...func(*url.Values)) (*PreauthResult, error) {
opts := url.Values{}
for _, o := range options {
o(&opts)
}
_, body, err := api.SignedCall("POST", "/auth/v2/preauth", opts, duoapi.UseTimeout)
if err != nil {
return nil, err
}
ret := &PreauthResult{}
if err = json.Unmarshal(body, ret); err != nil {
return nil, err
}
return ret, nil
}
func AuthUserId(userid string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("user_id", userid)
}
}
func AuthUsername(username string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("username", username)
}
}
func AuthIpAddr(ip string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("ipaddr", ip)
}
}
func AuthAsync() func(*url.Values) {
return func(opts *url.Values) {
opts.Set("async", "1")
}
}
func AuthDevice(device string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("device", device)
}
}
func AuthType(type_ string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("type", type_)
}
}
func AuthDisplayUsername(username string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("display_username", username)
}
}
func AuthPushinfo(pushinfo string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("pushinfo", pushinfo)
}
}
func AuthPasscode(passcode string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("passcode", passcode)
}
}
// Auth return type.
type AuthResult struct {
StatResult
Response struct {
// Synchronous
Result string
Status string
Status_Msg string
Trusted_Device_Token string
// Asynchronous
Txid string
}
}
// Duo's Auth method. https://www.duosecurity.com/docs/authapi#/auth
// Factor must be one of 'auto', 'push', 'passcode', 'sms' or 'phone'.
// Use AuthUserId to specify the user_id.
// Use AuthUsername to speicy the username. You must specify either AuthUserId
// or AuthUsername, but not both.
// Use AuthIpAddr to include the client's IP address.
// Use AuthAsync to toggle whether the call blocks for the user's response or not.
// If used asynchronously, get the auth status with the AuthStatus method.
// When using factor 'push', use AuthDevice to specify the device ID to push to.
// When using factor 'push', use AuthType to display some extra auth text to the user.
// When using factor 'push', use AuthDisplayUsername to display some extra text
// to the user.
// When using factor 'push', use AuthPushInfo to include some URL-encoded key/value
// pairs to display to the user.
// When using factor 'passcode', use AuthPasscode to specify the passcode entered
// by the user.
// When using factor 'sms' or 'phone', use AuthDevice to specify which device
// should receive the SMS or phone call.
func (api *AuthApi) Auth(factor string, options ...func(*url.Values)) (*AuthResult, error) {
params := url.Values{}
for _, o := range options {
o(¶ms)
}
params.Set("factor", factor)
var apiOps []duoapi.DuoApiOption
if _, ok := params["async"]; ok == true {
apiOps = append(apiOps, duoapi.UseTimeout)
}
_, body, err := api.SignedCall("POST", "/auth/v2/auth", params, apiOps...)
if err != nil {
return nil, err
}
ret := &AuthResult{}
if err = json.Unmarshal(body, ret); err != nil {
return nil, err
}
return ret, nil
}
// AuthStatus return type.
type AuthStatusResult struct {
StatResult
Response struct {
Result string
Status string
Status_Msg string
Trusted_Device_Token string
}
}
// Duo's auth_status method. https://www.duosecurity.com/docs/authapi#/auth_status
// When using the Auth call in async mode, use this method to retrieve the
// result of the authentication attempt.
// txid is returned by the Auth call.
func (api *AuthApi) AuthStatus(txid string) (*AuthStatusResult, error) {
opts := url.Values{}
opts.Set("txid", txid)
_, body, err := api.SignedCall("GET", "/auth/v2/auth_status", opts)
if err != nil {
return nil, err
}
ret := &AuthStatusResult{}
if err = json.Unmarshal(body, ret); err != nil {
return nil, err
}
return ret, nil
}
| vendor/github.com/duosecurity/duo_api_golang/authapi/authapi.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.0008455337956547737,
0.00022561964578926563,
0.00016637094086036086,
0.00017062400002032518,
0.00015670471475459635
] |
{
"id": 6,
"code_window": [
"\t\tstmt.Close()\n",
"\t}\n",
"\n",
"\t// Store it\n",
"\tentry, err := logical.StorageEntryJSON(\"role/\"+name, &roleEntry{\n",
"\t\tSQL: sql,\n",
"\t\tUSERNAME_LENGTH: username_length,\n",
"\t})\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tSQL: sql,\n",
"\t\tUsernameLength: username_length,\n",
"\t\tDisplaynameLength: displayname_length,\n"
],
"file_path": "builtin/logical/mysql/path_roles.go",
"type": "replace",
"edit_start_line_idx": 135
} | package mysql
import (
"fmt"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
_ "github.com/lib/pq"
)
func pathRoleCreate(b *backend) *framework.Path {
return &framework.Path{
Pattern: "creds/" + framework.GenericNameRegex("name"),
Fields: map[string]*framework.FieldSchema{
"name": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Name of the role.",
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ReadOperation: b.pathRoleCreateRead,
},
HelpSynopsis: pathRoleCreateReadHelpSyn,
HelpDescription: pathRoleCreateReadHelpDesc,
}
}
func (b *backend) pathRoleCreateRead(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
name := data.Get("name").(string)
var usernameLength int
// Get the role
role, err := b.Role(req.Storage, name)
if err != nil {
return nil, err
}
if role == nil {
return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil
}
// Determine if we have a lease
lease, err := b.Lease(req.Storage)
if err != nil {
return nil, err
}
if lease == nil {
lease = &configLease{}
}
// Generate our username and password. MySQL limits user to 16 characters
displayName := name
ul, ok := data.GetOk("username_length")
if ok == true {
usernameLength = ul.(int)
} else {
usernameLength = 10
}
if len(displayName) > usernameLength {
displayName = displayName[:usernameLength]
}
userUUID, err := uuid.GenerateUUID()
if err != nil {
return nil, err
}
username := fmt.Sprintf("%s-%s", displayName, userUUID)
if len(username) > 16 {
username = username[:16]
}
password, err := uuid.GenerateUUID()
if err != nil {
return nil, err
}
// Get our handle
db, err := b.DB(req.Storage)
if err != nil {
return nil, err
}
// Start a transaction
tx, err := db.Begin()
if err != nil {
return nil, err
}
defer tx.Rollback()
// Execute each query
for _, query := range SplitSQL(role.SQL) {
stmt, err := tx.Prepare(Query(query, map[string]string{
"name": username,
"password": password,
}))
if err != nil {
return nil, err
}
defer stmt.Close()
if _, err := stmt.Exec(); err != nil {
return nil, err
}
}
// Commit the transaction
if err := tx.Commit(); err != nil {
return nil, err
}
// Return the secret
resp := b.Secret(SecretCredsType).Response(map[string]interface{}{
"username": username,
"password": password,
}, map[string]interface{}{
"username": username,
})
resp.Secret.TTL = lease.Lease
return resp, nil
}
const pathRoleCreateReadHelpSyn = `
Request database credentials for a certain role.
`
const pathRoleCreateReadHelpDesc = `
This path reads database credentials for a certain role. The
database credentials will be generated on demand and will be automatically
revoked when the lease is up.
`
| builtin/logical/mysql/path_role_create.go | 1 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.007607878651469946,
0.001781283295713365,
0.00016367867647204548,
0.0015384505968540907,
0.0019562249071896076
] |
{
"id": 6,
"code_window": [
"\t\tstmt.Close()\n",
"\t}\n",
"\n",
"\t// Store it\n",
"\tentry, err := logical.StorageEntryJSON(\"role/\"+name, &roleEntry{\n",
"\t\tSQL: sql,\n",
"\t\tUSERNAME_LENGTH: username_length,\n",
"\t})\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tSQL: sql,\n",
"\t\tUsernameLength: username_length,\n",
"\t\tDisplaynameLength: displayname_length,\n"
],
"file_path": "builtin/logical/mysql/path_roles.go",
"type": "replace",
"edit_start_line_idx": 135
} | // password is a package for reading a password securely from a terminal.
// The code in this package disables echo in the terminal so that the
// password is not echoed back in plaintext to the user.
package password
import (
"errors"
"io"
"os"
"os/signal"
)
var ErrInterrupted = errors.New("interrupted")
// Read reads the password from the given os.File. The password
// will not be echoed back to the user. Ctrl-C will automatically return
// from this function with a blank string and an ErrInterrupted.
func Read(f *os.File) (string, error) {
ch := make(chan os.Signal, 1)
signal.Notify(ch, os.Interrupt)
defer signal.Stop(ch)
// Run the actual read in a go-routine so that we can still detect signals
var result string
var resultErr error
doneCh := make(chan struct{})
go func() {
defer close(doneCh)
result, resultErr = read(f)
}()
// Wait on either the read to finish or the signal to come through
select {
case <-ch:
return "", ErrInterrupted
case <-doneCh:
return result, resultErr
}
}
func readline(f *os.File) (string, error) {
var buf [1]byte
resultBuf := make([]byte, 0, 64)
for {
n, err := f.Read(buf[:])
if err != nil && err != io.EOF {
return "", err
}
if n == 0 || buf[0] == '\n' || buf[0] == '\r' {
break
}
// ASCII code 3 is what is sent for a Ctrl-C while reading raw.
// If we see that, then get the interrupt. We have to do this here
// because terminals in raw mode won't catch it at the shell level.
if buf[0] == 3 {
return "", ErrInterrupted
}
resultBuf = append(resultBuf, buf[0])
}
return string(resultBuf), nil
}
| helper/password/password.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00017576033133082092,
0.00016896681336220354,
0.00016357790445908904,
0.00016885001969058067,
0.0000035601826766651357
] |
{
"id": 6,
"code_window": [
"\t\tstmt.Close()\n",
"\t}\n",
"\n",
"\t// Store it\n",
"\tentry, err := logical.StorageEntryJSON(\"role/\"+name, &roleEntry{\n",
"\t\tSQL: sql,\n",
"\t\tUSERNAME_LENGTH: username_length,\n",
"\t})\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tSQL: sql,\n",
"\t\tUsernameLength: username_length,\n",
"\t\tDisplaynameLength: displayname_length,\n"
],
"file_path": "builtin/logical/mysql/path_roles.go",
"type": "replace",
"edit_start_line_idx": 135
} | package server
import (
"bytes"
"crypto/tls"
"io"
"net"
"testing"
)
type testListenerConnFn func(net.Listener) (net.Conn, error)
func testListenerImpl(t *testing.T, ln net.Listener, connFn testListenerConnFn, certName string) {
serverCh := make(chan net.Conn, 1)
go func() {
server, err := ln.Accept()
if err != nil {
t.Fatalf("err: %s", err)
}
if certName != "" {
tlsConn := server.(*tls.Conn)
tlsConn.Handshake()
}
serverCh <- server
}()
client, err := connFn(ln)
if err != nil {
t.Fatalf("err: %s", err)
}
if certName != "" {
tlsConn := client.(*tls.Conn)
if len(tlsConn.ConnectionState().PeerCertificates) != 1 {
t.Fatalf("err: number of certs too long")
}
peerName := tlsConn.ConnectionState().PeerCertificates[0].Subject.CommonName
if peerName != certName {
t.Fatalf("err: bad cert name %s, expected %s", peerName, certName)
}
}
server := <-serverCh
defer client.Close()
defer server.Close()
var buf bytes.Buffer
copyCh := make(chan struct{})
go func() {
io.Copy(&buf, server)
close(copyCh)
}()
if _, err := client.Write([]byte("foo")); err != nil {
t.Fatalf("err: %s", err)
}
client.Close()
<-copyCh
if buf.String() != "foo" {
t.Fatalf("bad: %v", buf.String())
}
}
| command/server/listener_test.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.0001987178111448884,
0.000178301619598642,
0.00016387435607612133,
0.00017286898219026625,
0.000012133266864111647
] |
{
"id": 6,
"code_window": [
"\t\tstmt.Close()\n",
"\t}\n",
"\n",
"\t// Store it\n",
"\tentry, err := logical.StorageEntryJSON(\"role/\"+name, &roleEntry{\n",
"\t\tSQL: sql,\n",
"\t\tUSERNAME_LENGTH: username_length,\n",
"\t})\n",
"\tif err != nil {\n",
"\t\treturn nil, err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tSQL: sql,\n",
"\t\tUsernameLength: username_length,\n",
"\t\tDisplaynameLength: displayname_length,\n"
],
"file_path": "builtin/logical/mysql/path_roles.go",
"type": "replace",
"edit_start_line_idx": 135
} | package awserr
import "fmt"
// SprintError returns a string of the formatted error code.
//
// Both extra and origErr are optional. If they are included their lines
// will be added, but if they are not included their lines will be ignored.
func SprintError(code, message, extra string, origErr error) string {
msg := fmt.Sprintf("%s: %s", code, message)
if extra != "" {
msg = fmt.Sprintf("%s\n\t%s", msg, extra)
}
if origErr != nil {
msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
}
return msg
}
// A baseError wraps the code and message which defines an error. It also
// can be used to wrap an original error object.
//
// Should be used as the root for errors satisfying the awserr.Error. Also
// for any error which does not fit into a specific error wrapper type.
type baseError struct {
// Classification of error
code string
// Detailed information about error
message string
// Optional original error this error is based off of. Allows building
// chained errors.
errs []error
}
// newBaseError returns an error object for the code, message, and errors.
//
// code is a short no whitespace phrase depicting the classification of
// the error that is being created.
//
// message is the free flow string containing detailed information about the
// error.
//
// origErrs is the error objects which will be nested under the new errors to
// be returned.
func newBaseError(code, message string, origErrs []error) *baseError {
b := &baseError{
code: code,
message: message,
errs: origErrs,
}
return b
}
// Error returns the string representation of the error.
//
// See ErrorWithExtra for formatting.
//
// Satisfies the error interface.
func (b baseError) Error() string {
size := len(b.errs)
if size > 0 {
return SprintError(b.code, b.message, "", errorList(b.errs))
}
return SprintError(b.code, b.message, "", nil)
}
// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (b baseError) String() string {
return b.Error()
}
// Code returns the short phrase depicting the classification of the error.
func (b baseError) Code() string {
return b.code
}
// Message returns the error details message.
func (b baseError) Message() string {
return b.message
}
// OrigErr returns the original error if one was set. Nil is returned if no
// error was set. This only returns the first element in the list. If the full
// list is needed, use BatchedErrors.
func (b baseError) OrigErr() error {
switch len(b.errs) {
case 0:
return nil
case 1:
return b.errs[0]
default:
if err, ok := b.errs[0].(Error); ok {
return NewBatchError(err.Code(), err.Message(), b.errs[1:])
}
return NewBatchError("BatchedErrors",
"multiple errors occured", b.errs)
}
}
// OrigErrs returns the original errors if one was set. An empty slice is
// returned if no error was set.
func (b baseError) OrigErrs() []error {
return b.errs
}
// So that the Error interface type can be included as an anonymous field
// in the requestError struct and not conflict with the error.Error() method.
type awsError Error
// A requestError wraps a request or service error.
//
// Composed of baseError for code, message, and original error.
type requestError struct {
awsError
statusCode int
requestID string
}
// newRequestError returns a wrapped error with additional information for
// request status code, and service requestID.
//
// Should be used to wrap all request which involve service requests. Even if
// the request failed without a service response, but had an HTTP status code
// that may be meaningful.
//
// Also wraps original errors via the baseError.
func newRequestError(err Error, statusCode int, requestID string) *requestError {
return &requestError{
awsError: err,
statusCode: statusCode,
requestID: requestID,
}
}
// Error returns the string representation of the error.
// Satisfies the error interface.
func (r requestError) Error() string {
extra := fmt.Sprintf("status code: %d, request id: %s",
r.statusCode, r.requestID)
return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
}
// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (r requestError) String() string {
return r.Error()
}
// StatusCode returns the wrapped status code for the error
func (r requestError) StatusCode() int {
return r.statusCode
}
// RequestID returns the wrapped requestID
func (r requestError) RequestID() string {
return r.requestID
}
// OrigErrs returns the original errors if one was set. An empty slice is
// returned if no error was set.
func (r requestError) OrigErrs() []error {
if b, ok := r.awsError.(BatchedErrors); ok {
return b.OrigErrs()
}
return []error{r.OrigErr()}
}
// An error list that satisfies the golang interface
type errorList []error
// Error returns the string representation of the error.
//
// Satisfies the error interface.
func (e errorList) Error() string {
msg := ""
// How do we want to handle the array size being zero
if size := len(e); size > 0 {
for i := 0; i < size; i++ {
msg += fmt.Sprintf("%s", e[i].Error())
// We check the next index to see if it is within the slice.
// If it is, then we append a newline. We do this, because unit tests
// could be broken with the additional '\n'
if i+1 < size {
msg += "\n"
}
}
}
return msg
}
| vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.0017456073546782136,
0.0003209836140740663,
0.00016172335017472506,
0.0001812357222661376,
0.0003683144459500909
] |
{
"id": 7,
"code_window": [
"}\n",
"\n",
"type roleEntry struct {\n",
"\tSQL string `json:\"sql\"`\n",
"\tUSERNAME_LENGTH int `json:\"username_length\"`\n",
"}\n",
"\n",
"const pathRoleHelpSyn = `\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tSQL string `json:\"sql\"`\n",
"\tUsernameLength int `json:\"username_length\"`\n",
"\tDisplaynameLength int `json:\"displayname_length\"`\n"
],
"file_path": "builtin/logical/mysql/path_roles.go",
"type": "replace",
"edit_start_line_idx": 148
} | package mysql
import (
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
func pathListRoles(b *backend) *framework.Path {
return &framework.Path{
Pattern: "roles/?$",
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ListOperation: b.pathRoleList,
},
HelpSynopsis: pathRoleHelpSyn,
HelpDescription: pathRoleHelpDesc,
}
}
func pathRoles(b *backend) *framework.Path {
return &framework.Path{
Pattern: "roles/" + framework.GenericNameRegex("name"),
Fields: map[string]*framework.FieldSchema{
"name": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Name of the role.",
},
"sql": &framework.FieldSchema{
Type: framework.TypeString,
Description: "SQL string to create a user. See help for more info.",
},
"username_length": &framework.FieldSchema{
Type: framework.TypeInt,
Description: "number of characters to truncate generated mysql usernames to (default 10)",
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ReadOperation: b.pathRoleRead,
logical.UpdateOperation: b.pathRoleCreate,
logical.DeleteOperation: b.pathRoleDelete,
},
HelpSynopsis: pathRoleHelpSyn,
HelpDescription: pathRoleHelpDesc,
}
}
func (b *backend) Role(s logical.Storage, n string) (*roleEntry, error) {
entry, err := s.Get("role/" + n)
if err != nil {
return nil, err
}
if entry == nil {
return nil, nil
}
var result roleEntry
if err := entry.DecodeJSON(&result); err != nil {
return nil, err
}
return &result, nil
}
func (b *backend) pathRoleDelete(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
err := req.Storage.Delete("role/" + data.Get("name").(string))
if err != nil {
return nil, err
}
return nil, nil
}
func (b *backend) pathRoleRead(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
role, err := b.Role(req.Storage, data.Get("name").(string))
if err != nil {
return nil, err
}
if role == nil {
return nil, nil
}
return &logical.Response{
Data: map[string]interface{}{
"sql": role.SQL,
},
}, nil
}
func (b *backend) pathRoleList(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
entries, err := req.Storage.List("role/")
if err != nil {
return nil, err
}
return logical.ListResponse(entries), nil
}
func (b *backend) pathRoleCreate(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
name := data.Get("name").(string)
sql := data.Get("sql").(string)
username_length := data.Get("username_length").(int)
// Get our connection
db, err := b.DB(req.Storage)
if err != nil {
return nil, err
}
// Test the query by trying to prepare it
for _, query := range SplitSQL(sql) {
stmt, err := db.Prepare(Query(query, map[string]string{
"name": "foo",
"password": "bar",
}))
if err != nil {
return logical.ErrorResponse(fmt.Sprintf(
"Error testing query: %s", err)), nil
}
stmt.Close()
}
// Store it
entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{
SQL: sql,
USERNAME_LENGTH: username_length,
})
if err != nil {
return nil, err
}
if err := req.Storage.Put(entry); err != nil {
return nil, err
}
return nil, nil
}
type roleEntry struct {
SQL string `json:"sql"`
USERNAME_LENGTH int `json:"username_length"`
}
const pathRoleHelpSyn = `
Manage the roles that can be created with this backend.
`
const pathRoleHelpDesc = `
This path lets you manage the roles that can be created with this backend.
The "sql" parameter customizes the SQL string used to create the role.
This can be a sequence of SQL queries, each semi-colon seperated. Some
substitution will be done to the SQL string for certain keys.
The names of the variables must be surrounded by "{{" and "}}" to be replaced.
* "name" - The random username generated for the DB user.
* "password" - The random password generated for the DB user.
Example of a decent SQL query to use:
CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';
GRANT ALL ON db1.* TO '{{name}}'@'%';
Note the above user would be able to access anything in db1. Please see the MySQL
manual on the GRANT command to learn how to do more fine grained access.
The "username_length" parameter determines how many characters of the
role name will be used in creating the generated mysql username; the
default is 10. Note that mysql versions prior to 5.8 have a 16 character
total limit on usernames.
`
| builtin/logical/mysql/path_roles.go | 1 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.998993456363678,
0.26705360412597656,
0.0001643770228838548,
0.004823715891689062,
0.4365203380584717
] |
{
"id": 7,
"code_window": [
"}\n",
"\n",
"type roleEntry struct {\n",
"\tSQL string `json:\"sql\"`\n",
"\tUSERNAME_LENGTH int `json:\"username_length\"`\n",
"}\n",
"\n",
"const pathRoleHelpSyn = `\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tSQL string `json:\"sql\"`\n",
"\tUsernameLength int `json:\"username_length\"`\n",
"\tDisplaynameLength int `json:\"displayname_length\"`\n"
],
"file_path": "builtin/logical/mysql/path_roles.go",
"type": "replace",
"edit_start_line_idx": 148
} | // Package rootcerts contains functions to aid in loading CA certificates for
// TLS connections.
//
// In addition, its default behavior on Darwin works around an open issue [1]
// in Go's crypto/x509 that prevents certicates from being loaded from the
// System or Login keychains.
//
// [1] https://github.com/golang/go/issues/14514
package rootcerts
| vendor/github.com/hashicorp/go-rootcerts/doc.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00016772947856225073,
0.00016772947856225073,
0.00016772947856225073,
0.00016772947856225073,
0
] |
{
"id": 7,
"code_window": [
"}\n",
"\n",
"type roleEntry struct {\n",
"\tSQL string `json:\"sql\"`\n",
"\tUSERNAME_LENGTH int `json:\"username_length\"`\n",
"}\n",
"\n",
"const pathRoleHelpSyn = `\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tSQL string `json:\"sql\"`\n",
"\tUsernameLength int `json:\"username_length\"`\n",
"\tDisplaynameLength int `json:\"displayname_length\"`\n"
],
"file_path": "builtin/logical/mysql/path_roles.go",
"type": "replace",
"edit_start_line_idx": 148
} | // Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows
// Package terminal provides support functions for dealing with terminals, as
// commonly found on UNIX systems.
//
// Putting a terminal into raw mode is the most common requirement:
//
// oldState, err := terminal.MakeRaw(0)
// if err != nil {
// panic(err)
// }
// defer terminal.Restore(0, oldState)
package terminal
import (
"io"
"syscall"
"unsafe"
)
const (
enableLineInput = 2
enableEchoInput = 4
enableProcessedInput = 1
enableWindowInput = 8
enableMouseInput = 16
enableInsertMode = 32
enableQuickEditMode = 64
enableExtendedFlags = 128
enableAutoPosition = 256
enableProcessedOutput = 1
enableWrapAtEolOutput = 2
)
var kernel32 = syscall.NewLazyDLL("kernel32.dll")
var (
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
)
type (
short int16
word uint16
coord struct {
x short
y short
}
smallRect struct {
left short
top short
right short
bottom short
}
consoleScreenBufferInfo struct {
size coord
cursorPosition coord
attributes word
window smallRect
maximumWindowSize coord
}
)
type State struct {
mode uint32
}
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
}
// MakeRaw put the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd int) (*State, error) {
var st uint32
_, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
if e != 0 {
return nil, error(e)
}
raw := st &^ (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput)
_, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(raw), 0)
if e != 0 {
return nil, error(e)
}
return &State{st}, nil
}
// GetState returns the current state of a terminal which may be useful to
// restore the terminal after a signal.
func GetState(fd int) (*State, error) {
var st uint32
_, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
if e != 0 {
return nil, error(e)
}
return &State{st}, nil
}
// Restore restores the terminal connected to the given file descriptor to a
// previous state.
func Restore(fd int, state *State) error {
_, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0)
return err
}
// GetSize returns the dimensions of the given terminal.
func GetSize(fd int) (width, height int, err error) {
var info consoleScreenBufferInfo
_, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0)
if e != 0 {
return 0, 0, error(e)
}
return int(info.size.x), int(info.size.y), nil
}
// ReadPassword reads a line of input from a terminal without local echo. This
// is commonly used for inputting passwords and other sensitive data. The slice
// returned does not include the \n.
func ReadPassword(fd int) ([]byte, error) {
var st uint32
_, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
if e != 0 {
return nil, error(e)
}
old := st
st &^= (enableEchoInput)
st |= (enableProcessedInput | enableLineInput | enableProcessedOutput)
_, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0)
if e != 0 {
return nil, error(e)
}
defer func() {
syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0)
}()
var buf [16]byte
var ret []byte
for {
n, err := syscall.Read(syscall.Handle(fd), buf[:])
if err != nil {
return nil, err
}
if n == 0 {
if len(ret) == 0 {
return nil, io.EOF
}
break
}
if buf[n-1] == '\n' {
n--
}
if n > 0 && buf[n-1] == '\r' {
n--
}
ret = append(ret, buf[:n]...)
if n < len(buf) {
break
}
}
return ret, nil
}
| vendor/golang.org/x/crypto/ssh/terminal/util_windows.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00022106220421846956,
0.00017458574438933283,
0.00016515108291059732,
0.0001713706587906927,
0.000013204195056459866
] |
{
"id": 7,
"code_window": [
"}\n",
"\n",
"type roleEntry struct {\n",
"\tSQL string `json:\"sql\"`\n",
"\tUSERNAME_LENGTH int `json:\"username_length\"`\n",
"}\n",
"\n",
"const pathRoleHelpSyn = `\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tSQL string `json:\"sql\"`\n",
"\tUsernameLength int `json:\"username_length\"`\n",
"\tDisplaynameLength int `json:\"displayname_length\"`\n"
],
"file_path": "builtin/logical/mysql/path_roles.go",
"type": "replace",
"edit_start_line_idx": 148
} | package command
import (
"flag"
"fmt"
"strings"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/meta"
)
// ReadCommand is a Command that reads data from the Vault.
type ReadCommand struct {
meta.Meta
}
func (c *ReadCommand) Run(args []string) int {
var format string
var field string
var err error
var secret *api.Secret
var flags *flag.FlagSet
flags = c.Meta.FlagSet("read", meta.FlagSetDefault)
flags.StringVar(&format, "format", "table", "")
flags.StringVar(&field, "field", "", "")
flags.Usage = func() { c.Ui.Error(c.Help()) }
if err := flags.Parse(args); err != nil {
return 1
}
args = flags.Args()
if len(args) != 1 || len(args[0]) == 0 {
c.Ui.Error("read expects one argument")
flags.Usage()
return 1
}
path := args[0]
if path[0] == '/' {
path = path[1:]
}
client, err := c.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error initializing client: %s", err))
return 2
}
secret, err = client.Logical().Read(path)
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error reading %s: %s", path, err))
return 1
}
if secret == nil {
c.Ui.Error(fmt.Sprintf(
"No value found at %s", path))
return 1
}
// Handle single field output
if field != "" {
return PrintRawField(c.Ui, secret, field)
}
return OutputSecret(c.Ui, format, secret)
}
func (c *ReadCommand) Synopsis() string {
return "Read data or secrets from Vault"
}
func (c *ReadCommand) Help() string {
helpText := `
Usage: vault read [options] path
Read data from Vault.
Reads data at the given path from Vault. This can be used to read
secrets and configuration as well as generate dynamic values from
materialized backends. Please reference the documentation for the
backends in use to determine key structure.
General Options:
` + meta.GeneralOptionsUsage() + `
Read Options:
-format=table The format for output. By default it is a whitespace-
delimited table. This can also be json or yaml.
-field=field If included, the raw value of the specified field
will be output raw to stdout.
`
return strings.TrimSpace(helpText)
}
| command/read.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00018391026242170483,
0.00017033176845870912,
0.00016275641974061728,
0.00016924491501413286,
0.000005833741397509584
] |
{
"id": 8,
"code_window": [
"\n",
"Note the above user would be able to access anything in db1. Please see the MySQL\n",
"manual on the GRANT command to learn how to do more fine grained access.\n",
"\n",
"The \"username_length\" parameter determines how many characters of the\n",
"role name will be used in creating the generated mysql username; the\n",
"default is 10. Note that mysql versions prior to 5.8 have a 16 character\n",
"total limit on usernames.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"The \"displayname_length\" parameter determines how many characters of the\n"
],
"file_path": "builtin/logical/mysql/path_roles.go",
"type": "replace",
"edit_start_line_idx": 176
} | package mysql
import (
"fmt"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
_ "github.com/lib/pq"
)
func pathRoleCreate(b *backend) *framework.Path {
return &framework.Path{
Pattern: "creds/" + framework.GenericNameRegex("name"),
Fields: map[string]*framework.FieldSchema{
"name": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Name of the role.",
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ReadOperation: b.pathRoleCreateRead,
},
HelpSynopsis: pathRoleCreateReadHelpSyn,
HelpDescription: pathRoleCreateReadHelpDesc,
}
}
func (b *backend) pathRoleCreateRead(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
name := data.Get("name").(string)
var usernameLength int
// Get the role
role, err := b.Role(req.Storage, name)
if err != nil {
return nil, err
}
if role == nil {
return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil
}
// Determine if we have a lease
lease, err := b.Lease(req.Storage)
if err != nil {
return nil, err
}
if lease == nil {
lease = &configLease{}
}
// Generate our username and password. MySQL limits user to 16 characters
displayName := name
ul, ok := data.GetOk("username_length")
if ok == true {
usernameLength = ul.(int)
} else {
usernameLength = 10
}
if len(displayName) > usernameLength {
displayName = displayName[:usernameLength]
}
userUUID, err := uuid.GenerateUUID()
if err != nil {
return nil, err
}
username := fmt.Sprintf("%s-%s", displayName, userUUID)
if len(username) > 16 {
username = username[:16]
}
password, err := uuid.GenerateUUID()
if err != nil {
return nil, err
}
// Get our handle
db, err := b.DB(req.Storage)
if err != nil {
return nil, err
}
// Start a transaction
tx, err := db.Begin()
if err != nil {
return nil, err
}
defer tx.Rollback()
// Execute each query
for _, query := range SplitSQL(role.SQL) {
stmt, err := tx.Prepare(Query(query, map[string]string{
"name": username,
"password": password,
}))
if err != nil {
return nil, err
}
defer stmt.Close()
if _, err := stmt.Exec(); err != nil {
return nil, err
}
}
// Commit the transaction
if err := tx.Commit(); err != nil {
return nil, err
}
// Return the secret
resp := b.Secret(SecretCredsType).Response(map[string]interface{}{
"username": username,
"password": password,
}, map[string]interface{}{
"username": username,
})
resp.Secret.TTL = lease.Lease
return resp, nil
}
const pathRoleCreateReadHelpSyn = `
Request database credentials for a certain role.
`
const pathRoleCreateReadHelpDesc = `
This path reads database credentials for a certain role. The
database credentials will be generated on demand and will be automatically
revoked when the lease is up.
`
| builtin/logical/mysql/path_role_create.go | 1 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.17474450170993805,
0.01472871657460928,
0.00016655618674121797,
0.0007983751129359007,
0.044513460248708725
] |
{
"id": 8,
"code_window": [
"\n",
"Note the above user would be able to access anything in db1. Please see the MySQL\n",
"manual on the GRANT command to learn how to do more fine grained access.\n",
"\n",
"The \"username_length\" parameter determines how many characters of the\n",
"role name will be used in creating the generated mysql username; the\n",
"default is 10. Note that mysql versions prior to 5.8 have a 16 character\n",
"total limit on usernames.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"The \"displayname_length\" parameter determines how many characters of the\n"
],
"file_path": "builtin/logical/mysql/path_roles.go",
"type": "replace",
"edit_start_line_idx": 176
} | // Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build darwin
package fileutil
import (
"os"
"syscall"
)
// Fsync on HFS/OSX flushes the data on to the physical drive but the drive
// may not write it to the persistent media for quite sometime and it may be
// written in out-of-order sequence. Using F_FULLFSYNC ensures that the
// physical drive's buffer will also get flushed to the media.
func Fsync(f *os.File) error {
_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_FULLFSYNC), uintptr(0))
if errno == 0 {
return nil
}
return errno
}
// Fdatasync on darwin platform invokes fcntl(F_FULLFSYNC) for actual persistence
// on physical drive media.
func Fdatasync(f *os.File) error {
return Fsync(f)
}
| vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00017548870528116822,
0.00017006299458444118,
0.00016374175902456045,
0.0001704989408608526,
0.0000041526677705405746
] |
{
"id": 8,
"code_window": [
"\n",
"Note the above user would be able to access anything in db1. Please see the MySQL\n",
"manual on the GRANT command to learn how to do more fine grained access.\n",
"\n",
"The \"username_length\" parameter determines how many characters of the\n",
"role name will be used in creating the generated mysql username; the\n",
"default is 10. Note that mysql versions prior to 5.8 have a 16 character\n",
"total limit on usernames.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"The \"displayname_length\" parameter determines how many characters of the\n"
],
"file_path": "builtin/logical/mysql/path_roles.go",
"type": "replace",
"edit_start_line_idx": 176
} | package http
import (
"encoding/hex"
"errors"
"fmt"
"net/http"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/vault"
)
func handleSysSeal(core *vault.Core) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
req, statusCode, err := buildLogicalRequest(w, r)
if err != nil || statusCode != 0 {
respondError(w, statusCode, err)
return
}
switch req.Operation {
case logical.UpdateOperation:
default:
respondError(w, http.StatusMethodNotAllowed, nil)
return
}
// Seal with the token above
if err := core.SealWithRequest(req); err != nil {
respondError(w, http.StatusInternalServerError, err)
return
}
respondOk(w, nil)
})
}
func handleSysStepDown(core *vault.Core) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
req, statusCode, err := buildLogicalRequest(w, r)
if err != nil || statusCode != 0 {
respondError(w, statusCode, err)
return
}
switch req.Operation {
case logical.UpdateOperation:
default:
respondError(w, http.StatusMethodNotAllowed, nil)
return
}
// Seal with the token above
if err := core.StepDown(req); err != nil {
respondError(w, http.StatusInternalServerError, err)
return
}
respondOk(w, nil)
})
}
func handleSysUnseal(core *vault.Core) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case "PUT":
case "POST":
default:
respondError(w, http.StatusMethodNotAllowed, nil)
return
}
// Parse the request
var req UnsealRequest
if err := parseRequest(r, &req); err != nil {
respondError(w, http.StatusBadRequest, err)
return
}
if !req.Reset && req.Key == "" {
respondError(
w, http.StatusBadRequest,
errors.New("'key' must specified in request body as JSON, or 'reset' set to true"))
return
}
if req.Reset {
sealed, err := core.Sealed()
if err != nil {
respondError(w, http.StatusInternalServerError, err)
return
}
if !sealed {
respondError(w, http.StatusBadRequest, errors.New("vault is unsealed"))
return
}
core.ResetUnsealProcess()
} else {
// Decode the key, which is hex encoded
key, err := hex.DecodeString(req.Key)
if err != nil {
respondError(
w, http.StatusBadRequest,
errors.New("'key' must be a valid hex-string"))
return
}
// Attempt the unseal
if _, err := core.Unseal(key); err != nil {
// Ignore ErrInvalidKey because its a user error that we
// mask away. We just show them the seal status.
if !errwrap.ContainsType(err, new(vault.ErrInvalidKey)) {
respondError(w, http.StatusInternalServerError, err)
return
}
}
}
// Return the seal status
handleSysSealStatusRaw(core, w, r)
})
}
func handleSysSealStatus(core *vault.Core) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method != "GET" {
respondError(w, http.StatusMethodNotAllowed, nil)
return
}
handleSysSealStatusRaw(core, w, r)
})
}
func handleSysSealStatusRaw(core *vault.Core, w http.ResponseWriter, r *http.Request) {
sealed, err := core.Sealed()
if err != nil {
respondError(w, http.StatusInternalServerError, err)
return
}
sealConfig, err := core.SealAccess().BarrierConfig()
if err != nil {
respondError(w, http.StatusInternalServerError, err)
return
}
if sealConfig == nil {
respondError(w, http.StatusBadRequest, fmt.Errorf(
"server is not yet initialized"))
return
}
respondOk(w, &SealStatusResponse{
Sealed: sealed,
T: sealConfig.SecretThreshold,
N: sealConfig.SecretShares,
Progress: core.SecretProgress(),
})
}
type SealStatusResponse struct {
Sealed bool `json:"sealed"`
T int `json:"t"`
N int `json:"n"`
Progress int `json:"progress"`
}
type UnsealRequest struct {
Key string
Reset bool
}
| http/sys_seal.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00017362706421408802,
0.0001689459604676813,
0.0001654024381423369,
0.00016874137509148568,
0.000002527187007217435
] |
{
"id": 8,
"code_window": [
"\n",
"Note the above user would be able to access anything in db1. Please see the MySQL\n",
"manual on the GRANT command to learn how to do more fine grained access.\n",
"\n",
"The \"username_length\" parameter determines how many characters of the\n",
"role name will be used in creating the generated mysql username; the\n",
"default is 10. Note that mysql versions prior to 5.8 have a 16 character\n",
"total limit on usernames.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"The \"displayname_length\" parameter determines how many characters of the\n"
],
"file_path": "builtin/logical/mysql/path_roles.go",
"type": "replace",
"edit_start_line_idx": 176
} | package http
import (
"encoding/json"
"net/http"
"strconv"
"time"
"github.com/hashicorp/vault/vault"
)
func handleSysHealth(core *vault.Core) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case "GET":
handleSysHealthGet(core, w, r)
case "HEAD":
handleSysHealthHead(core, w, r)
default:
respondError(w, http.StatusMethodNotAllowed, nil)
}
})
}
func fetchStatusCode(r *http.Request, field string) (int, bool, bool) {
var err error
statusCode := http.StatusOK
if statusCodeStr, statusCodeOk := r.URL.Query()[field]; statusCodeOk {
statusCode, err = strconv.Atoi(statusCodeStr[0])
if err != nil || len(statusCodeStr) < 1 {
return http.StatusBadRequest, false, false
}
return statusCode, true, true
}
return statusCode, false, true
}
func handleSysHealthGet(core *vault.Core, w http.ResponseWriter, r *http.Request) {
code, body, err := getSysHealth(core, r)
if err != nil {
respondError(w, http.StatusInternalServerError, nil)
return
}
if body == nil {
respondError(w, code, nil)
return
}
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(code)
// Generate the response
enc := json.NewEncoder(w)
enc.Encode(body)
}
func handleSysHealthHead(core *vault.Core, w http.ResponseWriter, r *http.Request) {
code, body, err := getSysHealth(core, r)
if err != nil {
code = http.StatusInternalServerError
}
if body != nil {
w.Header().Add("Content-Type", "application/json")
}
w.WriteHeader(code)
}
func getSysHealth(core *vault.Core, r *http.Request) (int, *HealthResponse, error) {
// Check if being a standby is allowed for the purpose of a 200 OK
_, standbyOK := r.URL.Query()["standbyok"]
// FIXME: Change the sealed code to http.StatusServiceUnavailable at some
// point
sealedCode := http.StatusInternalServerError
if code, found, ok := fetchStatusCode(r, "sealedcode"); !ok {
return http.StatusBadRequest, nil, nil
} else if found {
sealedCode = code
}
standbyCode := http.StatusTooManyRequests // Consul warning code
if code, found, ok := fetchStatusCode(r, "standbycode"); !ok {
return http.StatusBadRequest, nil, nil
} else if found {
standbyCode = code
}
activeCode := http.StatusOK
if code, found, ok := fetchStatusCode(r, "activecode"); !ok {
return http.StatusBadRequest, nil, nil
} else if found {
activeCode = code
}
// Check system status
sealed, _ := core.Sealed()
standby, _ := core.Standby()
init, err := core.Initialized()
if err != nil {
return http.StatusInternalServerError, nil, err
}
// Determine the status code
code := activeCode
switch {
case !init:
code = http.StatusInternalServerError
case sealed:
code = sealedCode
case !standbyOK && standby:
code = standbyCode
}
// Format the body
body := &HealthResponse{
Initialized: init,
Sealed: sealed,
Standby: standby,
ServerTimeUTC: time.Now().UTC().Unix(),
}
return code, body, nil
}
type HealthResponse struct {
Initialized bool `json:"initialized"`
Sealed bool `json:"sealed"`
Standby bool `json:"standby"`
ServerTimeUTC int64 `json:"server_time_utc"`
}
| http/sys_health.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.002927495865151286,
0.00036606931826099753,
0.00016659463290125132,
0.0001691541401669383,
0.0007104137330316007
] |
{
"id": 9,
"code_window": [
"role name will be used in creating the generated mysql username; the\n",
"default is 10. Note that mysql versions prior to 5.8 have a 16 character\n",
"total limit on usernames.\n",
"`"
],
"labels": [
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\n",
"The \"username_length\" parameter determines how many total characters the\n",
"generated username (including both the displayname and the uuid portion) will\n",
"be truncated to. Versions of MySQL prior to 5.7.8 are limited to 16\n",
"characters total (see http://dev.mysql.com/doc/refman/5.7/en/user-names.html)\n",
"so that is the default; for versions >=5.7.8 it is safe to increase this\n",
"to 32.\n"
],
"file_path": "builtin/logical/mysql/path_roles.go",
"type": "add",
"edit_start_line_idx": 180
} | ---
layout: "docs"
page_title: "Secret Backend: MySQL"
sidebar_current: "docs-secrets-mysql"
description: |-
The MySQL secret backend for Vault generates database credentials to access MySQL.
---
# MySQL Secret Backend
Name: `mysql`
The MySQL secret backend for Vault generates database credentials
dynamically based on configured roles. This means that services that need
to access a database no longer need to hardcode credentials: they can request
them from Vault, and use Vault's leasing mechanism to more easily roll keys.
Additionally, it introduces a new ability: with every service accessing
the database with unique credentials, it makes auditing much easier when
questionable data access is discovered: you can track it down to the specific
instance of a service based on the SQL username.
Vault makes use of its own internal revocation system to ensure that users
become invalid within a reasonable time of the lease expiring.
This page will show a quick start for this backend. For detailed documentation
on every path, use `vault path-help` after mounting the backend.
## Quick Start
The first step to using the mysql backend is to mount it.
Unlike the `generic` backend, the `mysql` backend is not mounted by default.
```
$ vault mount mysql
Successfully mounted 'mysql' at 'mysql'!
```
Next, we must configure Vault to know how to connect to the MySQL
instance. This is done by providing a DSN (Data Source Name):
```
$ vault write mysql/config/connection \
connection_url="root:root@tcp(192.168.33.10:3306)/"
Success! Data written to: mysql/config/connection
```
In this case, we've configured Vault with the user "root" and password "root,
connecting to an instance at "192.168.33.10" on port 3306. It is not necessary
that Vault has the root user, but the user must have privileges to create
other users, namely the `GRANT OPTION` privilege.
Optionally, we can configure the lease settings for credentials generated
by Vault. This is done by writing to the `config/lease` key:
```
$ vault write mysql/config/lease \
lease=1h \
lease_max=24h
Success! Data written to: mysql/config/lease
```
This restricts each credential to being valid or leased for 1 hour
at a time, with a maximum use period of 24 hours. This forces an
application to renew their credentials at least hourly, and to recycle
them once per day.
The next step is to configure a role. A role is a logical name that maps
to a policy used to generate those credentials. For example, lets create
a "readonly" role:
```
$ vault write mysql/roles/readonly \
sql="CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';GRANT SELECT ON *.* TO '{{name}}'@'%';"
Success! Data written to: mysql/roles/readonly
```
By writing to the `roles/readonly` path we are defining the `readonly` role.
This role will be created by evaluating the given `sql` statements. By
default, the `{{name}}` and `{{password}}` fields will be populated by
Vault with dynamically generated values. This SQL statement is creating
the named user, and then granting it `SELECT` or read-only privileges
to tables in the database. More complex `GRANT` queries can be used to
customize the privileges of the role. See the [MySQL manual](https://dev.mysql.com/doc/refman/5.7/en/grant.html)
for more information.
To generate a new set of credentials, we simply read from that role:
```
$ vault read mysql/creds/readonly
Key Value
lease_id mysql/creds/readonly/bd404e98-0f35-b378-269a-b7770ef01897
lease_duration 3600
password 132ae3ef-5a64-7499-351e-bfe59f3a2a21
username root-aefa635a-18
```
By reading from the `creds/readonly` path, Vault has generated a new
set of credentials using the `readonly` role configuration. Here we
see the dynamically generated username and password, along with a one
hour lease.
Using ACLs, it is possible to restrict using the mysql backend such
that trusted operators can manage the role definitions, and both
users and applications are restricted in the credentials they are
allowed to read.
Optionally, you may configure the number of character from the role
name that are truncated to form the mysql usernamed interpolated into
the `{{name}}` field: the default is 10. Note that versions of
mysql prior to 5.8 have a 16 character total limit on user names, so
it is probably not safe to increase this above the default on versions
prior to that.
## API
### /mysql/config/connection
#### POST
<dl class="api">
<dt>Description</dt>
<dd>
Configures the connection DSN used to communicate with MySQL.
This is a root protected endpoint.
</dd>
<dt>Method</dt>
<dd>POST</dd>
<dt>URL</dt>
<dd>`/mysql/config/connection`</dd>
<dt>Parameters</dt>
<dd>
<ul>
<li>
<span class="param">connection_url</span>
<span class="param-flags">required</span>
The MySQL DSN
</li>
</ul>
</dd>
<dd>
<ul>
<li>
<span class="param">value</span>
<span class="param-flags">optional</span>
</li>
</ul>
</dd>
<dd>
<ul>
<li>
<span class="param">max_open_connections</span>
<span class="param-flags">optional</span>
Maximum number of open connections to the database.
Defaults to 2.
</li>
</ul>
</dd>
<dd>
<ul>
<li>
<span class="param">verify-connection</span>
<span class="param-flags">optional</span>
If set, connection_url is verified by actually connecting to the database.
Defaults to true.
</li>
</ul>
</dd>
<dt>Returns</dt>
<dd>
A `204` response code.
</dd>
</dl>
### /mysql/config/lease
#### POST
<dl class="api">
<dt>Description</dt>
<dd>
Configures the lease settings for generated credentials.
If not configured, leases default to 1 hour. This is a root
protected endpoint.
</dd>
<dt>Method</dt>
<dd>POST</dd>
<dt>URL</dt>
<dd>`/mysql/config/lease`</dd>
<dt>Parameters</dt>
<dd>
<ul>
<li>
<span class="param">lease</span>
<span class="param-flags">required</span>
The lease value provided as a string duration
with time suffix. Hour is the largest suffix.
</li>
<li>
<span class="param">lease_max</span>
<span class="param-flags">required</span>
The maximum lease value provided as a string duration
with time suffix. Hour is the largest suffix.
</li>
</ul>
</dd>
<dt>Returns</dt>
<dd>
A `204` response code.
</dd>
</dl>
### /mysql/roles/
#### POST
<dl class="api">
<dt>Description</dt>
<dd>
Creates or updates the role definition.
</dd>
<dt>Method</dt>
<dd>POST</dd>
<dt>URL</dt>
<dd>`/mysql/roles/<name>`</dd>
<dt>Parameters</dt>
<dd>
<ul>
<li>
<span class="param">sql</span>
<span class="param-flags">required</span>
The SQL statements executed to create and configure the role.
Must be semi-colon separated. The '{{name}}' and '{{password}}'
values will be substituted.
</li>
<li>
<span class="param">username_length</span>
<span class="param-flags">optional</span>
Determines how many characters from the role name will be used
to form the mysql username interpolated into the '{{name}}' field
of the sql parameter.
</li>
</ul>
</dd>
<dt>Returns</dt>
<dd>
A `204` response code.
</dd>
</dl>
#### GET
<dl class="api">
<dt>Description</dt>
<dd>
Queries the role definition.
</dd>
<dt>Method</dt>
<dd>GET</dd>
<dt>URL</dt>
<dd>`/mysql/roles/<name>`</dd>
<dt>Parameters</dt>
<dd>
None
</dd>
<dt>Returns</dt>
<dd>
```javascript
{
"data": {
"sql": "CREATE USER..."
}
}
```
</dd>
</dl>
#### LIST
<dl class="api">
<dt>Description</dt>
<dd>
Returns a list of available roles. Only the role names are returned, not
any values.
</dd>
<dt>Method</dt>
<dd>GET</dd>
<dt>URL</dt>
<dd>`/roles/?list=true`</dd>
<dt>Parameters</dt>
<dd>
None
</dd>
<dt>Returns</dt>
<dd>
```javascript
{
"auth": null,
"data": {
"keys": ["dev", "prod"]
},
"lease_duration": 2592000,
"lease_id": "",
"renewable": false
}
```
</dd>
</dl>
#### DELETE
<dl class="api">
<dt>Description</dt>
<dd>
Deletes the role definition.
</dd>
<dt>Method</dt>
<dd>DELETE</dd>
<dt>URL</dt>
<dd>`/mysql/roles/<name>`</dd>
<dt>Parameters</dt>
<dd>
None
</dd>
<dt>Returns</dt>
<dd>
A `204` response code.
</dd>
</dl>
### /mysql/creds/
#### GET
<dl class="api">
<dt>Description</dt>
<dd>
Generates a new set of dynamic credentials based on the named role.
</dd>
<dt>Method</dt>
<dd>GET</dd>
<dt>URL</dt>
<dd>`/mysql/creds/<name>`</dd>
<dt>Parameters</dt>
<dd>
None
</dd>
<dt>Returns</dt>
<dd>
```javascript
{
"data": {
"username": "root-aefa635a-18",
"password": "132ae3ef-5a64-7499-351e-bfe59f3a2a21"
}
}
```
</dd>
</dl>
| website/source/docs/secrets/mysql/index.html.md | 1 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.05840200558304787,
0.0025841062888503075,
0.0001662215800024569,
0.00027838716050609946,
0.009753813035786152
] |
{
"id": 9,
"code_window": [
"role name will be used in creating the generated mysql username; the\n",
"default is 10. Note that mysql versions prior to 5.8 have a 16 character\n",
"total limit on usernames.\n",
"`"
],
"labels": [
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\n",
"The \"username_length\" parameter determines how many total characters the\n",
"generated username (including both the displayname and the uuid portion) will\n",
"be truncated to. Versions of MySQL prior to 5.7.8 are limited to 16\n",
"characters total (see http://dev.mysql.com/doc/refman/5.7/en/user-names.html)\n",
"so that is the default; for versions >=5.7.8 it is safe to increase this\n",
"to 32.\n"
],
"file_path": "builtin/logical/mysql/path_roles.go",
"type": "add",
"edit_start_line_idx": 180
} | // Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"encoding/binary"
"io"
)
// LiteralData represents an encrypted file. See RFC 4880, section 5.9.
type LiteralData struct {
IsBinary bool
FileName string
Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined.
Body io.Reader
}
// ForEyesOnly returns whether the contents of the LiteralData have been marked
// as especially sensitive.
func (l *LiteralData) ForEyesOnly() bool {
return l.FileName == "_CONSOLE"
}
func (l *LiteralData) parse(r io.Reader) (err error) {
var buf [256]byte
_, err = readFull(r, buf[:2])
if err != nil {
return
}
l.IsBinary = buf[0] == 'b'
fileNameLen := int(buf[1])
_, err = readFull(r, buf[:fileNameLen])
if err != nil {
return
}
l.FileName = string(buf[:fileNameLen])
_, err = readFull(r, buf[:4])
if err != nil {
return
}
l.Time = binary.BigEndian.Uint32(buf[:4])
l.Body = r
return
}
// SerializeLiteral serializes a literal data packet to w and returns a
// WriteCloser to which the data itself can be written and which MUST be closed
// on completion. The fileName is truncated to 255 bytes.
func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) {
var buf [4]byte
buf[0] = 't'
if isBinary {
buf[0] = 'b'
}
if len(fileName) > 255 {
fileName = fileName[:255]
}
buf[1] = byte(len(fileName))
inner, err := serializeStreamHeader(w, packetTypeLiteralData)
if err != nil {
return
}
_, err = inner.Write(buf[:2])
if err != nil {
return
}
_, err = inner.Write([]byte(fileName))
if err != nil {
return
}
binary.BigEndian.PutUint32(buf[:], time)
_, err = inner.Write(buf[:])
if err != nil {
return
}
plaintext = inner
return
}
| vendor/golang.org/x/crypto/openpgp/packet/literal.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00017623556777834892,
0.00016955638420768082,
0.00016555865295231342,
0.00016952822625171393,
0.000003115330628133961
] |
{
"id": 9,
"code_window": [
"role name will be used in creating the generated mysql username; the\n",
"default is 10. Note that mysql versions prior to 5.8 have a 16 character\n",
"total limit on usernames.\n",
"`"
],
"labels": [
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\n",
"The \"username_length\" parameter determines how many total characters the\n",
"generated username (including both the displayname and the uuid portion) will\n",
"be truncated to. Versions of MySQL prior to 5.7.8 are limited to 16\n",
"characters total (see http://dev.mysql.com/doc/refman/5.7/en/user-names.html)\n",
"so that is the default; for versions >=5.7.8 it is safe to increase this\n",
"to 32.\n"
],
"file_path": "builtin/logical/mysql/path_roles.go",
"type": "add",
"edit_start_line_idx": 180
} | Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.
| vendor/github.com/hashicorp/golang-lru/LICENSE | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00017629389185458422,
0.0001723604218568653,
0.00016621682152617723,
0.0001729834038997069,
0.0000027892770049220417
] |
{
"id": 9,
"code_window": [
"role name will be used in creating the generated mysql username; the\n",
"default is 10. Note that mysql versions prior to 5.8 have a 16 character\n",
"total limit on usernames.\n",
"`"
],
"labels": [
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\n",
"The \"username_length\" parameter determines how many total characters the\n",
"generated username (including both the displayname and the uuid portion) will\n",
"be truncated to. Versions of MySQL prior to 5.7.8 are limited to 16\n",
"characters total (see http://dev.mysql.com/doc/refman/5.7/en/user-names.html)\n",
"so that is the default; for versions >=5.7.8 it is safe to increase this\n",
"to 32.\n"
],
"file_path": "builtin/logical/mysql/path_roles.go",
"type": "add",
"edit_start_line_idx": 180
} | The MIT License (MIT)
Copyright (c) 2013 HashiCorp, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
| vendor/github.com/hashicorp/net-rpc-msgpackrpc/LICENSE.md | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00017246825154870749,
0.000169709965121001,
0.00016742380103096366,
0.00016923782823141664,
0.000002086272843371262
] |
{
"id": 10,
"code_window": [
"$ vault read mysql/creds/readonly\n",
"Key \tValue\n",
"lease_id \tmysql/creds/readonly/bd404e98-0f35-b378-269a-b7770ef01897\n",
"lease_duration\t3600\n",
"password \t132ae3ef-5a64-7499-351e-bfe59f3a2a21\n",
"username \troot-aefa635a-18\n",
"```\n",
"\n",
"By reading from the `creds/readonly` path, Vault has generated a new\n",
"set of credentials using the `readonly` role configuration. Here we\n",
"see the dynamically generated username and password, along with a one\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"username \treadonly-aefa635a-18\n"
],
"file_path": "website/source/docs/secrets/mysql/index.html.md",
"type": "replace",
"edit_start_line_idx": 94
} | package mysql
import (
"fmt"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
_ "github.com/lib/pq"
)
func pathRoleCreate(b *backend) *framework.Path {
return &framework.Path{
Pattern: "creds/" + framework.GenericNameRegex("name"),
Fields: map[string]*framework.FieldSchema{
"name": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Name of the role.",
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ReadOperation: b.pathRoleCreateRead,
},
HelpSynopsis: pathRoleCreateReadHelpSyn,
HelpDescription: pathRoleCreateReadHelpDesc,
}
}
func (b *backend) pathRoleCreateRead(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
name := data.Get("name").(string)
var usernameLength int
// Get the role
role, err := b.Role(req.Storage, name)
if err != nil {
return nil, err
}
if role == nil {
return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil
}
// Determine if we have a lease
lease, err := b.Lease(req.Storage)
if err != nil {
return nil, err
}
if lease == nil {
lease = &configLease{}
}
// Generate our username and password. MySQL limits user to 16 characters
displayName := name
ul, ok := data.GetOk("username_length")
if ok == true {
usernameLength = ul.(int)
} else {
usernameLength = 10
}
if len(displayName) > usernameLength {
displayName = displayName[:usernameLength]
}
userUUID, err := uuid.GenerateUUID()
if err != nil {
return nil, err
}
username := fmt.Sprintf("%s-%s", displayName, userUUID)
if len(username) > 16 {
username = username[:16]
}
password, err := uuid.GenerateUUID()
if err != nil {
return nil, err
}
// Get our handle
db, err := b.DB(req.Storage)
if err != nil {
return nil, err
}
// Start a transaction
tx, err := db.Begin()
if err != nil {
return nil, err
}
defer tx.Rollback()
// Execute each query
for _, query := range SplitSQL(role.SQL) {
stmt, err := tx.Prepare(Query(query, map[string]string{
"name": username,
"password": password,
}))
if err != nil {
return nil, err
}
defer stmt.Close()
if _, err := stmt.Exec(); err != nil {
return nil, err
}
}
// Commit the transaction
if err := tx.Commit(); err != nil {
return nil, err
}
// Return the secret
resp := b.Secret(SecretCredsType).Response(map[string]interface{}{
"username": username,
"password": password,
}, map[string]interface{}{
"username": username,
})
resp.Secret.TTL = lease.Lease
return resp, nil
}
const pathRoleCreateReadHelpSyn = `
Request database credentials for a certain role.
`
const pathRoleCreateReadHelpDesc = `
This path reads database credentials for a certain role. The
database credentials will be generated on demand and will be automatically
revoked when the lease is up.
`
| builtin/logical/mysql/path_role_create.go | 1 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.9829555153846741,
0.07099568098783493,
0.00016234676877502352,
0.0005146031617186964,
0.25293344259262085
] |
{
"id": 10,
"code_window": [
"$ vault read mysql/creds/readonly\n",
"Key \tValue\n",
"lease_id \tmysql/creds/readonly/bd404e98-0f35-b378-269a-b7770ef01897\n",
"lease_duration\t3600\n",
"password \t132ae3ef-5a64-7499-351e-bfe59f3a2a21\n",
"username \troot-aefa635a-18\n",
"```\n",
"\n",
"By reading from the `creds/readonly` path, Vault has generated a new\n",
"set of credentials using the `readonly` role configuration. Here we\n",
"see the dynamically generated username and password, along with a one\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"username \treadonly-aefa635a-18\n"
],
"file_path": "website/source/docs/secrets/mysql/index.html.md",
"type": "replace",
"edit_start_line_idx": 94
} | // Code generated by protoc-gen-go.
// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
// DO NOT EDIT!
/*
Package urlfetch is a generated protocol buffer package.
It is generated from these files:
google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
It has these top-level messages:
URLFetchServiceError
URLFetchRequest
URLFetchResponse
*/
package urlfetch
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
type URLFetchServiceError_ErrorCode int32
const (
URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0
URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1
URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2
URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3
URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4
URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5
URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6
URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7
URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8
URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9
URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10
URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11
URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12
)
var URLFetchServiceError_ErrorCode_name = map[int32]string{
0: "OK",
1: "INVALID_URL",
2: "FETCH_ERROR",
3: "UNSPECIFIED_ERROR",
4: "RESPONSE_TOO_LARGE",
5: "DEADLINE_EXCEEDED",
6: "SSL_CERTIFICATE_ERROR",
7: "DNS_ERROR",
8: "CLOSED",
9: "INTERNAL_TRANSIENT_ERROR",
10: "TOO_MANY_REDIRECTS",
11: "MALFORMED_REPLY",
12: "CONNECTION_ERROR",
}
var URLFetchServiceError_ErrorCode_value = map[string]int32{
"OK": 0,
"INVALID_URL": 1,
"FETCH_ERROR": 2,
"UNSPECIFIED_ERROR": 3,
"RESPONSE_TOO_LARGE": 4,
"DEADLINE_EXCEEDED": 5,
"SSL_CERTIFICATE_ERROR": 6,
"DNS_ERROR": 7,
"CLOSED": 8,
"INTERNAL_TRANSIENT_ERROR": 9,
"TOO_MANY_REDIRECTS": 10,
"MALFORMED_REPLY": 11,
"CONNECTION_ERROR": 12,
}
func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode {
p := new(URLFetchServiceError_ErrorCode)
*p = x
return p
}
func (x URLFetchServiceError_ErrorCode) String() string {
return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x))
}
func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode")
if err != nil {
return err
}
*x = URLFetchServiceError_ErrorCode(value)
return nil
}
type URLFetchRequest_RequestMethod int32
const (
URLFetchRequest_GET URLFetchRequest_RequestMethod = 1
URLFetchRequest_POST URLFetchRequest_RequestMethod = 2
URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3
URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4
URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5
URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6
)
var URLFetchRequest_RequestMethod_name = map[int32]string{
1: "GET",
2: "POST",
3: "HEAD",
4: "PUT",
5: "DELETE",
6: "PATCH",
}
var URLFetchRequest_RequestMethod_value = map[string]int32{
"GET": 1,
"POST": 2,
"HEAD": 3,
"PUT": 4,
"DELETE": 5,
"PATCH": 6,
}
func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod {
p := new(URLFetchRequest_RequestMethod)
*p = x
return p
}
func (x URLFetchRequest_RequestMethod) String() string {
return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x))
}
func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod")
if err != nil {
return err
}
*x = URLFetchRequest_RequestMethod(value)
return nil
}
type URLFetchServiceError struct {
XXX_unrecognized []byte `json:"-"`
}
func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} }
func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) }
func (*URLFetchServiceError) ProtoMessage() {}
type URLFetchRequest struct {
Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"`
Url *string `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"`
Header []*URLFetchRequest_Header `protobuf:"group,3,rep,name=Header" json:"header,omitempty"`
Payload []byte `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"`
FollowRedirects *bool `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"`
Deadline *float64 `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"`
MustValidateServerCertificate *bool `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} }
func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) }
func (*URLFetchRequest) ProtoMessage() {}
const Default_URLFetchRequest_FollowRedirects bool = true
const Default_URLFetchRequest_MustValidateServerCertificate bool = true
func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod {
if m != nil && m.Method != nil {
return *m.Method
}
return URLFetchRequest_GET
}
func (m *URLFetchRequest) GetUrl() string {
if m != nil && m.Url != nil {
return *m.Url
}
return ""
}
func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header {
if m != nil {
return m.Header
}
return nil
}
func (m *URLFetchRequest) GetPayload() []byte {
if m != nil {
return m.Payload
}
return nil
}
func (m *URLFetchRequest) GetFollowRedirects() bool {
if m != nil && m.FollowRedirects != nil {
return *m.FollowRedirects
}
return Default_URLFetchRequest_FollowRedirects
}
func (m *URLFetchRequest) GetDeadline() float64 {
if m != nil && m.Deadline != nil {
return *m.Deadline
}
return 0
}
func (m *URLFetchRequest) GetMustValidateServerCertificate() bool {
if m != nil && m.MustValidateServerCertificate != nil {
return *m.MustValidateServerCertificate
}
return Default_URLFetchRequest_MustValidateServerCertificate
}
type URLFetchRequest_Header struct {
Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} }
func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) }
func (*URLFetchRequest_Header) ProtoMessage() {}
func (m *URLFetchRequest_Header) GetKey() string {
if m != nil && m.Key != nil {
return *m.Key
}
return ""
}
func (m *URLFetchRequest_Header) GetValue() string {
if m != nil && m.Value != nil {
return *m.Value
}
return ""
}
type URLFetchResponse struct {
Content []byte `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"`
StatusCode *int32 `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"`
Header []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header" json:"header,omitempty"`
ContentWasTruncated *bool `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"`
ExternalBytesSent *int64 `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"`
ExternalBytesReceived *int64 `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"`
FinalUrl *string `protobuf:"bytes,9,opt,name=FinalUrl" json:"FinalUrl,omitempty"`
ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"`
ApiBytesSent *int64 `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"`
ApiBytesReceived *int64 `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} }
func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) }
func (*URLFetchResponse) ProtoMessage() {}
const Default_URLFetchResponse_ContentWasTruncated bool = false
const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0
const Default_URLFetchResponse_ApiBytesSent int64 = 0
const Default_URLFetchResponse_ApiBytesReceived int64 = 0
func (m *URLFetchResponse) GetContent() []byte {
if m != nil {
return m.Content
}
return nil
}
func (m *URLFetchResponse) GetStatusCode() int32 {
if m != nil && m.StatusCode != nil {
return *m.StatusCode
}
return 0
}
func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header {
if m != nil {
return m.Header
}
return nil
}
func (m *URLFetchResponse) GetContentWasTruncated() bool {
if m != nil && m.ContentWasTruncated != nil {
return *m.ContentWasTruncated
}
return Default_URLFetchResponse_ContentWasTruncated
}
func (m *URLFetchResponse) GetExternalBytesSent() int64 {
if m != nil && m.ExternalBytesSent != nil {
return *m.ExternalBytesSent
}
return 0
}
func (m *URLFetchResponse) GetExternalBytesReceived() int64 {
if m != nil && m.ExternalBytesReceived != nil {
return *m.ExternalBytesReceived
}
return 0
}
func (m *URLFetchResponse) GetFinalUrl() string {
if m != nil && m.FinalUrl != nil {
return *m.FinalUrl
}
return ""
}
func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 {
if m != nil && m.ApiCpuMilliseconds != nil {
return *m.ApiCpuMilliseconds
}
return Default_URLFetchResponse_ApiCpuMilliseconds
}
func (m *URLFetchResponse) GetApiBytesSent() int64 {
if m != nil && m.ApiBytesSent != nil {
return *m.ApiBytesSent
}
return Default_URLFetchResponse_ApiBytesSent
}
func (m *URLFetchResponse) GetApiBytesReceived() int64 {
if m != nil && m.ApiBytesReceived != nil {
return *m.ApiBytesReceived
}
return Default_URLFetchResponse_ApiBytesReceived
}
type URLFetchResponse_Header struct {
Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} }
func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) }
func (*URLFetchResponse_Header) ProtoMessage() {}
func (m *URLFetchResponse_Header) GetKey() string {
if m != nil && m.Key != nil {
return *m.Key
}
return ""
}
func (m *URLFetchResponse_Header) GetValue() string {
if m != nil && m.Value != nil {
return *m.Value
}
return ""
}
func init() {
}
| vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.010976677760481834,
0.0005902184057049453,
0.00016214055358432233,
0.00016889341350179166,
0.0017938562668859959
] |
{
"id": 10,
"code_window": [
"$ vault read mysql/creds/readonly\n",
"Key \tValue\n",
"lease_id \tmysql/creds/readonly/bd404e98-0f35-b378-269a-b7770ef01897\n",
"lease_duration\t3600\n",
"password \t132ae3ef-5a64-7499-351e-bfe59f3a2a21\n",
"username \troot-aefa635a-18\n",
"```\n",
"\n",
"By reading from the `creds/readonly` path, Vault has generated a new\n",
"set of credentials using the `readonly` role configuration. Here we\n",
"see the dynamically generated username and password, along with a one\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"username \treadonly-aefa635a-18\n"
],
"file_path": "website/source/docs/secrets/mysql/index.html.md",
"type": "replace",
"edit_start_line_idx": 94
} | ---
layout: "http"
page_title: "HTTP API: /sys/generate-root/"
sidebar_current: "docs-http-sys-generate-root"
description: |-
The `/sys/generate-root/` endpoints are used to create a new root key for Vault.
---
# /sys/generate-root/attempt
## GET
<dl>
<dt>Description</dt>
<dd>
Reads the configuration and progress of the current root generation
attempt.
</dd>
<dt>Method</dt>
<dd>GET</dd>
<dt>URL</dt>
<dd>`/sys/generate-root/attempt`</dd>
<dt>Parameters</dt>
<dd>
None
</dd>
<dt>Returns</dt>
<dd>
If a root generation is started, `progress` is how many unseal keys have
been provided for this generation attempt, where `required` must be reached
to complete. The `nonce` for the current attempt and whether the attempt is
complete is also displayed. If a PGP key is being used to encrypt the final
root token, its fingerprint will be returned. Note that if an OTP is being
used to encode the final root token, it will never be returned.
```javascript
{
"started": true,
"nonce": "2dbd10f1-8528-6246-09e7-82b25b8aba63",
"progress": 1,
"required": 3,
"pgp_fingerprint": "",
"complete": false
}
```
</dd>
</dl>
## PUT
<dl>
<dt>Description</dt>
<dd>
Initializes a new root generation attempt. Only a single root generation
attempt can take place at a time. One (and only one) of `otp` or `pgp_key`
are required.
</dd>
<dt>Method</dt>
<dd>PUT</dd>
<dt>URL</dt>
<dd>`/sys/generate-root/attempt`</dd>
<dt>Parameters</dt>
<dd>
<ul>
<li>
<span class="param">otp</span>
<span class="param-flags">optional</span>
A base64-encoded 16-byte value. The raw bytes of the token will be
XOR'd with this value before being returned to the final unseal key
provider.
</li>
<li>
<span class="param">pgp_key</span>
<span class="param-flags">optional</span>
A base64-encoded PGP public key. The raw bytes of the token will be
encrypted with this value before being returned to the final unseal key
provider.
</li>
</ul>
</dd>
<dt>Returns</dt>
<dd>
The current progress.
```javascript
{
"started": true,
"nonce": "2dbd10f1-8528-6246-09e7-82b25b8aba63",
"progress": 1,
"required": 3,
"pgp_fingerprint": "816938b8a29146fbe245dd29e7cbaf8e011db793",
"complete": false
}
```
</dd>
</dl>
## DELETE
<dl>
<dt>Description</dt>
<dd>
Cancels any in-progress root generation attempt. This clears any progress
made. This must be called to change the OTP or PGP key being used.
</dd>
<dt>Method</dt>
<dd>DELETE</dd>
<dt>URL</dt>
<dd>`/sys/generate-root/attempt`</dd>
<dt>Parameters</dt>
<dd>None
</dd>
<dt>Returns</dt>
<dd>`204` response code.
</dd>
</dl>
# /sys/generate-root/update
## PUT
<dl>
<dt>Description</dt>
<dd>
Enter a single master key share to progress the root generation attempt.
If the threshold number of master key shares is reached, Vault will
complete the root generation and issue the new token. Otherwise, this API
must be called multiple times until that threshold is met. The attempt
nonce must be provided with each call.
</dd>
<dt>Method</dt>
<dd>PUT</dd>
<dt>URL</dt>
<dd>`/sys/generate-root/update`</dd>
<dt>Parameters</dt>
<dd>
<ul>
<li>
<span class="param">key</span>
<span class="param-flags">required</span>
A single master share key.
</li>
<li>
<span class="param">nonce</span>
<span class="param-flags">required</span>
The nonce of the attempt.
</li>
</ul>
</dd>
<dt>Returns</dt>
<dd>
A JSON-encoded object indicating the attempt nonce, and completion status,
and the encoded root token, if the attempt is complete.
```javascript
{
"started": true,
"nonce": "2dbd10f1-8528-6246-09e7-82b25b8aba63",
"progress": 3,
"required": 3,
"pgp_fingerprint": "",
"complete": true,
"encoded_root_token": "FPzkNBvwNDeFh4SmGA8c+w=="
}
```
</dd>
</dl>
| website/source/docs/http/sys-generate-root.html.md | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.0005232452531345189,
0.00018694580649025738,
0.00016245795995928347,
0.00016649870667606592,
0.00007951291627250612
] |
{
"id": 10,
"code_window": [
"$ vault read mysql/creds/readonly\n",
"Key \tValue\n",
"lease_id \tmysql/creds/readonly/bd404e98-0f35-b378-269a-b7770ef01897\n",
"lease_duration\t3600\n",
"password \t132ae3ef-5a64-7499-351e-bfe59f3a2a21\n",
"username \troot-aefa635a-18\n",
"```\n",
"\n",
"By reading from the `creds/readonly` path, Vault has generated a new\n",
"set of credentials using the `readonly` role configuration. Here we\n",
"see the dynamically generated username and password, along with a one\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"username \treadonly-aefa635a-18\n"
],
"file_path": "website/source/docs/secrets/mysql/index.html.md",
"type": "replace",
"edit_start_line_idx": 94
} | // Copyright 2014 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package internal
// This file has code for accessing metadata.
//
// References:
// https://cloud.google.com/compute/docs/metadata
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
)
const (
metadataHost = "metadata"
metadataPath = "/computeMetadata/v1/"
)
var (
metadataRequestHeaders = http.Header{
"Metadata-Flavor": []string{"Google"},
}
)
// TODO(dsymonds): Do we need to support default values, like Python?
func mustGetMetadata(key string) []byte {
b, err := getMetadata(key)
if err != nil {
log.Fatalf("Metadata fetch failed: %v", err)
}
return b
}
func getMetadata(key string) ([]byte, error) {
// TODO(dsymonds): May need to use url.Parse to support keys with query args.
req := &http.Request{
Method: "GET",
URL: &url.URL{
Scheme: "http",
Host: metadataHost,
Path: metadataPath + key,
},
Header: metadataRequestHeaders,
Host: metadataHost,
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode)
}
return ioutil.ReadAll(resp.Body)
}
| vendor/google.golang.org/appengine/internal/metadata.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00017439611838199198,
0.00016780495934654027,
0.0001628354366403073,
0.0001671405480010435,
0.000004293780875741504
] |
{
"id": 11,
"code_window": [
"Using ACLs, it is possible to restrict using the mysql backend such\n",
"that trusted operators can manage the role definitions, and both\n",
"users and applications are restricted in the credentials they are\n",
"allowed to read.\n",
"\n",
"Optionally, you may configure the number of character from the role\n",
"name that are truncated to form the mysql usernamed interpolated into\n",
"the `{{name}}` field: the default is 10. Note that versions of\n",
"mysql prior to 5.8 have a 16 character total limit on user names, so\n",
"it is probably not safe to increase this above the default on versions\n",
"prior to that.\n",
"\n",
"## API\n",
"\n",
"### /mysql/config/connection\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"Optionally, you may configure both the number of characters from the role name\n",
"that are truncated to form the display name portion of the mysql username\n",
"interpolated into the `{{name}}` field: the default is 10. \n",
"\n",
"You may also configure the total number of characters allowed in the entire\n",
"generated username (the sum of the display name and uuid poritions); the\n",
"default is 16. Note that versions of MySQL prior to 5.8 have a 16 character\n",
"total limit on user names, so it is probably not safe to increase this above\n",
"the default on versions prior to that.\n"
],
"file_path": "website/source/docs/secrets/mysql/index.html.md",
"type": "replace",
"edit_start_line_idx": 107
} | package mysql
import (
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
func pathListRoles(b *backend) *framework.Path {
return &framework.Path{
Pattern: "roles/?$",
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ListOperation: b.pathRoleList,
},
HelpSynopsis: pathRoleHelpSyn,
HelpDescription: pathRoleHelpDesc,
}
}
func pathRoles(b *backend) *framework.Path {
return &framework.Path{
Pattern: "roles/" + framework.GenericNameRegex("name"),
Fields: map[string]*framework.FieldSchema{
"name": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Name of the role.",
},
"sql": &framework.FieldSchema{
Type: framework.TypeString,
Description: "SQL string to create a user. See help for more info.",
},
"username_length": &framework.FieldSchema{
Type: framework.TypeInt,
Description: "number of characters to truncate generated mysql usernames to (default 10)",
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ReadOperation: b.pathRoleRead,
logical.UpdateOperation: b.pathRoleCreate,
logical.DeleteOperation: b.pathRoleDelete,
},
HelpSynopsis: pathRoleHelpSyn,
HelpDescription: pathRoleHelpDesc,
}
}
func (b *backend) Role(s logical.Storage, n string) (*roleEntry, error) {
entry, err := s.Get("role/" + n)
if err != nil {
return nil, err
}
if entry == nil {
return nil, nil
}
var result roleEntry
if err := entry.DecodeJSON(&result); err != nil {
return nil, err
}
return &result, nil
}
func (b *backend) pathRoleDelete(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
err := req.Storage.Delete("role/" + data.Get("name").(string))
if err != nil {
return nil, err
}
return nil, nil
}
func (b *backend) pathRoleRead(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
role, err := b.Role(req.Storage, data.Get("name").(string))
if err != nil {
return nil, err
}
if role == nil {
return nil, nil
}
return &logical.Response{
Data: map[string]interface{}{
"sql": role.SQL,
},
}, nil
}
func (b *backend) pathRoleList(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
entries, err := req.Storage.List("role/")
if err != nil {
return nil, err
}
return logical.ListResponse(entries), nil
}
func (b *backend) pathRoleCreate(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
name := data.Get("name").(string)
sql := data.Get("sql").(string)
username_length := data.Get("username_length").(int)
// Get our connection
db, err := b.DB(req.Storage)
if err != nil {
return nil, err
}
// Test the query by trying to prepare it
for _, query := range SplitSQL(sql) {
stmt, err := db.Prepare(Query(query, map[string]string{
"name": "foo",
"password": "bar",
}))
if err != nil {
return logical.ErrorResponse(fmt.Sprintf(
"Error testing query: %s", err)), nil
}
stmt.Close()
}
// Store it
entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{
SQL: sql,
USERNAME_LENGTH: username_length,
})
if err != nil {
return nil, err
}
if err := req.Storage.Put(entry); err != nil {
return nil, err
}
return nil, nil
}
type roleEntry struct {
SQL string `json:"sql"`
USERNAME_LENGTH int `json:"username_length"`
}
const pathRoleHelpSyn = `
Manage the roles that can be created with this backend.
`
const pathRoleHelpDesc = `
This path lets you manage the roles that can be created with this backend.
The "sql" parameter customizes the SQL string used to create the role.
This can be a sequence of SQL queries, each semi-colon seperated. Some
substitution will be done to the SQL string for certain keys.
The names of the variables must be surrounded by "{{" and "}}" to be replaced.
* "name" - The random username generated for the DB user.
* "password" - The random password generated for the DB user.
Example of a decent SQL query to use:
CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';
GRANT ALL ON db1.* TO '{{name}}'@'%';
Note the above user would be able to access anything in db1. Please see the MySQL
manual on the GRANT command to learn how to do more fine grained access.
The "username_length" parameter determines how many characters of the
role name will be used in creating the generated mysql username; the
default is 10. Note that mysql versions prior to 5.8 have a 16 character
total limit on usernames.
`
| builtin/logical/mysql/path_roles.go | 1 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.004181437660008669,
0.0008436238276772201,
0.00016410156968049705,
0.00035293909604661167,
0.0011189557844772935
] |
{
"id": 11,
"code_window": [
"Using ACLs, it is possible to restrict using the mysql backend such\n",
"that trusted operators can manage the role definitions, and both\n",
"users and applications are restricted in the credentials they are\n",
"allowed to read.\n",
"\n",
"Optionally, you may configure the number of character from the role\n",
"name that are truncated to form the mysql usernamed interpolated into\n",
"the `{{name}}` field: the default is 10. Note that versions of\n",
"mysql prior to 5.8 have a 16 character total limit on user names, so\n",
"it is probably not safe to increase this above the default on versions\n",
"prior to that.\n",
"\n",
"## API\n",
"\n",
"### /mysql/config/connection\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"Optionally, you may configure both the number of characters from the role name\n",
"that are truncated to form the display name portion of the mysql username\n",
"interpolated into the `{{name}}` field: the default is 10. \n",
"\n",
"You may also configure the total number of characters allowed in the entire\n",
"generated username (the sum of the display name and uuid poritions); the\n",
"default is 16. Note that versions of MySQL prior to 5.8 have a 16 character\n",
"total limit on user names, so it is probably not safe to increase this above\n",
"the default on versions prior to that.\n"
],
"file_path": "website/source/docs/secrets/mysql/index.html.md",
"type": "replace",
"edit_start_line_idx": 107
} | package mssql
var cp1251 *charsetMap = &charsetMap{
sb: [256]rune{
0x0000, //NULL
0x0001, //START OF HEADING
0x0002, //START OF TEXT
0x0003, //END OF TEXT
0x0004, //END OF TRANSMISSION
0x0005, //ENQUIRY
0x0006, //ACKNOWLEDGE
0x0007, //BELL
0x0008, //BACKSPACE
0x0009, //HORIZONTAL TABULATION
0x000A, //LINE FEED
0x000B, //VERTICAL TABULATION
0x000C, //FORM FEED
0x000D, //CARRIAGE RETURN
0x000E, //SHIFT OUT
0x000F, //SHIFT IN
0x0010, //DATA LINK ESCAPE
0x0011, //DEVICE CONTROL ONE
0x0012, //DEVICE CONTROL TWO
0x0013, //DEVICE CONTROL THREE
0x0014, //DEVICE CONTROL FOUR
0x0015, //NEGATIVE ACKNOWLEDGE
0x0016, //SYNCHRONOUS IDLE
0x0017, //END OF TRANSMISSION BLOCK
0x0018, //CANCEL
0x0019, //END OF MEDIUM
0x001A, //SUBSTITUTE
0x001B, //ESCAPE
0x001C, //FILE SEPARATOR
0x001D, //GROUP SEPARATOR
0x001E, //RECORD SEPARATOR
0x001F, //UNIT SEPARATOR
0x0020, //SPACE
0x0021, //EXCLAMATION MARK
0x0022, //QUOTATION MARK
0x0023, //NUMBER SIGN
0x0024, //DOLLAR SIGN
0x0025, //PERCENT SIGN
0x0026, //AMPERSAND
0x0027, //APOSTROPHE
0x0028, //LEFT PARENTHESIS
0x0029, //RIGHT PARENTHESIS
0x002A, //ASTERISK
0x002B, //PLUS SIGN
0x002C, //COMMA
0x002D, //HYPHEN-MINUS
0x002E, //FULL STOP
0x002F, //SOLIDUS
0x0030, //DIGIT ZERO
0x0031, //DIGIT ONE
0x0032, //DIGIT TWO
0x0033, //DIGIT THREE
0x0034, //DIGIT FOUR
0x0035, //DIGIT FIVE
0x0036, //DIGIT SIX
0x0037, //DIGIT SEVEN
0x0038, //DIGIT EIGHT
0x0039, //DIGIT NINE
0x003A, //COLON
0x003B, //SEMICOLON
0x003C, //LESS-THAN SIGN
0x003D, //EQUALS SIGN
0x003E, //GREATER-THAN SIGN
0x003F, //QUESTION MARK
0x0040, //COMMERCIAL AT
0x0041, //LATIN CAPITAL LETTER A
0x0042, //LATIN CAPITAL LETTER B
0x0043, //LATIN CAPITAL LETTER C
0x0044, //LATIN CAPITAL LETTER D
0x0045, //LATIN CAPITAL LETTER E
0x0046, //LATIN CAPITAL LETTER F
0x0047, //LATIN CAPITAL LETTER G
0x0048, //LATIN CAPITAL LETTER H
0x0049, //LATIN CAPITAL LETTER I
0x004A, //LATIN CAPITAL LETTER J
0x004B, //LATIN CAPITAL LETTER K
0x004C, //LATIN CAPITAL LETTER L
0x004D, //LATIN CAPITAL LETTER M
0x004E, //LATIN CAPITAL LETTER N
0x004F, //LATIN CAPITAL LETTER O
0x0050, //LATIN CAPITAL LETTER P
0x0051, //LATIN CAPITAL LETTER Q
0x0052, //LATIN CAPITAL LETTER R
0x0053, //LATIN CAPITAL LETTER S
0x0054, //LATIN CAPITAL LETTER T
0x0055, //LATIN CAPITAL LETTER U
0x0056, //LATIN CAPITAL LETTER V
0x0057, //LATIN CAPITAL LETTER W
0x0058, //LATIN CAPITAL LETTER X
0x0059, //LATIN CAPITAL LETTER Y
0x005A, //LATIN CAPITAL LETTER Z
0x005B, //LEFT SQUARE BRACKET
0x005C, //REVERSE SOLIDUS
0x005D, //RIGHT SQUARE BRACKET
0x005E, //CIRCUMFLEX ACCENT
0x005F, //LOW LINE
0x0060, //GRAVE ACCENT
0x0061, //LATIN SMALL LETTER A
0x0062, //LATIN SMALL LETTER B
0x0063, //LATIN SMALL LETTER C
0x0064, //LATIN SMALL LETTER D
0x0065, //LATIN SMALL LETTER E
0x0066, //LATIN SMALL LETTER F
0x0067, //LATIN SMALL LETTER G
0x0068, //LATIN SMALL LETTER H
0x0069, //LATIN SMALL LETTER I
0x006A, //LATIN SMALL LETTER J
0x006B, //LATIN SMALL LETTER K
0x006C, //LATIN SMALL LETTER L
0x006D, //LATIN SMALL LETTER M
0x006E, //LATIN SMALL LETTER N
0x006F, //LATIN SMALL LETTER O
0x0070, //LATIN SMALL LETTER P
0x0071, //LATIN SMALL LETTER Q
0x0072, //LATIN SMALL LETTER R
0x0073, //LATIN SMALL LETTER S
0x0074, //LATIN SMALL LETTER T
0x0075, //LATIN SMALL LETTER U
0x0076, //LATIN SMALL LETTER V
0x0077, //LATIN SMALL LETTER W
0x0078, //LATIN SMALL LETTER X
0x0079, //LATIN SMALL LETTER Y
0x007A, //LATIN SMALL LETTER Z
0x007B, //LEFT CURLY BRACKET
0x007C, //VERTICAL LINE
0x007D, //RIGHT CURLY BRACKET
0x007E, //TILDE
0x007F, //DELETE
0x0402, //CYRILLIC CAPITAL LETTER DJE
0x0403, //CYRILLIC CAPITAL LETTER GJE
0x201A, //SINGLE LOW-9 QUOTATION MARK
0x0453, //CYRILLIC SMALL LETTER GJE
0x201E, //DOUBLE LOW-9 QUOTATION MARK
0x2026, //HORIZONTAL ELLIPSIS
0x2020, //DAGGER
0x2021, //DOUBLE DAGGER
0x20AC, //EURO SIGN
0x2030, //PER MILLE SIGN
0x0409, //CYRILLIC CAPITAL LETTER LJE
0x2039, //SINGLE LEFT-POINTING ANGLE QUOTATION MARK
0x040A, //CYRILLIC CAPITAL LETTER NJE
0x040C, //CYRILLIC CAPITAL LETTER KJE
0x040B, //CYRILLIC CAPITAL LETTER TSHE
0x040F, //CYRILLIC CAPITAL LETTER DZHE
0x0452, //CYRILLIC SMALL LETTER DJE
0x2018, //LEFT SINGLE QUOTATION MARK
0x2019, //RIGHT SINGLE QUOTATION MARK
0x201C, //LEFT DOUBLE QUOTATION MARK
0x201D, //RIGHT DOUBLE QUOTATION MARK
0x2022, //BULLET
0x2013, //EN DASH
0x2014, //EM DASH
0xFFFD, //UNDEFINED
0x2122, //TRADE MARK SIGN
0x0459, //CYRILLIC SMALL LETTER LJE
0x203A, //SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
0x045A, //CYRILLIC SMALL LETTER NJE
0x045C, //CYRILLIC SMALL LETTER KJE
0x045B, //CYRILLIC SMALL LETTER TSHE
0x045F, //CYRILLIC SMALL LETTER DZHE
0x00A0, //NO-BREAK SPACE
0x040E, //CYRILLIC CAPITAL LETTER SHORT U
0x045E, //CYRILLIC SMALL LETTER SHORT U
0x0408, //CYRILLIC CAPITAL LETTER JE
0x00A4, //CURRENCY SIGN
0x0490, //CYRILLIC CAPITAL LETTER GHE WITH UPTURN
0x00A6, //BROKEN BAR
0x00A7, //SECTION SIGN
0x0401, //CYRILLIC CAPITAL LETTER IO
0x00A9, //COPYRIGHT SIGN
0x0404, //CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x00AB, //LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00AC, //NOT SIGN
0x00AD, //SOFT HYPHEN
0x00AE, //REGISTERED SIGN
0x0407, //CYRILLIC CAPITAL LETTER YI
0x00B0, //DEGREE SIGN
0x00B1, //PLUS-MINUS SIGN
0x0406, //CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x0456, //CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x0491, //CYRILLIC SMALL LETTER GHE WITH UPTURN
0x00B5, //MICRO SIGN
0x00B6, //PILCROW SIGN
0x00B7, //MIDDLE DOT
0x0451, //CYRILLIC SMALL LETTER IO
0x2116, //NUMERO SIGN
0x0454, //CYRILLIC SMALL LETTER UKRAINIAN IE
0x00BB, //RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x0458, //CYRILLIC SMALL LETTER JE
0x0405, //CYRILLIC CAPITAL LETTER DZE
0x0455, //CYRILLIC SMALL LETTER DZE
0x0457, //CYRILLIC SMALL LETTER YI
0x0410, //CYRILLIC CAPITAL LETTER A
0x0411, //CYRILLIC CAPITAL LETTER BE
0x0412, //CYRILLIC CAPITAL LETTER VE
0x0413, //CYRILLIC CAPITAL LETTER GHE
0x0414, //CYRILLIC CAPITAL LETTER DE
0x0415, //CYRILLIC CAPITAL LETTER IE
0x0416, //CYRILLIC CAPITAL LETTER ZHE
0x0417, //CYRILLIC CAPITAL LETTER ZE
0x0418, //CYRILLIC CAPITAL LETTER I
0x0419, //CYRILLIC CAPITAL LETTER SHORT I
0x041A, //CYRILLIC CAPITAL LETTER KA
0x041B, //CYRILLIC CAPITAL LETTER EL
0x041C, //CYRILLIC CAPITAL LETTER EM
0x041D, //CYRILLIC CAPITAL LETTER EN
0x041E, //CYRILLIC CAPITAL LETTER O
0x041F, //CYRILLIC CAPITAL LETTER PE
0x0420, //CYRILLIC CAPITAL LETTER ER
0x0421, //CYRILLIC CAPITAL LETTER ES
0x0422, //CYRILLIC CAPITAL LETTER TE
0x0423, //CYRILLIC CAPITAL LETTER U
0x0424, //CYRILLIC CAPITAL LETTER EF
0x0425, //CYRILLIC CAPITAL LETTER HA
0x0426, //CYRILLIC CAPITAL LETTER TSE
0x0427, //CYRILLIC CAPITAL LETTER CHE
0x0428, //CYRILLIC CAPITAL LETTER SHA
0x0429, //CYRILLIC CAPITAL LETTER SHCHA
0x042A, //CYRILLIC CAPITAL LETTER HARD SIGN
0x042B, //CYRILLIC CAPITAL LETTER YERU
0x042C, //CYRILLIC CAPITAL LETTER SOFT SIGN
0x042D, //CYRILLIC CAPITAL LETTER E
0x042E, //CYRILLIC CAPITAL LETTER YU
0x042F, //CYRILLIC CAPITAL LETTER YA
0x0430, //CYRILLIC SMALL LETTER A
0x0431, //CYRILLIC SMALL LETTER BE
0x0432, //CYRILLIC SMALL LETTER VE
0x0433, //CYRILLIC SMALL LETTER GHE
0x0434, //CYRILLIC SMALL LETTER DE
0x0435, //CYRILLIC SMALL LETTER IE
0x0436, //CYRILLIC SMALL LETTER ZHE
0x0437, //CYRILLIC SMALL LETTER ZE
0x0438, //CYRILLIC SMALL LETTER I
0x0439, //CYRILLIC SMALL LETTER SHORT I
0x043A, //CYRILLIC SMALL LETTER KA
0x043B, //CYRILLIC SMALL LETTER EL
0x043C, //CYRILLIC SMALL LETTER EM
0x043D, //CYRILLIC SMALL LETTER EN
0x043E, //CYRILLIC SMALL LETTER O
0x043F, //CYRILLIC SMALL LETTER PE
0x0440, //CYRILLIC SMALL LETTER ER
0x0441, //CYRILLIC SMALL LETTER ES
0x0442, //CYRILLIC SMALL LETTER TE
0x0443, //CYRILLIC SMALL LETTER U
0x0444, //CYRILLIC SMALL LETTER EF
0x0445, //CYRILLIC SMALL LETTER HA
0x0446, //CYRILLIC SMALL LETTER TSE
0x0447, //CYRILLIC SMALL LETTER CHE
0x0448, //CYRILLIC SMALL LETTER SHA
0x0449, //CYRILLIC SMALL LETTER SHCHA
0x044A, //CYRILLIC SMALL LETTER HARD SIGN
0x044B, //CYRILLIC SMALL LETTER YERU
0x044C, //CYRILLIC SMALL LETTER SOFT SIGN
0x044D, //CYRILLIC SMALL LETTER E
0x044E, //CYRILLIC SMALL LETTER YU
0x044F, //CYRILLIC SMALL LETTER YA
},
}
| vendor/github.com/denisenkom/go-mssqldb/cp1251.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00017009006114676595,
0.00016625381249468774,
0.0001634375803405419,
0.00016609326121397316,
0.0000013660752529176534
] |
{
"id": 11,
"code_window": [
"Using ACLs, it is possible to restrict using the mysql backend such\n",
"that trusted operators can manage the role definitions, and both\n",
"users and applications are restricted in the credentials they are\n",
"allowed to read.\n",
"\n",
"Optionally, you may configure the number of character from the role\n",
"name that are truncated to form the mysql usernamed interpolated into\n",
"the `{{name}}` field: the default is 10. Note that versions of\n",
"mysql prior to 5.8 have a 16 character total limit on user names, so\n",
"it is probably not safe to increase this above the default on versions\n",
"prior to that.\n",
"\n",
"## API\n",
"\n",
"### /mysql/config/connection\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"Optionally, you may configure both the number of characters from the role name\n",
"that are truncated to form the display name portion of the mysql username\n",
"interpolated into the `{{name}}` field: the default is 10. \n",
"\n",
"You may also configure the total number of characters allowed in the entire\n",
"generated username (the sum of the display name and uuid poritions); the\n",
"default is 16. Note that versions of MySQL prior to 5.8 have a 16 character\n",
"total limit on user names, so it is probably not safe to increase this above\n",
"the default on versions prior to that.\n"
],
"file_path": "website/source/docs/secrets/mysql/index.html.md",
"type": "replace",
"edit_start_line_idx": 107
} | package mssql
import (
"encoding/binary"
"io"
"strconv"
"strings"
)
// token ids
const (
tokenReturnStatus = 121 // 0x79
tokenColMetadata = 129 // 0x81
tokenOrder = 169 // 0xA9
tokenError = 170 // 0xAA
tokenInfo = 171 // 0xAB
tokenLoginAck = 173 // 0xad
tokenRow = 209 // 0xd1
tokenNbcRow = 210 // 0xd2
tokenEnvChange = 227 // 0xE3
tokenSSPI = 237 // 0xED
tokenDone = 253 // 0xFD
tokenDoneProc = 254
tokenDoneInProc = 255
)
// done flags
const (
doneFinal = 0
doneMore = 1
doneError = 2
doneInxact = 4
doneCount = 0x10
doneAttn = 0x20
doneSrvError = 0x100
)
// ENVCHANGE types
// http://msdn.microsoft.com/en-us/library/dd303449.aspx
const (
envTypDatabase = 1
envTypLanguage = 2
envTypCharset = 3
envTypPacketSize = 4
envTypBeginTran = 8
envTypCommitTran = 9
envTypRollbackTran = 10
envDatabaseMirrorPartner = 13
envRouting = 20
)
// interface for all tokens
type tokenStruct interface{}
type orderStruct struct {
ColIds []uint16
}
type doneStruct struct {
Status uint16
CurCmd uint16
RowCount uint64
}
type doneInProcStruct doneStruct
var doneFlags2str = map[uint16]string{
doneFinal: "final",
doneMore: "more",
doneError: "error",
doneInxact: "inxact",
doneCount: "count",
doneAttn: "attn",
doneSrvError: "srverror",
}
func doneFlags2Str(flags uint16) string {
strs := make([]string, 0, len(doneFlags2str))
for flag, tag := range doneFlags2str {
if flags&flag != 0 {
strs = append(strs, tag)
}
}
return strings.Join(strs, "|")
}
// ENVCHANGE stream
// http://msdn.microsoft.com/en-us/library/dd303449.aspx
func processEnvChg(sess *tdsSession) {
size := sess.buf.uint16()
r := &io.LimitedReader{R: sess.buf, N: int64(size)}
for {
var err error
var envtype uint8
err = binary.Read(r, binary.LittleEndian, &envtype)
if err == io.EOF {
return
}
if err != nil {
badStreamPanic(err)
}
switch envtype {
case envTypDatabase:
sess.database, err = readBVarChar(r)
if err != nil {
badStreamPanic(err)
}
_, err = readBVarChar(r)
if err != nil {
badStreamPanic(err)
}
case envTypPacketSize:
packetsize, err := readBVarChar(r)
if err != nil {
badStreamPanic(err)
}
_, err = readBVarChar(r)
if err != nil {
badStreamPanic(err)
}
packetsizei, err := strconv.Atoi(packetsize)
if err != nil {
badStreamPanicf("Invalid Packet size value returned from server (%s): %s", packetsize, err.Error())
}
if len(sess.buf.buf) != packetsizei {
newbuf := make([]byte, packetsizei)
copy(newbuf, sess.buf.buf)
sess.buf.buf = newbuf
}
case envTypBeginTran:
tranid, err := readBVarByte(r)
if len(tranid) != 8 {
badStreamPanicf("invalid size of transaction identifier: %d", len(tranid))
}
sess.tranid = binary.LittleEndian.Uint64(tranid)
if err != nil {
badStreamPanic(err)
}
if sess.logFlags&logTransaction != 0 {
sess.log.Printf("BEGIN TRANSACTION %x\n", sess.tranid)
}
_, err = readBVarByte(r)
if err != nil {
badStreamPanic(err)
}
case envTypCommitTran, envTypRollbackTran:
_, err = readBVarByte(r)
if err != nil {
badStreamPanic(err)
}
_, err = readBVarByte(r)
if err != nil {
badStreamPanic(err)
}
if sess.logFlags&logTransaction != 0 {
if envtype == envTypCommitTran {
sess.log.Printf("COMMIT TRANSACTION %x\n", sess.tranid)
} else {
sess.log.Printf("ROLLBACK TRANSACTION %x\n", sess.tranid)
}
}
sess.tranid = 0
case envDatabaseMirrorPartner:
sess.partner, err = readBVarChar(r)
if err != nil {
badStreamPanic(err)
}
_, err = readBVarChar(r)
if err != nil {
badStreamPanic(err)
}
case envRouting:
// RoutingData message is:
// ValueLength USHORT
// Protocol (TCP = 0) BYTE
// ProtocolProperty (new port) USHORT
// AlternateServer US_VARCHAR
_, err := readUshort(r)
if err != nil {
badStreamPanic(err)
}
protocol, err := readByte(r)
if err != nil || protocol != 0 {
badStreamPanic(err)
}
newPort, err := readUshort(r)
if err != nil {
badStreamPanic(err)
}
newServer, err := readUsVarChar(r)
if err != nil {
badStreamPanic(err)
}
// consume the OLDVALUE = %x00 %x00
_, err = readUshort(r)
if err != nil {
badStreamPanic(err)
}
sess.routedServer = newServer
sess.routedPort = newPort
default:
// ignore unknown env change types
_, err = readBVarByte(r)
if err != nil {
badStreamPanic(err)
}
_, err = readBVarByte(r)
if err != nil {
badStreamPanic(err)
}
}
}
}
type returnStatus int32
// http://msdn.microsoft.com/en-us/library/dd358180.aspx
func parseReturnStatus(r *tdsBuffer) returnStatus {
return returnStatus(r.int32())
}
func parseOrder(r *tdsBuffer) (res orderStruct) {
len := int(r.uint16())
res.ColIds = make([]uint16, len/2)
for i := 0; i < len/2; i++ {
res.ColIds[i] = r.uint16()
}
return res
}
func parseDone(r *tdsBuffer) (res doneStruct) {
res.Status = r.uint16()
res.CurCmd = r.uint16()
res.RowCount = r.uint64()
return res
}
func parseDoneInProc(r *tdsBuffer) (res doneInProcStruct) {
res.Status = r.uint16()
res.CurCmd = r.uint16()
res.RowCount = r.uint64()
return res
}
type sspiMsg []byte
func parseSSPIMsg(r *tdsBuffer) sspiMsg {
size := r.uint16()
buf := make([]byte, size)
r.ReadFull(buf)
return sspiMsg(buf)
}
type loginAckStruct struct {
Interface uint8
TDSVersion uint32
ProgName string
ProgVer uint32
}
func parseLoginAck(r *tdsBuffer) loginAckStruct {
size := r.uint16()
buf := make([]byte, size)
r.ReadFull(buf)
var res loginAckStruct
res.Interface = buf[0]
res.TDSVersion = binary.BigEndian.Uint32(buf[1:])
prognamelen := buf[1+4]
var err error
if res.ProgName, err = ucs22str(buf[1+4+1 : 1+4+1+prognamelen*2]); err != nil {
badStreamPanic(err)
}
res.ProgVer = binary.BigEndian.Uint32(buf[size-4:])
return res
}
// http://msdn.microsoft.com/en-us/library/dd357363.aspx
func parseColMetadata72(r *tdsBuffer) (columns []columnStruct) {
count := r.uint16()
if count == 0xffff {
// no metadata is sent
return nil
}
columns = make([]columnStruct, count)
for i := range columns {
column := &columns[i]
column.UserType = r.uint32()
column.Flags = r.uint16()
// parsing TYPE_INFO structure
column.ti = readTypeInfo(r)
column.ColName = r.BVarChar()
}
return columns
}
// http://msdn.microsoft.com/en-us/library/dd357254.aspx
func parseRow(r *tdsBuffer, columns []columnStruct, row []interface{}) {
for i, column := range columns {
row[i] = column.ti.Reader(&column.ti, r)
}
}
// http://msdn.microsoft.com/en-us/library/dd304783.aspx
func parseNbcRow(r *tdsBuffer, columns []columnStruct, row []interface{}) {
bitlen := (len(columns) + 7) / 8
pres := make([]byte, bitlen)
r.ReadFull(pres)
for i, col := range columns {
if pres[i/8]&(1<<(uint(i)%8)) != 0 {
row[i] = nil
continue
}
row[i] = col.ti.Reader(&col.ti, r)
}
}
// http://msdn.microsoft.com/en-us/library/dd304156.aspx
func parseError72(r *tdsBuffer) (res Error) {
length := r.uint16()
_ = length // ignore length
res.Number = r.int32()
res.State = r.byte()
res.Class = r.byte()
res.Message = r.UsVarChar()
res.ServerName = r.BVarChar()
res.ProcName = r.BVarChar()
res.LineNo = r.int32()
return
}
// http://msdn.microsoft.com/en-us/library/dd304156.aspx
func parseInfo(r *tdsBuffer) (res Error) {
length := r.uint16()
_ = length // ignore length
res.Number = r.int32()
res.State = r.byte()
res.Class = r.byte()
res.Message = r.UsVarChar()
res.ServerName = r.BVarChar()
res.ProcName = r.BVarChar()
res.LineNo = r.int32()
return
}
func processResponse(sess *tdsSession, ch chan tokenStruct) {
defer func() {
if err := recover(); err != nil {
ch <- err
}
close(ch)
}()
packet_type, err := sess.buf.BeginRead()
if err != nil {
ch <- err
return
}
if packet_type != packReply {
badStreamPanicf("invalid response packet type, expected REPLY, actual: %d", packet_type)
}
var columns []columnStruct
var lastError Error
var failed bool
for {
token := sess.buf.byte()
switch token {
case tokenSSPI:
ch <- parseSSPIMsg(sess.buf)
return
case tokenReturnStatus:
returnStatus := parseReturnStatus(sess.buf)
ch <- returnStatus
case tokenLoginAck:
loginAck := parseLoginAck(sess.buf)
ch <- loginAck
case tokenOrder:
order := parseOrder(sess.buf)
ch <- order
case tokenDoneInProc:
done := parseDoneInProc(sess.buf)
if sess.logFlags&logRows != 0 && done.Status&doneCount != 0 {
sess.log.Printf("(%d row(s) affected)\n", done.RowCount)
}
ch <- done
case tokenDone, tokenDoneProc:
done := parseDone(sess.buf)
if sess.logFlags&logRows != 0 && done.Status&doneCount != 0 {
sess.log.Printf("(%d row(s) affected)\n", done.RowCount)
}
if done.Status&doneError != 0 || failed {
ch <- lastError
return
}
if done.Status&doneSrvError != 0 {
lastError.Message = "Server Error"
ch <- lastError
return
}
ch <- done
if done.Status&doneMore == 0 {
return
}
case tokenColMetadata:
columns = parseColMetadata72(sess.buf)
ch <- columns
case tokenRow:
row := make([]interface{}, len(columns))
parseRow(sess.buf, columns, row)
ch <- row
case tokenNbcRow:
row := make([]interface{}, len(columns))
parseNbcRow(sess.buf, columns, row)
ch <- row
case tokenEnvChange:
processEnvChg(sess)
case tokenError:
lastError = parseError72(sess.buf)
failed = true
if sess.logFlags&logErrors != 0 {
sess.log.Println(lastError.Message)
}
case tokenInfo:
info := parseInfo(sess.buf)
if sess.logFlags&logMessages != 0 {
sess.log.Println(info.Message)
}
default:
badStreamPanicf("Unknown token type: %d", token)
}
}
}
| vendor/github.com/denisenkom/go-mssqldb/token.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00017367771943099797,
0.00016884114302229136,
0.00016209618479479104,
0.0001688269549049437,
0.0000024899215986806666
] |
{
"id": 11,
"code_window": [
"Using ACLs, it is possible to restrict using the mysql backend such\n",
"that trusted operators can manage the role definitions, and both\n",
"users and applications are restricted in the credentials they are\n",
"allowed to read.\n",
"\n",
"Optionally, you may configure the number of character from the role\n",
"name that are truncated to form the mysql usernamed interpolated into\n",
"the `{{name}}` field: the default is 10. Note that versions of\n",
"mysql prior to 5.8 have a 16 character total limit on user names, so\n",
"it is probably not safe to increase this above the default on versions\n",
"prior to that.\n",
"\n",
"## API\n",
"\n",
"### /mysql/config/connection\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"Optionally, you may configure both the number of characters from the role name\n",
"that are truncated to form the display name portion of the mysql username\n",
"interpolated into the `{{name}}` field: the default is 10. \n",
"\n",
"You may also configure the total number of characters allowed in the entire\n",
"generated username (the sum of the display name and uuid poritions); the\n",
"default is 16. Note that versions of MySQL prior to 5.8 have a 16 character\n",
"total limit on user names, so it is probably not safe to increase this above\n",
"the default on versions prior to that.\n"
],
"file_path": "website/source/docs/secrets/mysql/index.html.md",
"type": "replace",
"edit_start_line_idx": 107
} | package vault
import (
"log"
"strings"
"sync"
"time"
"github.com/armon/go-metrics"
"github.com/hashicorp/vault/logical"
)
const (
// rollbackPeriod is how often we attempt rollbacks for all the backends
rollbackPeriod = time.Minute
)
// RollbackManager is responsible for performing rollbacks of partial
// secrets within logical backends.
//
// During normal operations, it is possible for logical backends to
// error partially through an operation. These are called "partial secrets":
// they are never sent back to a user, but they do need to be cleaned up.
// This manager handles that by periodically (on a timer) requesting that the
// backends clean up.
//
// The RollbackManager periodically initiates a logical.RollbackOperation
// on every mounted logical backend. It ensures that only one rollback operation
// is in-flight at any given time within a single seal/unseal phase.
type RollbackManager struct {
logger *log.Logger
// This gives the current mount table of both logical and credential backends,
// plus a RWMutex that is locked for reading. It is up to the caller to RUnlock
// it when done with the mount table.
backends func() []*MountEntry
router *Router
period time.Duration
inflightAll sync.WaitGroup
inflight map[string]*rollbackState
inflightLock sync.Mutex
doneCh chan struct{}
shutdown bool
shutdownCh chan struct{}
shutdownLock sync.Mutex
}
// rollbackState is used to track the state of a single rollback attempt
type rollbackState struct {
lastError error
sync.WaitGroup
}
// NewRollbackManager is used to create a new rollback manager
func NewRollbackManager(logger *log.Logger, backendsFunc func() []*MountEntry, router *Router) *RollbackManager {
r := &RollbackManager{
logger: logger,
backends: backendsFunc,
router: router,
period: rollbackPeriod,
inflight: make(map[string]*rollbackState),
doneCh: make(chan struct{}),
shutdownCh: make(chan struct{}),
}
return r
}
// Start starts the rollback manager
func (m *RollbackManager) Start() {
go m.run()
}
// Stop stops the running manager. This will wait for any in-flight
// rollbacks to complete.
func (m *RollbackManager) Stop() {
m.shutdownLock.Lock()
defer m.shutdownLock.Unlock()
if !m.shutdown {
m.shutdown = true
close(m.shutdownCh)
<-m.doneCh
}
m.inflightAll.Wait()
}
// run is a long running routine to periodically invoke rollback
func (m *RollbackManager) run() {
m.logger.Printf("[INFO] rollback: starting rollback manager")
tick := time.NewTicker(m.period)
defer tick.Stop()
defer close(m.doneCh)
for {
select {
case <-tick.C:
m.triggerRollbacks()
case <-m.shutdownCh:
m.logger.Printf("[INFO] rollback: stopping rollback manager")
return
}
}
}
// triggerRollbacks is used to trigger the rollbacks across all the backends
func (m *RollbackManager) triggerRollbacks() {
m.inflightLock.Lock()
defer m.inflightLock.Unlock()
backends := m.backends()
for _, e := range backends {
path := e.Path
if e.Table == credentialTableType {
path = "auth/" + path
}
if _, ok := m.inflight[path]; !ok {
m.startRollback(path)
}
}
}
// startRollback is used to start an async rollback attempt.
// This must be called with the inflightLock held.
func (m *RollbackManager) startRollback(path string) *rollbackState {
rs := &rollbackState{}
rs.Add(1)
m.inflightAll.Add(1)
m.inflight[path] = rs
go m.attemptRollback(path, rs)
return rs
}
// attemptRollback invokes a RollbackOperation for the given path
func (m *RollbackManager) attemptRollback(path string, rs *rollbackState) (err error) {
defer metrics.MeasureSince([]string{"rollback", "attempt", strings.Replace(path, "/", "-", -1)}, time.Now())
m.logger.Printf("[DEBUG] rollback: attempting rollback on %s", path)
defer func() {
rs.lastError = err
rs.Done()
m.inflightAll.Done()
m.inflightLock.Lock()
delete(m.inflight, path)
m.inflightLock.Unlock()
}()
// Invoke a RollbackOperation
req := &logical.Request{
Operation: logical.RollbackOperation,
Path: path,
}
_, err = m.router.Route(req)
// If the error is an unsupported operation, then it doesn't
// matter, the backend doesn't support it.
if err == logical.ErrUnsupportedOperation {
err = nil
}
if err != nil {
m.logger.Printf("[ERR] rollback: error rolling back %s: %s",
path, err)
}
return
}
// Rollback is used to trigger an immediate rollback of the path,
// or to join an existing rollback operation if in flight.
func (m *RollbackManager) Rollback(path string) error {
// Check for an existing attempt and start one if none
m.inflightLock.Lock()
rs, ok := m.inflight[path]
if !ok {
rs = m.startRollback(path)
}
m.inflightLock.Unlock()
// Wait for the attempt to finish
rs.Wait()
// Return the last error
return rs.lastError
}
// The methods below are the hooks from core that are called pre/post seal.
// startRollback is used to start the rollback manager after unsealing
func (c *Core) startRollback() error {
backendsFunc := func() []*MountEntry {
ret := []*MountEntry{}
c.mountsLock.RLock()
defer c.mountsLock.RUnlock()
for _, entry := range c.mounts.Entries {
ret = append(ret, entry)
}
c.authLock.RLock()
defer c.authLock.RUnlock()
for _, entry := range c.auth.Entries {
ret = append(ret, entry)
}
return ret
}
c.rollback = NewRollbackManager(c.logger, backendsFunc, c.router)
c.rollback.Start()
return nil
}
// stopRollback is used to stop running the rollback manager before sealing
func (c *Core) stopRollback() error {
if c.rollback != nil {
c.rollback.Stop()
c.rollback = nil
}
return nil
}
| vault/rollback.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.0007765123737044632,
0.00020355159358587116,
0.00016205089923460037,
0.00016802710888441652,
0.0001297610142501071
] |
{
"id": 12,
"code_window": [
" The SQL statements executed to create and configure the role.\n",
" Must be semi-colon separated. The '{{name}}' and '{{password}}'\n",
" values will be substituted.\n",
" </li>\n",
" <li>\n",
" <span class=\"param\">username_length</span>\n",
" <span class=\"param-flags\">optional</span>\n",
" Determines how many characters from the role name will be used\n",
" to form the mysql username interpolated into the '{{name}}' field\n",
" of the sql parameter.\n",
" </li>\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" <span class=\"param\">displayname_length</span>\n"
],
"file_path": "website/source/docs/secrets/mysql/index.html.md",
"type": "replace",
"edit_start_line_idx": 244
} | package mysql
import (
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
func pathListRoles(b *backend) *framework.Path {
return &framework.Path{
Pattern: "roles/?$",
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ListOperation: b.pathRoleList,
},
HelpSynopsis: pathRoleHelpSyn,
HelpDescription: pathRoleHelpDesc,
}
}
func pathRoles(b *backend) *framework.Path {
return &framework.Path{
Pattern: "roles/" + framework.GenericNameRegex("name"),
Fields: map[string]*framework.FieldSchema{
"name": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Name of the role.",
},
"sql": &framework.FieldSchema{
Type: framework.TypeString,
Description: "SQL string to create a user. See help for more info.",
},
"username_length": &framework.FieldSchema{
Type: framework.TypeInt,
Description: "number of characters to truncate generated mysql usernames to (default 10)",
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ReadOperation: b.pathRoleRead,
logical.UpdateOperation: b.pathRoleCreate,
logical.DeleteOperation: b.pathRoleDelete,
},
HelpSynopsis: pathRoleHelpSyn,
HelpDescription: pathRoleHelpDesc,
}
}
func (b *backend) Role(s logical.Storage, n string) (*roleEntry, error) {
entry, err := s.Get("role/" + n)
if err != nil {
return nil, err
}
if entry == nil {
return nil, nil
}
var result roleEntry
if err := entry.DecodeJSON(&result); err != nil {
return nil, err
}
return &result, nil
}
func (b *backend) pathRoleDelete(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
err := req.Storage.Delete("role/" + data.Get("name").(string))
if err != nil {
return nil, err
}
return nil, nil
}
func (b *backend) pathRoleRead(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
role, err := b.Role(req.Storage, data.Get("name").(string))
if err != nil {
return nil, err
}
if role == nil {
return nil, nil
}
return &logical.Response{
Data: map[string]interface{}{
"sql": role.SQL,
},
}, nil
}
func (b *backend) pathRoleList(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
entries, err := req.Storage.List("role/")
if err != nil {
return nil, err
}
return logical.ListResponse(entries), nil
}
func (b *backend) pathRoleCreate(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
name := data.Get("name").(string)
sql := data.Get("sql").(string)
username_length := data.Get("username_length").(int)
// Get our connection
db, err := b.DB(req.Storage)
if err != nil {
return nil, err
}
// Test the query by trying to prepare it
for _, query := range SplitSQL(sql) {
stmt, err := db.Prepare(Query(query, map[string]string{
"name": "foo",
"password": "bar",
}))
if err != nil {
return logical.ErrorResponse(fmt.Sprintf(
"Error testing query: %s", err)), nil
}
stmt.Close()
}
// Store it
entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{
SQL: sql,
USERNAME_LENGTH: username_length,
})
if err != nil {
return nil, err
}
if err := req.Storage.Put(entry); err != nil {
return nil, err
}
return nil, nil
}
type roleEntry struct {
SQL string `json:"sql"`
USERNAME_LENGTH int `json:"username_length"`
}
const pathRoleHelpSyn = `
Manage the roles that can be created with this backend.
`
const pathRoleHelpDesc = `
This path lets you manage the roles that can be created with this backend.
The "sql" parameter customizes the SQL string used to create the role.
This can be a sequence of SQL queries, each semi-colon seperated. Some
substitution will be done to the SQL string for certain keys.
The names of the variables must be surrounded by "{{" and "}}" to be replaced.
* "name" - The random username generated for the DB user.
* "password" - The random password generated for the DB user.
Example of a decent SQL query to use:
CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';
GRANT ALL ON db1.* TO '{{name}}'@'%';
Note the above user would be able to access anything in db1. Please see the MySQL
manual on the GRANT command to learn how to do more fine grained access.
The "username_length" parameter determines how many characters of the
role name will be used in creating the generated mysql username; the
default is 10. Note that mysql versions prior to 5.8 have a 16 character
total limit on usernames.
`
| builtin/logical/mysql/path_roles.go | 1 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.007525403518229723,
0.001450250274501741,
0.00016123248497024179,
0.0003746984584722668,
0.001970128621906042
] |
{
"id": 12,
"code_window": [
" The SQL statements executed to create and configure the role.\n",
" Must be semi-colon separated. The '{{name}}' and '{{password}}'\n",
" values will be substituted.\n",
" </li>\n",
" <li>\n",
" <span class=\"param\">username_length</span>\n",
" <span class=\"param-flags\">optional</span>\n",
" Determines how many characters from the role name will be used\n",
" to form the mysql username interpolated into the '{{name}}' field\n",
" of the sql parameter.\n",
" </li>\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" <span class=\"param\">displayname_length</span>\n"
],
"file_path": "website/source/docs/secrets/mysql/index.html.md",
"type": "replace",
"edit_start_line_idx": 244
} | // Copyright 2015 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package github
import "fmt"
// Scope models a GitHub authorization scope.
//
// GitHub API docs:https://developer.github.com/v3/oauth/#scopes
type Scope string
// This is the set of scopes for GitHub API V3
const (
ScopeNone Scope = "(no scope)" // REVISIT: is this actually returned, or just a documentation artifact?
ScopeUser Scope = "user"
ScopeUserEmail Scope = "user:email"
ScopeUserFollow Scope = "user:follow"
ScopePublicRepo Scope = "public_repo"
ScopeRepo Scope = "repo"
ScopeRepoDeployment Scope = "repo_deployment"
ScopeRepoStatus Scope = "repo:status"
ScopeDeleteRepo Scope = "delete_repo"
ScopeNotifications Scope = "notifications"
ScopeGist Scope = "gist"
ScopeReadRepoHook Scope = "read:repo_hook"
ScopeWriteRepoHook Scope = "write:repo_hook"
ScopeAdminRepoHook Scope = "admin:repo_hook"
ScopeAdminOrgHook Scope = "admin:org_hook"
ScopeReadOrg Scope = "read:org"
ScopeWriteOrg Scope = "write:org"
ScopeAdminOrg Scope = "admin:org"
ScopeReadPublicKey Scope = "read:public_key"
ScopeWritePublicKey Scope = "write:public_key"
ScopeAdminPublicKey Scope = "admin:public_key"
ScopeReadGPGKey Scope = "read:gpg_key"
ScopeWriteGPGKey Scope = "write:gpg_key"
ScopeAdminGPGKey Scope = "admin:gpg_key"
)
// AuthorizationsService handles communication with the authorization related
// methods of the GitHub API.
//
// This service requires HTTP Basic Authentication; it cannot be accessed using
// an OAuth token.
//
// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/
type AuthorizationsService service
// Authorization represents an individual GitHub authorization.
type Authorization struct {
ID *int `json:"id,omitempty"`
URL *string `json:"url,omitempty"`
Scopes []Scope `json:"scopes,omitempty"`
Token *string `json:"token,omitempty"`
TokenLastEight *string `json:"token_last_eight,omitempty"`
HashedToken *string `json:"hashed_token,omitempty"`
App *AuthorizationApp `json:"app,omitempty"`
Note *string `json:"note,omitempty"`
NoteURL *string `json:"note_url,omitempty"`
UpdateAt *Timestamp `json:"updated_at,omitempty"`
CreatedAt *Timestamp `json:"created_at,omitempty"`
Fingerprint *string `json:"fingerprint,omitempty"`
// User is only populated by the Check and Reset methods.
User *User `json:"user,omitempty"`
}
func (a Authorization) String() string {
return Stringify(a)
}
// AuthorizationApp represents an individual GitHub app (in the context of authorization).
type AuthorizationApp struct {
URL *string `json:"url,omitempty"`
Name *string `json:"name,omitempty"`
ClientID *string `json:"client_id,omitempty"`
}
func (a AuthorizationApp) String() string {
return Stringify(a)
}
// Grant represents an OAuth application that has been granted access to an account.
type Grant struct {
ID *int `json:"id,omitempty"`
URL *string `json:"url,omitempty"`
App *AuthorizationApp `json:"app,omitempty"`
CreatedAt *Timestamp `json:"created_at,omitempty"`
UpdatedAt *Timestamp `json:"updated_at,omitempty"`
Scopes []string `json:"scopes,omitempty"`
}
func (g Grant) String() string {
return Stringify(g)
}
// AuthorizationRequest represents a request to create an authorization.
type AuthorizationRequest struct {
Scopes []Scope `json:"scopes,omitempty"`
Note *string `json:"note,omitempty"`
NoteURL *string `json:"note_url,omitempty"`
ClientID *string `json:"client_id,omitempty"`
ClientSecret *string `json:"client_secret,omitempty"`
Fingerprint *string `json:"fingerprint,omitempty"`
}
func (a AuthorizationRequest) String() string {
return Stringify(a)
}
// AuthorizationUpdateRequest represents a request to update an authorization.
//
// Note that for any one update, you must only provide one of the "scopes"
// fields. That is, you may provide only one of "Scopes", or "AddScopes", or
// "RemoveScopes".
//
// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#update-an-existing-authorization
type AuthorizationUpdateRequest struct {
Scopes []string `json:"scopes,omitempty"`
AddScopes []string `json:"add_scopes,omitempty"`
RemoveScopes []string `json:"remove_scopes,omitempty"`
Note *string `json:"note,omitempty"`
NoteURL *string `json:"note_url,omitempty"`
Fingerprint *string `json:"fingerprint,omitempty"`
}
func (a AuthorizationUpdateRequest) String() string {
return Stringify(a)
}
// List the authorizations for the authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#list-your-authorizations
func (s *AuthorizationsService) List(opt *ListOptions) ([]*Authorization, *Response, error) {
u := "authorizations"
u, err := addOptions(u, opt)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
auths := new([]*Authorization)
resp, err := s.client.Do(req, auths)
if err != nil {
return nil, resp, err
}
return *auths, resp, err
}
// Get a single authorization.
//
// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#get-a-single-authorization
func (s *AuthorizationsService) Get(id int) (*Authorization, *Response, error) {
u := fmt.Sprintf("authorizations/%d", id)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
a := new(Authorization)
resp, err := s.client.Do(req, a)
if err != nil {
return nil, resp, err
}
return a, resp, err
}
// Create a new authorization for the specified OAuth application.
//
// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#create-a-new-authorization
func (s *AuthorizationsService) Create(auth *AuthorizationRequest) (*Authorization, *Response, error) {
u := "authorizations"
req, err := s.client.NewRequest("POST", u, auth)
if err != nil {
return nil, nil, err
}
a := new(Authorization)
resp, err := s.client.Do(req, a)
if err != nil {
return nil, resp, err
}
return a, resp, err
}
// GetOrCreateForApp creates a new authorization for the specified OAuth
// application, only if an authorization for that application doesn’t already
// exist for the user.
//
// If a new token is created, the HTTP status code will be "201 Created", and
// the returned Authorization.Token field will be populated. If an existing
// token is returned, the status code will be "200 OK" and the
// Authorization.Token field will be empty.
//
// clientID is the OAuth Client ID with which to create the token.
//
// GitHub API docs:
// - https://developer.github.com/v3/oauth_authorizations/#get-or-create-an-authorization-for-a-specific-app
// - https://developer.github.com/v3/oauth_authorizations/#get-or-create-an-authorization-for-a-specific-app-and-fingerprint
func (s *AuthorizationsService) GetOrCreateForApp(clientID string, auth *AuthorizationRequest) (*Authorization, *Response, error) {
var u string
if auth.Fingerprint == nil || *auth.Fingerprint == "" {
u = fmt.Sprintf("authorizations/clients/%v", clientID)
} else {
u = fmt.Sprintf("authorizations/clients/%v/%v", clientID, *auth.Fingerprint)
}
req, err := s.client.NewRequest("PUT", u, auth)
if err != nil {
return nil, nil, err
}
a := new(Authorization)
resp, err := s.client.Do(req, a)
if err != nil {
return nil, resp, err
}
return a, resp, err
}
// Edit a single authorization.
//
// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#update-an-existing-authorization
func (s *AuthorizationsService) Edit(id int, auth *AuthorizationUpdateRequest) (*Authorization, *Response, error) {
u := fmt.Sprintf("authorizations/%d", id)
req, err := s.client.NewRequest("PATCH", u, auth)
if err != nil {
return nil, nil, err
}
a := new(Authorization)
resp, err := s.client.Do(req, a)
if err != nil {
return nil, resp, err
}
return a, resp, err
}
// Delete a single authorization.
//
// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#delete-an-authorization
func (s *AuthorizationsService) Delete(id int) (*Response, error) {
u := fmt.Sprintf("authorizations/%d", id)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
return s.client.Do(req, nil)
}
// Check if an OAuth token is valid for a specific app.
//
// Note that this operation requires the use of BasicAuth, but where the
// username is the OAuth application clientID, and the password is its
// clientSecret. Invalid tokens will return a 404 Not Found.
//
// The returned Authorization.User field will be populated.
//
// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#check-an-authorization
func (s *AuthorizationsService) Check(clientID string, token string) (*Authorization, *Response, error) {
u := fmt.Sprintf("applications/%v/tokens/%v", clientID, token)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
a := new(Authorization)
resp, err := s.client.Do(req, a)
if err != nil {
return nil, resp, err
}
return a, resp, err
}
// Reset is used to reset a valid OAuth token without end user involvement.
// Applications must save the "token" property in the response, because changes
// take effect immediately.
//
// Note that this operation requires the use of BasicAuth, but where the
// username is the OAuth application clientID, and the password is its
// clientSecret. Invalid tokens will return a 404 Not Found.
//
// The returned Authorization.User field will be populated.
//
// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#reset-an-authorization
func (s *AuthorizationsService) Reset(clientID string, token string) (*Authorization, *Response, error) {
u := fmt.Sprintf("applications/%v/tokens/%v", clientID, token)
req, err := s.client.NewRequest("POST", u, nil)
if err != nil {
return nil, nil, err
}
a := new(Authorization)
resp, err := s.client.Do(req, a)
if err != nil {
return nil, resp, err
}
return a, resp, err
}
// Revoke an authorization for an application.
//
// Note that this operation requires the use of BasicAuth, but where the
// username is the OAuth application clientID, and the password is its
// clientSecret. Invalid tokens will return a 404 Not Found.
//
// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#revoke-an-authorization-for-an-application
func (s *AuthorizationsService) Revoke(clientID string, token string) (*Response, error) {
u := fmt.Sprintf("applications/%v/tokens/%v", clientID, token)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
return s.client.Do(req, nil)
}
// ListGrants lists the set of OAuth applications that have been granted
// access to a user's account. This will return one entry for each application
// that has been granted access to the account, regardless of the number of
// tokens an application has generated for the user.
//
// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#list-your-grants
func (s *AuthorizationsService) ListGrants() ([]*Grant, *Response, error) {
req, err := s.client.NewRequest("GET", "applications/grants", nil)
if err != nil {
return nil, nil, err
}
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeOAuthGrantAuthorizationsPreview)
grants := []*Grant{}
resp, err := s.client.Do(req, &grants)
if err != nil {
return nil, resp, err
}
return grants, resp, err
}
// GetGrant gets a single OAuth application grant.
//
// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#get-a-single-grant
func (s *AuthorizationsService) GetGrant(id int) (*Grant, *Response, error) {
u := fmt.Sprintf("applications/grants/%d", id)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeOAuthGrantAuthorizationsPreview)
grant := new(Grant)
resp, err := s.client.Do(req, grant)
if err != nil {
return nil, resp, err
}
return grant, resp, err
}
// DeleteGrant deletes an OAuth application grant. Deleting an application's
// grant will also delete all OAuth tokens associated with the application for
// the user.
//
// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#delete-a-grant
func (s *AuthorizationsService) DeleteGrant(id int) (*Response, error) {
u := fmt.Sprintf("applications/grants/%d", id)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeOAuthGrantAuthorizationsPreview)
return s.client.Do(req, nil)
}
| vendor/github.com/google/go-github/github/authorizations.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00033534885733388364,
0.00017522038251627237,
0.0001629137113923207,
0.00016813311958685517,
0.000030512044759234414
] |
{
"id": 12,
"code_window": [
" The SQL statements executed to create and configure the role.\n",
" Must be semi-colon separated. The '{{name}}' and '{{password}}'\n",
" values will be substituted.\n",
" </li>\n",
" <li>\n",
" <span class=\"param\">username_length</span>\n",
" <span class=\"param-flags\">optional</span>\n",
" Determines how many characters from the role name will be used\n",
" to form the mysql username interpolated into the '{{name}}' field\n",
" of the sql parameter.\n",
" </li>\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" <span class=\"param\">displayname_length</span>\n"
],
"file_path": "website/source/docs/secrets/mysql/index.html.md",
"type": "replace",
"edit_start_line_idx": 244
} | // THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package iam
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/signer/v4"
"github.com/aws/aws-sdk-go/private/protocol/query"
)
// AWS Identity and Access Management (IAM) is a web service that you can use
// to manage users and user permissions under your AWS account. This guide provides
// descriptions of IAM actions that you can call programmatically. For general
// information about IAM, see AWS Identity and Access Management (IAM) (http://aws.amazon.com/iam/).
// For the user guide for IAM, see Using IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/).
//
// AWS provides SDKs that consist of libraries and sample code for various
// programming languages and platforms (Java, Ruby, .NET, iOS, Android, etc.).
// The SDKs provide a convenient way to create programmatic access to IAM and
// AWS. For example, the SDKs take care of tasks such as cryptographically signing
// requests (see below), managing errors, and retrying requests automatically.
// For information about the AWS SDKs, including how to download and install
// them, see the Tools for Amazon Web Services (http://aws.amazon.com/tools/)
// page.
//
// We recommend that you use the AWS SDKs to make programmatic API calls to
// IAM. However, you can also use the IAM Query API to make direct calls to
// the IAM web service. To learn more about the IAM Query API, see Making Query
// Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
// in the Using IAM guide. IAM supports GET and POST requests for all actions.
// That is, the API does not require you to use GET for some actions and POST
// for others. However, GET requests are subject to the limitation size of a
// URL. Therefore, for operations that require larger sizes, use a POST request.
//
// Signing Requests
//
// Requests must be signed using an access key ID and a secret access key.
// We strongly recommend that you do not use your AWS account access key ID
// and secret access key for everyday work with IAM. You can use the access
// key ID and secret access key for an IAM user or you can use the AWS Security
// Token Service to generate temporary security credentials and use those to
// sign requests.
//
// To sign requests, we recommend that you use Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
// If you have an existing application that uses Signature Version 2, you do
// not have to update it to use Signature Version 4. However, some operations
// now require Signature Version 4. The documentation for operations that require
// version 4 indicate this requirement.
//
// Additional Resources
//
// For more information, see the following:
//
// AWS Security Credentials (http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html).
// This topic provides general information about the types of credentials used
// for accessing AWS.
//
// IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAMBestPractices.html).
// This topic presents a list of suggestions for using the IAM service to help
// secure your AWS resources.
//
// Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html).
// This set of topics walk you through the process of signing a request using
// an access key ID and secret access key.
//The service client's operations are safe to be used concurrently.
// It is not safe to mutate any of the client's properties though.
type IAM struct {
*client.Client
}
// Used for custom client initialization logic
var initClient func(*client.Client)
// Used for custom request initialization logic
var initRequest func(*request.Request)
// A ServiceName is the name of the service the client will make API calls to.
const ServiceName = "iam"
// New creates a new instance of the IAM client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
// // Create a IAM client from just a session.
// svc := iam.New(mySession)
//
// // Create a IAM client with additional configuration
// svc := iam.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *IAM {
c := p.ClientConfig(ServiceName, cfgs...)
return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *IAM {
svc := &IAM{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
SigningRegion: signingRegion,
Endpoint: endpoint,
APIVersion: "2010-05-08",
},
handlers,
),
}
// Handlers
svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
svc.Handlers.Build.PushBackNamed(query.BuildHandler)
svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
// Run custom client initialization if present
if initClient != nil {
initClient(svc.Client)
}
return svc
}
// newRequest creates a new request for a IAM operation and runs any
// custom request initialization.
func (c *IAM) newRequest(op *request.Operation, params, data interface{}) *request.Request {
req := c.NewRequest(op, params, data)
// Run custom request initialization if present
if initRequest != nil {
initRequest(req)
}
return req
}
| vendor/github.com/aws/aws-sdk-go/service/iam/service.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00017241464229300618,
0.00016683779540471733,
0.00016074434097390622,
0.00016723292355891317,
0.0000033462738429079764
] |
{
"id": 12,
"code_window": [
" The SQL statements executed to create and configure the role.\n",
" Must be semi-colon separated. The '{{name}}' and '{{password}}'\n",
" values will be substituted.\n",
" </li>\n",
" <li>\n",
" <span class=\"param\">username_length</span>\n",
" <span class=\"param-flags\">optional</span>\n",
" Determines how many characters from the role name will be used\n",
" to form the mysql username interpolated into the '{{name}}' field\n",
" of the sql parameter.\n",
" </li>\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" <span class=\"param\">displayname_length</span>\n"
],
"file_path": "website/source/docs/secrets/mysql/index.html.md",
"type": "replace",
"edit_start_line_idx": 244
} | package awsec2
import (
"fmt"
"sync/atomic"
"time"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
func pathTidyRoletagBlacklist(b *backend) *framework.Path {
return &framework.Path{
Pattern: "tidy/roletag-blacklist$",
Fields: map[string]*framework.FieldSchema{
"safety_buffer": &framework.FieldSchema{
Type: framework.TypeDurationSecond,
Default: 259200, // 72h
Description: `The amount of extra time that must have passed beyond the roletag
expiration, before it is removed from the backend storage.`,
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.UpdateOperation: b.pathTidyRoletagBlacklistUpdate,
},
HelpSynopsis: pathTidyRoletagBlacklistSyn,
HelpDescription: pathTidyRoletagBlacklistDesc,
}
}
// tidyBlacklistRoleTag is used to clean-up the entries in the role tag blacklist.
func (b *backend) tidyBlacklistRoleTag(s logical.Storage, safety_buffer int) error {
grabbed := atomic.CompareAndSwapUint32(&b.tidyBlacklistCASGuard, 0, 1)
if grabbed {
defer atomic.StoreUint32(&b.tidyBlacklistCASGuard, 0)
} else {
return fmt.Errorf("roletag blacklist tidy operation already running")
}
bufferDuration := time.Duration(safety_buffer) * time.Second
tags, err := s.List("blacklist/roletag/")
if err != nil {
return err
}
for _, tag := range tags {
tagEntry, err := s.Get("blacklist/roletag/" + tag)
if err != nil {
return fmt.Errorf("error fetching tag %s: %s", tag, err)
}
if tagEntry == nil {
return fmt.Errorf("tag entry for tag %s is nil", tag)
}
if tagEntry.Value == nil || len(tagEntry.Value) == 0 {
return fmt.Errorf("found entry for tag %s but actual tag is empty", tag)
}
var result roleTagBlacklistEntry
if err := tagEntry.DecodeJSON(&result); err != nil {
return err
}
if time.Now().UTC().After(result.ExpirationTime.Add(bufferDuration)) {
if err := s.Delete("blacklist/roletag" + tag); err != nil {
return fmt.Errorf("error deleting tag %s from storage: %s", tag, err)
}
}
}
return nil
}
// pathTidyRoletagBlacklistUpdate is used to clean-up the entries in the role tag blacklist.
func (b *backend) pathTidyRoletagBlacklistUpdate(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
return nil, b.tidyBlacklistRoleTag(req.Storage, data.Get("safety_buffer").(int))
}
const pathTidyRoletagBlacklistSyn = `
Clean-up the blacklist role tag entries.
`
const pathTidyRoletagBlacklistDesc = `
When a role tag is blacklisted, the expiration time of the blacklist entry is
set based on the maximum 'max_ttl' value set on: the role, the role tag and the
backend's mount.
When this endpoint is invoked, all the entries that are expired will be deleted.
A 'safety_buffer' (duration in seconds) can be provided, to ensure deletion of
only those entries that are expired before 'safety_buffer' seconds.
`
| builtin/credential/aws-ec2/path_tidy_roletag_blacklist.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.0016820239834487438,
0.0003524567582644522,
0.00016297803085763007,
0.00017100328113883734,
0.0004484869132284075
] |
{
"id": 13,
"code_window": [
" Determines how many characters from the role name will be used\n",
" to form the mysql username interpolated into the '{{name}}' field\n",
" of the sql parameter.\n",
" </li>\n",
" </ul>\n",
" </dd>\n",
"\n",
" <dt>Returns</dt>\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" <li>\n",
" <span class=\"param\">username_length</span>\n",
" <span class=\"param-flags\">optional</span>\n",
" Determines the maximum total length in characters of the\n",
" mysql username interpolated into the '{{name}}' field\n",
" of the sql parameter.\n",
" </li>\n"
],
"file_path": "website/source/docs/secrets/mysql/index.html.md",
"type": "add",
"edit_start_line_idx": 250
} | ---
layout: "docs"
page_title: "Secret Backend: MySQL"
sidebar_current: "docs-secrets-mysql"
description: |-
The MySQL secret backend for Vault generates database credentials to access MySQL.
---
# MySQL Secret Backend
Name: `mysql`
The MySQL secret backend for Vault generates database credentials
dynamically based on configured roles. This means that services that need
to access a database no longer need to hardcode credentials: they can request
them from Vault, and use Vault's leasing mechanism to more easily roll keys.
Additionally, it introduces a new ability: with every service accessing
the database with unique credentials, it makes auditing much easier when
questionable data access is discovered: you can track it down to the specific
instance of a service based on the SQL username.
Vault makes use of its own internal revocation system to ensure that users
become invalid within a reasonable time of the lease expiring.
This page will show a quick start for this backend. For detailed documentation
on every path, use `vault path-help` after mounting the backend.
## Quick Start
The first step to using the mysql backend is to mount it.
Unlike the `generic` backend, the `mysql` backend is not mounted by default.
```
$ vault mount mysql
Successfully mounted 'mysql' at 'mysql'!
```
Next, we must configure Vault to know how to connect to the MySQL
instance. This is done by providing a DSN (Data Source Name):
```
$ vault write mysql/config/connection \
connection_url="root:root@tcp(192.168.33.10:3306)/"
Success! Data written to: mysql/config/connection
```
In this case, we've configured Vault with the user "root" and password "root,
connecting to an instance at "192.168.33.10" on port 3306. It is not necessary
that Vault has the root user, but the user must have privileges to create
other users, namely the `GRANT OPTION` privilege.
Optionally, we can configure the lease settings for credentials generated
by Vault. This is done by writing to the `config/lease` key:
```
$ vault write mysql/config/lease \
lease=1h \
lease_max=24h
Success! Data written to: mysql/config/lease
```
This restricts each credential to being valid or leased for 1 hour
at a time, with a maximum use period of 24 hours. This forces an
application to renew their credentials at least hourly, and to recycle
them once per day.
The next step is to configure a role. A role is a logical name that maps
to a policy used to generate those credentials. For example, lets create
a "readonly" role:
```
$ vault write mysql/roles/readonly \
sql="CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';GRANT SELECT ON *.* TO '{{name}}'@'%';"
Success! Data written to: mysql/roles/readonly
```
By writing to the `roles/readonly` path we are defining the `readonly` role.
This role will be created by evaluating the given `sql` statements. By
default, the `{{name}}` and `{{password}}` fields will be populated by
Vault with dynamically generated values. This SQL statement is creating
the named user, and then granting it `SELECT` or read-only privileges
to tables in the database. More complex `GRANT` queries can be used to
customize the privileges of the role. See the [MySQL manual](https://dev.mysql.com/doc/refman/5.7/en/grant.html)
for more information.
To generate a new set of credentials, we simply read from that role:
```
$ vault read mysql/creds/readonly
Key Value
lease_id mysql/creds/readonly/bd404e98-0f35-b378-269a-b7770ef01897
lease_duration 3600
password 132ae3ef-5a64-7499-351e-bfe59f3a2a21
username root-aefa635a-18
```
By reading from the `creds/readonly` path, Vault has generated a new
set of credentials using the `readonly` role configuration. Here we
see the dynamically generated username and password, along with a one
hour lease.
Using ACLs, it is possible to restrict using the mysql backend such
that trusted operators can manage the role definitions, and both
users and applications are restricted in the credentials they are
allowed to read.
Optionally, you may configure the number of character from the role
name that are truncated to form the mysql usernamed interpolated into
the `{{name}}` field: the default is 10. Note that versions of
mysql prior to 5.8 have a 16 character total limit on user names, so
it is probably not safe to increase this above the default on versions
prior to that.
## API
### /mysql/config/connection
#### POST
<dl class="api">
<dt>Description</dt>
<dd>
Configures the connection DSN used to communicate with MySQL.
This is a root protected endpoint.
</dd>
<dt>Method</dt>
<dd>POST</dd>
<dt>URL</dt>
<dd>`/mysql/config/connection`</dd>
<dt>Parameters</dt>
<dd>
<ul>
<li>
<span class="param">connection_url</span>
<span class="param-flags">required</span>
The MySQL DSN
</li>
</ul>
</dd>
<dd>
<ul>
<li>
<span class="param">value</span>
<span class="param-flags">optional</span>
</li>
</ul>
</dd>
<dd>
<ul>
<li>
<span class="param">max_open_connections</span>
<span class="param-flags">optional</span>
Maximum number of open connections to the database.
Defaults to 2.
</li>
</ul>
</dd>
<dd>
<ul>
<li>
<span class="param">verify-connection</span>
<span class="param-flags">optional</span>
If set, connection_url is verified by actually connecting to the database.
Defaults to true.
</li>
</ul>
</dd>
<dt>Returns</dt>
<dd>
A `204` response code.
</dd>
</dl>
### /mysql/config/lease
#### POST
<dl class="api">
<dt>Description</dt>
<dd>
Configures the lease settings for generated credentials.
If not configured, leases default to 1 hour. This is a root
protected endpoint.
</dd>
<dt>Method</dt>
<dd>POST</dd>
<dt>URL</dt>
<dd>`/mysql/config/lease`</dd>
<dt>Parameters</dt>
<dd>
<ul>
<li>
<span class="param">lease</span>
<span class="param-flags">required</span>
The lease value provided as a string duration
with time suffix. Hour is the largest suffix.
</li>
<li>
<span class="param">lease_max</span>
<span class="param-flags">required</span>
The maximum lease value provided as a string duration
with time suffix. Hour is the largest suffix.
</li>
</ul>
</dd>
<dt>Returns</dt>
<dd>
A `204` response code.
</dd>
</dl>
### /mysql/roles/
#### POST
<dl class="api">
<dt>Description</dt>
<dd>
Creates or updates the role definition.
</dd>
<dt>Method</dt>
<dd>POST</dd>
<dt>URL</dt>
<dd>`/mysql/roles/<name>`</dd>
<dt>Parameters</dt>
<dd>
<ul>
<li>
<span class="param">sql</span>
<span class="param-flags">required</span>
The SQL statements executed to create and configure the role.
Must be semi-colon separated. The '{{name}}' and '{{password}}'
values will be substituted.
</li>
<li>
<span class="param">username_length</span>
<span class="param-flags">optional</span>
Determines how many characters from the role name will be used
to form the mysql username interpolated into the '{{name}}' field
of the sql parameter.
</li>
</ul>
</dd>
<dt>Returns</dt>
<dd>
A `204` response code.
</dd>
</dl>
#### GET
<dl class="api">
<dt>Description</dt>
<dd>
Queries the role definition.
</dd>
<dt>Method</dt>
<dd>GET</dd>
<dt>URL</dt>
<dd>`/mysql/roles/<name>`</dd>
<dt>Parameters</dt>
<dd>
None
</dd>
<dt>Returns</dt>
<dd>
```javascript
{
"data": {
"sql": "CREATE USER..."
}
}
```
</dd>
</dl>
#### LIST
<dl class="api">
<dt>Description</dt>
<dd>
Returns a list of available roles. Only the role names are returned, not
any values.
</dd>
<dt>Method</dt>
<dd>GET</dd>
<dt>URL</dt>
<dd>`/roles/?list=true`</dd>
<dt>Parameters</dt>
<dd>
None
</dd>
<dt>Returns</dt>
<dd>
```javascript
{
"auth": null,
"data": {
"keys": ["dev", "prod"]
},
"lease_duration": 2592000,
"lease_id": "",
"renewable": false
}
```
</dd>
</dl>
#### DELETE
<dl class="api">
<dt>Description</dt>
<dd>
Deletes the role definition.
</dd>
<dt>Method</dt>
<dd>DELETE</dd>
<dt>URL</dt>
<dd>`/mysql/roles/<name>`</dd>
<dt>Parameters</dt>
<dd>
None
</dd>
<dt>Returns</dt>
<dd>
A `204` response code.
</dd>
</dl>
### /mysql/creds/
#### GET
<dl class="api">
<dt>Description</dt>
<dd>
Generates a new set of dynamic credentials based on the named role.
</dd>
<dt>Method</dt>
<dd>GET</dd>
<dt>URL</dt>
<dd>`/mysql/creds/<name>`</dd>
<dt>Parameters</dt>
<dd>
None
</dd>
<dt>Returns</dt>
<dd>
```javascript
{
"data": {
"username": "root-aefa635a-18",
"password": "132ae3ef-5a64-7499-351e-bfe59f3a2a21"
}
}
```
</dd>
</dl>
| website/source/docs/secrets/mysql/index.html.md | 1 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.0400894396007061,
0.001518203760497272,
0.000161985561135225,
0.00037635385524481535,
0.006192504428327084
] |
{
"id": 13,
"code_window": [
" Determines how many characters from the role name will be used\n",
" to form the mysql username interpolated into the '{{name}}' field\n",
" of the sql parameter.\n",
" </li>\n",
" </ul>\n",
" </dd>\n",
"\n",
" <dt>Returns</dt>\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" <li>\n",
" <span class=\"param\">username_length</span>\n",
" <span class=\"param-flags\">optional</span>\n",
" Determines the maximum total length in characters of the\n",
" mysql username interpolated into the '{{name}}' field\n",
" of the sql parameter.\n",
" </li>\n"
],
"file_path": "website/source/docs/secrets/mysql/index.html.md",
"type": "add",
"edit_start_line_idx": 250
} | // mksyscall.pl -l32 -arm syscall_bsd.go syscall_freebsd.go syscall_freebsd_arm.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
// +build arm,freebsd
package unix
import (
"syscall"
"unsafe"
)
var _ syscall.Errno
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func setgroups(ngid int, gid *_Gid_t) (err error) {
_, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
wpid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
_, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
_, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func socket(domain int, typ int, proto int) (fd int, err error) {
r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
_, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
_, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
_, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
_, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Shutdown(s int, how int) (err error) {
_, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
_, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// recvfrom wraps SYS_RECVFROM. p receives the payload; from and fromlen,
// when non-nil, are filled in by the kernel with the sender's address.
// A zero-length p still passes a valid pointer (&_zero) so the kernel
// never sees a nil buffer address. n is the number of bytes received.
func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
	var _p0 unsafe.Pointer
	if len(p) > 0 {
		_p0 = unsafe.Pointer(&p[0])
	} else {
		_p0 = unsafe.Pointer(&_zero)
	}
	r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
	n = int(r0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) {
r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// sysctl wraps SYS___SYSCTL. mib is the management-information-base name
// (a sequence of integers identifying a kernel variable); old/oldlen
// receive the current value and new/newlen optionally supply a
// replacement (see sysctl(3)). An empty mib passes &_zero so the kernel
// still receives a non-nil address.
func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
	var _p0 unsafe.Pointer
	if len(mib) > 0 {
		_p0 = unsafe.Pointer(&mib[0])
	} else {
		_p0 = unsafe.Pointer(&_zero)
	}
	_, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
	// use keeps _p0 referenced, so the mib backing array stays live
	// until after the syscall has returned.
	use(_p0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func utimes(path string, timeval *[2]Timeval) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func futimes(fd int, timeval *[2]Timeval) (err error) {
_, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func fcntl(fd int, cmd int, arg int) (val int, err error) {
r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
val = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// pipe wraps SYS_PIPE. FreeBSD returns the two pipe descriptors in the
// syscall's two result registers (r0 = read end r, r1 = write end w)
// rather than filling in a caller-supplied array, so both raw results
// are consumed directly.
func pipe() (r int, w int, err error) {
	r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0)
	r = int(r0)
	w = int(r1)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Access(path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Adjtime(delta *Timeval, olddelta *Timeval) (err error) {
_, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chflags(path string, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chmod(path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chown(path string, uid int, gid int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chroot(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Close(fd int) (err error) {
_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Dup(fd int) (nfd int, err error) {
r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0)
nfd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Dup2(from int, to int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Exit(code int) {
Syscall(SYS_EXIT, uintptr(code), 0, 0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ExtattrGetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(attrname)
if err != nil {
return
}
r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0)
use(unsafe.Pointer(_p0))
ret = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ExtattrSetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(attrname)
if err != nil {
return
}
r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0)
use(unsafe.Pointer(_p0))
ret = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ExtattrDeleteFd(fd int, attrnamespace int, attrname string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(attrname)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_EXTATTR_DELETE_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)))
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ExtattrListFd(fd int, attrnamespace int, data uintptr, nbytes int) (ret int, err error) {
r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FD, uintptr(fd), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0)
ret = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ExtattrGetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(file)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(attrname)
if err != nil {
return
}
r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
ret = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ExtattrSetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(file)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(attrname)
if err != nil {
return
}
r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
ret = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ExtattrDeleteFile(file string, attrnamespace int, attrname string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(file)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(attrname)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_EXTATTR_DELETE_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)))
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ExtattrListFile(file string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(file)
if err != nil {
return
}
r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0)
use(unsafe.Pointer(_p0))
ret = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ExtattrGetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(link)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(attrname)
if err != nil {
return
}
r0, _, e1 := Syscall6(SYS_EXTATTR_GET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
ret = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ExtattrSetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(link)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(attrname)
if err != nil {
return
}
r0, _, e1 := Syscall6(SYS_EXTATTR_SET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
ret = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ExtattrDeleteLink(link string, attrnamespace int, attrname string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(link)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(attrname)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_EXTATTR_DELETE_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)))
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ExtattrListLink(link string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(link)
if err != nil {
return
}
r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0)
use(unsafe.Pointer(_p0))
ret = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Fadvise wraps SYS_POSIX_FADVISE, advising the kernel about the
// expected access pattern for the byte range [offset, offset+length)
// of fd. Both 64-bit arguments are split into low/high halves
// (v, v>>32) for this 32-bit target, and the 0 after fd is padding so
// the first 64-bit value lands on an even register pair (see the
// mksyscall -l32 -arm invocation at the top of the file).
func Fadvise(fd int, offset int64, length int64, advice int) (err error) {
	_, _, e1 := Syscall9(SYS_POSIX_FADVISE, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(length), uintptr(length>>32), uintptr(advice), 0, 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchdir(fd int) (err error) {
_, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchflags(fd int, flags int) (err error) {
_, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchmod(fd int, mode uint32) (err error) {
_, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchown(fd int, uid int, gid int) (err error) {
_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Flock(fd int, how int) (err error) {
_, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fpathconf(fd int, name int) (val int, err error) {
r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0)
val = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstat(fd int, stat *Stat_t) (err error) {
_, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstatfs(fd int, stat *Statfs_t) (err error) {
_, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fsync(fd int) (err error) {
_, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Ftruncate wraps SYS_FTRUNCATE. length is split into low/high halves
// (length, length>>32) for the 32-bit ABI; the 0 after fd is alignment
// padding for the 64-bit argument.
func Ftruncate(fd int, length int64) (err error) {
	_, _, e1 := Syscall6(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Getdirentries wraps SYS_GETDIRENTRIES, reading directory entries from
// fd into buf; basep, when non-nil, is filled in by the kernel with the
// position of the block read (see getdirentries(2)). n is the number of
// bytes placed in buf; an empty buf still passes a valid pointer
// (&_zero).
func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
	var _p0 unsafe.Pointer
	if len(buf) > 0 {
		_p0 = unsafe.Pointer(&buf[0])
	} else {
		_p0 = unsafe.Pointer(&_zero)
	}
	r0, _, e1 := Syscall6(SYS_GETDIRENTRIES, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0)
	n = int(r0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getdtablesize() (size int) {
r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0)
size = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getegid() (egid int) {
r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0)
egid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Geteuid() (uid int) {
r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0)
uid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getgid() (gid int) {
r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0)
gid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpgid(pid int) (pgid int, err error) {
r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0)
pgid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpgrp() (pgrp int) {
r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0)
pgrp = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpid() (pid int) {
r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0)
pid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getppid() (ppid int) {
r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0)
ppid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpriority(which int, who int) (prio int, err error) {
r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0)
prio = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getrlimit(which int, lim *Rlimit) (err error) {
_, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getrusage(who int, rusage *Rusage) (err error) {
_, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getsid(pid int) (sid int, err error) {
r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0)
sid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Gettimeofday(tv *Timeval) (err error) {
_, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getuid() (uid int) {
r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0)
uid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Issetugid wraps SYS_ISSETUGID, reporting whether the process is
// "tainted" (e.g. started setuid/setgid), in which case
// environment-derived input should not be trusted. The raw result is
// folded to a bool: any non-zero value means tainted. The syscall
// cannot fail, so no errno is checked.
func Issetugid() (tainted bool) {
	r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0)
	tainted = bool(r0 != 0)
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Kill(pid int, signum syscall.Signal) (err error) {
_, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Kqueue() (fd int, err error) {
r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0)
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Lchown(path string, uid int, gid int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Link(path string, link string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(link)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Listen(s int, backlog int) (err error) {
_, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Lstat(path string, stat *Stat_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkdir(path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkfifo(path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mknod(path string, mode uint32, dev int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev))
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mlock(b []byte) (err error) {
var _p0 unsafe.Pointer
if len(b) > 0 {
_p0 = unsafe.Pointer(&b[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mlockall(flags int) (err error) {
_, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mprotect(b []byte, prot int) (err error) {
var _p0 unsafe.Pointer
if len(b) > 0 {
_p0 = unsafe.Pointer(&b[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Munlock(b []byte) (err error) {
var _p0 unsafe.Pointer
if len(b) > 0 {
_p0 = unsafe.Pointer(&b[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Munlockall() (err error) {
_, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
_, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Open(path string, mode int, perm uint32) (fd int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
use(unsafe.Pointer(_p0))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Pathconf(path string, name int) (val int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0)
use(unsafe.Pointer(_p0))
val = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Pread(fd int, p []byte, offset int64) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func read(fd int, p []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Readlink(path string, buf []byte) (n int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
var _p1 unsafe.Pointer
if len(buf) > 0 {
_p1 = unsafe.Pointer(&buf[0])
} else {
_p1 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)))
use(unsafe.Pointer(_p0))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Rename(from string, to string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(from)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(to)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Revoke(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Rmdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
// Seek wraps SYS_LSEEK for this 32-bit ARM target: the 64-bit offset is
// split into low/high uintptr halves (offset, offset>>32), and the 0
// after fd pads the argument list so the 64-bit value starts on an even
// register pair (see the mksyscall -l32 -arm invocation at the top of
// the file). The 64-bit result comes back split across r0 (low half)
// and r1 (high half) and is reassembled here.
func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
	r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0)
	newoffset = int64(int64(r1)<<32 | int64(r0))
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) {
_, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setegid(egid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Seteuid(euid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setgid(gid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setlogin(name string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(name)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setpgid(pid int, pgid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setpriority(which int, who int, prio int) (err error) {
_, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setregid(rgid int, egid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setreuid(ruid int, euid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setresgid(rgid int, egid int, sgid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setresuid(ruid int, euid int, suid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setrlimit(which int, lim *Rlimit) (err error) {
_, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setsid() (pid int, err error) {
r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0)
pid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Settimeofday(tp *Timeval) (err error) {
_, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setuid(uid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Stat(path string, stat *Stat_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Statfs(path string, stat *Statfs_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Symlink(path string, link string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(link)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Sync() (err error) {
_, _, e1 := Syscall(SYS_SYNC, 0, 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Truncate(path string, length int64) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Umask(newmask int) (oldmask int) {
r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0)
oldmask = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Undelete(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Unlink(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Unmount(path string, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func write(fd int, p []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) {
r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0)
ret = uintptr(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func munmap(addr uintptr, length uintptr) (err error) {
_, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) {
r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
nfd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
| vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.0016730775823816657,
0.0002394761104369536,
0.00016059970948845148,
0.00016747554764151573,
0.0002584939356893301
] |
{
"id": 13,
"code_window": [
" Determines how many characters from the role name will be used\n",
" to form the mysql username interpolated into the '{{name}}' field\n",
" of the sql parameter.\n",
" </li>\n",
" </ul>\n",
" </dd>\n",
"\n",
" <dt>Returns</dt>\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" <li>\n",
" <span class=\"param\">username_length</span>\n",
" <span class=\"param-flags\">optional</span>\n",
" Determines the maximum total length in characters of the\n",
" mysql username interpolated into the '{{name}}' field\n",
" of the sql parameter.\n",
" </li>\n"
],
"file_path": "website/source/docs/secrets/mysql/index.html.md",
"type": "add",
"edit_start_line_idx": 250
} | // Copyright (c) 2012 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gocql
import (
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"runtime"
"strings"
"sync"
"time"
)
const (
protoDirectionMask = 0x80
protoVersionMask = 0x7F
protoVersion1 = 0x01
protoVersion2 = 0x02
protoVersion3 = 0x03
protoVersion4 = 0x04
maxFrameSize = 256 * 1024 * 1024
)
type protoVersion byte
func (p protoVersion) request() bool {
return p&protoDirectionMask == 0x00
}
func (p protoVersion) response() bool {
return p&protoDirectionMask == 0x80
}
func (p protoVersion) version() byte {
return byte(p) & protoVersionMask
}
func (p protoVersion) String() string {
dir := "REQ"
if p.response() {
dir = "RESP"
}
return fmt.Sprintf("[version=%d direction=%s]", p.version(), dir)
}
type frameOp byte
const (
// header ops
opError frameOp = 0x00
opStartup frameOp = 0x01
opReady frameOp = 0x02
opAuthenticate frameOp = 0x03
opOptions frameOp = 0x05
opSupported frameOp = 0x06
opQuery frameOp = 0x07
opResult frameOp = 0x08
opPrepare frameOp = 0x09
opExecute frameOp = 0x0A
opRegister frameOp = 0x0B
opEvent frameOp = 0x0C
opBatch frameOp = 0x0D
opAuthChallenge frameOp = 0x0E
opAuthResponse frameOp = 0x0F
opAuthSuccess frameOp = 0x10
)
// frameOpNames holds the canonical display name for every known opcode.
var frameOpNames = map[frameOp]string{
	opError:         "ERROR",
	opStartup:       "STARTUP",
	opReady:         "READY",
	opAuthenticate:  "AUTHENTICATE",
	opOptions:       "OPTIONS",
	opSupported:     "SUPPORTED",
	opQuery:         "QUERY",
	opResult:        "RESULT",
	opPrepare:       "PREPARE",
	opExecute:       "EXECUTE",
	opRegister:      "REGISTER",
	opEvent:         "EVENT",
	opBatch:         "BATCH",
	opAuthChallenge: "AUTH_CHALLENGE",
	opAuthResponse:  "AUTH_RESPONSE",
	opAuthSuccess:   "AUTH_SUCCESS",
}

// String implements fmt.Stringer, returning the protocol name of the
// opcode, or UNKNOWN_OP_<n> for values not in the v1-v4 spec.
func (f frameOp) String() string {
	if name, ok := frameOpNames[f]; ok {
		return name
	}
	return fmt.Sprintf("UNKNOWN_OP_%d", f)
}
const (
// result kind
resultKindVoid = 1
resultKindRows = 2
resultKindKeyspace = 3
resultKindPrepared = 4
resultKindSchemaChanged = 5
// rows flags
flagGlobalTableSpec int = 0x01
flagHasMorePages int = 0x02
flagNoMetaData int = 0x04
// query flags
flagValues byte = 0x01
flagSkipMetaData byte = 0x02
flagPageSize byte = 0x04
flagWithPagingState byte = 0x08
flagWithSerialConsistency byte = 0x10
flagDefaultTimestamp byte = 0x20
flagWithNameValues byte = 0x40
// header flags
flagCompress byte = 0x01
flagTracing byte = 0x02
flagCustomPayload byte = 0x04
flagWarning byte = 0x08
)
type Consistency uint16
const (
Any Consistency = 0x00
One Consistency = 0x01
Two Consistency = 0x02
Three Consistency = 0x03
Quorum Consistency = 0x04
All Consistency = 0x05
LocalQuorum Consistency = 0x06
EachQuorum Consistency = 0x07
LocalOne Consistency = 0x0A
)
// consistencyNames maps each Consistency level to its canonical wire name.
// Values 0x08/0x09 are absent: they belong to SerialConsistency.
var consistencyNames = map[Consistency]string{
	Any:         "ANY",
	One:         "ONE",
	Two:         "TWO",
	Three:       "THREE",
	Quorum:      "QUORUM",
	All:         "ALL",
	LocalQuorum: "LOCAL_QUORUM",
	EachQuorum:  "EACH_QUORUM",
	LocalOne:    "LOCAL_ONE",
}

// String implements fmt.Stringer, returning the CQL name of the
// consistency level, or UNKNOWN_CONS_0x<hex> for unrecognised values.
func (c Consistency) String() string {
	if name, ok := consistencyNames[c]; ok {
		return name
	}
	return fmt.Sprintf("UNKNOWN_CONS_0x%x", uint16(c))
}
// consistencyByName maps upper-case CQL consistency names to their levels.
var consistencyByName = map[string]Consistency{
	"ANY":          Any,
	"ONE":          One,
	"TWO":          Two,
	"THREE":        Three,
	"QUORUM":       Quorum,
	"ALL":          All,
	"LOCAL_QUORUM": LocalQuorum,
	"EACH_QUORUM":  EachQuorum,
	"LOCAL_ONE":    LocalOne,
}

// ParseConsistency converts a case-insensitive consistency name (e.g.
// "quorum") into its Consistency value. It panics when s does not name a
// known level, matching the original behaviour of this API.
func ParseConsistency(s string) Consistency {
	if c, ok := consistencyByName[strings.ToUpper(s)]; ok {
		return c
	}
	panic("invalid consistency: " + s)
}
type SerialConsistency uint16
const (
Serial SerialConsistency = 0x08
LocalSerial SerialConsistency = 0x09
)
// String implements fmt.Stringer, returning the CQL name of the serial
// consistency level, or UNKNOWN_SERIAL_CONS_0x<hex> for other values.
func (s SerialConsistency) String() string {
	if s == Serial {
		return "SERIAL"
	}
	if s == LocalSerial {
		return "LOCAL_SERIAL"
	}
	return fmt.Sprintf("UNKNOWN_SERIAL_CONS_0x%x", uint16(s))
}
const (
apacheCassandraTypePrefix = "org.apache.cassandra.db.marshal."
)
var (
ErrFrameTooBig = errors.New("frame length is bigger than the maximum allowed")
)
const maxFrameHeaderSize = 9
// writeInt stores n into the first four bytes of p in big-endian order,
// the encoding of the protocol's [int] type.
func writeInt(p []byte, n int32) {
	v := uint32(n)
	p[0], p[1], p[2], p[3] = byte(v>>24), byte(v>>16), byte(v>>8), byte(v)
}
// readInt decodes a big-endian signed 32-bit value from the first four
// bytes of p (the protocol's [int] type).
func readInt(p []byte) int32 {
	v := uint32(p[0])<<24 | uint32(p[1])<<16 | uint32(p[2])<<8 | uint32(p[3])
	return int32(v)
}
// writeShort stores n into the first two bytes of p in big-endian order,
// the encoding of the protocol's [short] type.
func writeShort(p []byte, n uint16) {
	p[0], p[1] = byte(n>>8), byte(n)
}
// readShort decodes a big-endian unsigned 16-bit value from the first two
// bytes of p (the protocol's [short] type).
func readShort(p []byte) uint16 {
	hi := uint16(p[0])
	lo := uint16(p[1])
	return hi<<8 | lo
}
type frameHeader struct {
version protoVersion
flags byte
stream int
op frameOp
length int
customPayload map[string][]byte
}
func (f frameHeader) String() string {
return fmt.Sprintf("[header version=%s flags=0x%x stream=%d op=%s length=%d]", f.version, f.flags, f.stream, f.op, f.length)
}
func (f frameHeader) Header() frameHeader {
return f
}
const defaultBufSize = 128
var framerPool = sync.Pool{
New: func() interface{} {
return &framer{
wbuf: make([]byte, defaultBufSize),
readBuffer: make([]byte, defaultBufSize),
}
},
}
// a framer is responsible for reading, writing and parsing frames on a single stream
type framer struct {
r io.Reader
w io.Writer
proto byte
// flags are for outgoing flags, enabling compression and tracing etc
flags byte
compres Compressor
headSize int
// if this frame was read then the header will be here
header *frameHeader
// if tracing flag is set this is not nil
traceID []byte
// holds a ref to the whole byte slice for rbuf so that it can be reset to
// 0 after a read.
readBuffer []byte
rbuf []byte
wbuf []byte
}
func newFramer(r io.Reader, w io.Writer, compressor Compressor, version byte) *framer {
f := framerPool.Get().(*framer)
var flags byte
if compressor != nil {
flags |= flagCompress
}
version &= protoVersionMask
headSize := 8
if version > protoVersion2 {
headSize = 9
}
f.compres = compressor
f.proto = version
f.flags = flags
f.headSize = headSize
f.r = r
f.rbuf = f.readBuffer[:0]
f.w = w
f.wbuf = f.wbuf[:0]
f.header = nil
f.traceID = nil
return f
}
type frame interface {
Header() frameHeader
}
// readHeader reads and decodes one frame header from r into head, using p
// as scratch space (callers pass a buffer of at least maxFrameHeaderSize).
// The first byte is read alone to learn the protocol version, which
// determines whether the remaining header is 7 bytes (v1/v2) or 8 (v3+).
func readHeader(r io.Reader, p []byte) (head frameHeader, err error) {
	_, err = io.ReadFull(r, p[:1])
	if err != nil {
		return frameHeader{}, err
	}
	// Strip the request/response direction bit to get the bare version.
	version := p[0] & protoVersionMask
	if version < protoVersion1 || version > protoVersion4 {
		return frameHeader{}, fmt.Errorf("gocql: unsupported response version: %d", version)
	}
	headSize := 9
	if version < protoVersion3 {
		headSize = 8
	}
	_, err = io.ReadFull(r, p[1:headSize])
	if err != nil {
		return frameHeader{}, err
	}
	p = p[:headSize]
	head.version = protoVersion(p[0])
	head.flags = p[1]
	if version > protoVersion2 {
		if len(p) != 9 {
			return frameHeader{}, fmt.Errorf("not enough bytes to read header require 9 got: %d", len(p))
		}
		// v3+: 2-byte signed stream id, 1-byte opcode, 4-byte body length.
		head.stream = int(int16(p[2])<<8 | int16(p[3]))
		head.op = frameOp(p[4])
		head.length = int(readInt(p[5:]))
	} else {
		if len(p) != 8 {
			return frameHeader{}, fmt.Errorf("not enough bytes to read header require 8 got: %d", len(p))
		}
		// v1/v2: 1-byte signed stream id, 1-byte opcode, 4-byte body length.
		head.stream = int(int8(p[2]))
		head.op = frameOp(p[3])
		head.length = int(readInt(p[4:]))
	}
	return head, nil
}
// explicitly enables tracing for the framers outgoing requests
func (f *framer) trace() {
f.flags |= flagTracing
}
// readFrame reads a frame body from the wire into the framer's buffer,
// decompressing it when the header's compress flag is set. The body size
// comes from head.length; oversized frames are drained from the reader (so
// the connection stays usable) and reported as ErrFrameTooBig. On success
// f.header is set to head and the body is available in f.rbuf.
func (f *framer) readFrame(head *frameHeader) error {
	if head.length < 0 {
		return fmt.Errorf("frame body length can not be less than 0: %d", head.length)
	} else if head.length > maxFrameSize {
		// need to free up the connection to be used again
		_, err := io.CopyN(ioutil.Discard, f.r, int64(head.length))
		if err != nil {
			return fmt.Errorf("error whilst trying to discard frame with invalid length: %v", err)
		}
		return ErrFrameTooBig
	}
	// Reuse the existing backing array when it is large enough; otherwise
	// grow readBuffer and keep it for subsequent frames.
	if cap(f.readBuffer) >= head.length {
		f.rbuf = f.readBuffer[:head.length]
	} else {
		f.readBuffer = make([]byte, head.length)
		f.rbuf = f.readBuffer
	}
	// assume the underlying reader takes care of timeouts and retries
	n, err := io.ReadFull(f.r, f.rbuf)
	if err != nil {
		return fmt.Errorf("unable to read frame body: read %d/%d bytes: %v", n, head.length, err)
	}
	if head.flags&flagCompress == flagCompress {
		if f.compres == nil {
			return NewErrProtocol("no compressor available with compressed frame body")
		}
		f.rbuf, err = f.compres.Decode(f.rbuf)
		if err != nil {
			return err
		}
	}
	f.header = head
	return nil
}
// parseFrame decodes the frame body previously read into rbuf into a typed
// frame value, dispatching on the opcode in the header. The read helpers
// used by the per-op parsers signal malformed input by panicking; the
// deferred recover converts such panics back into the returned error while
// letting genuine runtime errors propagate.
func (f *framer) parseFrame() (frame frame, err error) {
	defer func() {
		if r := recover(); r != nil {
			if _, ok := r.(runtime.Error); ok {
				panic(r)
			}
			// NOTE(review): assumes parse helpers always panic with an error
			// value; a panic carrying a non-error would re-panic here — confirm.
			err = r.(error)
		}
	}()
	if f.header.version.request() {
		return nil, NewErrProtocol("got a request frame from server: %v", f.header.version)
	}
	// Optional leading sections, in protocol order: trace id, warnings,
	// custom payload. Each is present only when its header flag is set.
	if f.header.flags&flagTracing == flagTracing {
		f.readTrace()
	}
	if f.header.flags&flagWarning == flagWarning {
		warnings := f.readStringList()
		// what to do with warnings?
		for _, v := range warnings {
			log.Println(v)
		}
	}
	if f.header.flags&flagCustomPayload == flagCustomPayload {
		f.header.customPayload = f.readBytesMap()
	}
	// assumes that the frame body has been read into rbuf
	switch f.header.op {
	case opError:
		frame = f.parseErrorFrame()
	case opReady:
		frame = f.parseReadyFrame()
	case opResult:
		frame, err = f.parseResultFrame()
	case opSupported:
		frame = f.parseSupportedFrame()
	case opAuthenticate:
		frame = f.parseAuthenticateFrame()
	case opAuthChallenge:
		frame = f.parseAuthChallengeFrame()
	case opAuthSuccess:
		frame = f.parseAuthSuccessFrame()
	case opEvent:
		frame = f.parseEventFrame()
	default:
		return nil, NewErrProtocol("unknown op in frame header: %s", f.header.op)
	}
	return
}
func (f *framer) parseErrorFrame() frame {
code := f.readInt()
msg := f.readString()
errD := errorFrame{
frameHeader: *f.header,
code: code,
message: msg,
}
switch code {
case errUnavailable:
cl := f.readConsistency()
required := f.readInt()
alive := f.readInt()
return &RequestErrUnavailable{
errorFrame: errD,
Consistency: cl,
Required: required,
Alive: alive,
}
case errWriteTimeout:
cl := f.readConsistency()
received := f.readInt()
blockfor := f.readInt()
writeType := f.readString()
return &RequestErrWriteTimeout{
errorFrame: errD,
Consistency: cl,
Received: received,
BlockFor: blockfor,
WriteType: writeType,
}
case errReadTimeout:
cl := f.readConsistency()
received := f.readInt()
blockfor := f.readInt()
dataPresent := f.readByte()
return &RequestErrReadTimeout{
errorFrame: errD,
Consistency: cl,
Received: received,
BlockFor: blockfor,
DataPresent: dataPresent,
}
case errAlreadyExists:
ks := f.readString()
table := f.readString()
return &RequestErrAlreadyExists{
errorFrame: errD,
Keyspace: ks,
Table: table,
}
case errUnprepared:
stmtId := f.readShortBytes()
return &RequestErrUnprepared{
errorFrame: errD,
StatementId: copyBytes(stmtId), // defensivly copy
}
case errReadFailure:
res := &RequestErrReadFailure{
errorFrame: errD,
}
res.Consistency = f.readConsistency()
res.Received = f.readInt()
res.BlockFor = f.readInt()
res.DataPresent = f.readByte() != 0
return res
case errWriteFailure:
res := &RequestErrWriteFailure{
errorFrame: errD,
}
res.Consistency = f.readConsistency()
res.Received = f.readInt()
res.BlockFor = f.readInt()
res.NumFailures = f.readInt()
res.WriteType = f.readString()
return res
case errFunctionFailure:
res := RequestErrFunctionFailure{
errorFrame: errD,
}
res.Keyspace = f.readString()
res.Function = f.readString()
res.ArgTypes = f.readStringList()
return res
case errInvalid, errBootstrapping, errConfig, errCredentials, errOverloaded,
errProtocol, errServer, errSyntax, errTruncate, errUnauthorized:
// TODO(zariel): we should have some distinct types for these errors
return errD
default:
panic(fmt.Errorf("unknown error code: 0x%x", errD.code))
}
}
// writeHeader resets the write buffer and appends a frame header for the
// given flags/op/stream. The four length bytes are appended as zeroes and
// patched in later by setLength once the body size is known.
func (f *framer) writeHeader(flags byte, op frameOp, stream int) {
	f.wbuf = f.wbuf[:0]
	f.wbuf = append(f.wbuf,
		f.proto,
		flags,
	)
	// Protocol v3+ uses a 2-byte big-endian stream id; v1/v2 use one byte.
	if f.proto > protoVersion2 {
		f.wbuf = append(f.wbuf,
			byte(stream>>8),
			byte(stream),
		)
	} else {
		f.wbuf = append(f.wbuf,
			byte(stream),
		)
	}
	// Opcode followed by four zero length bytes (filled in by setLength).
	f.wbuf = append(f.wbuf,
		byte(op),
		0,
		0,
		0,
		0,
	)
}
// setLength patches the 4-byte big-endian body length into the header that
// writeHeader appended: the length field starts at offset 4 for protocol
// v1/v2 headers and offset 5 for v3+ (the stream id is one byte wider).
func (f *framer) setLength(length int) {
	offset := 4
	if f.proto > protoVersion2 {
		offset = 5
	}
	for i := 0; i < 4; i++ {
		f.wbuf[offset+i] = byte(length >> uint(24-8*i))
	}
}
// finishWrite completes and sends the frame assembled in wbuf: it
// compresses the body when the compress flag was set, patches the length
// field in the header, and writes the whole frame to the underlying
// writer. Frames larger than maxFrameSize are dropped (the buffer is
// replaced to release the memory) and reported as ErrFrameTooBig.
func (f *framer) finishWrite() error {
	if len(f.wbuf) > maxFrameSize {
		// huge app frame, lets remove it so it doesn't bloat the heap
		f.wbuf = make([]byte, defaultBufSize)
		return ErrFrameTooBig
	}
	// wbuf[1] is the flags byte written by writeHeader.
	if f.wbuf[1]&flagCompress == flagCompress {
		if f.compres == nil {
			panic("compress flag set with no compressor")
		}
		// TODO: only compress frames which are big enough
		compressed, err := f.compres.Encode(f.wbuf[f.headSize:])
		if err != nil {
			return err
		}
		f.wbuf = append(f.wbuf[:f.headSize], compressed...)
	}
	// Length is the (possibly compressed) body only, excluding the header.
	length := len(f.wbuf) - f.headSize
	f.setLength(length)
	_, err := f.w.Write(f.wbuf)
	if err != nil {
		return err
	}
	return nil
}
func (f *framer) readTrace() {
f.traceID = f.readUUID().Bytes()
}
type readyFrame struct {
frameHeader
}
func (f *framer) parseReadyFrame() frame {
return &readyFrame{
frameHeader: *f.header,
}
}
type supportedFrame struct {
frameHeader
supported map[string][]string
}
// TODO: if we move the body buffer onto the frameHeader then we only need a single
// framer, and can move the methods onto the header.
func (f *framer) parseSupportedFrame() frame {
return &supportedFrame{
frameHeader: *f.header,
supported: f.readStringMultiMap(),
}
}
type writeStartupFrame struct {
opts map[string]string
}
func (w writeStartupFrame) String() string {
return fmt.Sprintf("[startup opts=%+v]", w.opts)
}
func (w *writeStartupFrame) writeFrame(framer *framer, streamID int) error {
return framer.writeStartupFrame(streamID, w.opts)
}
func (f *framer) writeStartupFrame(streamID int, options map[string]string) error {
f.writeHeader(f.flags&^flagCompress, opStartup, streamID)
f.writeStringMap(options)
return f.finishWrite()
}
type writePrepareFrame struct {
statement string
}
func (w *writePrepareFrame) writeFrame(framer *framer, streamID int) error {
return framer.writePrepareFrame(streamID, w.statement)
}
func (f *framer) writePrepareFrame(stream int, statement string) error {
f.writeHeader(f.flags, opPrepare, stream)
f.writeLongString(statement)
return f.finishWrite()
}
func (f *framer) readTypeInfo() TypeInfo {
// TODO: factor this out so the same code paths can be used to parse custom
// types and other types, as much of the logic will be duplicated.
id := f.readShort()
simple := NativeType{
proto: f.proto,
typ: Type(id),
}
if simple.typ == TypeCustom {
simple.custom = f.readString()
if cassType := getApacheCassandraType(simple.custom); cassType != TypeCustom {
simple.typ = cassType
}
}
switch simple.typ {
case TypeTuple:
n := f.readShort()
tuple := TupleTypeInfo{
NativeType: simple,
Elems: make([]TypeInfo, n),
}
for i := 0; i < int(n); i++ {
tuple.Elems[i] = f.readTypeInfo()
}
return tuple
case TypeUDT:
udt := UDTTypeInfo{
NativeType: simple,
}
udt.KeySpace = f.readString()
udt.Name = f.readString()
n := f.readShort()
udt.Elements = make([]UDTField, n)
for i := 0; i < int(n); i++ {
field := &udt.Elements[i]
field.Name = f.readString()
field.Type = f.readTypeInfo()
}
return udt
case TypeMap, TypeList, TypeSet:
collection := CollectionType{
NativeType: simple,
}
if simple.typ == TypeMap {
collection.Key = f.readTypeInfo()
}
collection.Elem = f.readTypeInfo()
return collection
}
return simple
}
type preparedMetadata struct {
resultMetadata
// proto v4+
pkeyColumns []int
}
func (r preparedMetadata) String() string {
return fmt.Sprintf("[prepared flags=0x%x pkey=%v paging_state=% X columns=%v col_count=%d actual_col_count=%d]", r.flags, r.pkeyColumns, r.pagingState, r.columns, r.colCount, r.actualColCount)
}
func (f *framer) parsePreparedMetadata() preparedMetadata {
// TODO: deduplicate this from parseMetadata
meta := preparedMetadata{}
meta.flags = f.readInt()
meta.colCount = f.readInt()
if meta.colCount < 0 {
panic(fmt.Errorf("received negative column count: %d", meta.colCount))
}
meta.actualColCount = meta.colCount
if f.proto >= protoVersion4 {
pkeyCount := f.readInt()
pkeys := make([]int, pkeyCount)
for i := 0; i < pkeyCount; i++ {
pkeys[i] = int(f.readShort())
}
meta.pkeyColumns = pkeys
}
if meta.flags&flagHasMorePages == flagHasMorePages {
meta.pagingState = f.readBytes()
}
if meta.flags&flagNoMetaData == flagNoMetaData {
return meta
}
var keyspace, table string
globalSpec := meta.flags&flagGlobalTableSpec == flagGlobalTableSpec
if globalSpec {
keyspace = f.readString()
table = f.readString()
}
var cols []ColumnInfo
if meta.colCount < 1000 {
// preallocate columninfo to avoid excess copying
cols = make([]ColumnInfo, meta.colCount)
for i := 0; i < meta.colCount; i++ {
f.readCol(&cols[i], &meta.resultMetadata, globalSpec, keyspace, table)
}
} else {
// use append, huge number of columns usually indicates a corrupt frame or
// just a huge row.
for i := 0; i < meta.colCount; i++ {
var col ColumnInfo
f.readCol(&col, &meta.resultMetadata, globalSpec, keyspace, table)
cols = append(cols, col)
}
}
meta.columns = cols
return meta
}
type resultMetadata struct {
flags int
// only if flagPageState
pagingState []byte
columns []ColumnInfo
colCount int
// this is a count of the total number of columns which can be scanned,
// it is at minimum len(columns) but may be larger, for instance when a column
// is a UDT or tuple.
actualColCount int
}
func (r resultMetadata) String() string {
return fmt.Sprintf("[metadata flags=0x%x paging_state=% X columns=%v]", r.flags, r.pagingState, r.columns)
}
func (f *framer) readCol(col *ColumnInfo, meta *resultMetadata, globalSpec bool, keyspace, table string) {
if !globalSpec {
col.Keyspace = f.readString()
col.Table = f.readString()
} else {
col.Keyspace = keyspace
col.Table = table
}
col.Name = f.readString()
col.TypeInfo = f.readTypeInfo()
switch v := col.TypeInfo.(type) {
// maybe also UDT
case TupleTypeInfo:
// -1 because we already included the tuple column
meta.actualColCount += len(v.Elems) - 1
}
}
func (f *framer) parseResultMetadata() resultMetadata {
var meta resultMetadata
meta.flags = f.readInt()
meta.colCount = f.readInt()
if meta.colCount < 0 {
panic(fmt.Errorf("received negative column count: %d", meta.colCount))
}
meta.actualColCount = meta.colCount
if meta.flags&flagHasMorePages == flagHasMorePages {
meta.pagingState = f.readBytes()
}
if meta.flags&flagNoMetaData == flagNoMetaData {
return meta
}
var keyspace, table string
globalSpec := meta.flags&flagGlobalTableSpec == flagGlobalTableSpec
if globalSpec {
keyspace = f.readString()
table = f.readString()
}
var cols []ColumnInfo
if meta.colCount < 1000 {
// preallocate columninfo to avoid excess copying
cols = make([]ColumnInfo, meta.colCount)
for i := 0; i < meta.colCount; i++ {
f.readCol(&cols[i], &meta, globalSpec, keyspace, table)
}
} else {
// use append, huge number of columns usually indicates a corrupt frame or
// just a huge row.
for i := 0; i < meta.colCount; i++ {
var col ColumnInfo
f.readCol(&col, &meta, globalSpec, keyspace, table)
cols = append(cols, col)
}
}
meta.columns = cols
return meta
}
type resultVoidFrame struct {
frameHeader
}
func (f *resultVoidFrame) String() string {
return "[result_void]"
}
func (f *framer) parseResultFrame() (frame, error) {
kind := f.readInt()
switch kind {
case resultKindVoid:
return &resultVoidFrame{frameHeader: *f.header}, nil
case resultKindRows:
return f.parseResultRows(), nil
case resultKindKeyspace:
return f.parseResultSetKeyspace(), nil
case resultKindPrepared:
return f.parseResultPrepared(), nil
case resultKindSchemaChanged:
return f.parseResultSchemaChange(), nil
}
return nil, NewErrProtocol("unknown result kind: %x", kind)
}
type resultRowsFrame struct {
frameHeader
meta resultMetadata
// dont parse the rows here as we only need to do it once
numRows int
}
func (f *resultRowsFrame) String() string {
return fmt.Sprintf("[result_rows meta=%v]", f.meta)
}
func (f *framer) parseResultRows() frame {
result := &resultRowsFrame{}
result.meta = f.parseResultMetadata()
result.numRows = f.readInt()
if result.numRows < 0 {
panic(fmt.Errorf("invalid row_count in result frame: %d", result.numRows))
}
return result
}
type resultKeyspaceFrame struct {
frameHeader
keyspace string
}
func (r *resultKeyspaceFrame) String() string {
return fmt.Sprintf("[result_keyspace keyspace=%s]", r.keyspace)
}
func (f *framer) parseResultSetKeyspace() frame {
return &resultKeyspaceFrame{
frameHeader: *f.header,
keyspace: f.readString(),
}
}
type resultPreparedFrame struct {
frameHeader
preparedID []byte
reqMeta preparedMetadata
respMeta resultMetadata
}
func (f *framer) parseResultPrepared() frame {
frame := &resultPreparedFrame{
frameHeader: *f.header,
preparedID: f.readShortBytes(),
reqMeta: f.parsePreparedMetadata(),
}
if f.proto < protoVersion2 {
return frame
}
frame.respMeta = f.parseResultMetadata()
return frame
}
type schemaChangeKeyspace struct {
frameHeader
change string
keyspace string
}
func (f schemaChangeKeyspace) String() string {
return fmt.Sprintf("[event schema_change_keyspace change=%q keyspace=%q]", f.change, f.keyspace)
}
type schemaChangeTable struct {
frameHeader
change string
keyspace string
object string
}
func (f schemaChangeTable) String() string {
return fmt.Sprintf("[event schema_change change=%q keyspace=%q object=%q]", f.change, f.keyspace, f.object)
}
type schemaChangeFunction struct {
frameHeader
change string
keyspace string
name string
args []string
}
func (f *framer) parseResultSchemaChange() frame {
if f.proto <= protoVersion2 {
change := f.readString()
keyspace := f.readString()
table := f.readString()
if table != "" {
return &schemaChangeTable{
frameHeader: *f.header,
change: change,
keyspace: keyspace,
object: table,
}
} else {
return &schemaChangeKeyspace{
frameHeader: *f.header,
change: change,
keyspace: keyspace,
}
}
} else {
change := f.readString()
target := f.readString()
// TODO: could just use a separate type for each target
switch target {
case "KEYSPACE":
frame := &schemaChangeKeyspace{
frameHeader: *f.header,
change: change,
}
frame.keyspace = f.readString()
return frame
case "TABLE", "TYPE":
frame := &schemaChangeTable{
frameHeader: *f.header,
change: change,
}
frame.keyspace = f.readString()
frame.object = f.readString()
return frame
case "FUNCTION", "AGGREGATE":
frame := &schemaChangeFunction{
frameHeader: *f.header,
change: change,
}
frame.keyspace = f.readString()
frame.name = f.readString()
frame.args = f.readStringList()
return frame
default:
panic(fmt.Errorf("gocql: unknown SCHEMA_CHANGE target: %q change: %q", target, change))
}
}
}
type authenticateFrame struct {
frameHeader
class string
}
func (a *authenticateFrame) String() string {
return fmt.Sprintf("[authenticate class=%q]", a.class)
}
func (f *framer) parseAuthenticateFrame() frame {
return &authenticateFrame{
frameHeader: *f.header,
class: f.readString(),
}
}
type authSuccessFrame struct {
frameHeader
data []byte
}
func (a *authSuccessFrame) String() string {
return fmt.Sprintf("[auth_success data=%q]", a.data)
}
func (f *framer) parseAuthSuccessFrame() frame {
return &authSuccessFrame{
frameHeader: *f.header,
data: f.readBytes(),
}
}
type authChallengeFrame struct {
frameHeader
data []byte
}
func (a *authChallengeFrame) String() string {
return fmt.Sprintf("[auth_challenge data=%q]", a.data)
}
func (f *framer) parseAuthChallengeFrame() frame {
return &authChallengeFrame{
frameHeader: *f.header,
data: f.readBytes(),
}
}
type statusChangeEventFrame struct {
frameHeader
change string
host net.IP
port int
}
func (t statusChangeEventFrame) String() string {
return fmt.Sprintf("[status_change change=%s host=%v port=%v]", t.change, t.host, t.port)
}
// essentially the same as statusChange
type topologyChangeEventFrame struct {
frameHeader
change string
host net.IP
port int
}
func (t topologyChangeEventFrame) String() string {
return fmt.Sprintf("[topology_change change=%s host=%v port=%v]", t.change, t.host, t.port)
}
func (f *framer) parseEventFrame() frame {
eventType := f.readString()
switch eventType {
case "TOPOLOGY_CHANGE":
frame := &topologyChangeEventFrame{frameHeader: *f.header}
frame.change = f.readString()
frame.host, frame.port = f.readInet()
return frame
case "STATUS_CHANGE":
frame := &statusChangeEventFrame{frameHeader: *f.header}
frame.change = f.readString()
frame.host, frame.port = f.readInet()
return frame
case "SCHEMA_CHANGE":
// this should work for all versions
return f.parseResultSchemaChange()
default:
panic(fmt.Errorf("gocql: unknown event type: %q", eventType))
}
}
type writeAuthResponseFrame struct {
data []byte
}
func (a *writeAuthResponseFrame) String() string {
return fmt.Sprintf("[auth_response data=%q]", a.data)
}
func (a *writeAuthResponseFrame) writeFrame(framer *framer, streamID int) error {
return framer.writeAuthResponseFrame(streamID, a.data)
}
func (f *framer) writeAuthResponseFrame(streamID int, data []byte) error {
f.writeHeader(f.flags, opAuthResponse, streamID)
f.writeBytes(data)
return f.finishWrite()
}
type queryValues struct {
value []byte
// optional name, will set With names for values flag
name string
}
type queryParams struct {
consistency Consistency
// v2+
skipMeta bool
values []queryValues
pageSize int
pagingState []byte
serialConsistency SerialConsistency
// v3+
defaultTimestamp bool
defaultTimestampValue int64
}
func (q queryParams) String() string {
return fmt.Sprintf("[query_params consistency=%v skip_meta=%v page_size=%d paging_state=%q serial_consistency=%v default_timestamp=%v values=%v]",
q.consistency, q.skipMeta, q.pageSize, q.pagingState, q.serialConsistency, q.defaultTimestamp, q.values)
}
func (f *framer) writeQueryParams(opts *queryParams) {
f.writeConsistency(opts.consistency)
if f.proto == protoVersion1 {
return
}
var flags byte
if len(opts.values) > 0 {
flags |= flagValues
}
if opts.skipMeta {
flags |= flagSkipMetaData
}
if opts.pageSize > 0 {
flags |= flagPageSize
}
if len(opts.pagingState) > 0 {
flags |= flagWithPagingState
}
if opts.serialConsistency > 0 {
flags |= flagWithSerialConsistency
}
names := false
// protoV3 specific things
if f.proto > protoVersion2 {
if opts.defaultTimestamp {
flags |= flagDefaultTimestamp
}
if len(opts.values) > 0 && opts.values[0].name != "" {
flags |= flagWithNameValues
names = true
}
}
f.writeByte(flags)
if n := len(opts.values); n > 0 {
f.writeShort(uint16(n))
for i := 0; i < n; i++ {
if names {
f.writeString(opts.values[i].name)
}
f.writeBytes(opts.values[i].value)
}
}
if opts.pageSize > 0 {
f.writeInt(int32(opts.pageSize))
}
if len(opts.pagingState) > 0 {
f.writeBytes(opts.pagingState)
}
if opts.serialConsistency > 0 {
f.writeConsistency(Consistency(opts.serialConsistency))
}
if f.proto > protoVersion2 && opts.defaultTimestamp {
// timestamp in microseconds
var ts int64
if opts.defaultTimestampValue != 0 {
ts = opts.defaultTimestampValue
} else {
ts = time.Now().UnixNano() / 1000
}
f.writeLong(ts)
}
}
type writeQueryFrame struct {
statement string
params queryParams
}
func (w *writeQueryFrame) String() string {
return fmt.Sprintf("[query statement=%q params=%v]", w.statement, w.params)
}
func (w *writeQueryFrame) writeFrame(framer *framer, streamID int) error {
return framer.writeQueryFrame(streamID, w.statement, &w.params)
}
func (f *framer) writeQueryFrame(streamID int, statement string, params *queryParams) error {
f.writeHeader(f.flags, opQuery, streamID)
f.writeLongString(statement)
f.writeQueryParams(params)
return f.finishWrite()
}
type frameWriter interface {
writeFrame(framer *framer, streamID int) error
}
type frameWriterFunc func(framer *framer, streamID int) error
func (f frameWriterFunc) writeFrame(framer *framer, streamID int) error {
return f(framer, streamID)
}
type writeExecuteFrame struct {
preparedID []byte
params queryParams
}
func (e *writeExecuteFrame) String() string {
return fmt.Sprintf("[execute id=% X params=%v]", e.preparedID, &e.params)
}
func (e *writeExecuteFrame) writeFrame(fr *framer, streamID int) error {
return fr.writeExecuteFrame(streamID, e.preparedID, &e.params)
}
func (f *framer) writeExecuteFrame(streamID int, preparedID []byte, params *queryParams) error {
f.writeHeader(f.flags, opExecute, streamID)
f.writeShortBytes(preparedID)
if f.proto > protoVersion1 {
f.writeQueryParams(params)
} else {
n := len(params.values)
f.writeShort(uint16(n))
for i := 0; i < n; i++ {
f.writeBytes(params.values[i].value)
}
f.writeConsistency(params.consistency)
}
return f.finishWrite()
}
// TODO: can we replace BatchStatemt with batchStatement? As they prety much
// duplicate each other
type batchStatment struct {
preparedID []byte
statement string
values []queryValues
}
type writeBatchFrame struct {
typ BatchType
statements []batchStatment
consistency Consistency
// v3+
serialConsistency SerialConsistency
defaultTimestamp bool
defaultTimestampValue int64
}
func (w *writeBatchFrame) writeFrame(framer *framer, streamID int) error {
return framer.writeBatchFrame(streamID, w)
}
func (f *framer) writeBatchFrame(streamID int, w *writeBatchFrame) error {
f.writeHeader(f.flags, opBatch, streamID)
f.writeByte(byte(w.typ))
n := len(w.statements)
f.writeShort(uint16(n))
var flags byte
for i := 0; i < n; i++ {
b := &w.statements[i]
if len(b.preparedID) == 0 {
f.writeByte(0)
f.writeLongString(b.statement)
} else {
f.writeByte(1)
f.writeShortBytes(b.preparedID)
}
f.writeShort(uint16(len(b.values)))
for j := range b.values {
col := &b.values[j]
if f.proto > protoVersion2 && col.name != "" {
// TODO: move this check into the caller and set a flag on writeBatchFrame
// to indicate using named values
flags |= flagWithNameValues
f.writeString(col.name)
}
f.writeBytes(col.value)
}
}
f.writeConsistency(w.consistency)
if f.proto > protoVersion2 {
if w.serialConsistency > 0 {
flags |= flagWithSerialConsistency
}
if w.defaultTimestamp {
flags |= flagDefaultTimestamp
}
f.writeByte(flags)
if w.serialConsistency > 0 {
f.writeConsistency(Consistency(w.serialConsistency))
}
if w.defaultTimestamp {
var ts int64
if w.defaultTimestampValue != 0 {
ts = w.defaultTimestampValue
} else {
ts = time.Now().UnixNano() / 1000
}
f.writeLong(ts)
}
}
return f.finishWrite()
}
type writeOptionsFrame struct{}
func (w *writeOptionsFrame) writeFrame(framer *framer, streamID int) error {
return framer.writeOptionsFrame(streamID, w)
}
func (f *framer) writeOptionsFrame(stream int, _ *writeOptionsFrame) error {
f.writeHeader(f.flags, opOptions, stream)
return f.finishWrite()
}
type writeRegisterFrame struct {
events []string
}
func (w *writeRegisterFrame) writeFrame(framer *framer, streamID int) error {
return framer.writeRegisterFrame(streamID, w)
}
func (f *framer) writeRegisterFrame(streamID int, w *writeRegisterFrame) error {
f.writeHeader(f.flags, opRegister, streamID)
f.writeStringList(w.events)
return f.finishWrite()
}
func (f *framer) readByte() byte {
if len(f.rbuf) < 1 {
panic(fmt.Errorf("not enough bytes in buffer to read byte require 1 got: %d", len(f.rbuf)))
}
b := f.rbuf[0]
f.rbuf = f.rbuf[1:]
return b
}
func (f *framer) readInt() (n int) {
if len(f.rbuf) < 4 {
panic(fmt.Errorf("not enough bytes in buffer to read int require 4 got: %d", len(f.rbuf)))
}
n = int(int32(f.rbuf[0])<<24 | int32(f.rbuf[1])<<16 | int32(f.rbuf[2])<<8 | int32(f.rbuf[3]))
f.rbuf = f.rbuf[4:]
return
}
func (f *framer) readShort() (n uint16) {
if len(f.rbuf) < 2 {
panic(fmt.Errorf("not enough bytes in buffer to read short require 2 got: %d", len(f.rbuf)))
}
n = uint16(f.rbuf[0])<<8 | uint16(f.rbuf[1])
f.rbuf = f.rbuf[2:]
return
}
func (f *framer) readLong() (n int64) {
if len(f.rbuf) < 8 {
panic(fmt.Errorf("not enough bytes in buffer to read long require 8 got: %d", len(f.rbuf)))
}
n = int64(f.rbuf[0])<<56 | int64(f.rbuf[1])<<48 | int64(f.rbuf[2])<<40 | int64(f.rbuf[3])<<32 |
int64(f.rbuf[4])<<24 | int64(f.rbuf[5])<<16 | int64(f.rbuf[6])<<8 | int64(f.rbuf[7])
f.rbuf = f.rbuf[8:]
return
}
func (f *framer) readString() (s string) {
size := f.readShort()
if len(f.rbuf) < int(size) {
panic(fmt.Errorf("not enough bytes in buffer to read string require %d got: %d", size, len(f.rbuf)))
}
s = string(f.rbuf[:size])
f.rbuf = f.rbuf[size:]
return
}
func (f *framer) readLongString() (s string) {
size := f.readInt()
if len(f.rbuf) < size {
panic(fmt.Errorf("not enough bytes in buffer to read long string require %d got: %d", size, len(f.rbuf)))
}
s = string(f.rbuf[:size])
f.rbuf = f.rbuf[size:]
return
}
func (f *framer) readUUID() *UUID {
if len(f.rbuf) < 16 {
panic(fmt.Errorf("not enough bytes in buffer to read uuid require %d got: %d", 16, len(f.rbuf)))
}
// TODO: how to handle this error, if it is a uuid, then sureley, problems?
u, _ := UUIDFromBytes(f.rbuf[:16])
f.rbuf = f.rbuf[16:]
return &u
}
func (f *framer) readStringList() []string {
size := f.readShort()
l := make([]string, size)
for i := 0; i < int(size); i++ {
l[i] = f.readString()
}
return l
}
func (f *framer) readBytesInternal() ([]byte, error) {
size := f.readInt()
if size < 0 {
return nil, nil
}
if len(f.rbuf) < size {
return nil, fmt.Errorf("not enough bytes in buffer to read bytes require %d got: %d", size, len(f.rbuf))
}
l := f.rbuf[:size]
f.rbuf = f.rbuf[size:]
return l, nil
}
func (f *framer) readBytes() []byte {
l, err := f.readBytesInternal()
if err != nil {
panic(err)
}
return l
}
func (f *framer) readShortBytes() []byte {
size := f.readShort()
if len(f.rbuf) < int(size) {
panic(fmt.Errorf("not enough bytes in buffer to read short bytes: require %d got %d", size, len(f.rbuf)))
}
l := f.rbuf[:size]
f.rbuf = f.rbuf[size:]
return l
}
func (f *framer) readInet() (net.IP, int) {
if len(f.rbuf) < 1 {
panic(fmt.Errorf("not enough bytes in buffer to read inet size require %d got: %d", 1, len(f.rbuf)))
}
size := f.rbuf[0]
f.rbuf = f.rbuf[1:]
if !(size == 4 || size == 16) {
panic(fmt.Errorf("invalid IP size: %d", size))
}
if len(f.rbuf) < 1 {
panic(fmt.Errorf("not enough bytes in buffer to read inet require %d got: %d", size, len(f.rbuf)))
}
ip := make([]byte, size)
copy(ip, f.rbuf[:size])
f.rbuf = f.rbuf[size:]
port := f.readInt()
return net.IP(ip), port
}
func (f *framer) readConsistency() Consistency {
return Consistency(f.readShort())
}
func (f *framer) readStringMap() map[string]string {
size := f.readShort()
m := make(map[string]string)
for i := 0; i < int(size); i++ {
k := f.readString()
v := f.readString()
m[k] = v
}
return m
}
func (f *framer) readBytesMap() map[string][]byte {
size := f.readShort()
m := make(map[string][]byte)
for i := 0; i < int(size); i++ {
k := f.readString()
v := f.readBytes()
m[k] = v
}
return m
}
func (f *framer) readStringMultiMap() map[string][]string {
size := f.readShort()
m := make(map[string][]string)
for i := 0; i < int(size); i++ {
k := f.readString()
v := f.readStringList()
m[k] = v
}
return m
}
func (f *framer) writeByte(b byte) {
f.wbuf = append(f.wbuf, b)
}
func appendBytes(p []byte, d []byte) []byte {
if d == nil {
return appendInt(p, -1)
}
p = appendInt(p, int32(len(d)))
p = append(p, d...)
return p
}
func appendShort(p []byte, n uint16) []byte {
return append(p,
byte(n>>8),
byte(n),
)
}
func appendInt(p []byte, n int32) []byte {
return append(p, byte(n>>24),
byte(n>>16),
byte(n>>8),
byte(n))
}
func appendLong(p []byte, n int64) []byte {
return append(p,
byte(n>>56),
byte(n>>48),
byte(n>>40),
byte(n>>32),
byte(n>>24),
byte(n>>16),
byte(n>>8),
byte(n),
)
}
// these are protocol level binary types
func (f *framer) writeInt(n int32) {
f.wbuf = appendInt(f.wbuf, n)
}
func (f *framer) writeShort(n uint16) {
f.wbuf = appendShort(f.wbuf, n)
}
func (f *framer) writeLong(n int64) {
f.wbuf = appendLong(f.wbuf, n)
}
func (f *framer) writeString(s string) {
f.writeShort(uint16(len(s)))
f.wbuf = append(f.wbuf, s...)
}
func (f *framer) writeLongString(s string) {
f.writeInt(int32(len(s)))
f.wbuf = append(f.wbuf, s...)
}
func (f *framer) writeUUID(u *UUID) {
f.wbuf = append(f.wbuf, u[:]...)
}
func (f *framer) writeStringList(l []string) {
f.writeShort(uint16(len(l)))
for _, s := range l {
f.writeString(s)
}
}
func (f *framer) writeBytes(p []byte) {
// TODO: handle null case correctly,
// [bytes] A [int] n, followed by n bytes if n >= 0. If n < 0,
// no byte should follow and the value represented is `null`.
if p == nil {
f.writeInt(-1)
} else {
f.writeInt(int32(len(p)))
f.wbuf = append(f.wbuf, p...)
}
}
func (f *framer) writeShortBytes(p []byte) {
f.writeShort(uint16(len(p)))
f.wbuf = append(f.wbuf, p...)
}
func (f *framer) writeInet(ip net.IP, port int) {
f.wbuf = append(f.wbuf,
byte(len(ip)),
)
f.wbuf = append(f.wbuf,
[]byte(ip)...,
)
f.writeInt(int32(port))
}
func (f *framer) writeConsistency(cons Consistency) {
f.writeShort(uint16(cons))
}
func (f *framer) writeStringMap(m map[string]string) {
f.writeShort(uint16(len(m)))
for k, v := range m {
f.writeString(k)
f.writeString(v)
}
}
| vendor/github.com/gocql/gocql/frame.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.0009889793582260609,
0.00018070527585223317,
0.0001607572048669681,
0.0001703018497209996,
0.00006643802771577612
] |
{
"id": 13,
"code_window": [
" Determines how many characters from the role name will be used\n",
" to form the mysql username interpolated into the '{{name}}' field\n",
" of the sql parameter.\n",
" </li>\n",
" </ul>\n",
" </dd>\n",
"\n",
" <dt>Returns</dt>\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" <li>\n",
" <span class=\"param\">username_length</span>\n",
" <span class=\"param-flags\">optional</span>\n",
" Determines the maximum total length in characters of the\n",
" mysql username interpolated into the '{{name}}' field\n",
" of the sql parameter.\n",
" </li>\n"
],
"file_path": "website/source/docs/secrets/mysql/index.html.md",
"type": "add",
"edit_start_line_idx": 250
} | package vault
import (
"reflect"
"testing"
"github.com/hashicorp/vault/logical"
)
func mockPolicyStore(t *testing.T) *PolicyStore {
_, barrier, _ := mockBarrier(t)
view := NewBarrierView(barrier, "foo/")
p := NewPolicyStore(view, logical.TestSystemView())
return p
}
func mockPolicyStoreNoCache(t *testing.T) *PolicyStore {
sysView := logical.TestSystemView()
sysView.CachingDisabledVal = true
_, barrier, _ := mockBarrier(t)
view := NewBarrierView(barrier, "foo/")
p := NewPolicyStore(view, sysView)
return p
}
func TestPolicyStore_Root(t *testing.T) {
ps := mockPolicyStore(t)
// Get should return a special policy
p, err := ps.GetPolicy("root")
if err != nil {
t.Fatalf("err: %v", err)
}
if p == nil {
t.Fatalf("bad: %v", p)
}
if p.Name != "root" {
t.Fatalf("bad: %v", p)
}
// Set should fail
err = ps.SetPolicy(p)
if err.Error() != "cannot update root policy" {
t.Fatalf("err: %v", err)
}
// Delete should fail
err = ps.DeletePolicy("root")
if err.Error() != "cannot delete root policy" {
t.Fatalf("err: %v", err)
}
}
func TestPolicyStore_CRUD(t *testing.T) {
ps := mockPolicyStore(t)
testPolicyStore_CRUD(t, ps)
ps = mockPolicyStoreNoCache(t)
testPolicyStore_CRUD(t, ps)
}
func testPolicyStore_CRUD(t *testing.T, ps *PolicyStore) {
// Get should return nothing
p, err := ps.GetPolicy("dev")
if err != nil {
t.Fatalf("err: %v", err)
}
if p != nil {
t.Fatalf("bad: %v", p)
}
// Delete should be no-op
err = ps.DeletePolicy("dev")
if err != nil {
t.Fatalf("err: %v", err)
}
// List should be blank
out, err := ps.ListPolicies()
if err != nil {
t.Fatalf("err: %v", err)
}
if len(out) != 0 {
t.Fatalf("bad: %v", out)
}
// Set should work
policy, _ := Parse(aclPolicy)
err = ps.SetPolicy(policy)
if err != nil {
t.Fatalf("err: %v", err)
}
// Get should work
p, err = ps.GetPolicy("dev")
if err != nil {
t.Fatalf("err: %v", err)
}
if !reflect.DeepEqual(p, policy) {
t.Fatalf("bad: %v", p)
}
// List should be one element
out, err = ps.ListPolicies()
if err != nil {
t.Fatalf("err: %v", err)
}
if len(out) != 1 || out[0] != "dev" {
t.Fatalf("bad: %v", out)
}
// Delete should be clear the entry
err = ps.DeletePolicy("dev")
if err != nil {
t.Fatalf("err: %v", err)
}
// Get should fail
p, err = ps.GetPolicy("dev")
if err != nil {
t.Fatalf("err: %v", err)
}
if p != nil {
t.Fatalf("bad: %v", p)
}
}
// Test predefined policy handling
func TestPolicyStore_Predefined(t *testing.T) {
core, _, _ := TestCoreUnsealed(t)
// Ensure both default policies are created
err := core.setupPolicyStore()
if err != nil {
t.Fatalf("err: %v", err)
}
// List should be two elements
out, err := core.policyStore.ListPolicies()
if err != nil {
t.Fatalf("err: %v", err)
}
if len(out) != 2 || out[0] != "default" || out[1] != "response-wrapping" {
t.Fatalf("bad: %v", out)
}
pCubby, err := core.policyStore.GetPolicy("response-wrapping")
if err != nil {
t.Fatalf("err: %v", err)
}
if pCubby.Raw != cubbyholeResponseWrappingPolicy {
t.Fatalf("bad: expected\n%s\ngot\n%s\n", cubbyholeResponseWrappingPolicy, pCubby.Raw)
}
pRoot, err := core.policyStore.GetPolicy("root")
if err != nil {
t.Fatalf("err: %v", err)
}
err = core.policyStore.SetPolicy(pCubby)
if err == nil {
t.Fatalf("expected err setting %s", pCubby.Name)
}
err = core.policyStore.SetPolicy(pRoot)
if err == nil {
t.Fatalf("expected err setting %s", pRoot.Name)
}
err = core.policyStore.DeletePolicy(pCubby.Name)
if err == nil {
t.Fatalf("expected err deleting %s", pCubby.Name)
}
err = core.policyStore.DeletePolicy(pRoot.Name)
if err == nil {
t.Fatalf("expected err deleting %s", pRoot.Name)
}
}
func TestPolicyStore_ACL(t *testing.T) {
ps := mockPolicyStore(t)
policy, _ := Parse(aclPolicy)
err := ps.SetPolicy(policy)
if err != nil {
t.Fatalf("err: %v", err)
}
policy, _ = Parse(aclPolicy2)
err = ps.SetPolicy(policy)
if err != nil {
t.Fatalf("err: %v", err)
}
acl, err := ps.ACL("dev", "ops")
if err != nil {
t.Fatalf("err: %v", err)
}
testLayeredACL(t, acl)
}
func TestPolicyStore_v1Upgrade(t *testing.T) {
ps := mockPolicyStore(t)
// Put a V1 record
raw := `path "foo" { policy = "read" }`
ps.view.Put(&logical.StorageEntry{Key: "old", Value: []byte(raw)})
// Do a read
p, err := ps.GetPolicy("old")
if err != nil {
t.Fatalf("err: %v", err)
}
if p == nil || len(p.Paths) != 1 {
t.Fatalf("bad policy: %#v", p)
}
// Check that glob is enabled
if !p.Paths[0].Glob {
t.Fatalf("should enable glob")
}
}
| vault/policy_store_test.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00020198033598717302,
0.00017334456788375974,
0.0001603019773028791,
0.00016996689373627305,
0.00000928180725168204
] |
{
"id": 14,
"code_window": [
"\n",
" ```javascript\n",
" {\n",
" \"data\": {\n",
" \"username\": \"root-aefa635a-18\",\n",
" \"password\": \"132ae3ef-5a64-7499-351e-bfe59f3a2a21\"\n",
" }\n",
" }\n",
" ```\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"username\": \"rolename-aefa635a-18\",\n"
],
"file_path": "website/source/docs/secrets/mysql/index.html.md",
"type": "replace",
"edit_start_line_idx": 381
} | package mysql
import (
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
func pathListRoles(b *backend) *framework.Path {
return &framework.Path{
Pattern: "roles/?$",
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ListOperation: b.pathRoleList,
},
HelpSynopsis: pathRoleHelpSyn,
HelpDescription: pathRoleHelpDesc,
}
}
func pathRoles(b *backend) *framework.Path {
return &framework.Path{
Pattern: "roles/" + framework.GenericNameRegex("name"),
Fields: map[string]*framework.FieldSchema{
"name": &framework.FieldSchema{
Type: framework.TypeString,
Description: "Name of the role.",
},
"sql": &framework.FieldSchema{
Type: framework.TypeString,
Description: "SQL string to create a user. See help for more info.",
},
"username_length": &framework.FieldSchema{
Type: framework.TypeInt,
Description: "number of characters to truncate generated mysql usernames to (default 10)",
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ReadOperation: b.pathRoleRead,
logical.UpdateOperation: b.pathRoleCreate,
logical.DeleteOperation: b.pathRoleDelete,
},
HelpSynopsis: pathRoleHelpSyn,
HelpDescription: pathRoleHelpDesc,
}
}
func (b *backend) Role(s logical.Storage, n string) (*roleEntry, error) {
entry, err := s.Get("role/" + n)
if err != nil {
return nil, err
}
if entry == nil {
return nil, nil
}
var result roleEntry
if err := entry.DecodeJSON(&result); err != nil {
return nil, err
}
return &result, nil
}
func (b *backend) pathRoleDelete(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
err := req.Storage.Delete("role/" + data.Get("name").(string))
if err != nil {
return nil, err
}
return nil, nil
}
func (b *backend) pathRoleRead(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
role, err := b.Role(req.Storage, data.Get("name").(string))
if err != nil {
return nil, err
}
if role == nil {
return nil, nil
}
return &logical.Response{
Data: map[string]interface{}{
"sql": role.SQL,
},
}, nil
}
func (b *backend) pathRoleList(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
entries, err := req.Storage.List("role/")
if err != nil {
return nil, err
}
return logical.ListResponse(entries), nil
}
func (b *backend) pathRoleCreate(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
name := data.Get("name").(string)
sql := data.Get("sql").(string)
username_length := data.Get("username_length").(int)
// Get our connection
db, err := b.DB(req.Storage)
if err != nil {
return nil, err
}
// Test the query by trying to prepare it
for _, query := range SplitSQL(sql) {
stmt, err := db.Prepare(Query(query, map[string]string{
"name": "foo",
"password": "bar",
}))
if err != nil {
return logical.ErrorResponse(fmt.Sprintf(
"Error testing query: %s", err)), nil
}
stmt.Close()
}
// Store it
entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{
SQL: sql,
USERNAME_LENGTH: username_length,
})
if err != nil {
return nil, err
}
if err := req.Storage.Put(entry); err != nil {
return nil, err
}
return nil, nil
}
type roleEntry struct {
SQL string `json:"sql"`
USERNAME_LENGTH int `json:"username_length"`
}
const pathRoleHelpSyn = `
Manage the roles that can be created with this backend.
`
const pathRoleHelpDesc = `
This path lets you manage the roles that can be created with this backend.
The "sql" parameter customizes the SQL string used to create the role.
This can be a sequence of SQL queries, each semi-colon seperated. Some
substitution will be done to the SQL string for certain keys.
The names of the variables must be surrounded by "{{" and "}}" to be replaced.
* "name" - The random username generated for the DB user.
* "password" - The random password generated for the DB user.
Example of a decent SQL query to use:
CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';
GRANT ALL ON db1.* TO '{{name}}'@'%';
Note the above user would be able to access anything in db1. Please see the MySQL
manual on the GRANT command to learn how to do more fine grained access.
The "username_length" parameter determines how many characters of the
role name will be used in creating the generated mysql username; the
default is 10. Note that mysql versions prior to 5.8 have a 16 character
total limit on usernames.
`
| builtin/logical/mysql/path_roles.go | 1 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.0002706894010771066,
0.0001757253921823576,
0.00016304968448821455,
0.00016756211698520929,
0.000023879063519416377
] |
{
"id": 14,
"code_window": [
"\n",
" ```javascript\n",
" {\n",
" \"data\": {\n",
" \"username\": \"root-aefa635a-18\",\n",
" \"password\": \"132ae3ef-5a64-7499-351e-bfe59f3a2a21\"\n",
" }\n",
" }\n",
" ```\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"username\": \"rolename-aefa635a-18\",\n"
],
"file_path": "website/source/docs/secrets/mysql/index.html.md",
"type": "replace",
"edit_start_line_idx": 381
} | package pki
import (
"encoding/base64"
"fmt"
"time"
"github.com/hashicorp/vault/helper/certutil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
func pathIssue(b *backend) *framework.Path {
ret := &framework.Path{
Pattern: "issue/" + framework.GenericNameRegex("role"),
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.UpdateOperation: b.pathIssue,
},
HelpSynopsis: pathIssueHelpSyn,
HelpDescription: pathIssueHelpDesc,
}
ret.Fields = addNonCACommonFields(map[string]*framework.FieldSchema{})
return ret
}
func pathSign(b *backend) *framework.Path {
ret := &framework.Path{
Pattern: "sign/" + framework.GenericNameRegex("role"),
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.UpdateOperation: b.pathSign,
},
HelpSynopsis: pathSignHelpSyn,
HelpDescription: pathSignHelpDesc,
}
ret.Fields = addNonCACommonFields(map[string]*framework.FieldSchema{})
ret.Fields["csr"] = &framework.FieldSchema{
Type: framework.TypeString,
Default: "",
Description: `PEM-format CSR to be signed.`,
}
return ret
}
func pathSignVerbatim(b *backend) *framework.Path {
ret := &framework.Path{
Pattern: "sign-verbatim/" + framework.GenericNameRegex("role"),
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.UpdateOperation: b.pathSignVerbatim,
},
HelpSynopsis: pathSignHelpSyn,
HelpDescription: pathSignHelpDesc,
}
ret.Fields = addNonCACommonFields(map[string]*framework.FieldSchema{})
ret.Fields["csr"] = &framework.FieldSchema{
Type: framework.TypeString,
Default: "",
Description: `PEM-format CSR to be signed. Values will be
taken verbatim from the CSR, except for
basic constraints.`,
}
return ret
}
// pathIssue issues a certificate and private key from given parameters,
// subject to role restrictions
func (b *backend) pathIssue(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
roleName := data.Get("role").(string)
// Get the role
role, err := b.getRole(req.Storage, roleName)
if err != nil {
return nil, err
}
if role == nil {
return logical.ErrorResponse(fmt.Sprintf("Unknown role: %s", roleName)), nil
}
return b.pathIssueSignCert(req, data, role, false, false)
}
// pathSign issues a certificate from a submitted CSR, subject to role
// restrictions
func (b *backend) pathSign(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
roleName := data.Get("role").(string)
// Get the role
role, err := b.getRole(req.Storage, roleName)
if err != nil {
return nil, err
}
if role == nil {
return logical.ErrorResponse(fmt.Sprintf("Unknown role: %s", roleName)), nil
}
return b.pathIssueSignCert(req, data, role, true, false)
}
// pathSignVerbatim issues a certificate from a submitted CSR, *not* subject to
// role restrictions
func (b *backend) pathSignVerbatim(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
ttl := b.System().DefaultLeaseTTL()
role := &roleEntry{
TTL: ttl.String(),
AllowLocalhost: true,
AllowAnyName: true,
AllowIPSANs: true,
EnforceHostnames: false,
KeyType: "any",
}
return b.pathIssueSignCert(req, data, role, true, true)
}
func (b *backend) pathIssueSignCert(
req *logical.Request, data *framework.FieldData, role *roleEntry, useCSR, useCSRValues bool) (*logical.Response, error) {
format := getFormat(data)
if format == "" {
return logical.ErrorResponse(
`The "format" path parameter must be "pem", "der", or "pem_bundle"`), nil
}
var caErr error
signingBundle, caErr := fetchCAInfo(req)
switch caErr.(type) {
case certutil.UserError:
return nil, certutil.UserError{Err: fmt.Sprintf(
"Could not fetch the CA certificate (was one set?): %s", caErr)}
case certutil.InternalError:
return nil, certutil.InternalError{Err: fmt.Sprintf(
"Error fetching CA certificate: %s", caErr)}
}
var parsedBundle *certutil.ParsedCertBundle
var err error
if useCSR {
parsedBundle, err = signCert(b, role, signingBundle, false, useCSRValues, req, data)
} else {
parsedBundle, err = generateCert(b, role, signingBundle, false, req, data)
}
if err != nil {
switch err.(type) {
case certutil.UserError:
return logical.ErrorResponse(err.Error()), nil
case certutil.InternalError:
return nil, err
}
}
cb, err := parsedBundle.ToCertBundle()
if err != nil {
return nil, fmt.Errorf("Error converting raw cert bundle to cert bundle: %s", err)
}
resp := b.Secret(SecretCertsType).Response(
map[string]interface{}{
"certificate": cb.Certificate,
"issuing_ca": cb.IssuingCA,
"serial_number": cb.SerialNumber,
},
map[string]interface{}{
"serial_number": cb.SerialNumber,
})
switch format {
case "pem":
resp.Data["issuing_ca"] = cb.IssuingCA
resp.Data["certificate"] = cb.Certificate
if !useCSR {
resp.Data["private_key"] = cb.PrivateKey
resp.Data["private_key_type"] = cb.PrivateKeyType
}
case "pem_bundle":
resp.Data["issuing_ca"] = cb.IssuingCA
resp.Data["certificate"] = fmt.Sprintf("%s\n%s", cb.Certificate, cb.IssuingCA)
if !useCSR {
resp.Data["private_key"] = cb.PrivateKey
resp.Data["private_key_type"] = cb.PrivateKeyType
resp.Data["certificate"] = fmt.Sprintf("%s\n%s\n%s", cb.PrivateKey, cb.Certificate, cb.IssuingCA)
}
case "der":
resp.Data["certificate"] = base64.StdEncoding.EncodeToString(parsedBundle.CertificateBytes)
resp.Data["issuing_ca"] = base64.StdEncoding.EncodeToString(parsedBundle.IssuingCABytes)
if !useCSR {
resp.Data["private_key"] = base64.StdEncoding.EncodeToString(parsedBundle.PrivateKeyBytes)
}
}
resp.Secret.TTL = parsedBundle.Certificate.NotAfter.Sub(time.Now())
err = req.Storage.Put(&logical.StorageEntry{
Key: "certs/" + cb.SerialNumber,
Value: parsedBundle.CertificateBytes,
})
if err != nil {
return nil, fmt.Errorf("Unable to store certificate locally")
}
return resp, nil
}
const pathIssueHelpSyn = `
Request a certificate using a certain role with the provided details.
`
const pathIssueHelpDesc = `
This path allows requesting a certificate to be issued according to the
policy of the given role. The certificate will only be issued if the
requested details are allowed by the role policy.
This path returns a certificate and a private key. If you want a workflow
that does not expose a private key, generate a CSR locally and use the
sign path instead.
`
const pathSignHelpSyn = `
Request certificates using a certain role with the provided details.
`
const pathSignHelpDesc = `
This path allows requesting certificates to be issued according to the
policy of the given role. The certificate will only be issued if the
requested common name is allowed by the role policy.
This path requires a CSR; if you want Vault to generate a private key
for you, use the issue path instead.
`
| builtin/logical/pki/path_issue_sign.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.00017463573021814227,
0.0001690784702077508,
0.00016214424977079034,
0.00016926784883253276,
0.000003063265239688917
] |
{
"id": 14,
"code_window": [
"\n",
" ```javascript\n",
" {\n",
" \"data\": {\n",
" \"username\": \"root-aefa635a-18\",\n",
" \"password\": \"132ae3ef-5a64-7499-351e-bfe59f3a2a21\"\n",
" }\n",
" }\n",
" ```\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"username\": \"rolename-aefa635a-18\",\n"
],
"file_path": "website/source/docs/secrets/mysql/index.html.md",
"type": "replace",
"edit_start_line_idx": 381
} | package pq
import (
"bytes"
"database/sql/driver"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"math"
"strconv"
"strings"
"sync"
"time"
"github.com/lib/pq/oid"
)
func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte {
switch v := x.(type) {
case []byte:
return v
default:
return encode(parameterStatus, x, oid.T_unknown)
}
}
func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte {
switch v := x.(type) {
case int64:
return strconv.AppendInt(nil, v, 10)
case float64:
return strconv.AppendFloat(nil, v, 'f', -1, 64)
case []byte:
if pgtypOid == oid.T_bytea {
return encodeBytea(parameterStatus.serverVersion, v)
}
return v
case string:
if pgtypOid == oid.T_bytea {
return encodeBytea(parameterStatus.serverVersion, []byte(v))
}
return []byte(v)
case bool:
return strconv.AppendBool(nil, v)
case time.Time:
return formatTs(v)
default:
errorf("encode: unknown type for %T", v)
}
panic("not reached")
}
func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} {
switch f {
case formatBinary:
return binaryDecode(parameterStatus, s, typ)
case formatText:
return textDecode(parameterStatus, s, typ)
default:
panic("not reached")
}
}
func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} {
switch typ {
case oid.T_bytea:
return s
case oid.T_int8:
return int64(binary.BigEndian.Uint64(s))
case oid.T_int4:
return int64(int32(binary.BigEndian.Uint32(s)))
case oid.T_int2:
return int64(int16(binary.BigEndian.Uint16(s)))
default:
errorf("don't know how to decode binary parameter of type %d", uint32(typ))
}
panic("not reached")
}
func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} {
switch typ {
case oid.T_char, oid.T_varchar, oid.T_text:
return string(s)
case oid.T_bytea:
return parseBytea(s)
case oid.T_timestamptz:
return parseTs(parameterStatus.currentLocation, string(s))
case oid.T_timestamp, oid.T_date:
return parseTs(nil, string(s))
case oid.T_time:
return mustParse("15:04:05", typ, s)
case oid.T_timetz:
return mustParse("15:04:05-07", typ, s)
case oid.T_bool:
return s[0] == 't'
case oid.T_int8, oid.T_int4, oid.T_int2:
i, err := strconv.ParseInt(string(s), 10, 64)
if err != nil {
errorf("%s", err)
}
return i
case oid.T_float4, oid.T_float8:
bits := 64
if typ == oid.T_float4 {
bits = 32
}
f, err := strconv.ParseFloat(string(s), bits)
if err != nil {
errorf("%s", err)
}
return f
}
return s
}
// appendEncodedText encodes item in text format as required by COPY
// and appends to buf
func appendEncodedText(parameterStatus *parameterStatus, buf []byte, x interface{}) []byte {
switch v := x.(type) {
case int64:
return strconv.AppendInt(buf, v, 10)
case float64:
return strconv.AppendFloat(buf, v, 'f', -1, 64)
case []byte:
encodedBytea := encodeBytea(parameterStatus.serverVersion, v)
return appendEscapedText(buf, string(encodedBytea))
case string:
return appendEscapedText(buf, v)
case bool:
return strconv.AppendBool(buf, v)
case time.Time:
return append(buf, formatTs(v)...)
case nil:
return append(buf, "\\N"...)
default:
errorf("encode: unknown type for %T", v)
}
panic("not reached")
}
func appendEscapedText(buf []byte, text string) []byte {
escapeNeeded := false
startPos := 0
var c byte
// check if we need to escape
for i := 0; i < len(text); i++ {
c = text[i]
if c == '\\' || c == '\n' || c == '\r' || c == '\t' {
escapeNeeded = true
startPos = i
break
}
}
if !escapeNeeded {
return append(buf, text...)
}
// copy till first char to escape, iterate the rest
result := append(buf, text[:startPos]...)
for i := startPos; i < len(text); i++ {
c = text[i]
switch c {
case '\\':
result = append(result, '\\', '\\')
case '\n':
result = append(result, '\\', 'n')
case '\r':
result = append(result, '\\', 'r')
case '\t':
result = append(result, '\\', 't')
default:
result = append(result, c)
}
}
return result
}
func mustParse(f string, typ oid.Oid, s []byte) time.Time {
str := string(s)
// check for a 30-minute-offset timezone
if (typ == oid.T_timestamptz || typ == oid.T_timetz) &&
str[len(str)-3] == ':' {
f += ":00"
}
t, err := time.Parse(f, str)
if err != nil {
errorf("decode: %s", err)
}
return t
}
var errInvalidTimestamp = errors.New("invalid timestamp")
type timestampParser struct {
err error
}
func (p *timestampParser) expect(str string, char byte, pos int) {
if p.err != nil {
return
}
if pos+1 > len(str) {
p.err = errInvalidTimestamp
return
}
if c := str[pos]; c != char && p.err == nil {
p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, c)
}
}
func (p *timestampParser) mustAtoi(str string, begin int, end int) int {
if p.err != nil {
return 0
}
if begin < 0 || end < 0 || begin > end || end > len(str) {
p.err = errInvalidTimestamp
return 0
}
result, err := strconv.Atoi(str[begin:end])
if err != nil {
if p.err == nil {
p.err = fmt.Errorf("expected number; got '%v'", str)
}
return 0
}
return result
}
// The location cache caches the time zones typically used by the client.
type locationCache struct {
cache map[int]*time.Location
lock sync.Mutex
}
// All connections share the same list of timezones. Benchmarking shows that
// about 5% speed could be gained by putting the cache in the connection and
// losing the mutex, at the cost of a small amount of memory and a somewhat
// significant increase in code complexity.
var globalLocationCache = newLocationCache()
func newLocationCache() *locationCache {
return &locationCache{cache: make(map[int]*time.Location)}
}
// Returns the cached timezone for the specified offset, creating and caching
// it if necessary.
func (c *locationCache) getLocation(offset int) *time.Location {
c.lock.Lock()
defer c.lock.Unlock()
location, ok := c.cache[offset]
if !ok {
location = time.FixedZone("", offset)
c.cache[offset] = location
}
return location
}
var infinityTsEnabled = false
var infinityTsNegative time.Time
var infinityTsPositive time.Time
const (
infinityTsEnabledAlready = "pq: infinity timestamp enabled already"
infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive"
)
// EnableInfinityTs controls the handling of Postgres' "-infinity" and
// "infinity" "timestamp"s.
//
// If EnableInfinityTs is not called, "-infinity" and "infinity" will return
// []byte("-infinity") and []byte("infinity") respectively, and potentially
// cause error "sql: Scan error on column index 0: unsupported driver -> Scan
// pair: []uint8 -> *time.Time", when scanning into a time.Time value.
//
// Once EnableInfinityTs has been called, all connections created using this
// driver will decode Postgres' "-infinity" and "infinity" for "timestamp",
// "timestamp with time zone" and "date" types to the predefined minimum and
// maximum times, respectively. When encoding time.Time values, any time which
// equals or precedes the predefined minimum time will be encoded to
// "-infinity". Any values at or past the maximum time will similarly be
// encoded to "infinity".
//
// If EnableInfinityTs is called with negative >= positive, it will panic.
// Calling EnableInfinityTs after a connection has been established results in
// undefined behavior. If EnableInfinityTs is called more than once, it will
// panic.
func EnableInfinityTs(negative time.Time, positive time.Time) {
if infinityTsEnabled {
panic(infinityTsEnabledAlready)
}
if !negative.Before(positive) {
panic(infinityTsNegativeMustBeSmaller)
}
infinityTsEnabled = true
infinityTsNegative = negative
infinityTsPositive = positive
}
/*
* Testing might want to toggle infinityTsEnabled
*/
func disableInfinityTs() {
infinityTsEnabled = false
}
// This is a time function specific to the Postgres default DateStyle
// setting ("ISO, MDY"), the only one we currently support. This
// accounts for the discrepancies between the parsing available with
// time.Parse and the Postgres date formatting quirks.
func parseTs(currentLocation *time.Location, str string) interface{} {
switch str {
case "-infinity":
if infinityTsEnabled {
return infinityTsNegative
}
return []byte(str)
case "infinity":
if infinityTsEnabled {
return infinityTsPositive
}
return []byte(str)
}
t, err := ParseTimestamp(currentLocation, str)
if err != nil {
panic(err)
}
return t
}
// ParseTimestamp parses Postgres' text format. It returns a time.Time in
// currentLocation iff that time's offset agrees with the offset sent from the
// Postgres server. Otherwise, ParseTimestamp returns a time.Time with the
// fixed offset offset provided by the Postgres server.
func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) {
p := timestampParser{}
monSep := strings.IndexRune(str, '-')
// this is Gregorian year, not ISO Year
// In Gregorian system, the year 1 BC is followed by AD 1
year := p.mustAtoi(str, 0, monSep)
daySep := monSep + 3
month := p.mustAtoi(str, monSep+1, daySep)
p.expect(str, '-', daySep)
timeSep := daySep + 3
day := p.mustAtoi(str, daySep+1, timeSep)
var hour, minute, second int
if len(str) > monSep+len("01-01")+1 {
p.expect(str, ' ', timeSep)
minSep := timeSep + 3
p.expect(str, ':', minSep)
hour = p.mustAtoi(str, timeSep+1, minSep)
secSep := minSep + 3
p.expect(str, ':', secSep)
minute = p.mustAtoi(str, minSep+1, secSep)
secEnd := secSep + 3
second = p.mustAtoi(str, secSep+1, secEnd)
}
remainderIdx := monSep + len("01-01 00:00:00") + 1
// Three optional (but ordered) sections follow: the
// fractional seconds, the time zone offset, and the BC
// designation. We set them up here and adjust the other
// offsets if the preceding sections exist.
nanoSec := 0
tzOff := 0
if remainderIdx < len(str) && str[remainderIdx] == '.' {
fracStart := remainderIdx + 1
fracOff := strings.IndexAny(str[fracStart:], "-+ ")
if fracOff < 0 {
fracOff = len(str) - fracStart
}
fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff)
nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff))))
remainderIdx += fracOff + 1
}
if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') {
// time zone separator is always '-' or '+' (UTC is +00)
var tzSign int
switch c := str[tzStart]; c {
case '-':
tzSign = -1
case '+':
tzSign = +1
default:
return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c)
}
tzHours := p.mustAtoi(str, tzStart+1, tzStart+3)
remainderIdx += 3
var tzMin, tzSec int
if remainderIdx < len(str) && str[remainderIdx] == ':' {
tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3)
remainderIdx += 3
}
if remainderIdx < len(str) && str[remainderIdx] == ':' {
tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3)
remainderIdx += 3
}
tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec)
}
var isoYear int
if remainderIdx+3 <= len(str) && str[remainderIdx:remainderIdx+3] == " BC" {
isoYear = 1 - year
remainderIdx += 3
} else {
isoYear = year
}
if remainderIdx < len(str) {
return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:])
}
t := time.Date(isoYear, time.Month(month), day,
hour, minute, second, nanoSec,
globalLocationCache.getLocation(tzOff))
if currentLocation != nil {
// Set the location of the returned Time based on the session's
// TimeZone value, but only if the local time zone database agrees with
// the remote database on the offset.
lt := t.In(currentLocation)
_, newOff := lt.Zone()
if newOff == tzOff {
t = lt
}
}
return t, p.err
}
// formatTs formats t into a format postgres understands.
func formatTs(t time.Time) []byte {
if infinityTsEnabled {
// t <= -infinity : ! (t > -infinity)
if !t.After(infinityTsNegative) {
return []byte("-infinity")
}
// t >= infinity : ! (!t < infinity)
if !t.Before(infinityTsPositive) {
return []byte("infinity")
}
}
return FormatTimestamp(t)
}
// FormatTimestamp formats t into Postgres' text format for timestamps.
func FormatTimestamp(t time.Time) []byte {
// Need to send dates before 0001 A.D. with " BC" suffix, instead of the
// minus sign preferred by Go.
// Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on
bc := false
if t.Year() <= 0 {
// flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11"
t = t.AddDate((-t.Year())*2+1, 0, 0)
bc = true
}
b := []byte(t.Format(time.RFC3339Nano))
_, offset := t.Zone()
offset = offset % 60
if offset != 0 {
// RFC3339Nano already printed the minus sign
if offset < 0 {
offset = -offset
}
b = append(b, ':')
if offset < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(offset), 10)
}
if bc {
b = append(b, " BC"...)
}
return b
}
// Parse a bytea value received from the server. Both "hex" and the legacy
// "escape" format are supported.
func parseBytea(s []byte) (result []byte) {
if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) {
// bytea_output = hex
s = s[2:] // trim off leading "\\x"
result = make([]byte, hex.DecodedLen(len(s)))
_, err := hex.Decode(result, s)
if err != nil {
errorf("%s", err)
}
} else {
// bytea_output = escape
for len(s) > 0 {
if s[0] == '\\' {
// escaped '\\'
if len(s) >= 2 && s[1] == '\\' {
result = append(result, '\\')
s = s[2:]
continue
}
// '\\' followed by an octal number
if len(s) < 4 {
errorf("invalid bytea sequence %v", s)
}
r, err := strconv.ParseInt(string(s[1:4]), 8, 9)
if err != nil {
errorf("could not parse bytea value: %s", err.Error())
}
result = append(result, byte(r))
s = s[4:]
} else {
// We hit an unescaped, raw byte. Try to read in as many as
// possible in one go.
i := bytes.IndexByte(s, '\\')
if i == -1 {
result = append(result, s...)
break
}
result = append(result, s[:i]...)
s = s[i:]
}
}
}
return result
}
func encodeBytea(serverVersion int, v []byte) (result []byte) {
if serverVersion >= 90000 {
// Use the hex format if we know that the server supports it
result = make([]byte, 2+hex.EncodedLen(len(v)))
result[0] = '\\'
result[1] = 'x'
hex.Encode(result[2:], v)
} else {
// .. or resort to "escape"
for _, b := range v {
if b == '\\' {
result = append(result, '\\', '\\')
} else if b < 0x20 || b > 0x7e {
result = append(result, []byte(fmt.Sprintf("\\%03o", b))...)
} else {
result = append(result, b)
}
}
}
return result
}
// NullTime represents a time.Time that may be null. NullTime implements the
// sql.Scanner interface so it can be used as a scan destination, similar to
// sql.NullString.
type NullTime struct {
Time time.Time
Valid bool // Valid is true if Time is not NULL
}
// Scan implements the Scanner interface.
func (nt *NullTime) Scan(value interface{}) error {
nt.Time, nt.Valid = value.(time.Time)
return nil
}
// Value implements the driver Valuer interface.
func (nt NullTime) Value() (driver.Value, error) {
if !nt.Valid {
return nil, nil
}
return nt.Time, nil
}
| vendor/github.com/lib/pq/encode.go | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.0013818100560456514,
0.00019891974807251245,
0.00016350866644643247,
0.00016994918405544013,
0.0001626902521820739
] |
{
"id": 14,
"code_window": [
"\n",
" ```javascript\n",
" {\n",
" \"data\": {\n",
" \"username\": \"root-aefa635a-18\",\n",
" \"password\": \"132ae3ef-5a64-7499-351e-bfe59f3a2a21\"\n",
" }\n",
" }\n",
" ```\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"username\": \"rolename-aefa635a-18\",\n"
],
"file_path": "website/source/docs/secrets/mysql/index.html.md",
"type": "replace",
"edit_start_line_idx": 381
} | ## 0.6.1 (Unreleased)
DEPRECATIONS/BREAKING CHANGES:
* Issued certificates from the `pki` backend against new roles created or
modified after upgrading will contain a set of default key usages.
* In the Go API, the function signature for `Request.ToHTTP()` has changed.
FEATURES:
* **Convergent Encryption in `Transit`**: The `transit` backend now supports a
convergent encryption mode where the same plaintext will produce the same
ciphertext. Although very useful in some situations, this has security
implications, which are mostly mitigated by requiring the use of key
derivation when convergent encryption is enabled. See [the `transit`
documentation](https://www.vaultproject.io/docs/secrets/transit/index.html)
for more details. [GH-1537]
* **Key Usage Control in `PKI`**: Issued certificates from roles created or
modified after upgrading contain a set of default key usages for increased
compatibility with OpenVPN and some other software. This set can be changed
when writing a role definition. Existing roles are unaffected. [GH-1552]
* **Request Retrying in the CLI and Go API**: Requests that fail with a `5xx`
error code will now retry after a backoff. The minimum and maximum backoff
times, as well as the maximum total number of retries (including disabling
this functionality) can be set with environment variables. See the
[environment variable
documentation](https://www.vaultproject.io/docs/commands/environment.html)
for more details. [GH-1594]
IMPROVEMENTS:
* cli: Output formatting in the presence of warnings in the response object
[GH-1533]
* cli: `vault auth` command supports a `-path` option to take in the path at
which the auth backend is enabled, thereby allowing authenticating against
different paths using the command options [GH-1532]
* cli: `vault auth -methods` will now display the config settings of the mount
[GH-1531]
* cli: `vault read/write/unwrap -field` now allows selecting token response
fields [GH-1567]
* cli: `vault write -field` now allows selecting wrapped response fields
[GH-1567]
* core: Response wrapping is now enabled for login endpoints [GH-1588]
* credential/aws-ec2: Added a new constraint, 'bound_account_id' to the role
[GH-1523]
* physical/etcd: Support `ETCD_ADDR` env var for specifying addresses [GH-1576]
* secret/aws: Listing of roles is supported now [GH-1546]
* secret/cassandra: Add `connect_timeout` value for Cassandra connection
configuration [GH-1581]
* secret/mssql,mysql,postgresql: Reading of connection settings is supported
in all the sql backends [GH-1515]
BUG FIXES:
* credential/aws-ec2: Added a nil check for stored whitelist identity object
during renewal [GH-1542]
* core: Fix regression causing status codes to be `400` in most non-5xx error
cases [GH-1553]
* secret/postgresql(,mysql,mssql): Fix incorrect use of database over
transaction object which could lead to connection exhaustion [GH-1572]
* physical/postgres: Remove use of prepared statements as this causes
connection multiplexing software to break [GH-1548]
## 0.6.0 (June 14th, 2016)
SECURITY:
* Although `sys/revoke-prefix` was intended to revoke prefixes of secrets (via
lease IDs, which incorporate path information) and
`auth/token/revoke-prefix` was intended to revoke prefixes of tokens (using
the tokens' paths and, since 0.5.2, role information), in implementation
they both behaved exactly the same way since a single component in Vault is
responsible for managing lifetimes of both, and the type of the tracked
lifetime was not being checked. The end result was that either endpoint
could revoke both secret leases and tokens. We consider this a very minor
security issue as there are a number of mitigating factors: both endpoints
require `sudo` capability in addition to write capability, preventing
blanket ACL path globs from providing access; both work by using the prefix
to revoke as a part of the endpoint path, allowing them to be properly
ACL'd; and both are intended for emergency scenarios and users should
already not generally have access to either one. In order to prevent
confusion, we have simply removed `auth/token/revoke-prefix` in 0.6, and
`sys/revoke-prefix` will be meant for both leases and tokens instead.
DEPRECATIONS/BREAKING CHANGES:
* `auth/token/revoke-prefix` has been removed. See the security notice for
details. [GH-1280]
* Vault will now automatically register itself as the `vault` service when
using the `consul` backend and will perform its own health checks. See
the Consul backend documentation for information on how to disable
auto-registration and service checks.
* List operations that do not find any keys now return a `404` status code
rather than an empty response object [GH-1365]
* CA certificates issued from the `pki` backend no longer have associated
leases, and any CA certs already issued will ignore revocation requests from
the lease manager. This is to prevent CA certificates from being revoked
when the token used to issue the certificate expires; it was not be obvious
to users that they need to ensure that the token lifetime needed to be at
least as long as a potentially very long-lived CA cert.
FEATURES:
* **AWS EC2 Auth Backend**: Provides a secure introduction mechanism for AWS
EC2 instances allowing automated retrieval of Vault tokens. Unlike most
Vault authentication backends, this backend does not require first deploying
or provisioning security-sensitive credentials (tokens, username/password,
client certificates, etc). Instead, it treats AWS as a Trusted Third Party
and uses the cryptographically signed dynamic metadata information that
uniquely represents each EC2 instance. [Vault
Enterprise](https://www.hashicorp.com/vault.html) customers have access to a
turnkey client that speaks the backend API and makes access to a Vault token
easy.
* **Response Wrapping**: Nearly any response within Vault can now be wrapped
inside a single-use, time-limited token's cubbyhole, taking the [Cubbyhole
Authentication
Principles](https://www.hashicorp.com/blog/vault-cubbyhole-principles.html)
mechanism to its logical conclusion. Retrieving the original response is as
simple as a single API command or the new `vault unwrap` command. This makes
secret distribution easier and more secure, including secure introduction.
* **Azure Physical Backend**: You can now use Azure blob object storage as
your Vault physical data store [GH-1266]
* **Swift Physical Backend**: You can now use Swift blob object storage as
your Vault physical data store [GH-1425]
* **Consul Backend Health Checks**: The Consul backend will automatically
register a `vault` service and perform its own health checking. By default
the active node can be found at `active.vault.service.consul` and all with
standby nodes are `standby.vault.service.consul`. Sealed vaults are marked
critical and are not listed by default in Consul's service discovery. See
the documentation for details. [GH-1349]
* **Explicit Maximum Token TTLs**: You can now set explicit maximum TTLs on
tokens that do not honor changes in the system- or mount-set values. This is
useful, for instance, when the max TTL of the system or the `auth/token`
mount must be set high to accommodate certain needs but you want more
granular restrictions on tokens being issued directly from the Token
authentication backend at `auth/token`. [GH-1399]
* **Non-Renewable Tokens**: When creating tokens directly through the token
authentication backend, you can now specify in both token store roles and
the API whether or not a token should be renewable, defaulting to `true`.
* **RabbitMQ Secret Backend**: Vault can now generate credentials for
RabbitMQ. Vhosts and tags can be defined within roles. [GH-788]
IMPROVEMENTS:
* audit: Add the DisplayName value to the copy of the Request object embedded
in the associated Response, to match the original Request object [GH-1387]
* audit: Enable auditing of the `seal` and `step-down` commands [GH-1435]
* backends: Remove most `root`/`sudo` paths in favor of normal ACL mechanisms.
A particular exception are any current MFA paths. A few paths in `token` and
`sys` also require `root` or `sudo`. [GH-1478]
* command/auth: Restore the previous authenticated token if the `auth` command
fails to authenticate the provided token [GH-1233]
* command/write: `-format` and `-field` can now be used with the `write`
command [GH-1228]
* core: Add `mlock` support for FreeBSD, OpenBSD, and Darwin [GH-1297]
* core: Don't keep lease timers around when tokens are revoked [GH-1277]
* core: If using the `disable_cache` option, caches for the policy store and
the `transit` backend are now disabled as well [GH-1346]
* credential/cert: Renewal requests are rejected if the set of policies has
changed since the token was issued [GH-477]
* credential/cert: Check CRLs for specific non-CA certs configured in the
backend [GH-1404]
* credential/ldap: If `groupdn` is not configured, skip searching LDAP and
only return policies for local groups, plus a warning [GH-1283]
* credential/ldap: `vault list` support for users and groups [GH-1270]
* credential/ldap: Support for the `memberOf` attribute for group membership
searching [GH-1245]
* credential/userpass: Add list support for users [GH-911]
* credential/userpass: Remove user configuration paths from requiring sudo, in
favor of normal ACL mechanisms [GH-1312]
* credential/token: Sanitize policies and add `default` policies in appropriate
places [GH-1235]
* credential/token: Setting the renewable status of a token is now possible
via `vault token-create` and the API. The default is true, but tokens can be
specified as non-renewable. [GH-1499]
* secret/aws: Use chain credentials to allow environment/EC2 instance/shared
providers [GH-307]
* secret/aws: Support for STS AssumeRole functionality [GH-1318]
* secret/consul: Reading consul access configuration supported. The response
will contain non-sensitive information only [GH-1445]
* secret/pki: Added `exclude_cn_from_sans` field to prevent adding the CN to
DNS or Email Subject Alternate Names [GH-1220]
* secret/pki: Added list support for certificates [GH-1466]
* sys/capabilities: Enforce ACL checks for requests that query the capabilities
of a token on a given path [GH-1221]
* sys/health: Status information can now be retrieved with `HEAD` [GH-1509]
BUG FIXES:
* command/read: Fix panic when using `-field` with a non-string value [GH-1308]
* command/token-lookup: Fix TTL showing as 0 depending on how a token was
created. This only affected the value shown at lookup, not the token
behavior itself. [GH-1306]
* command/various: Tell the JSON decoder to not convert all numbers to floats;
fixes some various places where numbers were showing up in scientific
notation
* command/server: Prioritized `devRootTokenID` and `devListenAddress` flags
over their respective env vars [GH-1480]
* command/ssh: Provided option to disable host key checking. The automated
variant of `vault ssh` command uses `sshpass` which was failing to handle
host key checking presented by the `ssh` binary. [GH-1473]
* core: Properly persist mount-tuned TTLs for auth backends [GH-1371]
* core: Don't accidentally crosswire SIGINT to the reload handler [GH-1372]
* credential/github: Make organization comparison case-insensitive during
login [GH-1359]
* credential/github: Fix panic when renewing a token created with some earlier
versions of Vault [GH-1510]
* credential/github: The token used to log in via `vault auth` can now be
specified in the `VAULT_AUTH_GITHUB_TOKEN` environment variable [GH-1511]
* credential/ldap: Fix problem where certain error conditions when configuring
or opening LDAP connections would cause a panic instead of return a useful
error message [GH-1262]
* credential/token: Fall back to normal parent-token semantics if
`allowed_policies` is empty for a role. Using `allowed_policies` of
`default` resulted in the same behavior anyways. [GH-1276]
* credential/token: Fix issues renewing tokens when using the "suffix"
capability of token roles [GH-1331]
* credential/token: Fix lookup via POST showing the request token instead of
the desired token [GH-1354]
* credential/various: Fix renewal conditions when `default` policy is not
contained in the backend config [GH-1256]
* physical/s3: Don't panic in certain error cases from bad S3 responses [GH-1353]
* secret/consul: Use non-pooled Consul API client to avoid leaving files open
[GH-1428]
* secret/pki: Don't check whether a certificate is destined to be a CA
certificate if sign-verbatim endpoint is used [GH-1250]
## 0.5.3 (May 27th, 2016)
SECURITY:
* Consul ACL Token Revocation: An issue was reported to us indicating that
generated Consul ACL tokens were not being properly revoked. Upon
investigation, we found that this behavior was reproducible in a specific
scenario: when a generated lease for a Consul ACL token had been renewed
prior to revocation. In this case, the generated token was not being
properly persisted internally through the renewal function, leading to an
error during revocation due to the missing token. Unfortunately, this was
coded as a user error rather than an internal error, and the revocation
logic was expecting internal errors if revocation failed. As a result, the
revocation logic believed the revocation to have succeeded when it in fact
failed, causing the lease to be dropped while the token was still valid
within Consul. In this release, the Consul backend properly persists the
token through renewals, and the revocation logic has been changed to
consider any error type to have been a failure to revoke, causing the lease
to persist and attempt to be revoked later.
We have written an example shell script that searches through Consul's ACL
tokens and looks for those generated by Vault, which can be used as a template
for a revocation script as deemed necessary for any particular security
response. The script is available at
https://gist.github.com/jefferai/6233c2963f9407a858d84f9c27d725c0
Please note that any outstanding leases for Consul tokens produced prior to
0.5.3 that have been renewed will continue to exhibit this behavior. As a
result, we recommend either revoking all tokens produced by the backend and
issuing new ones, or if needed, a more advanced variant of the provided example
could use the timestamp embedded in each generated token's name to decide which
tokens are too old and should be deleted. This could then be run periodically
up until the maximum lease time for any outstanding pre-0.5.3 tokens has
expired.
This is a security-only release. There are no other code changes since 0.5.2.
The binaries have one additional change: they are built against Go 1.6.1 rather
than Go 1.6, as Go 1.6.1 contains two security fixes to the Go programming
language itself.
## 0.5.2 (March 16th, 2016)
FEATURES:
* **MSSQL Backend**: Generate dynamic unique MSSQL database credentials based
on configured roles [GH-998]
* **Token Accessors**: Vault now provides an accessor with each issued token.
This accessor is an identifier that can be used for a limited set of
actions, notably for token revocation. This value can be logged in
plaintext to audit logs, and in combination with the plaintext metadata
logged to audit logs, provides a searchable and straightforward way to
revoke particular users' or services' tokens in many cases. To enable
plaintext audit logging of these accessors, set `hmac_accessor=false` when
enabling an audit backend.
* **Token Credential Backend Roles**: Roles can now be created in the `token`
credential backend that allow modifying token behavior in ways that are not
otherwise exposed or easily delegated. This allows creating tokens with a
fixed set (or subset) of policies (rather than a subset of the calling
token's), periodic tokens with a fixed TTL but no expiration, specified
prefixes, and orphans.
* **Listener Certificate Reloading**: Vault's configured listeners now reload
their TLS certificate and private key when the Vault process receives a
SIGHUP.
IMPROVEMENTS:
* auth/token: Endpoints optionally accept tokens from the HTTP body rather
than just from the URLs [GH-1211]
* auth/token,sys/capabilities: Added new endpoints
`auth/token/lookup-accessor`, `auth/token/revoke-accessor` and
`sys/capabilities-accessor`, which enables performing the respective actions
with just the accessor of the tokens, without having access to the actual
token [GH-1188]
* core: Ignore leading `/` in policy paths [GH-1170]
* core: Ignore leading `/` in mount paths [GH-1172]
* command/policy-write: Provided HCL is now validated for format violations
and provides helpful information around where the violation occurred
[GH-1200]
* command/server: The initial root token ID when running in `-dev` mode can
now be specified via `-dev-root-token-id` or the environment variable
`VAULT_DEV_ROOT_TOKEN_ID` [GH-1162]
* command/server: The listen address when running in `-dev` mode can now be
specified via `-dev-listen-address` or the environment variable
`VAULT_DEV_LISTEN_ADDRESS` [GH-1169]
* command/server: The configured listeners now reload their TLS
certificates/keys when Vault is SIGHUP'd [GH-1196]
* command/step-down: New `vault step-down` command and API endpoint to force
the targeted node to give up active status, but without sealing. The node
will wait ten seconds before attempting to grab the lock again. [GH-1146]
* command/token-renew: Allow no token to be passed in; use `renew-self` in
this case. Change the behavior for any token being passed in to use `renew`.
[GH-1150]
* credential/app-id: Allow `app-id` parameter to be given in the login path;
this causes the `app-id` to be part of the token path, making it easier to
use with `revoke-prefix` [GH-424]
* credential/cert: Non-CA certificates can be used for authentication. They
must be matched exactly (issuer and serial number) for authentication, and
the certificate must carry the client authentication or 'any' extended usage
attributes. [GH-1153]
* credential/cert: Subject and Authority key IDs are output in metadata; this
allows more flexible searching/revocation in the audit logs [GH-1183]
* credential/cert: Support listing configured certs [GH-1212]
* credential/userpass: Add support for `create`/`update` capability
distinction in user path, and add user-specific endpoints to allow changing
the password and policies [GH-1216]
* credential/token: Add roles [GH-1155]
* secret/mssql: Add MSSQL backend [GH-998]
* secret/pki: Add revocation time (zero or Unix epoch) to `pki/cert/SERIAL`
endpoint [GH-1180]
* secret/pki: Sanitize serial number in `pki/revoke` endpoint to allow some
other formats [GH-1187]
* secret/ssh: Added documentation for `ssh/config/zeroaddress` endpoint.
[GH-1154]
* sys: Added new endpoints `sys/capabilities` and `sys/capabilities-self` to
fetch the capabilities of a token on a given path [GH-1171]
* sys: Added `sys/revoke-force`, which enables a user to ignore backend errors
when revoking a lease, necessary in some emergency/failure scenarios
[GH-1168]
* sys: The return codes from `sys/health` can now be user-specified via query
parameters [GH-1199]
BUG FIXES:
* logical/cassandra: Apply hyphen/underscore replacement to the entire
generated username, not just the UUID, in order to handle token display name
hyphens [GH-1140]
* physical/etcd: Output actual error when cluster sync fails [GH-1141]
* vault/expiration: Do not let error responses from the backends be skipped
  during renewals [GH-1176]
## 0.5.1 (February 25th, 2016)
DEPRECATIONS/BREAKING CHANGES:
* RSA keys less than 2048 bits are no longer supported in the PKI backend.
1024-bit keys are considered unsafe and are disallowed in the Internet PKI.
The `pki` backend has enforced SHA256 hashes in signatures from the
beginning, and software that can handle these hashes should be able to
handle larger key sizes. [GH-1095]
* The PKI backend now does not automatically delete expired certificates,
including from the CRL. Doing so could lead to a situation where a time
mismatch between the Vault server and clients could result in a certificate
that would not be considered expired by a client being removed from the CRL.
The new `pki/tidy` endpoint can be used to trigger expirations. [GH-1129]
* The `cert` backend now performs a variant of channel binding at renewal time
for increased security. In order to not overly burden clients, a notion of
identity is used. This functionality can be disabled. See the 0.5.1 upgrade
guide for more specific information [GH-1127]
FEATURES:
* **Codebase Audit**: Vault's 0.5 codebase was audited by iSEC. (The terms of
the audit contract do not allow us to make the results public.) [GH-220]
IMPROVEMENTS:
* api: The `VAULT_TLS_SERVER_NAME` environment variable can be used to control
the SNI header during TLS connections [GH-1131]
* api/health: Add the server's time in UTC to health responses [GH-1117]
* command/rekey and command/generate-root: These now return the status at
attempt initialization time, rather than requiring a separate fetch for the
nonce [GH-1054]
* credential/cert: Don't require root/sudo tokens for the `certs/` and `crls/`
paths; use normal ACL behavior instead [GH-468]
* credential/github: The validity of the token used for login will be checked
at renewal time [GH-1047]
* credential/github: The `config` endpoint no longer requires a root token;
normal ACL path matching applies
* deps: Use the standardized Go 1.6 vendoring system
* secret/aws: Inform users of AWS-imposed policy restrictions around STS
tokens if they attempt to use an invalid policy [GH-1113]
* secret/mysql: The MySQL backend now allows disabling verification of the
`connection_url` [GH-1096]
* secret/pki: Submitted CSRs are now verified to have the correct key type and
minimum number of bits according to the role. The exception is intermediate
CA signing and the `sign-verbatim` path [GH-1104]
* secret/pki: New `tidy` endpoint to allow expunging expired certificates.
[GH-1129]
* secret/postgresql: The PostgreSQL backend now allows disabling verification
of the `connection_url` [GH-1096]
* secret/ssh: When verifying an OTP, return 400 if it is not valid instead of
204 [GH-1086]
* credential/app-id: App ID backend will check the validity of app-id and user-id
during renewal time [GH-1039]
* credential/cert: TLS Certificates backend, during renewal, will now match the
client identity with the client identity used during login [GH-1127]
BUG FIXES:
* credential/ldap: Properly escape values being provided to search filters
[GH-1100]
* secret/aws: Cap the length of usernames for both IAM and STS types
  [GH-1102]
* secret/pki: If a cert is not found during lookup of a serial number,
respond with a 400 rather than a 500 [GH-1085]
* secret/postgresql: Add extra revocation statements to better handle more
permission scenarios [GH-1053]
* secret/postgresql: Make connection_url work properly [GH-1112]
## 0.5.0 (February 10, 2016)
SECURITY:
* Previous versions of Vault could allow a malicious user to hijack the rekey
operation by canceling an operation in progress and starting a new one. The
practical application of this is very small. If the user was an unseal key
owner, they could attempt to do this in order to either receive unencrypted
reseal keys or to replace the PGP keys used for encryption with ones under
their control. However, since this would invalidate any rekey progress, they
would need other unseal key holders to resubmit, which would be rather
suspicious during this manual operation if they were not also the original
initiator of the rekey attempt. If the user was not an unseal key holder,
there is no benefit to be gained; the only outcome that could be attempted
would be a denial of service against a legitimate rekey operation by sending
cancel requests over and over. Thanks to Josh Snyder for the report!
DEPRECATIONS/BREAKING CHANGES:
* `s3` physical backend: Environment variables are now preferred over
configuration values. This makes it behave similar to the rest of Vault,
which, in increasing order of preference, uses values from the configuration
file, environment variables, and CLI flags. [GH-871]
* `etcd` physical backend: `sync` functionality is now supported and turned on
by default. This can be disabled. [GH-921]
* `transit`: If a client attempts to encrypt a value with a key that does not
yet exist, what happens now depends on the capabilities set in the client's
ACL policies. If the client has `create` (or `create` and `update`)
capability, the key will upsert as in the past. If the client has `update`
capability, they will receive an error. [GH-1012]
* `token-renew` CLI command: If the token given for renewal is the same as the
client token, the `renew-self` endpoint will be used in the API. Given that
the `default` policy (by default) allows all clients access to the
`renew-self` endpoint, this makes it much more likely that the intended
operation will be successful. [GH-894]
* Token `lookup`: the `ttl` value in the response now reflects the actual
remaining TTL rather than the original TTL specified when the token was
created; this value is now located in `creation_ttl` [GH-986]
* Vault no longer uses grace periods on leases or token TTLs. Uncertainty
  about the length of the grace period for any given backend could cause
  confusion and uncertainty. [GH-1002]
* `rekey`: Rekey now requires a nonce to be supplied with key shares. This
nonce is generated at the start of a rekey attempt and is unique for that
attempt.
* `status`: The exit code for the `status` CLI command is now `2` for an
uninitialized Vault instead of `1`. `1` is returned for errors. This better
matches the rest of the CLI.
FEATURES:
* **Split Data/High Availability Physical Backends**: You can now configure
two separate physical backends: one to be used for High Availability
coordination and another to be used for encrypted data storage. See the
[configuration
documentation](https://vaultproject.io/docs/config/index.html) for details.
[GH-395]
* **Fine-Grained Access Control**: Policies can now use the `capabilities` set
to specify fine-grained control over operations allowed on a path, including
separation of `sudo` privileges from other privileges. These can be mixed
and matched in any way desired. The `policy` value is kept for backwards
compatibility. See the [updated policy
documentation](https://vaultproject.io/docs/concepts/policies.html) for
details. [GH-914]
* **List Support**: Listing is now supported via the API and the new `vault
list` command. This currently supports listing keys in the `generic` and
`cubbyhole` backends and a few other places (noted in the IMPROVEMENTS
section below). Different parts of the API and backends will need to
implement list capabilities in ways that make sense to particular endpoints,
so further support will appear over time. [GH-617]
* **Root Token Generation via Unseal Keys**: You can now use the
`generate-root` CLI command to generate new orphaned, non-expiring root
tokens in case the original is lost or revoked (accidentally or
purposefully). This requires a quorum of unseal key holders. The output
value is protected via any PGP key of the initiator's choosing or a one-time
pad known only to the initiator (a suitable pad can be generated via the
  `-genotp` flag to the command). [GH-915]
* **Unseal Key Archiving**: You can now optionally have Vault store your
unseal keys in your chosen physical store for disaster recovery purposes.
This option is only available when the keys are encrypted with PGP. [GH-907]
* **Keybase Support for PGP Encryption Keys**: You can now specify Keybase
users when passing in PGP keys to the `init`, `rekey`, and `generate-root`
CLI commands. Public keys for these users will be fetched automatically.
[GH-901]
* **DynamoDB HA Physical Backend**: There is now a new, community-supported
HA-enabled physical backend using Amazon DynamoDB. See the [configuration
documentation](https://vaultproject.io/docs/config/index.html) for details.
[GH-878]
* **PostgreSQL Physical Backend**: There is now a new, community-supported
physical backend using PostgreSQL. See the [configuration
documentation](https://vaultproject.io/docs/config/index.html) for details.
[GH-945]
* **STS Support in AWS Secret Backend**: You can now use the AWS secret
backend to fetch STS tokens rather than IAM users. [GH-927]
* **Speedups in the transit backend**: The `transit` backend has gained a
cache, and now loads only the working set of keys (e.g. from the
`min_decryption_version` to the current key version) into its working set.
This provides large speedups and potential memory savings when the `rotate`
feature of the backend is used heavily.
IMPROVEMENTS:
* cli: Output secrets sorted by key name [GH-830]
* cli: Support YAML as an output format [GH-832]
* cli: Show an error if the output format is incorrect, rather than falling
back to an empty table [GH-849]
* cli: Allow setting the `advertise_addr` for HA via the
`VAULT_ADVERTISE_ADDR` environment variable [GH-581]
* cli/generate-root: Add generate-root and associated functionality [GH-915]
* cli/init: Add `-check` flag that returns whether Vault is initialized
[GH-949]
* cli/server: Use internal functions for the token-helper rather than shelling
out, which fixes some problems with using a static binary in Docker or paths
with multiple spaces when launching in `-dev` mode [GH-850]
* cli/token-lookup: Add token-lookup command [GH-892]
* command/{init,rekey}: Allow ASCII-armored keychain files to be arguments for
`-pgp-keys` [GH-940]
* conf: Use normal bool values rather than empty/non-empty for the
`tls_disable` option [GH-802]
* credential/ldap: Add support for binding, both anonymously (to discover a
user DN) and via a username and password [GH-975]
* credential/token: Add `last_renewal_time` to token lookup calls [GH-896]
* credential/token: Change `ttl` to reflect the current remaining TTL; the
original value is in `creation_ttl` [GH-1007]
* helper/certutil: Add ability to parse PKCS#8 bundles [GH-829]
* logical/aws: You can now get STS tokens instead of IAM users [GH-927]
* logical/cassandra: Add `protocol_version` parameter to set the CQL proto
version [GH-1005]
* logical/cubbyhole: Add cubbyhole access to default policy [GH-936]
* logical/mysql: Add list support for roles path [GH-984]
* logical/pki: Fix up key usages being specified for CAs [GH-989]
* logical/pki: Add list support for roles path [GH-985]
* logical/pki: Allow `pem_bundle` to be specified as the format, which
provides a concatenated PEM bundle of returned values [GH-1008]
* logical/pki: Add 30 seconds of slack to the validity start period to
accommodate some clock skew in machines [GH-1036]
* logical/postgres: Add `max_idle_connections` parameter [GH-950]
* logical/postgres: Add list support for roles path
* logical/ssh: Add list support for roles path [GH-983]
* logical/transit: Keys are archived and only keys between the latest version
and `min_decryption_version` are loaded into the working set. This can
provide a very large speed increase when rotating keys very often. [GH-977]
* logical/transit: Keys are now cached, which should provide a large speedup
in most cases [GH-979]
* physical/cache: Use 2Q cache instead of straight LRU [GH-908]
* physical/etcd: Support basic auth [GH-859]
* physical/etcd: Support sync functionality and enable by default [GH-921]
BUG FIXES:
* api: Correct the HTTP verb used in the LookupSelf method [GH-887]
* api: Fix the output of `Sys().MountConfig(...)` to return proper values
[GH-1017]
* command/read: Fix panic when an empty argument was given [GH-923]
* command/ssh: Fix panic when username lookup fails [GH-886]
* core: When running in standalone mode, don't advertise that we are active
until post-unseal setup completes [GH-872]
* core: Update go-cleanhttp dependency to ensure idle connections aren't
leaked [GH-867]
* core: Don't allow tokens to have duplicate policies [GH-897]
* core: Fix regression in `sys/renew` that caused information stored in the
Secret part of the response to be lost [GH-912]
* physical: Use square brackets when setting an IPv6-based advertise address
as the auto-detected advertise address [GH-883]
* physical/s3: Use an initialized client when using IAM roles to fix a
regression introduced against newer versions of the AWS Go SDK [GH-836]
* secret/pki: Fix a condition where unmounting could fail if the CA
certificate was not properly loaded [GH-946]
* secret/ssh: Fix a problem where SSH connections were not always closed
properly [GH-942]
MISC:
* Clarified our stance on support for community-derived physical backends.
See the [configuration
documentation](https://vaultproject.io/docs/config/index.html) for details.
* Add `vault-java` to libraries [GH-851]
* Various minor documentation fixes and improvements [GH-839] [GH-854]
[GH-861] [GH-876] [GH-899] [GH-900] [GH-904] [GH-923] [GH-924] [GH-958]
[GH-959] [GH-981] [GH-990] [GH-1024] [GH-1025]
BUILD NOTE:
* The HashiCorp-provided binary release of Vault 0.5.0 is built against a
patched version of Go 1.5.3 containing two specific bug fixes affecting TLS
certificate handling. These fixes are in the Go 1.6 tree and were
cherry-picked on top of stock Go 1.5.3. If you want to examine the way in
which the releases were built, please look at our [cross-compilation
Dockerfile](https://github.com/hashicorp/vault/blob/v0.5.0/scripts/cross/Dockerfile-patched-1.5.3).
## 0.4.1 (January 13, 2016)
SECURITY:
* Build against Go 1.5.3 to mitigate a security vulnerability introduced in
Go 1.5. For more information, please see
https://groups.google.com/forum/#!topic/golang-dev/MEATuOi_ei4
This is a security-only release; other than the version number and building
against Go 1.5.3, there are no changes from 0.4.0.
## 0.4.0 (December 10, 2015)
DEPRECATIONS/BREAKING CHANGES:
* Policy Name Casing: Policy names are now normalized to lower-case on write,
helping prevent accidental case mismatches. For backwards compatibility,
policy names are not currently normalized when reading or deleting. [GH-676]
* Default etcd port number: the default connection string for the `etcd`
physical store uses port 2379 instead of port 4001, which is the port used
by the supported version 2.x of etcd. [GH-753]
* As noted below in the FEATURES section, if your Vault installation contains
a policy called `default`, new tokens created will inherit this policy
automatically.
* In the PKI backend there have been a few minor breaking changes:
* The token display name is no longer a valid option for providing a base
domain for issuance. Since this name is prepended with the name of the
authentication backend that issued it, it provided a faulty use-case at best
and a confusing experience at worst. We hope to figure out a better
per-token value in a future release.
* The `allowed_base_domain` parameter has been changed to `allowed_domains`,
which accepts a comma-separated list of domains. This allows issuing
certificates with DNS subjects across multiple domains. If you had a
configured `allowed_base_domain` parameter, it will be migrated
automatically when the role is read (either via a normal read, or via
issuing a certificate).
FEATURES:
* **Significantly Enhanced PKI Backend**: The `pki` backend can now generate
and sign root CA certificates and intermediate CA CSRs. It can also now sign
submitted client CSRs, as well as a significant number of other
enhancements. See the updated documentation for the full API. [GH-666]
* **CRL Checking for Certificate Authentication**: The `cert` backend now
supports pushing CRLs into the mount and using the contained serial numbers
for revocation checking. See the documentation for the `cert` backend for
more info. [GH-330]
* **Default Policy**: Vault now ensures that a policy named `default` is added
to every token. This policy cannot be deleted, but it can be modified
(including to an empty policy). There are three endpoints allowed in the
default `default` policy, related to token self-management: `lookup-self`,
which allows a token to retrieve its own information, and `revoke-self` and
`renew-self`, which are self-explanatory. If your existing Vault
installation contains a policy called `default`, it will not be overridden,
but it will be added to each new token created. You can override this
behavior when using manual token creation (i.e. not via an authentication
backend) by setting the "no_default_policy" flag to true. [GH-732]
IMPROVEMENTS:
* api: API client now uses a 60 second timeout instead of indefinite [GH-681]
* api: Implement LookupSelf, RenewSelf, and RevokeSelf functions for auth
tokens [GH-739]
* api: Standardize environment variable reading logic inside the API; the CLI
now uses this but can still override via command-line parameters [GH-618]
* audit: HMAC-SHA256'd client tokens are now stored with each request entry.
Previously they were only displayed at creation time; this allows much
better traceability of client actions. [GH-713]
* audit: There is now a `sys/audit-hash` endpoint that can be used to generate
an HMAC-SHA256'd value from provided data using the given audit backend's
salt [GH-784]
* core: The physical storage read cache can now be disabled via
"disable_cache" [GH-674]
* core: The unsealing process can now be reset midway through (this feature
was documented before, but not enabled) [GH-695]
* core: Tokens can now renew themselves [GH-455]
* core: Base64-encoded PGP keys can be used with the CLI for `init` and
`rekey` operations [GH-653]
* core: Print version on startup [GH-765]
* core: Access to `sys/policy` and `sys/mounts` now uses the normal ACL system
instead of requiring a root token [GH-769]
* credential/token: Display whether or not a token is an orphan in the output
of a lookup call [GH-766]
* logical: Allow `.` in path-based variables in many more locations [GH-244]
* logical: Responses now contain a "warnings" key containing a list of
warnings returned from the server. These are conditions that did not require
failing an operation, but of which the client should be aware. [GH-676]
* physical/(consul,etcd): Consul and etcd now use a connection pool to limit
the number of outstanding operations, improving behavior when a lot of
operations must happen at once [GH-677] [GH-780]
* physical/consul: The `datacenter` parameter was removed; it could not be
effective unless the Vault node (or the Consul node it was connecting to)
was in the datacenter specified, in which case it wasn't needed [GH-816]
* physical/etcd: Support TLS-encrypted connections and use a connection pool
to limit the number of outstanding operations [GH-780]
* physical/s3: The S3 endpoint can now be configured, allowing using
S3-API-compatible storage solutions [GH-750]
* physical/s3: The S3 bucket can now be configured with the `AWS_S3_BUCKET`
environment variable [GH-758]
* secret/consul: Management tokens can now be created [GH-714]
BUG FIXES:
* api: API client now checks for a 301 response for redirects. Vault doesn't
generate these, but in certain conditions Go's internal HTTP handler can
generate them, leading to client errors.
* cli: `token-create` now supports the `ttl` parameter in addition to the
deprecated `lease` parameter. [GH-688]
* core: Return data from `generic` backends on the last use of a limited-use
token [GH-615]
* core: Fix upgrade path for leases created in `generic` prior to 0.3 [GH-673]
* core: Stale leader entries will now be reaped [GH-679]
* core: Using `mount-tune` on the auth/token path did not take effect.
[GH-688]
* core: Fix a potential race condition when (un)sealing the vault with metrics
enabled [GH-694]
* core: Fix an error that could happen in some failure scenarios where Vault
could fail to revert to a clean state [GH-733]
* core: Ensure secondary indexes are removed when a lease is expired [GH-749]
* core: Ensure rollback manager uses an up-to-date mounts table [GH-771]
* everywhere: Don't use http.DefaultClient, as it shares state implicitly and
is a source of hard-to-track-down bugs [GH-700]
* credential/token: Allow creating orphan tokens via an API path [GH-748]
* secret/generic: Validate given duration at write time, not just read time;
if stored durations are not parseable, return a warning and the default
duration rather than an error [GH-718]
* secret/generic: Return 400 instead of 500 when `generic` backend is written
to with no data fields [GH-825]
* secret/postgresql: Revoke permissions before dropping a user or revocation
may fail [GH-699]
MISC:
* Various documentation fixes and improvements [GH-685] [GH-688] [GH-697]
[GH-710] [GH-715] [GH-831]
## 0.3.1 (October 6, 2015)
SECURITY:
* core: In certain failure scenarios, the full values of requests and
responses would be logged [GH-665]
FEATURES:
* **Settable Maximum Open Connections**: The `mysql` and `postgresql` backends
now allow setting the number of maximum open connections to the database,
which was previously capped to 2. [GH-661]
* **Renewable Tokens for GitHub**: The `github` backend now supports
specifying a TTL, enabling renewable tokens. [GH-664]
BUG FIXES:
* dist: linux-amd64 distribution was dynamically linked [GH-656]
* credential/github: Fix acceptance tests [GH-651]
MISC:
* Various minor documentation fixes and improvements [GH-649] [GH-650]
[GH-654] [GH-663]
## 0.3.0 (September 28, 2015)
DEPRECATIONS/BREAKING CHANGES:
Note: deprecations and breaking changes in upcoming releases are announced
ahead of time on the "vault-tool" mailing list.
* **Cookie Authentication Removed**: As of 0.3 the only way to authenticate is
via the X-Vault-Token header. Cookie authentication was hard to properly
test, could result in browsers/tools/applications saving tokens in plaintext
on disk, and other issues. [GH-564]
* **Terminology/Field Names**: Vault is transitioning from overloading the
term "lease" to mean both "a set of metadata" and "the amount of time the
metadata is valid". The latter is now being referred to as TTL (or
"lease_duration" for backwards-compatibility); some parts of Vault have
already switched to using "ttl" and others will follow in upcoming releases.
In particular, the "token", "generic", and "pki" backends accept both "ttl"
and "lease" but in 0.4 only "ttl" will be accepted. [GH-528]
* **Downgrade Not Supported**: Due to enhancements in the storage subsystem,
values written by Vault 0.3+ will not be able to be read by prior versions
of Vault. There are no expected upgrade issues, however, as with all
critical infrastructure it is recommended to back up Vault's physical
storage before upgrading.
FEATURES:
* **SSH Backend**: Vault can now be used to delegate SSH access to machines,
via a (recommended) One-Time Password approach or by issuing dynamic keys.
[GH-385]
* **Cubbyhole Backend**: This backend works similarly to the "generic" backend
but provides a per-token workspace. This enables some additional
authentication workflows (especially for containers) and can be useful to
applications to e.g. store local credentials while being restarted or
upgraded, rather than persisting to disk. [GH-612]
* **Transit Backend Improvements**: The transit backend now allows key
rotation and datakey generation. For rotation, data encrypted with previous
versions of the keys can still be decrypted, down to a (configurable)
minimum previous version; there is a rewrap function for manual upgrades of
ciphertext to newer versions. Additionally, the backend now allows
generating and returning high-entropy keys of a configurable bitsize
suitable for AES and other functions; this is returned wrapped by a named
key, or optionally both wrapped and plaintext for immediate use. [GH-626]
* **Global and Per-Mount Default/Max TTL Support**: You can now set the
default and maximum Time To Live for leases both globally and per-mount.
Per-mount settings override global settings. Not all backends honor these
settings yet, but the maximum is a hard limit enforced outside the backend.
See the documentation for "/sys/mounts/" for details on configuring
per-mount TTLs. [GH-469]
* **PGP Encryption for Unseal Keys**: When initializing or rotating Vault's
master key, PGP/GPG public keys can now be provided. The output keys will be
encrypted with the given keys, in order. [GH-570]
* **Duo Multifactor Authentication Support**: Backends that support MFA can
now use Duo as the mechanism. [GH-464]
* **Performance Improvements**: Users of the "generic" backend will see a
significant performance improvement as the backend no longer creates leases,
although it does return TTLs (global/mount default, or set per-item) as
before. [GH-631]
* **Codebase Audit**: Vault's codebase was audited by iSEC. (The terms of the
audit contract do not allow us to make the results public.) [GH-220]
IMPROVEMENTS:
* audit: Log entries now contain a time field [GH-495]
* audit: Obfuscated audit entries now use hmac-sha256 instead of sha1 [GH-627]
* backends: Add ability for a cleanup function to be called on backend unmount
[GH-608]
* config: Allow specifying minimum acceptable TLS version [GH-447]
* core: If trying to mount in a location that is already mounted, be more
helpful about the error [GH-510]
* core: Be more explicit on failure if the issue is invalid JSON [GH-553]
* core: Tokens can now revoke themselves [GH-620]
* credential/app-id: Give a more specific error when sending a duplicate POST
to sys/auth/app-id [GH-392]
* credential/github: Support custom API endpoints (e.g. for Github Enterprise)
[GH-572]
* credential/ldap: Add per-user policies and option to login with
userPrincipalName [GH-420]
* credential/token: Allow root tokens to specify the ID of a token being
created from CLI [GH-502]
* credential/userpass: Enable renewals for login tokens [GH-623]
* scripts: Use /usr/bin/env to find Bash instead of hardcoding [GH-446]
* scripts: Use godep for build scripts to use same environment as tests
[GH-404]
* secret/mysql: Allow reading configuration data [GH-529]
* secret/pki: Split "allow_any_name" logic to that and "enforce_hostnames", to
allow for non-hostname values (e.g. for client certificates) [GH-555]
* storage/consul: Allow specifying certificates used to talk to Consul
[GH-384]
* storage/mysql: Allow SSL encrypted connections [GH-439]
* storage/s3: Allow using temporary security credentials [GH-433]
* telemetry: Put telemetry object in configuration to allow more flexibility
[GH-419]
* testing: Disable mlock for testing of logical backends so as not to require
root [GH-479]
BUG FIXES:
* audit/file: Do not enable auditing if file permissions are invalid [GH-550]
* backends: Allow hyphens in endpoint patterns (fixes AWS and others) [GH-559]
* cli: Fixed missing setup of client TLS certificates if no custom CA was
provided
* cli/read: Do not include a carriage return when using raw field output
[GH-624]
* core: Bad input data could lead to a panic for that session, rather than
returning an error [GH-503]
* core: Allow SHA2-384/SHA2-512 hashed certificates [GH-448]
* core: Do not return a Secret if there are no uses left on a token (since it
will be unable to be used) [GH-615]
* core: Code paths that called lookup-self would decrement num_uses and
potentially immediately revoke a token [GH-552]
* core: Some /sys/ paths would not properly redirect from a standby to the
leader [GH-499] [GH-551]
* credential/aws: Translate spaces in a token's display name to avoid making
IAM unhappy [GH-567]
* credential/github: Integration failed if more than ten organizations or
teams [GH-489]
* credential/token: Tokens with sudo access to "auth/token/create" can now use
root-only options [GH-629]
* secret/cassandra: Work around backwards-incompatible change made in
Cassandra 2.2 preventing Vault from properly setting/revoking leases
[GH-549]
* secret/mysql: Use varbinary instead of varchar to avoid InnoDB/UTF-8 issues
[GH-522]
* secret/postgres: Explicitly set timezone in connections [GH-597]
* storage/etcd: Renew semaphore periodically to prevent leadership flapping
[GH-606]
* storage/zk: Fix collisions in storage that could lead to data unavailability
[GH-411]
MISC:
* Various documentation fixes and improvements [GH-412] [GH-474] [GH-476]
[GH-482] [GH-483] [GH-486] [GH-508] [GH-568] [GH-574] [GH-586] [GH-590]
[GH-591] [GH-592] [GH-595] [GH-613] [GH-637]
* Less "armon" in stack traces [GH-453]
* Sourcegraph integration [GH-456]
## 0.2.0 (July 13, 2015)
FEATURES:
* **Key Rotation Support**: The `rotate` command can be used to rotate the
master encryption key used to write data to the storage (physical) backend.
[GH-277]
* **Rekey Support**: Rekey can be used to rotate the master key and change the
configuration of the unseal keys (number of shares, threshold required).
[GH-277]
* **New secret backend: `pki`**: Enable Vault to be a certificate authority
and generate signed TLS certificates. [GH-310]
* **New secret backend: `cassandra`**: Generate dynamic credentials for
Cassandra [GH-363]
* **New storage backend: `etcd`**: store physical data in etcd [GH-259]
[GH-297]
* **New storage backend: `s3`**: store physical data in S3. Does not support
HA. [GH-242]
* **New storage backend: `MySQL`**: store physical data in MySQL. Does not
support HA. [GH-324]
* `transit` secret backend supports derived keys for per-transaction unique
keys [GH-399]
IMPROVEMENTS:
* cli/auth: Enable `cert` method [GH-380]
* cli/auth: read input from stdin [GH-250]
* cli/read: Ability to read a single field from a secret [GH-257]
* cli/write: Adding a force flag when no input required
* core: allow time duration format in place of seconds for some inputs
* core: audit log provides more useful information [GH-360]
* core: graceful shutdown for faster HA failover
* core: **change policy format** to use explicit globbing [GH-400] Any
existing policy in Vault is automatically upgraded to avoid issues. All
policy files must be updated for future writes. Adding the explicit glob
character `*` to the path specification is all that is required.
* core: policy merging to give deny highest precedence [GH-400]
* credential/app-id: Protect against timing attack on app-id
* credential/cert: Record the common name in the metadata [GH-342]
* credential/ldap: Allow TLS verification to be disabled [GH-372]
* credential/ldap: More flexible names allowed [GH-245] [GH-379] [GH-367]
* credential/userpass: Protect against timing attack on password
* credential/userpass: Use bcrypt for password matching
* http: response codes improved to reflect error [GH-366]
* http: the `sys/health` endpoint supports `?standbyok` to return 200 on
standby [GH-389]
* secret/app-id: Support deleting AppID and UserIDs [GH-200]
* secret/consul: Fine grained lease control [GH-261]
* secret/transit: Decouple raw key from key management endpoint [GH-355]
* secret/transit: Upsert named key when encrypt is used [GH-355]
* storage/zk: Support for HA configuration [GH-252]
* storage/zk: Changing node representation. **Backwards incompatible**.
[GH-416]
BUG FIXES:
* audit/file: file removing TLS connection state
* audit/syslog: fix removing TLS connection state
* command/*: commands accepting `k=v` allow blank values
* core: Allow building on FreeBSD [GH-365]
* core: Fixed various panics when audit logging enabled
* core: Lease renewal does not create redundant lease
* core: fixed leases with negative duration [GH-354]
* core: token renewal does not create child token
* core: fixing panic when lease increment is null [GH-408]
* credential/app-id: Salt the paths in storage backend to avoid information
leak
* credential/cert: Fixing client certificate not being requested
* credential/cert: Fixing panic when no certificate match found [GH-361]
* http: Accept PUT as POST for sys/auth
* http: Accept PUT as POST for sys/mounts [GH-349]
* http: Return 503 when sealed [GH-225]
* secret/postgres: Username length is capped to exceeding limit
* server: Do not panic if backend not configured [GH-222]
* server: Explicitly check value of tls_diable [GH-201]
* storage/zk: Fixed issues with version conflicts [GH-190]
MISC:
* cli/path-help: renamed from `help` to avoid confusion
## 0.1.2 (May 11, 2015)
FEATURES:
* **New physical backend: `zookeeper`**: store physical data in Zookeeper.
HA not supported yet.
* **New credential backend: `ldap`**: authenticate using LDAP credentials.
IMPROVEMENTS:
* core: Auth backends can store internal data about auth creds
* audit: display name for auth is shown in logs [GH-176]
* command/*: `-insecure` has been renamed to `-tls-skip-verify` [GH-130]
* command/*: `VAULT_TOKEN` overrides local stored auth [GH-162]
* command/server: environment variables are copy-pastable
* credential/app-id: hash of app and user ID are in metadata [GH-176]
* http: HTTP API accepts `X-Vault-Token` as auth header [GH-124]
* logical/*: Generate help output even if no synopsis specified
BUG FIXES:
* core: login endpoints should never return secrets
* core: Internal data should never be returned from core endpoints
* core: defer barrier initialization to as late as possible to avoid error
cases during init that corrupt data (no data loss)
* core: guard against invalid init config earlier
* audit/file: create file if it doesn't exist [GH-148]
* command/*: ignore directories when traversing CA paths [GH-181]
* credential/*: all policy mapping keys are case insensitive [GH-163]
* physical/consul: Fixing path for locking so HA works in every case
## 0.1.1 (May 2, 2015)
SECURITY CHANGES:
* physical/file: create the storge with 0600 permissions [GH-102]
* token/disk: write the token to disk with 0600 perms
IMPROVEMENTS:
* core: Very verbose error if mlock fails [GH-59]
* command/*: On error with TLS oversized record, show more human-friendly
error message. [GH-123]
* command/read: `lease_renewable` is now outputted along with the secret to
show whether it is renewable or not
* command/server: Add configuration option to disable mlock
* command/server: Disable mlock for dev mode so it works on more systems
BUG FIXES:
* core: if token helper isn't absolute, prepend with path to Vault
executable, not "vault" (which requires PATH) [GH-60]
* core: Any "mapping" routes allow hyphens in keys [GH-119]
* core: Validate `advertise_addr` is a valid URL with scheme [GH-106]
* command/auth: Using an invalid token won't crash [GH-75]
* credential/app-id: app and user IDs can have hyphens in keys [GH-119]
* helper/password: import proper DLL for Windows to ask password [GH-83]
## 0.1.0 (April 28, 2015)
* Initial release
| CHANGELOG.md | 0 | https://github.com/hashicorp/vault/commit/83635c16b6b62d7b43d3d1853a0cb189133f18a6 | [
0.0006153701106086373,
0.00017760634364094585,
0.00015936180716380477,
0.0001653840736253187,
0.00005612078894046135
] |
{
"id": 0,
"code_window": [
"### etcd server\n",
"\n",
"- Add [`etcd --log-format`](https://github.com/etcd-io/etcd/pull/13339) flag to support log format.\n",
"- Add [`etcd --experimental-max-learners`](https://github.com/etcd-io/etcd/pull/13377) flag to allow configuration of learner max membership.\n",
"- Add [`etcd --experimental-enable-lease-checkpoint-persist`](https://github.com/etcd-io/etcd/pull/13508) flag to handle upgrade from v3.5.2 clusters with this feature enabled.\n",
"- Fix [non mutating requests pass through quotaKVServer when NOSPACE](https://github.com/etcd-io/etcd/pull/13435)\n",
"- Fix [exclude the same alarm type activated by multiple peers](https://github.com/etcd-io/etcd/pull/13467).\n",
"- Fix [Provide a better liveness probe for when etcd runs as a Kubernetes pod](https://github.com/etcd-io/etcd/pull/13399)\n",
"- Fix [Lease checkpoints don't prevent to reset ttl on leader change](https://github.com/etcd-io/etcd/pull/13508).\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"- Add [`etcdctl make-mirror --rev`](https://github.com/etcd-io/etcd/pull/13519) flag to support incremental mirror.\n"
],
"file_path": "CHANGELOG-3.6.md",
"type": "add",
"edit_start_line_idx": 36
} | // Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package e2e
import (
"fmt"
"testing"
"time"
"go.etcd.io/etcd/tests/v3/framework/e2e"
)
func TestCtlV3MakeMirror(t *testing.T) { testCtl(t, makeMirrorTest) }
func TestCtlV3MakeMirrorModifyDestPrefix(t *testing.T) { testCtl(t, makeMirrorModifyDestPrefixTest) }
func TestCtlV3MakeMirrorNoDestPrefix(t *testing.T) { testCtl(t, makeMirrorNoDestPrefixTest) }
func makeMirrorTest(cx ctlCtx) {
var (
flags = []string{}
kvs = []kv{{"key1", "val1"}, {"key2", "val2"}, {"key3", "val3"}}
kvs2 = []kvExec{{key: "key1", val: "val1"}, {key: "key2", val: "val2"}, {key: "key3", val: "val3"}}
prefix = "key"
)
testMirrorCommand(cx, flags, kvs, kvs2, prefix, prefix)
}
func makeMirrorModifyDestPrefixTest(cx ctlCtx) {
var (
flags = []string{"--prefix", "o_", "--dest-prefix", "d_"}
kvs = []kv{{"o_key1", "val1"}, {"o_key2", "val2"}, {"o_key3", "val3"}}
kvs2 = []kvExec{{key: "d_key1", val: "val1"}, {key: "d_key2", val: "val2"}, {key: "d_key3", val: "val3"}}
srcprefix = "o_"
destprefix = "d_"
)
testMirrorCommand(cx, flags, kvs, kvs2, srcprefix, destprefix)
}
func makeMirrorNoDestPrefixTest(cx ctlCtx) {
var (
flags = []string{"--prefix", "o_", "--no-dest-prefix"}
kvs = []kv{{"o_key1", "val1"}, {"o_key2", "val2"}, {"o_key3", "val3"}}
kvs2 = []kvExec{{key: "key1", val: "val1"}, {key: "key2", val: "val2"}, {key: "key3", val: "val3"}}
srcprefix = "o_"
destprefix = "key"
)
testMirrorCommand(cx, flags, kvs, kvs2, srcprefix, destprefix)
}
func testMirrorCommand(cx ctlCtx, flags []string, sourcekvs []kv, destkvs []kvExec, srcprefix, destprefix string) {
// set up another cluster to mirror with
mirrorcfg := e2e.NewConfigAutoTLS()
mirrorcfg.ClusterSize = 1
mirrorcfg.BasePort = 10000
mirrorctx := ctlCtx{
t: cx.t,
cfg: *mirrorcfg,
dialTimeout: 7 * time.Second,
}
mirrorepc, err := e2e.NewEtcdProcessCluster(cx.t, &mirrorctx.cfg)
if err != nil {
cx.t.Fatalf("could not start etcd process cluster (%v)", err)
}
mirrorctx.epc = mirrorepc
defer func() {
if err = mirrorctx.epc.Close(); err != nil {
cx.t.Fatalf("error closing etcd processes (%v)", err)
}
}()
cmdArgs := append(cx.PrefixArgs(), "make-mirror")
cmdArgs = append(cmdArgs, flags...)
cmdArgs = append(cmdArgs, fmt.Sprintf("localhost:%d", mirrorcfg.BasePort))
proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
if err != nil {
cx.t.Fatal(err)
}
defer func() {
err = proc.Stop()
if err != nil {
cx.t.Fatal(err)
}
}()
for i := range sourcekvs {
if err = ctlV3Put(cx, sourcekvs[i].key, sourcekvs[i].val, ""); err != nil {
cx.t.Fatal(err)
}
}
if err = ctlV3Get(cx, []string{srcprefix, "--prefix"}, sourcekvs...); err != nil {
cx.t.Fatal(err)
}
if err = ctlV3Watch(mirrorctx, []string{destprefix, "--rev", "1", "--prefix"}, destkvs...); err != nil {
cx.t.Fatal(err)
}
}
| tests/e2e/ctl_v3_make_mirror_test.go | 1 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.00022515621094498783,
0.0001763719046721235,
0.00016669012256897986,
0.00016839845920912921,
0.000016988469724310562
] |
{
"id": 0,
"code_window": [
"### etcd server\n",
"\n",
"- Add [`etcd --log-format`](https://github.com/etcd-io/etcd/pull/13339) flag to support log format.\n",
"- Add [`etcd --experimental-max-learners`](https://github.com/etcd-io/etcd/pull/13377) flag to allow configuration of learner max membership.\n",
"- Add [`etcd --experimental-enable-lease-checkpoint-persist`](https://github.com/etcd-io/etcd/pull/13508) flag to handle upgrade from v3.5.2 clusters with this feature enabled.\n",
"- Fix [non mutating requests pass through quotaKVServer when NOSPACE](https://github.com/etcd-io/etcd/pull/13435)\n",
"- Fix [exclude the same alarm type activated by multiple peers](https://github.com/etcd-io/etcd/pull/13467).\n",
"- Fix [Provide a better liveness probe for when etcd runs as a Kubernetes pod](https://github.com/etcd-io/etcd/pull/13399)\n",
"- Fix [Lease checkpoints don't prevent to reset ttl on leader change](https://github.com/etcd-io/etcd/pull/13508).\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"- Add [`etcdctl make-mirror --rev`](https://github.com/etcd-io/etcd/pull/13519) flag to support incremental mirror.\n"
],
"file_path": "CHANGELOG-3.6.md",
"type": "add",
"edit_start_line_idx": 36
} | // Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package report generates human-readable benchmark reports.
package report
| pkg/report/doc.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.0001744534820318222,
0.00017303918139077723,
0.00017162488074973226,
0.00017303918139077723,
0.0000014143006410449743
] |
{
"id": 0,
"code_window": [
"### etcd server\n",
"\n",
"- Add [`etcd --log-format`](https://github.com/etcd-io/etcd/pull/13339) flag to support log format.\n",
"- Add [`etcd --experimental-max-learners`](https://github.com/etcd-io/etcd/pull/13377) flag to allow configuration of learner max membership.\n",
"- Add [`etcd --experimental-enable-lease-checkpoint-persist`](https://github.com/etcd-io/etcd/pull/13508) flag to handle upgrade from v3.5.2 clusters with this feature enabled.\n",
"- Fix [non mutating requests pass through quotaKVServer when NOSPACE](https://github.com/etcd-io/etcd/pull/13435)\n",
"- Fix [exclude the same alarm type activated by multiple peers](https://github.com/etcd-io/etcd/pull/13467).\n",
"- Fix [Provide a better liveness probe for when etcd runs as a Kubernetes pod](https://github.com/etcd-io/etcd/pull/13399)\n",
"- Fix [Lease checkpoints don't prevent to reset ttl on leader change](https://github.com/etcd-io/etcd/pull/13508).\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"- Add [`etcdctl make-mirror --rev`](https://github.com/etcd-io/etcd/pull/13519) flag to support incremental mirror.\n"
],
"file_path": "CHANGELOG-3.6.md",
"type": "add",
"edit_start_line_idx": 36
} | // Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package recipe
import (
"context"
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/client/v3"
)
// WaitEvents waits on a key until it observes the given events and returns the final one.
func WaitEvents(c *clientv3.Client, key string, rev int64, evs []mvccpb.Event_EventType) (*clientv3.Event, error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
wc := c.Watch(ctx, key, clientv3.WithRev(rev))
if wc == nil {
return nil, ErrNoWatcher
}
return waitEvents(wc, evs), nil
}
func WaitPrefixEvents(c *clientv3.Client, prefix string, rev int64, evs []mvccpb.Event_EventType) (*clientv3.Event, error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
wc := c.Watch(ctx, prefix, clientv3.WithPrefix(), clientv3.WithRev(rev))
if wc == nil {
return nil, ErrNoWatcher
}
return waitEvents(wc, evs), nil
}
func waitEvents(wc clientv3.WatchChan, evs []mvccpb.Event_EventType) *clientv3.Event {
i := 0
for wresp := range wc {
for _, ev := range wresp.Events {
if ev.Type == evs[i] {
i++
if i == len(evs) {
return ev
}
}
}
}
return nil
}
| client/v3/experimental/recipes/watch.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.00017445364210288972,
0.00016680173575878143,
0.00016314751701429486,
0.00016470527043566108,
0.000004164054189459421
] |
{
"id": 0,
"code_window": [
"### etcd server\n",
"\n",
"- Add [`etcd --log-format`](https://github.com/etcd-io/etcd/pull/13339) flag to support log format.\n",
"- Add [`etcd --experimental-max-learners`](https://github.com/etcd-io/etcd/pull/13377) flag to allow configuration of learner max membership.\n",
"- Add [`etcd --experimental-enable-lease-checkpoint-persist`](https://github.com/etcd-io/etcd/pull/13508) flag to handle upgrade from v3.5.2 clusters with this feature enabled.\n",
"- Fix [non mutating requests pass through quotaKVServer when NOSPACE](https://github.com/etcd-io/etcd/pull/13435)\n",
"- Fix [exclude the same alarm type activated by multiple peers](https://github.com/etcd-io/etcd/pull/13467).\n",
"- Fix [Provide a better liveness probe for when etcd runs as a Kubernetes pod](https://github.com/etcd-io/etcd/pull/13399)\n",
"- Fix [Lease checkpoints don't prevent to reset ttl on leader change](https://github.com/etcd-io/etcd/pull/13508).\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"- Add [`etcdctl make-mirror --rev`](https://github.com/etcd-io/etcd/pull/13519) flag to support incremental mirror.\n"
],
"file_path": "CHANGELOG-3.6.md",
"type": "add",
"edit_start_line_idx": 36
} | // Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package raft
import (
"context"
"testing"
"time"
)
func BenchmarkOneNode(b *testing.B) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
s := newTestMemoryStorage(withPeers(1))
rn := newTestRawNode(1, 10, 1, s)
n := newNode(rn)
go n.run()
defer n.Stop()
n.Campaign(ctx)
go func() {
for i := 0; i < b.N; i++ {
n.Propose(ctx, []byte("foo"))
}
}()
for {
rd := <-n.Ready()
s.Append(rd.Entries)
// a reasonable disk sync latency
time.Sleep(1 * time.Millisecond)
n.Advance()
if rd.HardState.Commit == uint64(b.N+1) {
return
}
}
}
| raft/node_bench_test.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.00017474099877290428,
0.00016925629461184144,
0.00016648313612677157,
0.00016840279567986727,
0.0000028559252314153127
] |
{
"id": 1,
"code_window": [
"\tmmuser string\n",
"\tmmpassword string\n",
"\tmmnodestprefix bool\n",
")\n",
"\n",
"// NewMakeMirrorCommand returns the cobra command for \"makeMirror\".\n",
"func NewMakeMirrorCommand() *cobra.Command {\n",
"\tc := &cobra.Command{\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tmmrev int64\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "add",
"edit_start_line_idx": 45
} | // Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package command
import (
"context"
"errors"
"fmt"
"strings"
"sync/atomic"
"time"
"github.com/bgentry/speakeasy"
"go.etcd.io/etcd/pkg/v3/cobrautl"
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/mirror"
"github.com/spf13/cobra"
)
var (
mminsecureTr bool
mmcert string
mmkey string
mmcacert string
mmprefix string
mmdestprefix string
mmuser string
mmpassword string
mmnodestprefix bool
)
// NewMakeMirrorCommand returns the cobra command for "makeMirror".
func NewMakeMirrorCommand() *cobra.Command {
c := &cobra.Command{
Use: "make-mirror [options] <destination>",
Short: "Makes a mirror at the destination etcd cluster",
Run: makeMirrorCommandFunc,
}
c.Flags().StringVar(&mmprefix, "prefix", "", "Key-value prefix to mirror")
c.Flags().StringVar(&mmdestprefix, "dest-prefix", "", "destination prefix to mirror a prefix to a different prefix in the destination cluster")
c.Flags().BoolVar(&mmnodestprefix, "no-dest-prefix", false, "mirror key-values to the root of the destination cluster")
c.Flags().StringVar(&mmcert, "dest-cert", "", "Identify secure client using this TLS certificate file for the destination cluster")
c.Flags().StringVar(&mmkey, "dest-key", "", "Identify secure client using this TLS key file")
c.Flags().StringVar(&mmcacert, "dest-cacert", "", "Verify certificates of TLS enabled secure servers using this CA bundle")
// TODO: secure by default when etcd enables secure gRPC by default.
c.Flags().BoolVar(&mminsecureTr, "dest-insecure-transport", true, "Disable transport security for client connections")
c.Flags().StringVar(&mmuser, "dest-user", "", "Destination username[:password] for authentication (prompt if password is not supplied)")
c.Flags().StringVar(&mmpassword, "dest-password", "", "Destination password for authentication (if this option is used, --user option shouldn't include password)")
return c
}
func authDestCfg() *authCfg {
if mmuser == "" {
return nil
}
var cfg authCfg
if mmpassword == "" {
splitted := strings.SplitN(mmuser, ":", 2)
if len(splitted) < 2 {
var err error
cfg.username = mmuser
cfg.password, err = speakeasy.Ask("Destination Password: ")
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
} else {
cfg.username = splitted[0]
cfg.password = splitted[1]
}
} else {
cfg.username = mmuser
cfg.password = mmpassword
}
return &cfg
}
func makeMirrorCommandFunc(cmd *cobra.Command, args []string) {
if len(args) != 1 {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("make-mirror takes one destination argument"))
}
dialTimeout := dialTimeoutFromCmd(cmd)
keepAliveTime := keepAliveTimeFromCmd(cmd)
keepAliveTimeout := keepAliveTimeoutFromCmd(cmd)
sec := &secureCfg{
cert: mmcert,
key: mmkey,
cacert: mmcacert,
insecureTransport: mminsecureTr,
}
auth := authDestCfg()
cc := &clientConfig{
endpoints: []string{args[0]},
dialTimeout: dialTimeout,
keepAliveTime: keepAliveTime,
keepAliveTimeout: keepAliveTimeout,
scfg: sec,
acfg: auth,
}
dc := cc.mustClient()
c := mustClientFromCmd(cmd)
err := makeMirror(context.TODO(), c, dc)
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
func makeMirror(ctx context.Context, c *clientv3.Client, dc *clientv3.Client) error {
total := int64(0)
// if destination prefix is specified and remove destination prefix is true return error
if mmnodestprefix && len(mmdestprefix) > 0 {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("`--dest-prefix` and `--no-dest-prefix` cannot be set at the same time, choose one"))
}
go func() {
for {
time.Sleep(30 * time.Second)
fmt.Println(atomic.LoadInt64(&total))
}
}()
s := mirror.NewSyncer(c, mmprefix, 0)
rc, errc := s.SyncBase(ctx)
// if remove destination prefix is false and destination prefix is empty set the value of destination prefix same as prefix
if !mmnodestprefix && len(mmdestprefix) == 0 {
mmdestprefix = mmprefix
}
for r := range rc {
for _, kv := range r.Kvs {
_, err := dc.Put(ctx, modifyPrefix(string(kv.Key)), string(kv.Value))
if err != nil {
return err
}
atomic.AddInt64(&total, 1)
}
}
err := <-errc
if err != nil {
return err
}
wc := s.SyncUpdates(ctx)
for wr := range wc {
if wr.CompactRevision != 0 {
return rpctypes.ErrCompacted
}
var lastRev int64
ops := []clientv3.Op{}
for _, ev := range wr.Events {
nextRev := ev.Kv.ModRevision
if lastRev != 0 && nextRev > lastRev {
_, err := dc.Txn(ctx).Then(ops...).Commit()
if err != nil {
return err
}
ops = []clientv3.Op{}
}
lastRev = nextRev
switch ev.Type {
case mvccpb.PUT:
ops = append(ops, clientv3.OpPut(modifyPrefix(string(ev.Kv.Key)), string(ev.Kv.Value)))
atomic.AddInt64(&total, 1)
case mvccpb.DELETE:
ops = append(ops, clientv3.OpDelete(modifyPrefix(string(ev.Kv.Key))))
atomic.AddInt64(&total, 1)
default:
panic("unexpected event type")
}
}
if len(ops) != 0 {
_, err := dc.Txn(ctx).Then(ops...).Commit()
if err != nil {
return err
}
}
}
return nil
}
func modifyPrefix(key string) string {
return strings.Replace(key, mmprefix, mmdestprefix, 1)
}
| etcdctl/ctlv3/command/make_mirror_command.go | 1 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.9976723790168762,
0.39289379119873047,
0.00016821222379803658,
0.007657320238649845,
0.45148777961730957
] |
{
"id": 1,
"code_window": [
"\tmmuser string\n",
"\tmmpassword string\n",
"\tmmnodestprefix bool\n",
")\n",
"\n",
"// NewMakeMirrorCommand returns the cobra command for \"makeMirror\".\n",
"func NewMakeMirrorCommand() *cobra.Command {\n",
"\tc := &cobra.Command{\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tmmrev int64\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "add",
"edit_start_line_idx": 45
} | // Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v2store
import "github.com/prometheus/client_golang/prometheus"
// Set of raw Prometheus metrics.
// Labels
// * action = declared in event.go
// * outcome = Outcome
// Do not increment directly, use Report* methods.
var (
readCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "etcd_debugging",
Subsystem: "store",
Name: "reads_total",
Help: "Total number of reads action by (get/getRecursive), local to this member.",
}, []string{"action"})
writeCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "etcd_debugging",
Subsystem: "store",
Name: "writes_total",
Help: "Total number of writes (e.g. set/compareAndDelete) seen by this member.",
}, []string{"action"})
readFailedCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "etcd_debugging",
Subsystem: "store",
Name: "reads_failed_total",
Help: "Failed read actions by (get/getRecursive), local to this member.",
}, []string{"action"})
writeFailedCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "etcd_debugging",
Subsystem: "store",
Name: "writes_failed_total",
Help: "Failed write actions (e.g. set/compareAndDelete), seen by this member.",
}, []string{"action"})
expireCounter = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "etcd_debugging",
Subsystem: "store",
Name: "expires_total",
Help: "Total number of expired keys.",
})
watchRequests = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "etcd_debugging",
Subsystem: "store",
Name: "watch_requests_total",
Help: "Total number of incoming watch requests (new or reestablished).",
})
watcherCount = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: "etcd_debugging",
Subsystem: "store",
Name: "watchers",
Help: "Count of currently active watchers.",
})
)
const (
GetRecursive = "getRecursive"
)
func init() {
if prometheus.Register(readCounter) != nil {
// Tests will try to double register since the tests use both
// store and store_test packages; ignore second attempts.
return
}
prometheus.MustRegister(writeCounter)
prometheus.MustRegister(expireCounter)
prometheus.MustRegister(watchRequests)
prometheus.MustRegister(watcherCount)
}
func reportReadSuccess(readAction string) {
readCounter.WithLabelValues(readAction).Inc()
}
func reportReadFailure(readAction string) {
readCounter.WithLabelValues(readAction).Inc()
readFailedCounter.WithLabelValues(readAction).Inc()
}
func reportWriteSuccess(writeAction string) {
writeCounter.WithLabelValues(writeAction).Inc()
}
func reportWriteFailure(writeAction string) {
writeCounter.WithLabelValues(writeAction).Inc()
writeFailedCounter.WithLabelValues(writeAction).Inc()
}
func reportExpiredKey() {
expireCounter.Inc()
}
func reportWatchRequest() {
watchRequests.Inc()
}
func reportWatcherAdded() {
watcherCount.Inc()
}
func reportWatcherRemoved() {
watcherCount.Dec()
}
| server/etcdserver/api/v2store/metrics.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.00018018083937931806,
0.0001719981519272551,
0.000165125573403202,
0.00017226161435246468,
0.000003837195436062757
] |
{
"id": 1,
"code_window": [
"\tmmuser string\n",
"\tmmpassword string\n",
"\tmmnodestprefix bool\n",
")\n",
"\n",
"// NewMakeMirrorCommand returns the cobra command for \"makeMirror\".\n",
"func NewMakeMirrorCommand() *cobra.Command {\n",
"\tc := &cobra.Command{\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tmmrev int64\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "add",
"edit_start_line_idx": 45
} | // Copyright 2021 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdutl
import (
"go.etcd.io/etcd/pkg/v3/cobrautl"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
func GetLogger() *zap.Logger {
config := zap.NewProductionConfig()
config.Encoding = "console"
config.EncoderConfig.EncodeTime = zapcore.RFC3339TimeEncoder
lg, err := config.Build()
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
}
return lg
}
| etcdutl/etcdutl/common.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.0008997215772978961,
0.000355810479959473,
0.0001709775679046288,
0.00017627139459364116,
0.000314045581035316
] |
{
"id": 1,
"code_window": [
"\tmmuser string\n",
"\tmmpassword string\n",
"\tmmnodestprefix bool\n",
")\n",
"\n",
"// NewMakeMirrorCommand returns the cobra command for \"makeMirror\".\n",
"func NewMakeMirrorCommand() *cobra.Command {\n",
"\tc := &cobra.Command{\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tmmrev int64\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "add",
"edit_start_line_idx": 45
} | #!/usr/bin/env python3
import sys
import os
import argparse
import logging
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
logging.basicConfig(format='[%(levelname)s %(asctime)s %(name)s] %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
params = None
def parse_args():
parser = argparse.ArgumentParser(
description='plot graph using mixed read/write result file.')
parser.add_argument('input_file_a', type=str,
help='first input data files in csv format. (required)')
parser.add_argument('input_file_b', type=str, nargs='?',
help='second input data files in csv format. (optional)')
parser.add_argument('-t', '--title', dest='title', type=str, required=True,
help='plot graph title string')
parser.add_argument('-z', '--zero-centered', dest='zero', action='store_true', required=False,
help='plot the improvement graph with white color represents 0.0',
default=True)
parser.add_argument('--no-zero-centered', dest='zero', action='store_false', required=False,
help='plot the improvement graph without white color represents 0.0')
parser.add_argument('-o', '--output-image-file', dest='output', type=str, required=True,
help='output image filename')
parser.add_argument('-F', '--output-format', dest='format', type=str, default='png',
help='output image file format. default: jpg')
return parser.parse_args()
def load_data_files(*args):
df_list = []
try:
for i in args:
if i is not None:
logger.debug('loading csv file {}'.format(i))
df_list.append(pd.read_csv(i))
except FileNotFoundError as e:
logger.error(str(e))
sys.exit(1)
res = []
try:
for df in df_list:
param_df = df[df['type'] == 'PARAM']
param_str = ''
if len(param_df) != 0:
param_str = param_df['comment'].iloc[0]
new_df = df[df['type'] == 'DATA'][[
'ratio', 'conn_size', 'value_size']].copy()
cols = [x for x in df.columns if x.find('iter') != -1]
tmp = [df[df['type'] == 'DATA'][x].str.split(':') for x in cols]
read_df = [x.apply(lambda x: float(x[0])) for x in tmp]
read_avg = sum(read_df) / len(read_df)
new_df['read'] = read_avg
write_df = [x.apply(lambda x: float(x[1])) for x in tmp]
write_avg = sum(write_df) / len(write_df)
new_df['write'] = write_avg
new_df['ratio'] = new_df['ratio'].astype(float)
new_df['conn_size'] = new_df['conn_size'].astype(int)
new_df['value_size'] = new_df['value_size'].astype(int)
res.append({
'dataframe': new_df,
'param': param_str
})
except Exception as e:
logger.error(str(e))
sys.exit(1)
return res
# This is copied directly from matplotlib source code. Some early versions of matplotlib
# do not have CenteredNorm class
class CenteredNorm(colors.Normalize):
def __init__(self, vcenter=0, halfrange=None, clip=False):
"""
Normalize symmetrical data around a center (0 by default).
Unlike `TwoSlopeNorm`, `CenteredNorm` applies an equal rate of change
around the center.
Useful when mapping symmetrical data around a conceptual center
e.g., data that range from -2 to 4, with 0 as the midpoint, and
with equal rates of change around that midpoint.
Parameters
----------
vcenter : float, default: 0
The data value that defines ``0.5`` in the normalization.
halfrange : float, optional
The range of data values that defines a range of ``0.5`` in the
normalization, so that *vcenter* - *halfrange* is ``0.0`` and
*vcenter* + *halfrange* is ``1.0`` in the normalization.
Defaults to the largest absolute difference to *vcenter* for
the values in the dataset.
Examples
--------
This maps data values -2 to 0.25, 0 to 0.5, and 4 to 1.0
(assuming equal rates of change above and below 0.0):
>>> import matplotlib.colors as mcolors
>>> norm = mcolors.CenteredNorm(halfrange=4.0)
>>> data = [-2., 0., 4.]
>>> norm(data)
array([0.25, 0.5 , 1. ])
"""
self._vcenter = vcenter
self.vmin = None
self.vmax = None
# calling the halfrange setter to set vmin and vmax
self.halfrange = halfrange
self.clip = clip
def _set_vmin_vmax(self):
"""
Set *vmin* and *vmax* based on *vcenter* and *halfrange*.
"""
self.vmax = self._vcenter + self._halfrange
self.vmin = self._vcenter - self._halfrange
def autoscale(self, A):
"""
Set *halfrange* to ``max(abs(A-vcenter))``, then set *vmin* and *vmax*.
"""
A = np.asanyarray(A)
self._halfrange = max(self._vcenter-A.min(),
A.max()-self._vcenter)
self._set_vmin_vmax()
def autoscale_None(self, A):
"""Set *vmin* and *vmax*."""
A = np.asanyarray(A)
if self._halfrange is None and A.size:
self.autoscale(A)
@property
def vcenter(self):
return self._vcenter
@vcenter.setter
def vcenter(self, vcenter):
self._vcenter = vcenter
if self.vmax is not None:
# recompute halfrange assuming vmin and vmax represent
# min and max of data
self._halfrange = max(self._vcenter-self.vmin,
self.vmax-self._vcenter)
self._set_vmin_vmax()
@property
def halfrange(self):
return self._halfrange
@halfrange.setter
def halfrange(self, halfrange):
if halfrange is None:
self._halfrange = None
self.vmin = None
self.vmax = None
else:
self._halfrange = abs(halfrange)
def __call__(self, value, clip=None):
if self._halfrange is not None:
# enforce symmetry, reset vmin and vmax
self._set_vmin_vmax()
return super().__call__(value, clip=clip)
# plot type is the type of the data to plot. Either 'read' or 'write'
def plot_data(title, plot_type, cmap_name_default, *args):
if len(args) == 1:
fig_size = (12, 16)
df0 = args[0]['dataframe']
df0param = args[0]['param']
fig = plt.figure(figsize=fig_size)
count = 0
for val, df in df0.groupby('ratio'):
count += 1
plt.subplot(4, 2, count)
plt.tripcolor(df['conn_size'], df['value_size'], df[plot_type])
plt.title('R/W Ratio {:.4f} [{:.2f}, {:.2f}]'.format(val, df[plot_type].min(),
df[plot_type].max()))
plt.yscale('log', base=2)
plt.ylabel('Value Size')
plt.xscale('log', base=2)
plt.xlabel('Connections Amount')
plt.colorbar()
plt.tight_layout()
fig.suptitle('{} [{}]\n{}'.format(title, plot_type.upper(), df0param))
elif len(args) == 2:
fig_size = (12, 26)
df0 = args[0]['dataframe']
df0param = args[0]['param']
df1 = args[1]['dataframe']
df1param = args[1]['param']
fig = plt.figure(figsize=fig_size)
col = 0
delta_df = df1.copy()
delta_df[[plot_type]] = ((df1[[plot_type]] - df0[[plot_type]]) /
df0[[plot_type]]) * 100
for tmp in [df0, df1, delta_df]:
row = 0
for val, df in tmp.groupby('ratio'):
pos = row * 3 + col + 1
plt.subplot(8, 3, pos)
norm = None
if col == 2:
cmap_name = 'bwr'
if params.zero:
norm = CenteredNorm()
else:
cmap_name = cmap_name_default
plt.tripcolor(df['conn_size'], df['value_size'], df[plot_type],
norm=norm,
cmap=plt.get_cmap(cmap_name))
if row == 0:
if col == 0:
plt.title('{}\nR/W Ratio {:.4f} [{:.1f}, {:.1f}]'.format(
os.path.basename(params.input_file_a),
val, df[plot_type].min(), df[plot_type].max()))
elif col == 1:
plt.title('{}\nR/W Ratio {:.4f} [{:.1f}, {:.1f}]'.format(
os.path.basename(params.input_file_b),
val, df[plot_type].min(), df[plot_type].max()))
elif col == 2:
plt.title('Gain\nR/W Ratio {:.4f} [{:.2f}%, {:.2f}%]'.format(val, df[plot_type].min(),
df[plot_type].max()))
else:
if col == 2:
plt.title('R/W Ratio {:.4f} [{:.2f}%, {:.2f}%]'.format(val, df[plot_type].min(),
df[plot_type].max()))
else:
plt.title('R/W Ratio {:.4f} [{:.1f}, {:.1f}]'.format(val, df[plot_type].min(),
df[plot_type].max()))
plt.yscale('log', base=2)
plt.ylabel('Value Size')
plt.xscale('log', base=2)
plt.xlabel('Connections Amount')
if col == 2:
plt.colorbar(format='%.2f%%')
else:
plt.colorbar()
plt.tight_layout()
row += 1
col += 1
fig.suptitle('{} [{}]\n{} {}\n{} {}'.format(
title, plot_type.upper(), os.path.basename(params.input_file_a), df0param,
os.path.basename(params.input_file_b), df1param))
else:
raise Exception('invalid plot input data')
fig.subplots_adjust(top=0.93)
plt.savefig("{}_{}.{}".format(params.output, plot_type,
params.format), format=params.format)
def main():
global params
logging.basicConfig()
params = parse_args()
result = load_data_files(params.input_file_a, params.input_file_b)
for i in [('read', 'viridis'), ('write', 'plasma')]:
plot_type, cmap_name = i
plot_data(params.title, plot_type, cmap_name, *result)
if __name__ == '__main__':
main()
| tools/rw-heatmaps/plot_data.py | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.0001804101630114019,
0.00017398092313669622,
0.00016996230988297611,
0.00017437333008274436,
0.0000026909751795756165
] |
{
"id": 2,
"code_window": [
"\t}\n",
"\n",
"\tc.Flags().StringVar(&mmprefix, \"prefix\", \"\", \"Key-value prefix to mirror\")\n",
"\tc.Flags().StringVar(&mmdestprefix, \"dest-prefix\", \"\", \"destination prefix to mirror a prefix to a different prefix in the destination cluster\")\n",
"\tc.Flags().BoolVar(&mmnodestprefix, \"no-dest-prefix\", false, \"mirror key-values to the root of the destination cluster\")\n",
"\tc.Flags().StringVar(&mmcert, \"dest-cert\", \"\", \"Identify secure client using this TLS certificate file for the destination cluster\")\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tc.Flags().Int64Var(&mmrev, \"rev\", 0, \"Specify the kv revision to start to mirror\")\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "add",
"edit_start_line_idx": 56
} | // Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package command
import (
"context"
"errors"
"fmt"
"strings"
"sync/atomic"
"time"
"github.com/bgentry/speakeasy"
"go.etcd.io/etcd/pkg/v3/cobrautl"
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/mirror"
"github.com/spf13/cobra"
)
var (
mminsecureTr bool
mmcert string
mmkey string
mmcacert string
mmprefix string
mmdestprefix string
mmuser string
mmpassword string
mmnodestprefix bool
)
// NewMakeMirrorCommand returns the cobra command for "makeMirror".
func NewMakeMirrorCommand() *cobra.Command {
c := &cobra.Command{
Use: "make-mirror [options] <destination>",
Short: "Makes a mirror at the destination etcd cluster",
Run: makeMirrorCommandFunc,
}
c.Flags().StringVar(&mmprefix, "prefix", "", "Key-value prefix to mirror")
c.Flags().StringVar(&mmdestprefix, "dest-prefix", "", "destination prefix to mirror a prefix to a different prefix in the destination cluster")
c.Flags().BoolVar(&mmnodestprefix, "no-dest-prefix", false, "mirror key-values to the root of the destination cluster")
c.Flags().StringVar(&mmcert, "dest-cert", "", "Identify secure client using this TLS certificate file for the destination cluster")
c.Flags().StringVar(&mmkey, "dest-key", "", "Identify secure client using this TLS key file")
c.Flags().StringVar(&mmcacert, "dest-cacert", "", "Verify certificates of TLS enabled secure servers using this CA bundle")
// TODO: secure by default when etcd enables secure gRPC by default.
c.Flags().BoolVar(&mminsecureTr, "dest-insecure-transport", true, "Disable transport security for client connections")
c.Flags().StringVar(&mmuser, "dest-user", "", "Destination username[:password] for authentication (prompt if password is not supplied)")
c.Flags().StringVar(&mmpassword, "dest-password", "", "Destination password for authentication (if this option is used, --user option shouldn't include password)")
return c
}
func authDestCfg() *authCfg {
if mmuser == "" {
return nil
}
var cfg authCfg
if mmpassword == "" {
splitted := strings.SplitN(mmuser, ":", 2)
if len(splitted) < 2 {
var err error
cfg.username = mmuser
cfg.password, err = speakeasy.Ask("Destination Password: ")
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
} else {
cfg.username = splitted[0]
cfg.password = splitted[1]
}
} else {
cfg.username = mmuser
cfg.password = mmpassword
}
return &cfg
}
func makeMirrorCommandFunc(cmd *cobra.Command, args []string) {
if len(args) != 1 {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("make-mirror takes one destination argument"))
}
dialTimeout := dialTimeoutFromCmd(cmd)
keepAliveTime := keepAliveTimeFromCmd(cmd)
keepAliveTimeout := keepAliveTimeoutFromCmd(cmd)
sec := &secureCfg{
cert: mmcert,
key: mmkey,
cacert: mmcacert,
insecureTransport: mminsecureTr,
}
auth := authDestCfg()
cc := &clientConfig{
endpoints: []string{args[0]},
dialTimeout: dialTimeout,
keepAliveTime: keepAliveTime,
keepAliveTimeout: keepAliveTimeout,
scfg: sec,
acfg: auth,
}
dc := cc.mustClient()
c := mustClientFromCmd(cmd)
err := makeMirror(context.TODO(), c, dc)
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
func makeMirror(ctx context.Context, c *clientv3.Client, dc *clientv3.Client) error {
total := int64(0)
// if destination prefix is specified and remove destination prefix is true return error
if mmnodestprefix && len(mmdestprefix) > 0 {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("`--dest-prefix` and `--no-dest-prefix` cannot be set at the same time, choose one"))
}
go func() {
for {
time.Sleep(30 * time.Second)
fmt.Println(atomic.LoadInt64(&total))
}
}()
s := mirror.NewSyncer(c, mmprefix, 0)
rc, errc := s.SyncBase(ctx)
// if remove destination prefix is false and destination prefix is empty set the value of destination prefix same as prefix
if !mmnodestprefix && len(mmdestprefix) == 0 {
mmdestprefix = mmprefix
}
for r := range rc {
for _, kv := range r.Kvs {
_, err := dc.Put(ctx, modifyPrefix(string(kv.Key)), string(kv.Value))
if err != nil {
return err
}
atomic.AddInt64(&total, 1)
}
}
err := <-errc
if err != nil {
return err
}
wc := s.SyncUpdates(ctx)
for wr := range wc {
if wr.CompactRevision != 0 {
return rpctypes.ErrCompacted
}
var lastRev int64
ops := []clientv3.Op{}
for _, ev := range wr.Events {
nextRev := ev.Kv.ModRevision
if lastRev != 0 && nextRev > lastRev {
_, err := dc.Txn(ctx).Then(ops...).Commit()
if err != nil {
return err
}
ops = []clientv3.Op{}
}
lastRev = nextRev
switch ev.Type {
case mvccpb.PUT:
ops = append(ops, clientv3.OpPut(modifyPrefix(string(ev.Kv.Key)), string(ev.Kv.Value)))
atomic.AddInt64(&total, 1)
case mvccpb.DELETE:
ops = append(ops, clientv3.OpDelete(modifyPrefix(string(ev.Kv.Key))))
atomic.AddInt64(&total, 1)
default:
panic("unexpected event type")
}
}
if len(ops) != 0 {
_, err := dc.Txn(ctx).Then(ops...).Commit()
if err != nil {
return err
}
}
}
return nil
}
func modifyPrefix(key string) string {
return strings.Replace(key, mmprefix, mmdestprefix, 1)
}
| etcdctl/ctlv3/command/make_mirror_command.go | 1 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.7057971358299255,
0.03805333375930786,
0.00016619211237411946,
0.0012550558894872665,
0.14601550996303558
] |
{
"id": 2,
"code_window": [
"\t}\n",
"\n",
"\tc.Flags().StringVar(&mmprefix, \"prefix\", \"\", \"Key-value prefix to mirror\")\n",
"\tc.Flags().StringVar(&mmdestprefix, \"dest-prefix\", \"\", \"destination prefix to mirror a prefix to a different prefix in the destination cluster\")\n",
"\tc.Flags().BoolVar(&mmnodestprefix, \"no-dest-prefix\", false, \"mirror key-values to the root of the destination cluster\")\n",
"\tc.Flags().StringVar(&mmcert, \"dest-cert\", \"\", \"Identify secure client using this TLS certificate file for the destination cluster\")\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tc.Flags().Int64Var(&mmrev, \"rev\", 0, \"Specify the kv revision to start to mirror\")\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "add",
"edit_start_line_idx": 56
} | // Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package namespace
func prefixInterval(pfx string, key, end []byte) (pfxKey []byte, pfxEnd []byte) {
pfxKey = make([]byte, len(pfx)+len(key))
copy(pfxKey[copy(pfxKey, pfx):], key)
if len(end) == 1 && end[0] == 0 {
// the edge of the keyspace
pfxEnd = make([]byte, len(pfx))
copy(pfxEnd, pfx)
ok := false
for i := len(pfxEnd) - 1; i >= 0; i-- {
if pfxEnd[i]++; pfxEnd[i] != 0 {
ok = true
break
}
}
if !ok {
// 0xff..ff => 0x00
pfxEnd = []byte{0}
}
} else if len(end) >= 1 {
pfxEnd = make([]byte, len(pfx)+len(end))
copy(pfxEnd[copy(pfxEnd, pfx):], end)
}
return pfxKey, pfxEnd
}
| client/v3/namespace/util.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.00017623892927076668,
0.00017049048619810492,
0.0001640310074435547,
0.00017126131569966674,
0.000004032230208395049
] |
{
"id": 2,
"code_window": [
"\t}\n",
"\n",
"\tc.Flags().StringVar(&mmprefix, \"prefix\", \"\", \"Key-value prefix to mirror\")\n",
"\tc.Flags().StringVar(&mmdestprefix, \"dest-prefix\", \"\", \"destination prefix to mirror a prefix to a different prefix in the destination cluster\")\n",
"\tc.Flags().BoolVar(&mmnodestprefix, \"no-dest-prefix\", false, \"mirror key-values to the root of the destination cluster\")\n",
"\tc.Flags().StringVar(&mmcert, \"dest-cert\", \"\", \"Identify secure client using this TLS certificate file for the destination cluster\")\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tc.Flags().Int64Var(&mmrev, \"rev\", 0, \"Specify the kv revision to start to mirror\")\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "add",
"edit_start_line_idx": 56
} | etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
etcd4: ./etcd --name m4 --data-dir /tmp/m4.data --listen-client-urls https://127.0.0.1:13791 --advertise-client-urls https://m4.etcd.local:13791 --listen-peer-urls https://127.0.0.1:13880 --initial-advertise-peer-urls=https://m1.etcd.local:13880 --initial-cluster-token tkn --discovery-srv=etcd.local --discovery-srv-name=c1 --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
etcd5: ./etcd --name m5 --data-dir /tmp/m5.data --listen-client-urls https://127.0.0.1:23791 --advertise-client-urls https://m5.etcd.local:23791 --listen-peer-urls https://127.0.0.1:23880 --initial-advertise-peer-urls=https://m5.etcd.local:23880 --initial-cluster-token tkn --discovery-srv=etcd.local --discovery-srv-name=c1 --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
etcd6: ./etcd --name m6 --data-dir /tmp/m6.data --listen-client-urls https://127.0.0.1:33791 --advertise-client-urls https://m6.etcd.local:33791 --listen-peer-urls https://127.0.0.1:33880 --initial-advertise-peer-urls=https://m6.etcd.local:33880 --initial-cluster-token tkn --discovery-srv=etcd.local --discovery-srv-name=c1 --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr
| tests/manual/docker-dns-srv/certs/Procfile | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.0016368655487895012,
0.0010762761812657118,
0.0005156867555342615,
0.0010762761812657118,
0.0005605894257314503
] |
{
"id": 2,
"code_window": [
"\t}\n",
"\n",
"\tc.Flags().StringVar(&mmprefix, \"prefix\", \"\", \"Key-value prefix to mirror\")\n",
"\tc.Flags().StringVar(&mmdestprefix, \"dest-prefix\", \"\", \"destination prefix to mirror a prefix to a different prefix in the destination cluster\")\n",
"\tc.Flags().BoolVar(&mmnodestprefix, \"no-dest-prefix\", false, \"mirror key-values to the root of the destination cluster\")\n",
"\tc.Flags().StringVar(&mmcert, \"dest-cert\", \"\", \"Identify secure client using this TLS certificate file for the destination cluster\")\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tc.Flags().Int64Var(&mmrev, \"rev\", 0, \"Specify the kv revision to start to mirror\")\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "add",
"edit_start_line_idx": 56
} | // Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tester
import (
"context"
"fmt"
"strings"
"time"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/tests/v3/functional/rpcpb"
"go.uber.org/zap"
)
type fetchSnapshotCaseQuorum struct {
desc string
rpcpbCase rpcpb.Case
injected map[int]struct{}
snapshotted int
}
func (c *fetchSnapshotCaseQuorum) Inject(clus *Cluster) error {
// 1. Assume node C is the current leader with most up-to-date data.
lead, err := clus.GetLeader()
if err != nil {
return err
}
c.snapshotted = lead
// 2. Download snapshot from node C, before destroying node A and B.
clus.lg.Info(
"save snapshot on leader node START",
zap.String("target-endpoint", clus.Members[lead].EtcdClientEndpoint),
)
var resp *rpcpb.Response
resp, err = clus.sendOpWithResp(lead, rpcpb.Operation_SAVE_SNAPSHOT)
if resp == nil || (resp != nil && !resp.Success) || err != nil {
clus.lg.Info(
"save snapshot on leader node FAIL",
zap.String("target-endpoint", clus.Members[lead].EtcdClientEndpoint),
zap.Error(err),
)
return err
}
clus.lg.Info(
"save snapshot on leader node SUCCESS",
zap.String("target-endpoint", clus.Members[lead].EtcdClientEndpoint),
zap.String("member-name", resp.SnapshotInfo.MemberName),
zap.Strings("member-client-urls", resp.SnapshotInfo.MemberClientURLs),
zap.String("snapshot-path", resp.SnapshotInfo.SnapshotPath),
zap.String("snapshot-file-size", resp.SnapshotInfo.SnapshotFileSize),
zap.String("snapshot-total-size", resp.SnapshotInfo.SnapshotTotalSize),
zap.Int64("snapshot-total-key", resp.SnapshotInfo.SnapshotTotalKey),
zap.Int64("snapshot-hash", resp.SnapshotInfo.SnapshotHash),
zap.Int64("snapshot-revision", resp.SnapshotInfo.SnapshotRevision),
zap.String("took", resp.SnapshotInfo.Took),
zap.Error(err),
)
if err != nil {
return err
}
clus.Members[lead].SnapshotInfo = resp.SnapshotInfo
leaderc, err := clus.Members[lead].CreateEtcdClient()
if err != nil {
return err
}
defer leaderc.Close()
var mresp *clientv3.MemberListResponse
mresp, err = leaderc.MemberList(context.Background())
mss := []string{}
if err == nil && mresp != nil {
mss = describeMembers(mresp)
}
clus.lg.Info(
"member list before disastrous machine failure",
zap.String("request-to", clus.Members[lead].EtcdClientEndpoint),
zap.Strings("members", mss),
zap.Error(err),
)
if err != nil {
return err
}
// simulate real life; machine failures may happen
// after some time since last snapshot save
time.Sleep(time.Second)
// 3. Destroy node A and B, and make the whole cluster inoperable.
for {
c.injected = pickQuorum(len(clus.Members))
if _, ok := c.injected[lead]; !ok {
break
}
}
for idx := range c.injected {
clus.lg.Info(
"disastrous machine failure to quorum START",
zap.String("target-endpoint", clus.Members[idx].EtcdClientEndpoint),
)
err = clus.sendOp(idx, rpcpb.Operation_SIGQUIT_ETCD_AND_REMOVE_DATA)
clus.lg.Info(
"disastrous machine failure to quorum END",
zap.String("target-endpoint", clus.Members[idx].EtcdClientEndpoint),
zap.Error(err),
)
if err != nil {
return err
}
}
// 4. Now node C cannot operate either.
// 5. SIGTERM node C and remove its data directories.
clus.lg.Info(
"disastrous machine failure to old leader START",
zap.String("target-endpoint", clus.Members[lead].EtcdClientEndpoint),
)
err = clus.sendOp(lead, rpcpb.Operation_SIGQUIT_ETCD_AND_REMOVE_DATA)
clus.lg.Info(
"disastrous machine failure to old leader END",
zap.String("target-endpoint", clus.Members[lead].EtcdClientEndpoint),
zap.Error(err),
)
return err
}
func (c *fetchSnapshotCaseQuorum) Recover(clus *Cluster) error {
// 6. Restore a new seed member from node C's latest snapshot file.
oldlead := c.snapshotted
// configuration on restart from recovered snapshot
// seed member's configuration is all the same as previous one
// except initial cluster string is now a single-node cluster
clus.Members[oldlead].EtcdOnSnapshotRestore = clus.Members[oldlead].Etcd
clus.Members[oldlead].EtcdOnSnapshotRestore.InitialClusterState = "existing"
name := clus.Members[oldlead].Etcd.Name
initClus := []string{}
for _, u := range clus.Members[oldlead].Etcd.AdvertisePeerURLs {
initClus = append(initClus, fmt.Sprintf("%s=%s", name, u))
}
clus.Members[oldlead].EtcdOnSnapshotRestore.InitialCluster = strings.Join(initClus, ",")
clus.lg.Info(
"restore snapshot and restart from snapshot request START",
zap.String("target-endpoint", clus.Members[oldlead].EtcdClientEndpoint),
zap.Strings("initial-cluster", initClus),
)
err := clus.sendOp(oldlead, rpcpb.Operation_RESTORE_RESTART_FROM_SNAPSHOT)
clus.lg.Info(
"restore snapshot and restart from snapshot request END",
zap.String("target-endpoint", clus.Members[oldlead].EtcdClientEndpoint),
zap.Strings("initial-cluster", initClus),
zap.Error(err),
)
if err != nil {
return err
}
leaderc, err := clus.Members[oldlead].CreateEtcdClient()
if err != nil {
return err
}
defer leaderc.Close()
// 7. Add another member to establish 2-node cluster.
// 8. Add another member to establish 3-node cluster.
// 9. Add more if any.
idxs := make([]int, 0, len(c.injected))
for idx := range c.injected {
idxs = append(idxs, idx)
}
clus.lg.Info("member add START", zap.Int("members-to-add", len(idxs)))
for i, idx := range idxs {
clus.lg.Info(
"member add request SENT",
zap.String("target-endpoint", clus.Members[idx].EtcdClientEndpoint),
zap.Strings("peer-urls", clus.Members[idx].Etcd.AdvertisePeerURLs),
)
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
_, err := leaderc.MemberAdd(ctx, clus.Members[idx].Etcd.AdvertisePeerURLs)
cancel()
clus.lg.Info(
"member add request DONE",
zap.String("target-endpoint", clus.Members[idx].EtcdClientEndpoint),
zap.Strings("peer-urls", clus.Members[idx].Etcd.AdvertisePeerURLs),
zap.Error(err),
)
if err != nil {
return err
}
// start the added(new) member with fresh data
clus.Members[idx].EtcdOnSnapshotRestore = clus.Members[idx].Etcd
clus.Members[idx].EtcdOnSnapshotRestore.InitialClusterState = "existing"
name := clus.Members[idx].Etcd.Name
for _, u := range clus.Members[idx].Etcd.AdvertisePeerURLs {
initClus = append(initClus, fmt.Sprintf("%s=%s", name, u))
}
clus.Members[idx].EtcdOnSnapshotRestore.InitialCluster = strings.Join(initClus, ",")
clus.lg.Info(
"restart from snapshot request SENT",
zap.String("target-endpoint", clus.Members[idx].EtcdClientEndpoint),
zap.Strings("initial-cluster", initClus),
)
err = clus.sendOp(idx, rpcpb.Operation_RESTART_FROM_SNAPSHOT)
clus.lg.Info(
"restart from snapshot request DONE",
zap.String("target-endpoint", clus.Members[idx].EtcdClientEndpoint),
zap.Strings("initial-cluster", initClus),
zap.Error(err),
)
if err != nil {
return err
}
if i != len(c.injected)-1 {
// wait until membership reconfiguration entry gets applied
// TODO: test concurrent member add
dur := 5 * clus.Members[idx].ElectionTimeout()
clus.lg.Info(
"waiting after restart from snapshot request",
zap.Int("i", i),
zap.Int("idx", idx),
zap.Duration("sleep", dur),
)
time.Sleep(dur)
} else {
clus.lg.Info(
"restart from snapshot request ALL END",
zap.Int("i", i),
zap.Int("idx", idx),
)
}
}
return nil
}
func (c *fetchSnapshotCaseQuorum) Desc() string {
if c.desc != "" {
return c.desc
}
return c.rpcpbCase.String()
}
func (c *fetchSnapshotCaseQuorum) TestCase() rpcpb.Case {
return c.rpcpbCase
}
func new_Case_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH(clus *Cluster) Case {
c := &fetchSnapshotCaseQuorum{
rpcpbCase: rpcpb.Case_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH,
injected: make(map[int]struct{}),
snapshotted: -1,
}
// simulate real life; machine replacements may happen
// after some time since disaster
return &caseDelay{
Case: c,
delayDuration: clus.GetCaseDelayDuration(),
}
}
| tests/functional/tester/case_sigquit_remove_quorum.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.0024288890417665243,
0.00025680262478999794,
0.00016385327035095543,
0.00016984099056571722,
0.00041861511999741197
] |
{
"id": 3,
"code_window": [
"\t\t\tfmt.Println(atomic.LoadInt64(&total))\n",
"\t\t}\n",
"\t}()\n",
"\n",
"\ts := mirror.NewSyncer(c, mmprefix, 0)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tstartRev := mmrev - 1\n",
"\tif startRev < 0 {\n",
"\t\tstartRev = 0\n",
"\t}\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 144
} | // Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package e2e
import (
"fmt"
"testing"
"time"
"go.etcd.io/etcd/tests/v3/framework/e2e"
)
func TestCtlV3MakeMirror(t *testing.T) { testCtl(t, makeMirrorTest) }
func TestCtlV3MakeMirrorModifyDestPrefix(t *testing.T) { testCtl(t, makeMirrorModifyDestPrefixTest) }
func TestCtlV3MakeMirrorNoDestPrefix(t *testing.T) { testCtl(t, makeMirrorNoDestPrefixTest) }
func makeMirrorTest(cx ctlCtx) {
var (
flags = []string{}
kvs = []kv{{"key1", "val1"}, {"key2", "val2"}, {"key3", "val3"}}
kvs2 = []kvExec{{key: "key1", val: "val1"}, {key: "key2", val: "val2"}, {key: "key3", val: "val3"}}
prefix = "key"
)
testMirrorCommand(cx, flags, kvs, kvs2, prefix, prefix)
}
func makeMirrorModifyDestPrefixTest(cx ctlCtx) {
var (
flags = []string{"--prefix", "o_", "--dest-prefix", "d_"}
kvs = []kv{{"o_key1", "val1"}, {"o_key2", "val2"}, {"o_key3", "val3"}}
kvs2 = []kvExec{{key: "d_key1", val: "val1"}, {key: "d_key2", val: "val2"}, {key: "d_key3", val: "val3"}}
srcprefix = "o_"
destprefix = "d_"
)
testMirrorCommand(cx, flags, kvs, kvs2, srcprefix, destprefix)
}
func makeMirrorNoDestPrefixTest(cx ctlCtx) {
var (
flags = []string{"--prefix", "o_", "--no-dest-prefix"}
kvs = []kv{{"o_key1", "val1"}, {"o_key2", "val2"}, {"o_key3", "val3"}}
kvs2 = []kvExec{{key: "key1", val: "val1"}, {key: "key2", val: "val2"}, {key: "key3", val: "val3"}}
srcprefix = "o_"
destprefix = "key"
)
testMirrorCommand(cx, flags, kvs, kvs2, srcprefix, destprefix)
}
func testMirrorCommand(cx ctlCtx, flags []string, sourcekvs []kv, destkvs []kvExec, srcprefix, destprefix string) {
// set up another cluster to mirror with
mirrorcfg := e2e.NewConfigAutoTLS()
mirrorcfg.ClusterSize = 1
mirrorcfg.BasePort = 10000
mirrorctx := ctlCtx{
t: cx.t,
cfg: *mirrorcfg,
dialTimeout: 7 * time.Second,
}
mirrorepc, err := e2e.NewEtcdProcessCluster(cx.t, &mirrorctx.cfg)
if err != nil {
cx.t.Fatalf("could not start etcd process cluster (%v)", err)
}
mirrorctx.epc = mirrorepc
defer func() {
if err = mirrorctx.epc.Close(); err != nil {
cx.t.Fatalf("error closing etcd processes (%v)", err)
}
}()
cmdArgs := append(cx.PrefixArgs(), "make-mirror")
cmdArgs = append(cmdArgs, flags...)
cmdArgs = append(cmdArgs, fmt.Sprintf("localhost:%d", mirrorcfg.BasePort))
proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
if err != nil {
cx.t.Fatal(err)
}
defer func() {
err = proc.Stop()
if err != nil {
cx.t.Fatal(err)
}
}()
for i := range sourcekvs {
if err = ctlV3Put(cx, sourcekvs[i].key, sourcekvs[i].val, ""); err != nil {
cx.t.Fatal(err)
}
}
if err = ctlV3Get(cx, []string{srcprefix, "--prefix"}, sourcekvs...); err != nil {
cx.t.Fatal(err)
}
if err = ctlV3Watch(mirrorctx, []string{destprefix, "--rev", "1", "--prefix"}, destkvs...); err != nil {
cx.t.Fatal(err)
}
}
| tests/e2e/ctl_v3_make_mirror_test.go | 1 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.00034021877218037844,
0.0001965282135643065,
0.00016269311890937388,
0.00017092705820687115,
0.0000565259215363767
] |
{
"id": 3,
"code_window": [
"\t\t\tfmt.Println(atomic.LoadInt64(&total))\n",
"\t\t}\n",
"\t}()\n",
"\n",
"\ts := mirror.NewSyncer(c, mmprefix, 0)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tstartRev := mmrev - 1\n",
"\tif startRev < 0 {\n",
"\t\tstartRev = 0\n",
"\t}\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 144
} | // Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import (
"os"
"path/filepath"
"sort"
)
// ReadDirOp represents an read-directory operation.
type ReadDirOp struct {
ext string
}
// ReadDirOption configures archiver operations.
type ReadDirOption func(*ReadDirOp)
// WithExt filters file names by their extensions.
// (e.g. WithExt(".wal") to list only WAL files)
func WithExt(ext string) ReadDirOption {
return func(op *ReadDirOp) { op.ext = ext }
}
func (op *ReadDirOp) applyOpts(opts []ReadDirOption) {
for _, opt := range opts {
opt(op)
}
}
// ReadDir returns the filenames in the given directory in sorted order.
func ReadDir(d string, opts ...ReadDirOption) ([]string, error) {
op := &ReadDirOp{}
op.applyOpts(opts)
dir, err := os.Open(d)
if err != nil {
return nil, err
}
defer dir.Close()
names, err := dir.Readdirnames(-1)
if err != nil {
return nil, err
}
sort.Strings(names)
if op.ext != "" {
tss := make([]string, 0)
for _, v := range names {
if filepath.Ext(v) == op.ext {
tss = append(tss, v)
}
}
names = tss
}
return names, nil
}
| client/pkg/fileutil/read_dir.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.5599523186683655,
0.07015020400285721,
0.0001686477626208216,
0.00017570397176314145,
0.18512779474258423
] |
{
"id": 3,
"code_window": [
"\t\t\tfmt.Println(atomic.LoadInt64(&total))\n",
"\t\t}\n",
"\t}()\n",
"\n",
"\ts := mirror.NewSyncer(c, mmprefix, 0)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tstartRev := mmrev - 1\n",
"\tif startRev < 0 {\n",
"\t\tstartRev = 0\n",
"\t}\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 144
} | {
"signing": {
"default": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "87600h"
}
}
}
| tests/manual/docker-dns-srv/certs-gateway/gencert.json | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.00017281790496781468,
0.0001714385871309787,
0.0001700592547422275,
0.0001714385871309787,
0.0000013793249991067569
] |
{
"id": 3,
"code_window": [
"\t\t\tfmt.Println(atomic.LoadInt64(&total))\n",
"\t\t}\n",
"\t}()\n",
"\n",
"\ts := mirror.NewSyncer(c, mmprefix, 0)\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tstartRev := mmrev - 1\n",
"\tif startRev < 0 {\n",
"\t\tstartRev = 0\n",
"\t}\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 144
} | // Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package flags
import (
"reflect"
"testing"
)
func TestNewUniqueURLsWithExceptions(t *testing.T) {
tests := []struct {
s string
exp map[string]struct{}
rs string
exception string
}{
{ // non-URL but allowed by exception
s: "*",
exp: map[string]struct{}{"*": {}},
rs: "*",
exception: "*",
},
{
s: "",
exp: map[string]struct{}{},
rs: "",
exception: "*",
},
{
s: "https://1.2.3.4:8080",
exp: map[string]struct{}{"https://1.2.3.4:8080": {}},
rs: "https://1.2.3.4:8080",
exception: "*",
},
{
s: "https://1.2.3.4:8080,https://1.2.3.4:8080",
exp: map[string]struct{}{"https://1.2.3.4:8080": {}},
rs: "https://1.2.3.4:8080",
exception: "*",
},
{
s: "http://10.1.1.1:80",
exp: map[string]struct{}{"http://10.1.1.1:80": {}},
rs: "http://10.1.1.1:80",
exception: "*",
},
{
s: "http://localhost:80",
exp: map[string]struct{}{"http://localhost:80": {}},
rs: "http://localhost:80",
exception: "*",
},
{
s: "http://:80",
exp: map[string]struct{}{"http://:80": {}},
rs: "http://:80",
exception: "*",
},
{
s: "https://localhost:5,https://localhost:3",
exp: map[string]struct{}{"https://localhost:3": {}, "https://localhost:5": {}},
rs: "https://localhost:3,https://localhost:5",
exception: "*",
},
{
s: "http://localhost:5,https://localhost:3",
exp: map[string]struct{}{"https://localhost:3": {}, "http://localhost:5": {}},
rs: "http://localhost:5,https://localhost:3",
exception: "*",
},
}
for i := range tests {
uv := NewUniqueURLsWithExceptions(tests[i].s, tests[i].exception)
if !reflect.DeepEqual(tests[i].exp, uv.Values) {
t.Fatalf("#%d: expected %+v, got %+v", i, tests[i].exp, uv.Values)
}
if uv.String() != tests[i].rs {
t.Fatalf("#%d: expected %q, got %q", i, tests[i].rs, uv.String())
}
}
}
| pkg/flags/unique_urls_test.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.015151036903262138,
0.0020250719971954823,
0.0001662493305047974,
0.00023027141287457198,
0.004417970776557922
] |
{
"id": 4,
"code_window": [
"\n",
"\trc, errc := s.SyncBase(ctx)\n",
"\n"
],
"labels": [
"keep",
"replace",
"keep"
],
"after_edit": [
"\ts := mirror.NewSyncer(c, mmprefix, startRev)\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 146
} | // Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package command
import (
"context"
"errors"
"fmt"
"strings"
"sync/atomic"
"time"
"github.com/bgentry/speakeasy"
"go.etcd.io/etcd/pkg/v3/cobrautl"
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/mirror"
"github.com/spf13/cobra"
)
var (
mminsecureTr bool
mmcert string
mmkey string
mmcacert string
mmprefix string
mmdestprefix string
mmuser string
mmpassword string
mmnodestprefix bool
)
// NewMakeMirrorCommand returns the cobra command for "makeMirror".
func NewMakeMirrorCommand() *cobra.Command {
c := &cobra.Command{
Use: "make-mirror [options] <destination>",
Short: "Makes a mirror at the destination etcd cluster",
Run: makeMirrorCommandFunc,
}
c.Flags().StringVar(&mmprefix, "prefix", "", "Key-value prefix to mirror")
c.Flags().StringVar(&mmdestprefix, "dest-prefix", "", "destination prefix to mirror a prefix to a different prefix in the destination cluster")
c.Flags().BoolVar(&mmnodestprefix, "no-dest-prefix", false, "mirror key-values to the root of the destination cluster")
c.Flags().StringVar(&mmcert, "dest-cert", "", "Identify secure client using this TLS certificate file for the destination cluster")
c.Flags().StringVar(&mmkey, "dest-key", "", "Identify secure client using this TLS key file")
c.Flags().StringVar(&mmcacert, "dest-cacert", "", "Verify certificates of TLS enabled secure servers using this CA bundle")
// TODO: secure by default when etcd enables secure gRPC by default.
c.Flags().BoolVar(&mminsecureTr, "dest-insecure-transport", true, "Disable transport security for client connections")
c.Flags().StringVar(&mmuser, "dest-user", "", "Destination username[:password] for authentication (prompt if password is not supplied)")
c.Flags().StringVar(&mmpassword, "dest-password", "", "Destination password for authentication (if this option is used, --user option shouldn't include password)")
return c
}
func authDestCfg() *authCfg {
if mmuser == "" {
return nil
}
var cfg authCfg
if mmpassword == "" {
splitted := strings.SplitN(mmuser, ":", 2)
if len(splitted) < 2 {
var err error
cfg.username = mmuser
cfg.password, err = speakeasy.Ask("Destination Password: ")
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
} else {
cfg.username = splitted[0]
cfg.password = splitted[1]
}
} else {
cfg.username = mmuser
cfg.password = mmpassword
}
return &cfg
}
func makeMirrorCommandFunc(cmd *cobra.Command, args []string) {
if len(args) != 1 {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("make-mirror takes one destination argument"))
}
dialTimeout := dialTimeoutFromCmd(cmd)
keepAliveTime := keepAliveTimeFromCmd(cmd)
keepAliveTimeout := keepAliveTimeoutFromCmd(cmd)
sec := &secureCfg{
cert: mmcert,
key: mmkey,
cacert: mmcacert,
insecureTransport: mminsecureTr,
}
auth := authDestCfg()
cc := &clientConfig{
endpoints: []string{args[0]},
dialTimeout: dialTimeout,
keepAliveTime: keepAliveTime,
keepAliveTimeout: keepAliveTimeout,
scfg: sec,
acfg: auth,
}
dc := cc.mustClient()
c := mustClientFromCmd(cmd)
err := makeMirror(context.TODO(), c, dc)
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
func makeMirror(ctx context.Context, c *clientv3.Client, dc *clientv3.Client) error {
total := int64(0)
// if destination prefix is specified and remove destination prefix is true return error
if mmnodestprefix && len(mmdestprefix) > 0 {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("`--dest-prefix` and `--no-dest-prefix` cannot be set at the same time, choose one"))
}
go func() {
for {
time.Sleep(30 * time.Second)
fmt.Println(atomic.LoadInt64(&total))
}
}()
s := mirror.NewSyncer(c, mmprefix, 0)
rc, errc := s.SyncBase(ctx)
// if remove destination prefix is false and destination prefix is empty set the value of destination prefix same as prefix
if !mmnodestprefix && len(mmdestprefix) == 0 {
mmdestprefix = mmprefix
}
for r := range rc {
for _, kv := range r.Kvs {
_, err := dc.Put(ctx, modifyPrefix(string(kv.Key)), string(kv.Value))
if err != nil {
return err
}
atomic.AddInt64(&total, 1)
}
}
err := <-errc
if err != nil {
return err
}
wc := s.SyncUpdates(ctx)
for wr := range wc {
if wr.CompactRevision != 0 {
return rpctypes.ErrCompacted
}
var lastRev int64
ops := []clientv3.Op{}
for _, ev := range wr.Events {
nextRev := ev.Kv.ModRevision
if lastRev != 0 && nextRev > lastRev {
_, err := dc.Txn(ctx).Then(ops...).Commit()
if err != nil {
return err
}
ops = []clientv3.Op{}
}
lastRev = nextRev
switch ev.Type {
case mvccpb.PUT:
ops = append(ops, clientv3.OpPut(modifyPrefix(string(ev.Kv.Key)), string(ev.Kv.Value)))
atomic.AddInt64(&total, 1)
case mvccpb.DELETE:
ops = append(ops, clientv3.OpDelete(modifyPrefix(string(ev.Kv.Key))))
atomic.AddInt64(&total, 1)
default:
panic("unexpected event type")
}
}
if len(ops) != 0 {
_, err := dc.Txn(ctx).Then(ops...).Commit()
if err != nil {
return err
}
}
}
return nil
}
func modifyPrefix(key string) string {
return strings.Replace(key, mmprefix, mmdestprefix, 1)
}
| etcdctl/ctlv3/command/make_mirror_command.go | 1 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.9991629123687744,
0.17757870256900787,
0.00016775219410192221,
0.00017234997358173132,
0.3766806423664093
] |
{
"id": 4,
"code_window": [
"\n",
"\trc, errc := s.SyncBase(ctx)\n",
"\n"
],
"labels": [
"keep",
"replace",
"keep"
],
"after_edit": [
"\ts := mirror.NewSyncer(c, mmprefix, startRev)\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 146
} | // Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wait
import "sync"
type WaitTime interface {
// Wait returns a chan that waits on the given logical deadline.
// The chan will be triggered when Trigger is called with a
// deadline that is later than the one it is waiting for.
Wait(deadline uint64) <-chan struct{}
// Trigger triggers all the waiting chans with an earlier logical deadline.
Trigger(deadline uint64)
}
var closec chan struct{}
func init() { closec = make(chan struct{}); close(closec) }
type timeList struct {
l sync.Mutex
lastTriggerDeadline uint64
m map[uint64]chan struct{}
}
func NewTimeList() *timeList {
return &timeList{m: make(map[uint64]chan struct{})}
}
func (tl *timeList) Wait(deadline uint64) <-chan struct{} {
tl.l.Lock()
defer tl.l.Unlock()
if tl.lastTriggerDeadline >= deadline {
return closec
}
ch := tl.m[deadline]
if ch == nil {
ch = make(chan struct{})
tl.m[deadline] = ch
}
return ch
}
func (tl *timeList) Trigger(deadline uint64) {
tl.l.Lock()
defer tl.l.Unlock()
tl.lastTriggerDeadline = deadline
for t, ch := range tl.m {
if t <= deadline {
delete(tl.m, t)
close(ch)
}
}
}
| pkg/wait/wait_time.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.0012793075293302536,
0.00032950585591606796,
0.0001694144739303738,
0.0001703988091321662,
0.00038776086876168847
] |
{
"id": 4,
"code_window": [
"\n",
"\trc, errc := s.SyncBase(ctx)\n",
"\n"
],
"labels": [
"keep",
"replace",
"keep"
],
"after_edit": [
"\ts := mirror.NewSyncer(c, mmprefix, startRev)\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 146
} | ---
name: Distributors Application
title: Distributors Application for <YOUR DISTRIBUTION HERE>
about: Apply for membership of [email protected]
---
<!--
Please answer the following questions and provide supporting evidence for
meeting the membership criteria.
-->
**Actively monitored security email alias for our project:**
**1. Have a user base not limited to your own organization.**
**2. Have a publicly verifiable track record up to present day of fixing security issues.**
**3. Not be a downstream or rebuild of another distribution.**
**4. Be a participant and active contributor in the community.**
**5. Accept the Embargo Policy.**
<!-- https://github.com/etcd-io/etcd/blob/main/security/security-release-process.md#disclosures -->
**6. Be willing to contribute back.**
<!-- Per https://github.com/etcd-io/etcd/blob/main/security/security-release-process.md#patch-release-and-public-communication -->
**7. Have someone already on the list vouch for the person requesting membership on behalf of your distribution.**
| .github/ISSUE_TEMPLATE/distributors-application.md | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.00017290904361288995,
0.00017103098798543215,
0.00016974075697362423,
0.00017044319247361273,
0.0000013585890883405227
] |
{
"id": 4,
"code_window": [
"\n",
"\trc, errc := s.SyncBase(ctx)\n",
"\n"
],
"labels": [
"keep",
"replace",
"keep"
],
"after_edit": [
"\ts := mirror.NewSyncer(c, mmprefix, startRev)\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 146
} | -----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEA78eZhOmcFetbIu0CtfF/iupSUlq/KxPtJeGG72y4f2wnT25z
807IivhTeBQPX7q+9y/0b0A8TvVxvfaSiISjxsYoreg0ifay9uEaSu7SaYJ5yina
8uhPuv2b1VwT9y7XScl95KXcPk0or5cIudAnhEAOWq4Dl58gzRiDlgvIjKXoO896
RNA36QxICzIxhPyP/WB0Qjoiv/JEHIV6lX1h6QCxWpH/fkDzaTaxCD41eyD42hhF
HgxvTCND2GwZeOTZCiItJ3pVfzWsvZMj0jyn754CVNnU6rBklV1vg5WaOziQYUSx
Iofglhn6LRsxc11nUEbw/1ZXmsWGFAypMh1cdQIDAQABAoIBAQCnpgkisyuc78fy
7YAdslKY0Cjqx+QtvGrtN3he4sdE4FvD39hWX9k7wVCq/muZZTqsHe1r85+3HUl/
pmzh4suX6Wj73wUNCV4r20vE5KJdfwqkXQtnFyLX/QX98blL9IY2YxkQyx7ouI4f
5xwEvxNCFn9yy4RbeLk4bVFjka2RF/x6qEUCHq5Q74vWvyC1i3kGKgYruM39RQw3
D5fG8xdUexBc32nfzynP+0NcFAiy+yUQWOLcE4i8XaegFvg+QvWOx1iwjqU3FDeC
JzKrtw9SLBWf7AGraxA59K4WJ63xqGqFugWcFaYh923X8zES/s0wrtV2T14Lgj3Q
aWJ0DfQBAoGBAPNd1Aph0Z9PwZ1zivJprj8XDnFP2XIqlODYuiVddjbvIV13d6F/
PE/ViW0MVduF0ejkPs9+hSxYOH58EWIt44Li/Nre1U42ny+fJrY1P5Mq5nriM4L4
lx2YFaWzAoxzpMbbQ14kEMcQSicziDbBx62aaQYu4UwrvqXYdSYp+D+BAoGBAPw6
Gtv6hytg19GtH6sQG9/4K4cLGX4lJE3pTL3eUicfoEI+hviZdG8FS77Uv05ga6fQ
OlyqvpmmXp6fgTrSlHBeKO75A3sT7fP1v1foq1y+CdMGytOnJENUc80bN0L1dFI1
zwYm7eLDP0KdUYpf+Rpgcap4StQbotpc6oy705b1AoGBAO9z26VXd+ybifKE9Cru
Zp727aP6IAaf9RqCxCztl9oXUanoWVISoeIfRfeA0p2LPu06Xr7ESv5F01hIdMY4
RonLE2W7KP+q6NfvbSSMogAIjvxLwslUFUPuFyaRSqmtQ2zR4qgnLkbfNUb7AkR2
SCT9L+cAi3bp98ywfRvO4c6BAoGANkAJJudry1i5EtA5z4FXfYTTV+h7QzaZ6GgV
qYD4CpIy1gy82xumf3qUICeCPkle3mlbJDNVa5btIxELqqtAYiregwfsR7yxoZdp
4G6a7Qey9UCwv3Vjx1eS0LrZ1/0TV9ta++fDotJ7+Mf9kdWyromv6QqWjaikDnON
v1dm20ECgYEA6i+uvBuomUzqJYjUTNgMtmfYjuXv8D58wLkY10si7T2ayAlFKBG2
Dno/dojOcixO1dtaq7dA+KNXtESekjT1Y4VleGHWpEglRScE629iow4ASrluP/Ec
F2DvTRW4daFDWQV4je1u0+wDj5B8KZjO/e759BztiRyRqTCzpxTa8Ms=
-----END RSA PRIVATE KEY-----
| tests/fixtures/server-serverusage.key.insecure | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.005004902835935354,
0.002092151204124093,
0.0001706231851130724,
0.0011009281734004617,
0.002094350755214691
] |
{
"id": 5,
"code_window": [
"\n",
"\t// if remove destination prefix is false and destination prefix is empty set the value of destination prefix same as prefix\n",
"\tif !mmnodestprefix && len(mmdestprefix) == 0 {\n",
"\t\tmmdestprefix = mmprefix\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"\t// If a rev is provided, then do not sync the whole key space.\n",
"\t// Instead, just start watching the key space starting from the rev\n",
"\tif startRev == 0 {\n",
"\t\trc, errc := s.SyncBase(ctx)\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 148
} | // Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package command
import (
"context"
"errors"
"fmt"
"strings"
"sync/atomic"
"time"
"github.com/bgentry/speakeasy"
"go.etcd.io/etcd/pkg/v3/cobrautl"
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/mirror"
"github.com/spf13/cobra"
)
var (
mminsecureTr bool
mmcert string
mmkey string
mmcacert string
mmprefix string
mmdestprefix string
mmuser string
mmpassword string
mmnodestprefix bool
)
// NewMakeMirrorCommand returns the cobra command for "makeMirror".
func NewMakeMirrorCommand() *cobra.Command {
c := &cobra.Command{
Use: "make-mirror [options] <destination>",
Short: "Makes a mirror at the destination etcd cluster",
Run: makeMirrorCommandFunc,
}
c.Flags().StringVar(&mmprefix, "prefix", "", "Key-value prefix to mirror")
c.Flags().StringVar(&mmdestprefix, "dest-prefix", "", "destination prefix to mirror a prefix to a different prefix in the destination cluster")
c.Flags().BoolVar(&mmnodestprefix, "no-dest-prefix", false, "mirror key-values to the root of the destination cluster")
c.Flags().StringVar(&mmcert, "dest-cert", "", "Identify secure client using this TLS certificate file for the destination cluster")
c.Flags().StringVar(&mmkey, "dest-key", "", "Identify secure client using this TLS key file")
c.Flags().StringVar(&mmcacert, "dest-cacert", "", "Verify certificates of TLS enabled secure servers using this CA bundle")
// TODO: secure by default when etcd enables secure gRPC by default.
c.Flags().BoolVar(&mminsecureTr, "dest-insecure-transport", true, "Disable transport security for client connections")
c.Flags().StringVar(&mmuser, "dest-user", "", "Destination username[:password] for authentication (prompt if password is not supplied)")
c.Flags().StringVar(&mmpassword, "dest-password", "", "Destination password for authentication (if this option is used, --user option shouldn't include password)")
return c
}
func authDestCfg() *authCfg {
if mmuser == "" {
return nil
}
var cfg authCfg
if mmpassword == "" {
splitted := strings.SplitN(mmuser, ":", 2)
if len(splitted) < 2 {
var err error
cfg.username = mmuser
cfg.password, err = speakeasy.Ask("Destination Password: ")
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
} else {
cfg.username = splitted[0]
cfg.password = splitted[1]
}
} else {
cfg.username = mmuser
cfg.password = mmpassword
}
return &cfg
}
func makeMirrorCommandFunc(cmd *cobra.Command, args []string) {
if len(args) != 1 {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("make-mirror takes one destination argument"))
}
dialTimeout := dialTimeoutFromCmd(cmd)
keepAliveTime := keepAliveTimeFromCmd(cmd)
keepAliveTimeout := keepAliveTimeoutFromCmd(cmd)
sec := &secureCfg{
cert: mmcert,
key: mmkey,
cacert: mmcacert,
insecureTransport: mminsecureTr,
}
auth := authDestCfg()
cc := &clientConfig{
endpoints: []string{args[0]},
dialTimeout: dialTimeout,
keepAliveTime: keepAliveTime,
keepAliveTimeout: keepAliveTimeout,
scfg: sec,
acfg: auth,
}
dc := cc.mustClient()
c := mustClientFromCmd(cmd)
err := makeMirror(context.TODO(), c, dc)
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
func makeMirror(ctx context.Context, c *clientv3.Client, dc *clientv3.Client) error {
total := int64(0)
// if destination prefix is specified and remove destination prefix is true return error
if mmnodestprefix && len(mmdestprefix) > 0 {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("`--dest-prefix` and `--no-dest-prefix` cannot be set at the same time, choose one"))
}
go func() {
for {
time.Sleep(30 * time.Second)
fmt.Println(atomic.LoadInt64(&total))
}
}()
s := mirror.NewSyncer(c, mmprefix, 0)
rc, errc := s.SyncBase(ctx)
// if remove destination prefix is false and destination prefix is empty set the value of destination prefix same as prefix
if !mmnodestprefix && len(mmdestprefix) == 0 {
mmdestprefix = mmprefix
}
for r := range rc {
for _, kv := range r.Kvs {
_, err := dc.Put(ctx, modifyPrefix(string(kv.Key)), string(kv.Value))
if err != nil {
return err
}
atomic.AddInt64(&total, 1)
}
}
err := <-errc
if err != nil {
return err
}
wc := s.SyncUpdates(ctx)
for wr := range wc {
if wr.CompactRevision != 0 {
return rpctypes.ErrCompacted
}
var lastRev int64
ops := []clientv3.Op{}
for _, ev := range wr.Events {
nextRev := ev.Kv.ModRevision
if lastRev != 0 && nextRev > lastRev {
_, err := dc.Txn(ctx).Then(ops...).Commit()
if err != nil {
return err
}
ops = []clientv3.Op{}
}
lastRev = nextRev
switch ev.Type {
case mvccpb.PUT:
ops = append(ops, clientv3.OpPut(modifyPrefix(string(ev.Kv.Key)), string(ev.Kv.Value)))
atomic.AddInt64(&total, 1)
case mvccpb.DELETE:
ops = append(ops, clientv3.OpDelete(modifyPrefix(string(ev.Kv.Key))))
atomic.AddInt64(&total, 1)
default:
panic("unexpected event type")
}
}
if len(ops) != 0 {
_, err := dc.Txn(ctx).Then(ops...).Commit()
if err != nil {
return err
}
}
}
return nil
}
func modifyPrefix(key string) string {
return strings.Replace(key, mmprefix, mmdestprefix, 1)
}
| etcdctl/ctlv3/command/make_mirror_command.go | 1 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.9989712238311768,
0.2648296356201172,
0.0001688128395471722,
0.0009111773688346148,
0.4319523870944977
] |
{
"id": 5,
"code_window": [
"\n",
"\t// if remove destination prefix is false and destination prefix is empty set the value of destination prefix same as prefix\n",
"\tif !mmnodestprefix && len(mmdestprefix) == 0 {\n",
"\t\tmmdestprefix = mmprefix\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"\t// If a rev is provided, then do not sync the whole key space.\n",
"\t// Instead, just start watching the key space starting from the rev\n",
"\tif startRev == 0 {\n",
"\t\trc, errc := s.SyncBase(ctx)\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 148
} | // Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v2http
import (
"math"
"net/http"
"strings"
"time"
"go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp"
"go.etcd.io/etcd/server/v3/etcdserver/api/v2auth"
"go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes"
"go.uber.org/zap"
)
const (
// time to wait for a Watch request
defaultWatchTimeout = time.Duration(math.MaxInt64)
)
func writeError(lg *zap.Logger, w http.ResponseWriter, r *http.Request, err error) {
if err == nil {
return
}
if e, ok := err.(v2auth.Error); ok {
herr := httptypes.NewHTTPError(e.HTTPStatus(), e.Error())
if et := herr.WriteTo(w); et != nil {
if lg != nil {
lg.Debug(
"failed to write v2 HTTP error",
zap.String("remote-addr", r.RemoteAddr),
zap.String("v2auth-error", e.Error()),
zap.Error(et),
)
}
}
return
}
etcdhttp.WriteError(lg, w, r, err)
}
// allowMethod verifies that the given method is one of the allowed methods,
// and if not, it writes an error to w. A boolean is returned indicating
// whether or not the method is allowed.
func allowMethod(w http.ResponseWriter, m string, ms ...string) bool {
for _, meth := range ms {
if m == meth {
return true
}
}
w.Header().Set("Allow", strings.Join(ms, ","))
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
return false
}
func requestLogger(lg *zap.Logger, handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if lg != nil {
lg.Debug(
"handling HTTP request",
zap.String("method", r.Method),
zap.String("request-uri", r.RequestURI),
zap.String("remote-addr", r.RemoteAddr),
)
}
handler.ServeHTTP(w, r)
})
}
| server/etcdserver/api/v2http/http.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.00017944342107512057,
0.00017311875126324594,
0.00016704668814782053,
0.00017441173258703202,
0.000004567516043607611
] |
{
"id": 5,
"code_window": [
"\n",
"\t// if remove destination prefix is false and destination prefix is empty set the value of destination prefix same as prefix\n",
"\tif !mmnodestprefix && len(mmdestprefix) == 0 {\n",
"\t\tmmdestprefix = mmprefix\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"\t// If a rev is provided, then do not sync the whole key space.\n",
"\t// Instead, just start watching the key space starting from the rev\n",
"\tif startRev == 0 {\n",
"\t\trc, errc := s.SyncBase(ctx)\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 148
} | {
"signing": {
"default": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "87600h"
}
}
}
| tests/manual/docker-dns-srv/certs/gencert.json | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.00017624818428885192,
0.00017333310097455978,
0.00017041800310835242,
0.00017333310097455978,
0.000002915090590249747
] |
{
"id": 5,
"code_window": [
"\n",
"\t// if remove destination prefix is false and destination prefix is empty set the value of destination prefix same as prefix\n",
"\tif !mmnodestprefix && len(mmdestprefix) == 0 {\n",
"\t\tmmdestprefix = mmprefix\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"\t// If a rev is provided, then do not sync the whole key space.\n",
"\t// Instead, just start watching the key space starting from the rev\n",
"\tif startRev == 0 {\n",
"\t\trc, errc := s.SyncBase(ctx)\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 148
} | // Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package recipe
import (
"context"
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/client/v3"
)
// WaitEvents waits on a key until it observes the given events and returns the final one.
func WaitEvents(c *clientv3.Client, key string, rev int64, evs []mvccpb.Event_EventType) (*clientv3.Event, error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
wc := c.Watch(ctx, key, clientv3.WithRev(rev))
if wc == nil {
return nil, ErrNoWatcher
}
return waitEvents(wc, evs), nil
}
func WaitPrefixEvents(c *clientv3.Client, prefix string, rev int64, evs []mvccpb.Event_EventType) (*clientv3.Event, error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
wc := c.Watch(ctx, prefix, clientv3.WithPrefix(), clientv3.WithRev(rev))
if wc == nil {
return nil, ErrNoWatcher
}
return waitEvents(wc, evs), nil
}
func waitEvents(wc clientv3.WatchChan, evs []mvccpb.Event_EventType) *clientv3.Event {
i := 0
for wresp := range wc {
for _, ev := range wresp.Events {
if ev.Type == evs[i] {
i++
if i == len(evs) {
return ev
}
}
}
}
return nil
}
| client/v3/experimental/recipes/watch.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.0016268633771687746,
0.00041569946915842593,
0.00017123649013228714,
0.00017319730250164866,
0.0005416529602371156
] |
{
"id": 6,
"code_window": [
"\n",
"\tfor r := range rc {\n",
"\t\tfor _, kv := range r.Kvs {\n",
"\t\t\t_, err := dc.Put(ctx, modifyPrefix(string(kv.Key)), string(kv.Value))\n",
"\t\t\tif err != nil {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"\t\t// if remove destination prefix is false and destination prefix is empty set the value of destination prefix same as prefix\n",
"\t\tif !mmnodestprefix && len(mmdestprefix) == 0 {\n",
"\t\t\tmmdestprefix = mmprefix\n",
"\t\t}\n",
"\n",
"\t\tfor r := range rc {\n",
"\t\t\tfor _, kv := range r.Kvs {\n",
"\t\t\t\t_, err := dc.Put(ctx, modifyPrefix(string(kv.Key)), string(kv.Value))\n",
"\t\t\t\tif err != nil {\n",
"\t\t\t\t\treturn err\n",
"\t\t\t\t}\n",
"\t\t\t\tatomic.AddInt64(&total, 1)\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 153
} | // Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package e2e
import (
"fmt"
"testing"
"time"
"go.etcd.io/etcd/tests/v3/framework/e2e"
)
func TestCtlV3MakeMirror(t *testing.T) { testCtl(t, makeMirrorTest) }
func TestCtlV3MakeMirrorModifyDestPrefix(t *testing.T) { testCtl(t, makeMirrorModifyDestPrefixTest) }
func TestCtlV3MakeMirrorNoDestPrefix(t *testing.T) { testCtl(t, makeMirrorNoDestPrefixTest) }
func makeMirrorTest(cx ctlCtx) {
var (
flags = []string{}
kvs = []kv{{"key1", "val1"}, {"key2", "val2"}, {"key3", "val3"}}
kvs2 = []kvExec{{key: "key1", val: "val1"}, {key: "key2", val: "val2"}, {key: "key3", val: "val3"}}
prefix = "key"
)
testMirrorCommand(cx, flags, kvs, kvs2, prefix, prefix)
}
func makeMirrorModifyDestPrefixTest(cx ctlCtx) {
var (
flags = []string{"--prefix", "o_", "--dest-prefix", "d_"}
kvs = []kv{{"o_key1", "val1"}, {"o_key2", "val2"}, {"o_key3", "val3"}}
kvs2 = []kvExec{{key: "d_key1", val: "val1"}, {key: "d_key2", val: "val2"}, {key: "d_key3", val: "val3"}}
srcprefix = "o_"
destprefix = "d_"
)
testMirrorCommand(cx, flags, kvs, kvs2, srcprefix, destprefix)
}
func makeMirrorNoDestPrefixTest(cx ctlCtx) {
var (
flags = []string{"--prefix", "o_", "--no-dest-prefix"}
kvs = []kv{{"o_key1", "val1"}, {"o_key2", "val2"}, {"o_key3", "val3"}}
kvs2 = []kvExec{{key: "key1", val: "val1"}, {key: "key2", val: "val2"}, {key: "key3", val: "val3"}}
srcprefix = "o_"
destprefix = "key"
)
testMirrorCommand(cx, flags, kvs, kvs2, srcprefix, destprefix)
}
func testMirrorCommand(cx ctlCtx, flags []string, sourcekvs []kv, destkvs []kvExec, srcprefix, destprefix string) {
// set up another cluster to mirror with
mirrorcfg := e2e.NewConfigAutoTLS()
mirrorcfg.ClusterSize = 1
mirrorcfg.BasePort = 10000
mirrorctx := ctlCtx{
t: cx.t,
cfg: *mirrorcfg,
dialTimeout: 7 * time.Second,
}
mirrorepc, err := e2e.NewEtcdProcessCluster(cx.t, &mirrorctx.cfg)
if err != nil {
cx.t.Fatalf("could not start etcd process cluster (%v)", err)
}
mirrorctx.epc = mirrorepc
defer func() {
if err = mirrorctx.epc.Close(); err != nil {
cx.t.Fatalf("error closing etcd processes (%v)", err)
}
}()
cmdArgs := append(cx.PrefixArgs(), "make-mirror")
cmdArgs = append(cmdArgs, flags...)
cmdArgs = append(cmdArgs, fmt.Sprintf("localhost:%d", mirrorcfg.BasePort))
proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap)
if err != nil {
cx.t.Fatal(err)
}
defer func() {
err = proc.Stop()
if err != nil {
cx.t.Fatal(err)
}
}()
for i := range sourcekvs {
if err = ctlV3Put(cx, sourcekvs[i].key, sourcekvs[i].val, ""); err != nil {
cx.t.Fatal(err)
}
}
if err = ctlV3Get(cx, []string{srcprefix, "--prefix"}, sourcekvs...); err != nil {
cx.t.Fatal(err)
}
if err = ctlV3Watch(mirrorctx, []string{destprefix, "--rev", "1", "--prefix"}, destkvs...); err != nil {
cx.t.Fatal(err)
}
}
| tests/e2e/ctl_v3_make_mirror_test.go | 1 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.9052532315254211,
0.15022310614585876,
0.00017632516392040998,
0.0005738298059441149,
0.33315181732177734
] |
{
"id": 6,
"code_window": [
"\n",
"\tfor r := range rc {\n",
"\t\tfor _, kv := range r.Kvs {\n",
"\t\t\t_, err := dc.Put(ctx, modifyPrefix(string(kv.Key)), string(kv.Value))\n",
"\t\t\tif err != nil {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"\t\t// if remove destination prefix is false and destination prefix is empty set the value of destination prefix same as prefix\n",
"\t\tif !mmnodestprefix && len(mmdestprefix) == 0 {\n",
"\t\t\tmmdestprefix = mmprefix\n",
"\t\t}\n",
"\n",
"\t\tfor r := range rc {\n",
"\t\t\tfor _, kv := range r.Kvs {\n",
"\t\t\t\t_, err := dc.Put(ctx, modifyPrefix(string(kv.Key)), string(kv.Value))\n",
"\t\t\t\tif err != nil {\n",
"\t\t\t\t\treturn err\n",
"\t\t\t\t}\n",
"\t\t\t\tatomic.AddInt64(&total, 1)\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 153
} | // Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package command
import (
"context"
"encoding/binary"
"fmt"
"math"
"math/rand"
"os"
"os/signal"
"strconv"
"sync"
"time"
v3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/cobrautl"
"go.etcd.io/etcd/pkg/v3/report"
"github.com/spf13/cobra"
"golang.org/x/time/rate"
"gopkg.in/cheggaaa/pb.v1"
)
var (
checkPerfLoad string
checkPerfPrefix string
checkDatascaleLoad string
checkDatascalePrefix string
autoCompact bool
autoDefrag bool
)
type checkPerfCfg struct {
limit int
clients int
duration int
}
var checkPerfCfgMap = map[string]checkPerfCfg{
// TODO: support read limit
"s": {
limit: 150,
clients: 50,
duration: 60,
},
"m": {
limit: 1000,
clients: 200,
duration: 60,
},
"l": {
limit: 8000,
clients: 500,
duration: 60,
},
"xl": {
limit: 15000,
clients: 1000,
duration: 60,
},
}
type checkDatascaleCfg struct {
limit int
kvSize int
clients int
}
var checkDatascaleCfgMap = map[string]checkDatascaleCfg{
"s": {
limit: 10000,
kvSize: 1024,
clients: 50,
},
"m": {
limit: 100000,
kvSize: 1024,
clients: 200,
},
"l": {
limit: 1000000,
kvSize: 1024,
clients: 500,
},
"xl": {
// xl tries to hit the upper bound aggressively which is 3 versions of 1M objects (3M in total)
limit: 3000000,
kvSize: 1024,
clients: 1000,
},
}
// NewCheckCommand returns the cobra command for "check".
func NewCheckCommand() *cobra.Command {
cc := &cobra.Command{
Use: "check <subcommand>",
Short: "commands for checking properties of the etcd cluster",
}
cc.AddCommand(NewCheckPerfCommand())
cc.AddCommand(NewCheckDatascaleCommand())
return cc
}
// NewCheckPerfCommand returns the cobra command for "check perf".
func NewCheckPerfCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "perf [options]",
Short: "Check the performance of the etcd cluster",
Run: newCheckPerfCommand,
}
// TODO: support customized configuration
cmd.Flags().StringVar(&checkPerfLoad, "load", "s", "The performance check's workload model. Accepted workloads: s(small), m(medium), l(large), xl(xLarge)")
cmd.Flags().StringVar(&checkPerfPrefix, "prefix", "/etcdctl-check-perf/", "The prefix for writing the performance check's keys.")
cmd.Flags().BoolVar(&autoCompact, "auto-compact", false, "Compact storage with last revision after test is finished.")
cmd.Flags().BoolVar(&autoDefrag, "auto-defrag", false, "Defragment storage after test is finished.")
cmd.RegisterFlagCompletionFunc("load", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
return []string{"small", "medium", "large", "xLarge"}, cobra.ShellCompDirectiveDefault
})
return cmd
}
// newCheckPerfCommand executes the "check perf" command.
func newCheckPerfCommand(cmd *cobra.Command, args []string) {
var checkPerfAlias = map[string]string{
"s": "s", "small": "s",
"m": "m", "medium": "m",
"l": "l", "large": "l",
"xl": "xl", "xLarge": "xl",
}
model, ok := checkPerfAlias[checkPerfLoad]
if !ok {
cobrautl.ExitWithError(cobrautl.ExitBadFeature, fmt.Errorf("unknown load option %v", checkPerfLoad))
}
cfg := checkPerfCfgMap[model]
requests := make(chan v3.Op, cfg.clients)
limit := rate.NewLimiter(rate.Limit(cfg.limit), 1)
cc := clientConfigFromCmd(cmd)
clients := make([]*v3.Client, cfg.clients)
for i := 0; i < cfg.clients; i++ {
clients[i] = cc.mustClient()
}
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(cfg.duration)*time.Second)
defer cancel()
ctx, icancel := interruptableContext(ctx, func() { attemptCleanup(clients[0], false) })
defer icancel()
gctx, gcancel := context.WithCancel(ctx)
resp, err := clients[0].Get(gctx, checkPerfPrefix, v3.WithPrefix(), v3.WithLimit(1))
gcancel()
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
if len(resp.Kvs) > 0 {
cobrautl.ExitWithError(cobrautl.ExitInvalidInput, fmt.Errorf("prefix %q has keys. Delete with 'etcdctl del --prefix %s' first", checkPerfPrefix, checkPerfPrefix))
}
ksize, vsize := 256, 1024
k, v := make([]byte, ksize), string(make([]byte, vsize))
bar := pb.New(cfg.duration)
bar.Format("Bom !")
bar.Start()
r := report.NewReport("%4.4f")
var wg sync.WaitGroup
wg.Add(len(clients))
for i := range clients {
go func(c *v3.Client) {
defer wg.Done()
for op := range requests {
st := time.Now()
_, derr := c.Do(context.Background(), op)
r.Results() <- report.Result{Err: derr, Start: st, End: time.Now()}
}
}(clients[i])
}
go func() {
cctx, ccancel := context.WithCancel(ctx)
defer ccancel()
for limit.Wait(cctx) == nil {
binary.PutVarint(k, rand.Int63n(math.MaxInt64))
requests <- v3.OpPut(checkPerfPrefix+string(k), v)
}
close(requests)
}()
go func() {
for i := 0; i < cfg.duration; i++ {
time.Sleep(time.Second)
bar.Add(1)
}
bar.Finish()
}()
sc := r.Stats()
wg.Wait()
close(r.Results())
s := <-sc
attemptCleanup(clients[0], autoCompact)
if autoDefrag {
for _, ep := range clients[0].Endpoints() {
defrag(clients[0], ep)
}
}
ok = true
if len(s.ErrorDist) != 0 {
fmt.Println("FAIL: too many errors")
for k, v := range s.ErrorDist {
fmt.Printf("FAIL: ERROR(%v) -> %d\n", k, v)
}
ok = false
}
if s.RPS/float64(cfg.limit) <= 0.9 {
fmt.Printf("FAIL: Throughput too low: %d writes/s\n", int(s.RPS)+1)
ok = false
} else {
fmt.Printf("PASS: Throughput is %d writes/s\n", int(s.RPS)+1)
}
if s.Slowest > 0.5 { // slowest request > 500ms
fmt.Printf("Slowest request took too long: %fs\n", s.Slowest)
ok = false
} else {
fmt.Printf("PASS: Slowest request took %fs\n", s.Slowest)
}
if s.Stddev > 0.1 { // stddev > 100ms
fmt.Printf("Stddev too high: %fs\n", s.Stddev)
ok = false
} else {
fmt.Printf("PASS: Stddev is %fs\n", s.Stddev)
}
if ok {
fmt.Println("PASS")
} else {
fmt.Println("FAIL")
os.Exit(cobrautl.ExitError)
}
}
func attemptCleanup(client *v3.Client, autoCompact bool) {
dctx, dcancel := context.WithTimeout(context.Background(), 30*time.Second)
defer dcancel()
dresp, err := client.Delete(dctx, checkPerfPrefix, v3.WithPrefix())
if err != nil {
fmt.Printf("FAIL: Cleanup failed during key deletion: ERROR(%v)\n", err)
return
}
if autoCompact {
compact(client, dresp.Header.Revision)
}
}
func interruptableContext(ctx context.Context, attemptCleanup func()) (context.Context, func()) {
ctx, cancel := context.WithCancel(ctx)
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, os.Interrupt)
go func() {
defer signal.Stop(signalChan)
select {
case <-signalChan:
cancel()
attemptCleanup()
}
}()
return ctx, cancel
}
// NewCheckDatascaleCommand returns the cobra command for "check datascale".
func NewCheckDatascaleCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "datascale [options]",
Short: "Check the memory usage of holding data for different workloads on a given server endpoint.",
Long: "If no endpoint is provided, localhost will be used. If multiple endpoints are provided, first endpoint will be used.",
Run: newCheckDatascaleCommand,
}
cmd.Flags().StringVar(&checkDatascaleLoad, "load", "s", "The datascale check's workload model. Accepted workloads: s(small), m(medium), l(large), xl(xLarge)")
cmd.Flags().StringVar(&checkDatascalePrefix, "prefix", "/etcdctl-check-datascale/", "The prefix for writing the datascale check's keys.")
cmd.Flags().BoolVar(&autoCompact, "auto-compact", false, "Compact storage with last revision after test is finished.")
cmd.Flags().BoolVar(&autoDefrag, "auto-defrag", false, "Defragment storage after test is finished.")
return cmd
}
// newCheckDatascaleCommand executes the "check datascale" command.
func newCheckDatascaleCommand(cmd *cobra.Command, args []string) {
var checkDatascaleAlias = map[string]string{
"s": "s", "small": "s",
"m": "m", "medium": "m",
"l": "l", "large": "l",
"xl": "xl", "xLarge": "xl",
}
model, ok := checkDatascaleAlias[checkDatascaleLoad]
if !ok {
cobrautl.ExitWithError(cobrautl.ExitBadFeature, fmt.Errorf("unknown load option %v", checkDatascaleLoad))
}
cfg := checkDatascaleCfgMap[model]
requests := make(chan v3.Op, cfg.clients)
cc := clientConfigFromCmd(cmd)
clients := make([]*v3.Client, cfg.clients)
for i := 0; i < cfg.clients; i++ {
clients[i] = cc.mustClient()
}
// get endpoints
eps, errEndpoints := endpointsFromCmd(cmd)
if errEndpoints != nil {
cobrautl.ExitWithError(cobrautl.ExitError, errEndpoints)
}
sec := secureCfgFromCmd(cmd)
ctx, cancel := context.WithCancel(context.Background())
resp, err := clients[0].Get(ctx, checkDatascalePrefix, v3.WithPrefix(), v3.WithLimit(1))
cancel()
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
if len(resp.Kvs) > 0 {
cobrautl.ExitWithError(cobrautl.ExitInvalidInput, fmt.Errorf("prefix %q has keys. Delete with etcdctl del --prefix %s first", checkDatascalePrefix, checkDatascalePrefix))
}
ksize, vsize := 512, 512
k, v := make([]byte, ksize), string(make([]byte, vsize))
r := report.NewReport("%4.4f")
var wg sync.WaitGroup
wg.Add(len(clients))
// get the process_resident_memory_bytes and process_virtual_memory_bytes before the put operations
bytesBefore := endpointMemoryMetrics(eps[0], sec)
if bytesBefore == 0 {
fmt.Println("FAIL: Could not read process_resident_memory_bytes before the put operations.")
os.Exit(cobrautl.ExitError)
}
fmt.Println(fmt.Sprintf("Start data scale check for work load [%v key-value pairs, %v bytes per key-value, %v concurrent clients].", cfg.limit, cfg.kvSize, cfg.clients))
bar := pb.New(cfg.limit)
bar.Format("Bom !")
bar.Start()
for i := range clients {
go func(c *v3.Client) {
defer wg.Done()
for op := range requests {
st := time.Now()
_, derr := c.Do(context.Background(), op)
r.Results() <- report.Result{Err: derr, Start: st, End: time.Now()}
bar.Increment()
}
}(clients[i])
}
go func() {
for i := 0; i < cfg.limit; i++ {
binary.PutVarint(k, rand.Int63n(math.MaxInt64))
requests <- v3.OpPut(checkDatascalePrefix+string(k), v)
}
close(requests)
}()
sc := r.Stats()
wg.Wait()
close(r.Results())
bar.Finish()
s := <-sc
// get the process_resident_memory_bytes after the put operations
bytesAfter := endpointMemoryMetrics(eps[0], sec)
if bytesAfter == 0 {
fmt.Println("FAIL: Could not read process_resident_memory_bytes after the put operations.")
os.Exit(cobrautl.ExitError)
}
// delete the created kv pairs
ctx, cancel = context.WithCancel(context.Background())
dresp, derr := clients[0].Delete(ctx, checkDatascalePrefix, v3.WithPrefix())
defer cancel()
if derr != nil {
cobrautl.ExitWithError(cobrautl.ExitError, derr)
}
if autoCompact {
compact(clients[0], dresp.Header.Revision)
}
if autoDefrag {
for _, ep := range clients[0].Endpoints() {
defrag(clients[0], ep)
}
}
if bytesAfter == 0 {
fmt.Println("FAIL: Could not read process_resident_memory_bytes after the put operations.")
os.Exit(cobrautl.ExitError)
}
bytesUsed := bytesAfter - bytesBefore
mbUsed := bytesUsed / (1024 * 1024)
if len(s.ErrorDist) != 0 {
fmt.Println("FAIL: too many errors")
for k, v := range s.ErrorDist {
fmt.Printf("FAIL: ERROR(%v) -> %d\n", k, v)
}
os.Exit(cobrautl.ExitError)
} else {
fmt.Println(fmt.Sprintf("PASS: Approximate system memory used : %v MB.", strconv.FormatFloat(mbUsed, 'f', 2, 64)))
}
}
| etcdctl/ctlv3/command/check.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.9948531985282898,
0.022944705560803413,
0.0001639126567170024,
0.000175457913428545,
0.14653649926185608
] |
{
"id": 6,
"code_window": [
"\n",
"\tfor r := range rc {\n",
"\t\tfor _, kv := range r.Kvs {\n",
"\t\t\t_, err := dc.Put(ctx, modifyPrefix(string(kv.Key)), string(kv.Value))\n",
"\t\t\tif err != nil {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"\t\t// if remove destination prefix is false and destination prefix is empty set the value of destination prefix same as prefix\n",
"\t\tif !mmnodestprefix && len(mmdestprefix) == 0 {\n",
"\t\t\tmmdestprefix = mmprefix\n",
"\t\t}\n",
"\n",
"\t\tfor r := range rc {\n",
"\t\t\tfor _, kv := range r.Kvs {\n",
"\t\t\t\t_, err := dc.Put(ctx, modifyPrefix(string(kv.Key)), string(kv.Value))\n",
"\t\t\t\tif err != nil {\n",
"\t\t\t\t\treturn err\n",
"\t\t\t\t}\n",
"\t\t\t\tatomic.AddInt64(&total, 1)\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 153
} | // Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"errors"
"net/http"
)
func (t *fakeTransport) RoundTrip(req *http.Request) (*http.Response, error) {
select {
case resp := <-t.respchan:
return resp, nil
case err := <-t.errchan:
return nil, err
case <-t.startCancel:
case <-req.Cancel:
}
select {
// this simulates that the request is finished before cancel effects
case resp := <-t.respchan:
return resp, nil
// wait on finishCancel to simulate taking some amount of
// time while calling CancelRequest
case <-t.finishCancel:
return nil, errors.New("cancelled")
}
}
| client/v2/fake_transport_test.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.0001793377159629017,
0.0001748305221553892,
0.00016935454914346337,
0.00017647200729697943,
0.000003528122078932938
] |
{
"id": 6,
"code_window": [
"\n",
"\tfor r := range rc {\n",
"\t\tfor _, kv := range r.Kvs {\n",
"\t\t\t_, err := dc.Put(ctx, modifyPrefix(string(kv.Key)), string(kv.Value))\n",
"\t\t\tif err != nil {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep"
],
"after_edit": [
"\t\t// if remove destination prefix is false and destination prefix is empty set the value of destination prefix same as prefix\n",
"\t\tif !mmnodestprefix && len(mmdestprefix) == 0 {\n",
"\t\t\tmmdestprefix = mmprefix\n",
"\t\t}\n",
"\n",
"\t\tfor r := range rc {\n",
"\t\t\tfor _, kv := range r.Kvs {\n",
"\t\t\t\t_, err := dc.Put(ctx, modifyPrefix(string(kv.Key)), string(kv.Value))\n",
"\t\t\t\tif err != nil {\n",
"\t\t\t\t\treturn err\n",
"\t\t\t\t}\n",
"\t\t\t\tatomic.AddInt64(&total, 1)\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 153
} | // Copyright 2021 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdutl
import (
"fmt"
"go.etcd.io/etcd/etcdutl/v3/snapshot"
)
type fieldsPrinter struct{ printer }
func (p *fieldsPrinter) DBStatus(r snapshot.Status) {
fmt.Println(`"Hash" :`, r.Hash)
fmt.Println(`"Revision" :`, r.Revision)
fmt.Println(`"Keys" :`, r.TotalKey)
fmt.Println(`"Size" :`, r.TotalSize)
fmt.Println(`"Version" :`, r.Version)
}
| etcdutl/etcdutl/printer_fields.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.00031514803413301706,
0.00021105798077769578,
0.00017315785225946456,
0.0001779630547389388,
0.000060131438658572733
] |
{
"id": 7,
"code_window": [
"\t\t\t}\n",
"\t\t\tatomic.AddInt64(&total, 1)\n",
"\t\t}\n"
],
"labels": [
"keep",
"replace",
"keep"
],
"after_edit": [],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 159
} | // Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package command
import (
"context"
"errors"
"fmt"
"strings"
"sync/atomic"
"time"
"github.com/bgentry/speakeasy"
"go.etcd.io/etcd/pkg/v3/cobrautl"
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/mirror"
"github.com/spf13/cobra"
)
var (
mminsecureTr bool
mmcert string
mmkey string
mmcacert string
mmprefix string
mmdestprefix string
mmuser string
mmpassword string
mmnodestprefix bool
)
// NewMakeMirrorCommand returns the cobra command for "makeMirror".
func NewMakeMirrorCommand() *cobra.Command {
c := &cobra.Command{
Use: "make-mirror [options] <destination>",
Short: "Makes a mirror at the destination etcd cluster",
Run: makeMirrorCommandFunc,
}
c.Flags().StringVar(&mmprefix, "prefix", "", "Key-value prefix to mirror")
c.Flags().StringVar(&mmdestprefix, "dest-prefix", "", "destination prefix to mirror a prefix to a different prefix in the destination cluster")
c.Flags().BoolVar(&mmnodestprefix, "no-dest-prefix", false, "mirror key-values to the root of the destination cluster")
c.Flags().StringVar(&mmcert, "dest-cert", "", "Identify secure client using this TLS certificate file for the destination cluster")
c.Flags().StringVar(&mmkey, "dest-key", "", "Identify secure client using this TLS key file")
c.Flags().StringVar(&mmcacert, "dest-cacert", "", "Verify certificates of TLS enabled secure servers using this CA bundle")
// TODO: secure by default when etcd enables secure gRPC by default.
c.Flags().BoolVar(&mminsecureTr, "dest-insecure-transport", true, "Disable transport security for client connections")
c.Flags().StringVar(&mmuser, "dest-user", "", "Destination username[:password] for authentication (prompt if password is not supplied)")
c.Flags().StringVar(&mmpassword, "dest-password", "", "Destination password for authentication (if this option is used, --user option shouldn't include password)")
return c
}
func authDestCfg() *authCfg {
if mmuser == "" {
return nil
}
var cfg authCfg
if mmpassword == "" {
splitted := strings.SplitN(mmuser, ":", 2)
if len(splitted) < 2 {
var err error
cfg.username = mmuser
cfg.password, err = speakeasy.Ask("Destination Password: ")
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
} else {
cfg.username = splitted[0]
cfg.password = splitted[1]
}
} else {
cfg.username = mmuser
cfg.password = mmpassword
}
return &cfg
}
func makeMirrorCommandFunc(cmd *cobra.Command, args []string) {
if len(args) != 1 {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("make-mirror takes one destination argument"))
}
dialTimeout := dialTimeoutFromCmd(cmd)
keepAliveTime := keepAliveTimeFromCmd(cmd)
keepAliveTimeout := keepAliveTimeoutFromCmd(cmd)
sec := &secureCfg{
cert: mmcert,
key: mmkey,
cacert: mmcacert,
insecureTransport: mminsecureTr,
}
auth := authDestCfg()
cc := &clientConfig{
endpoints: []string{args[0]},
dialTimeout: dialTimeout,
keepAliveTime: keepAliveTime,
keepAliveTimeout: keepAliveTimeout,
scfg: sec,
acfg: auth,
}
dc := cc.mustClient()
c := mustClientFromCmd(cmd)
err := makeMirror(context.TODO(), c, dc)
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
func makeMirror(ctx context.Context, c *clientv3.Client, dc *clientv3.Client) error {
total := int64(0)
// if destination prefix is specified and remove destination prefix is true return error
if mmnodestprefix && len(mmdestprefix) > 0 {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("`--dest-prefix` and `--no-dest-prefix` cannot be set at the same time, choose one"))
}
go func() {
for {
time.Sleep(30 * time.Second)
fmt.Println(atomic.LoadInt64(&total))
}
}()
s := mirror.NewSyncer(c, mmprefix, 0)
rc, errc := s.SyncBase(ctx)
// if remove destination prefix is false and destination prefix is empty set the value of destination prefix same as prefix
if !mmnodestprefix && len(mmdestprefix) == 0 {
mmdestprefix = mmprefix
}
for r := range rc {
for _, kv := range r.Kvs {
_, err := dc.Put(ctx, modifyPrefix(string(kv.Key)), string(kv.Value))
if err != nil {
return err
}
atomic.AddInt64(&total, 1)
}
}
err := <-errc
if err != nil {
return err
}
wc := s.SyncUpdates(ctx)
for wr := range wc {
if wr.CompactRevision != 0 {
return rpctypes.ErrCompacted
}
var lastRev int64
ops := []clientv3.Op{}
for _, ev := range wr.Events {
nextRev := ev.Kv.ModRevision
if lastRev != 0 && nextRev > lastRev {
_, err := dc.Txn(ctx).Then(ops...).Commit()
if err != nil {
return err
}
ops = []clientv3.Op{}
}
lastRev = nextRev
switch ev.Type {
case mvccpb.PUT:
ops = append(ops, clientv3.OpPut(modifyPrefix(string(ev.Kv.Key)), string(ev.Kv.Value)))
atomic.AddInt64(&total, 1)
case mvccpb.DELETE:
ops = append(ops, clientv3.OpDelete(modifyPrefix(string(ev.Kv.Key))))
atomic.AddInt64(&total, 1)
default:
panic("unexpected event type")
}
}
if len(ops) != 0 {
_, err := dc.Txn(ctx).Then(ops...).Commit()
if err != nil {
return err
}
}
}
return nil
}
func modifyPrefix(key string) string {
return strings.Replace(key, mmprefix, mmdestprefix, 1)
}
| etcdctl/ctlv3/command/make_mirror_command.go | 1 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.9920637011528015,
0.08127712458372116,
0.00016822874022182077,
0.00017178631969727576,
0.25420859456062317
] |
{
"id": 7,
"code_window": [
"\t\t\t}\n",
"\t\t\tatomic.AddInt64(&total, 1)\n",
"\t\t}\n"
],
"labels": [
"keep",
"replace",
"keep"
],
"after_edit": [],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 159
} | // Copyright 2021 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package connectivity
| tests/integration/clientv3/connectivity/doc.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.00017724029021337628,
0.0001771026581991464,
0.0001769650261849165,
0.0001771026581991464,
1.3763201422989368e-7
] |
{
"id": 7,
"code_window": [
"\t\t\t}\n",
"\t\t\tatomic.AddInt64(&total, 1)\n",
"\t\t}\n"
],
"labels": [
"keep",
"replace",
"keep"
],
"after_edit": [],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 159
} | // Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package command
import (
"encoding/json"
"fmt"
"os"
"github.com/urfave/cli"
"go.etcd.io/etcd/client/v2"
)
func handleError(c *cli.Context, code int, err error) {
if c.GlobalString("output") == "json" {
if err, ok := err.(*client.Error); ok {
b, err := json.Marshal(err)
if err != nil {
panic(err)
}
fmt.Fprintln(os.Stderr, string(b))
os.Exit(code)
}
}
fmt.Fprintln(os.Stderr, "Error: ", err)
if cerr, ok := err.(*client.ClusterError); ok {
fmt.Fprintln(os.Stderr, cerr.Detail())
}
os.Exit(code)
}
| etcdctl/ctlv2/command/error.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.0001770009839674458,
0.00017323283827863634,
0.00016923104703892022,
0.00017274441779591143,
0.000002736567012107116
] |
{
"id": 7,
"code_window": [
"\t\t\t}\n",
"\t\t\tatomic.AddInt64(&total, 1)\n",
"\t\t}\n"
],
"labels": [
"keep",
"replace",
"keep"
],
"after_edit": [],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 159
} |
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| client/pkg/LICENSE | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.00017972459318116307,
0.00017295184079557657,
0.000168855010997504,
0.00017270373064093292,
0.000002706837676669238
] |
{
"id": 8,
"code_window": [
"\t\t}\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"replace",
"keep"
],
"after_edit": [],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 161
} | // Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package command
import (
"context"
"errors"
"fmt"
"strings"
"sync/atomic"
"time"
"github.com/bgentry/speakeasy"
"go.etcd.io/etcd/pkg/v3/cobrautl"
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/mirror"
"github.com/spf13/cobra"
)
var (
mminsecureTr bool
mmcert string
mmkey string
mmcacert string
mmprefix string
mmdestprefix string
mmuser string
mmpassword string
mmnodestprefix bool
)
// NewMakeMirrorCommand returns the cobra command for "makeMirror".
func NewMakeMirrorCommand() *cobra.Command {
c := &cobra.Command{
Use: "make-mirror [options] <destination>",
Short: "Makes a mirror at the destination etcd cluster",
Run: makeMirrorCommandFunc,
}
c.Flags().StringVar(&mmprefix, "prefix", "", "Key-value prefix to mirror")
c.Flags().StringVar(&mmdestprefix, "dest-prefix", "", "destination prefix to mirror a prefix to a different prefix in the destination cluster")
c.Flags().BoolVar(&mmnodestprefix, "no-dest-prefix", false, "mirror key-values to the root of the destination cluster")
c.Flags().StringVar(&mmcert, "dest-cert", "", "Identify secure client using this TLS certificate file for the destination cluster")
c.Flags().StringVar(&mmkey, "dest-key", "", "Identify secure client using this TLS key file")
c.Flags().StringVar(&mmcacert, "dest-cacert", "", "Verify certificates of TLS enabled secure servers using this CA bundle")
// TODO: secure by default when etcd enables secure gRPC by default.
c.Flags().BoolVar(&mminsecureTr, "dest-insecure-transport", true, "Disable transport security for client connections")
c.Flags().StringVar(&mmuser, "dest-user", "", "Destination username[:password] for authentication (prompt if password is not supplied)")
c.Flags().StringVar(&mmpassword, "dest-password", "", "Destination password for authentication (if this option is used, --user option shouldn't include password)")
return c
}
func authDestCfg() *authCfg {
if mmuser == "" {
return nil
}
var cfg authCfg
if mmpassword == "" {
splitted := strings.SplitN(mmuser, ":", 2)
if len(splitted) < 2 {
var err error
cfg.username = mmuser
cfg.password, err = speakeasy.Ask("Destination Password: ")
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
} else {
cfg.username = splitted[0]
cfg.password = splitted[1]
}
} else {
cfg.username = mmuser
cfg.password = mmpassword
}
return &cfg
}
func makeMirrorCommandFunc(cmd *cobra.Command, args []string) {
if len(args) != 1 {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("make-mirror takes one destination argument"))
}
dialTimeout := dialTimeoutFromCmd(cmd)
keepAliveTime := keepAliveTimeFromCmd(cmd)
keepAliveTimeout := keepAliveTimeoutFromCmd(cmd)
sec := &secureCfg{
cert: mmcert,
key: mmkey,
cacert: mmcacert,
insecureTransport: mminsecureTr,
}
auth := authDestCfg()
cc := &clientConfig{
endpoints: []string{args[0]},
dialTimeout: dialTimeout,
keepAliveTime: keepAliveTime,
keepAliveTimeout: keepAliveTimeout,
scfg: sec,
acfg: auth,
}
dc := cc.mustClient()
c := mustClientFromCmd(cmd)
err := makeMirror(context.TODO(), c, dc)
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
func makeMirror(ctx context.Context, c *clientv3.Client, dc *clientv3.Client) error {
total := int64(0)
// if destination prefix is specified and remove destination prefix is true return error
if mmnodestprefix && len(mmdestprefix) > 0 {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("`--dest-prefix` and `--no-dest-prefix` cannot be set at the same time, choose one"))
}
go func() {
for {
time.Sleep(30 * time.Second)
fmt.Println(atomic.LoadInt64(&total))
}
}()
s := mirror.NewSyncer(c, mmprefix, 0)
rc, errc := s.SyncBase(ctx)
// if remove destination prefix is false and destination prefix is empty set the value of destination prefix same as prefix
if !mmnodestprefix && len(mmdestprefix) == 0 {
mmdestprefix = mmprefix
}
for r := range rc {
for _, kv := range r.Kvs {
_, err := dc.Put(ctx, modifyPrefix(string(kv.Key)), string(kv.Value))
if err != nil {
return err
}
atomic.AddInt64(&total, 1)
}
}
err := <-errc
if err != nil {
return err
}
wc := s.SyncUpdates(ctx)
for wr := range wc {
if wr.CompactRevision != 0 {
return rpctypes.ErrCompacted
}
var lastRev int64
ops := []clientv3.Op{}
for _, ev := range wr.Events {
nextRev := ev.Kv.ModRevision
if lastRev != 0 && nextRev > lastRev {
_, err := dc.Txn(ctx).Then(ops...).Commit()
if err != nil {
return err
}
ops = []clientv3.Op{}
}
lastRev = nextRev
switch ev.Type {
case mvccpb.PUT:
ops = append(ops, clientv3.OpPut(modifyPrefix(string(ev.Kv.Key)), string(ev.Kv.Value)))
atomic.AddInt64(&total, 1)
case mvccpb.DELETE:
ops = append(ops, clientv3.OpDelete(modifyPrefix(string(ev.Kv.Key))))
atomic.AddInt64(&total, 1)
default:
panic("unexpected event type")
}
}
if len(ops) != 0 {
_, err := dc.Txn(ctx).Then(ops...).Commit()
if err != nil {
return err
}
}
}
return nil
}
func modifyPrefix(key string) string {
return strings.Replace(key, mmprefix, mmdestprefix, 1)
}
| etcdctl/ctlv3/command/make_mirror_command.go | 1 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.9205754995346069,
0.04226136580109596,
0.00016485970991197973,
0.000207280638278462,
0.19166532158851624
] |
{
"id": 8,
"code_window": [
"\t\t}\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"replace",
"keep"
],
"after_edit": [],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 161
} | // Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package namespace
import (
"bytes"
"testing"
)
func TestPrefixInterval(t *testing.T) {
tests := []struct {
pfx string
key []byte
end []byte
wKey []byte
wEnd []byte
}{
// single key
{
pfx: "pfx/",
key: []byte("a"),
wKey: []byte("pfx/a"),
},
// range
{
pfx: "pfx/",
key: []byte("abc"),
end: []byte("def"),
wKey: []byte("pfx/abc"),
wEnd: []byte("pfx/def"),
},
// one-sided range
{
pfx: "pfx/",
key: []byte("abc"),
end: []byte{0},
wKey: []byte("pfx/abc"),
wEnd: []byte("pfx0"),
},
// one-sided range, end of keyspace
{
pfx: "\xff\xff",
key: []byte("abc"),
end: []byte{0},
wKey: []byte("\xff\xffabc"),
wEnd: []byte{0},
},
}
for i, tt := range tests {
pfxKey, pfxEnd := prefixInterval(tt.pfx, tt.key, tt.end)
if !bytes.Equal(pfxKey, tt.wKey) {
t.Errorf("#%d: expected key=%q, got key=%q", i, tt.wKey, pfxKey)
}
if !bytes.Equal(pfxEnd, tt.wEnd) {
t.Errorf("#%d: expected end=%q, got end=%q", i, tt.wEnd, pfxEnd)
}
}
}
| client/v3/namespace/util_test.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.0008009589510038495,
0.00025108244153670967,
0.00016682050772942603,
0.00017343969375360757,
0.00020785043307114393
] |
{
"id": 8,
"code_window": [
"\t\t}\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"replace",
"keep"
],
"after_edit": [],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 161
} | // Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package concurrency
import (
"context"
"errors"
"fmt"
"sync"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
v3 "go.etcd.io/etcd/client/v3"
)
// ErrLocked is returned by TryLock when Mutex is already locked by another session.
var ErrLocked = errors.New("mutex: Locked by another session")
var ErrSessionExpired = errors.New("mutex: session is expired")
// Mutex implements the sync Locker interface with etcd
type Mutex struct {
s *Session
pfx string
myKey string
myRev int64
hdr *pb.ResponseHeader
}
func NewMutex(s *Session, pfx string) *Mutex {
return &Mutex{s, pfx + "/", "", -1, nil}
}
// TryLock locks the mutex if not already locked by another session.
// If lock is held by another session, return immediately after attempting necessary cleanup
// The ctx argument is used for the sending/receiving Txn RPC.
func (m *Mutex) TryLock(ctx context.Context) error {
resp, err := m.tryAcquire(ctx)
if err != nil {
return err
}
// if no key on prefix / the minimum rev is key, already hold the lock
ownerKey := resp.Responses[1].GetResponseRange().Kvs
if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
m.hdr = resp.Header
return nil
}
client := m.s.Client()
// Cannot lock, so delete the key
if _, err := client.Delete(ctx, m.myKey); err != nil {
return err
}
m.myKey = "\x00"
m.myRev = -1
return ErrLocked
}
// Lock locks the mutex with a cancelable context. If the context is canceled
// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
func (m *Mutex) Lock(ctx context.Context) error {
resp, err := m.tryAcquire(ctx)
if err != nil {
return err
}
// if no key on prefix / the minimum rev is key, already hold the lock
ownerKey := resp.Responses[1].GetResponseRange().Kvs
if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
m.hdr = resp.Header
return nil
}
client := m.s.Client()
// wait for deletion revisions prior to myKey
// TODO: early termination if the session key is deleted before other session keys with smaller revisions.
_, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
// release lock key if wait failed
if werr != nil {
m.Unlock(client.Ctx())
return werr
}
// make sure the session is not expired, and the owner key still exists.
gresp, werr := client.Get(ctx, m.myKey)
if werr != nil {
m.Unlock(client.Ctx())
return werr
}
if len(gresp.Kvs) == 0 { // is the session key lost?
return ErrSessionExpired
}
m.hdr = gresp.Header
return nil
}
func (m *Mutex) tryAcquire(ctx context.Context) (*v3.TxnResponse, error) {
s := m.s
client := m.s.Client()
m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease())
cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0)
// put self in lock waiters via myKey; oldest waiter holds lock
put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
// reuse key in case this session already holds the lock
get := v3.OpGet(m.myKey)
// fetch current holder to complete uncontended path with only one RPC
getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
if err != nil {
return nil, err
}
m.myRev = resp.Header.Revision
if !resp.Succeeded {
m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
}
return resp, nil
}
func (m *Mutex) Unlock(ctx context.Context) error {
client := m.s.Client()
if _, err := client.Delete(ctx, m.myKey); err != nil {
return err
}
m.myKey = "\x00"
m.myRev = -1
return nil
}
func (m *Mutex) IsOwner() v3.Cmp {
return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev)
}
func (m *Mutex) Key() string { return m.myKey }
// Header is the response header received from etcd on acquiring the lock.
func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr }
type lockerMutex struct{ *Mutex }
func (lm *lockerMutex) Lock() {
client := lm.s.Client()
if err := lm.Mutex.Lock(client.Ctx()); err != nil {
panic(err)
}
}
func (lm *lockerMutex) Unlock() {
client := lm.s.Client()
if err := lm.Mutex.Unlock(client.Ctx()); err != nil {
panic(err)
}
}
// NewLocker creates a sync.Locker backed by an etcd mutex.
func NewLocker(s *Session, pfx string) sync.Locker {
return &lockerMutex{NewMutex(s, pfx)}
}
| client/v3/concurrency/mutex.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.04537062719464302,
0.0031948562245815992,
0.0001678152329986915,
0.00021539420413319021,
0.0106118218973279
] |
{
"id": 8,
"code_window": [
"\t\t}\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"replace",
"keep"
],
"after_edit": [],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 161
} | // Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package proxy
import (
"bytes"
"context"
"crypto/tls"
"fmt"
"io"
"log"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"go.etcd.io/etcd/client/pkg/v3/transport"
"go.uber.org/zap/zaptest"
"go.uber.org/zap"
)
func TestServer_Unix_Insecure(t *testing.T) { testServer(t, "unix", false, false) }
func TestServer_TCP_Insecure(t *testing.T) { testServer(t, "tcp", false, false) }
func TestServer_Unix_Secure(t *testing.T) { testServer(t, "unix", true, false) }
func TestServer_TCP_Secure(t *testing.T) { testServer(t, "tcp", true, false) }
func TestServer_Unix_Insecure_DelayTx(t *testing.T) { testServer(t, "unix", false, true) }
func TestServer_TCP_Insecure_DelayTx(t *testing.T) { testServer(t, "tcp", false, true) }
func TestServer_Unix_Secure_DelayTx(t *testing.T) { testServer(t, "unix", true, true) }
func TestServer_TCP_Secure_DelayTx(t *testing.T) { testServer(t, "tcp", true, true) }
func testServer(t *testing.T, scheme string, secure bool, delayTx bool) {
lg := zaptest.NewLogger(t)
srcAddr, dstAddr := newUnixAddr(), newUnixAddr()
if scheme == "tcp" {
ln1, ln2 := listen(t, "tcp", "localhost:0", transport.TLSInfo{}), listen(t, "tcp", "localhost:0", transport.TLSInfo{})
srcAddr, dstAddr = ln1.Addr().String(), ln2.Addr().String()
ln1.Close()
ln2.Close()
} else {
defer func() {
os.RemoveAll(srcAddr)
os.RemoveAll(dstAddr)
}()
}
tlsInfo := createTLSInfo(lg, secure)
ln := listen(t, scheme, dstAddr, tlsInfo)
defer ln.Close()
cfg := ServerConfig{
Logger: lg,
From: url.URL{Scheme: scheme, Host: srcAddr},
To: url.URL{Scheme: scheme, Host: dstAddr},
}
if secure {
cfg.TLSInfo = tlsInfo
}
p := NewServer(cfg)
<-p.Ready()
defer p.Close()
data1 := []byte("Hello World!")
donec, writec := make(chan struct{}), make(chan []byte)
go func() {
defer close(donec)
for data := range writec {
send(t, data, scheme, srcAddr, tlsInfo)
}
}()
recvc := make(chan []byte, 1)
go func() {
for i := 0; i < 2; i++ {
recvc <- receive(t, ln)
}
}()
writec <- data1
now := time.Now()
if d := <-recvc; !bytes.Equal(data1, d) {
close(writec)
t.Fatalf("expected %q, got %q", string(data1), string(d))
}
took1 := time.Since(now)
t.Logf("took %v with no latency", took1)
lat, rv := 50*time.Millisecond, 5*time.Millisecond
if delayTx {
p.DelayTx(lat, rv)
}
data2 := []byte("new data")
writec <- data2
now = time.Now()
if d := <-recvc; !bytes.Equal(data2, d) {
close(writec)
t.Fatalf("expected %q, got %q", string(data2), string(d))
}
took2 := time.Since(now)
if delayTx {
t.Logf("took %v with latency %v+-%v", took2, lat, rv)
} else {
t.Logf("took %v with no latency", took2)
}
if delayTx {
p.UndelayTx()
if took2 < lat-rv {
close(writec)
t.Fatalf("expected took2 %v (with latency) > delay: %v", took2, lat-rv)
}
}
close(writec)
select {
case <-donec:
case <-time.After(3 * time.Second):
t.Fatal("took too long to write")
}
select {
case <-p.Done():
t.Fatal("unexpected done")
case err := <-p.Error():
t.Fatal(err)
default:
}
if err := p.Close(); err != nil {
t.Fatal(err)
}
select {
case <-p.Done():
case err := <-p.Error():
if !strings.HasPrefix(err.Error(), "accept ") &&
!strings.HasSuffix(err.Error(), "use of closed network connection") {
t.Fatal(err)
}
case <-time.After(3 * time.Second):
t.Fatal("took too long to close")
}
}
func createTLSInfo(lg *zap.Logger, secure bool) transport.TLSInfo {
if secure {
return transport.TLSInfo{
KeyFile: "../../tests/fixtures/server.key.insecure",
CertFile: "../../tests/fixtures/server.crt",
TrustedCAFile: "../../tests/fixtures/ca.crt",
ClientCertAuth: true,
Logger: lg,
}
}
return transport.TLSInfo{Logger: lg}
}
func TestServer_Unix_Insecure_DelayAccept(t *testing.T) { testServerDelayAccept(t, false) }
func TestServer_Unix_Secure_DelayAccept(t *testing.T) { testServerDelayAccept(t, true) }
func testServerDelayAccept(t *testing.T, secure bool) {
lg := zaptest.NewLogger(t)
srcAddr, dstAddr := newUnixAddr(), newUnixAddr()
defer func() {
os.RemoveAll(srcAddr)
os.RemoveAll(dstAddr)
}()
tlsInfo := createTLSInfo(lg, secure)
scheme := "unix"
ln := listen(t, scheme, dstAddr, tlsInfo)
defer ln.Close()
cfg := ServerConfig{
Logger: lg,
From: url.URL{Scheme: scheme, Host: srcAddr},
To: url.URL{Scheme: scheme, Host: dstAddr},
}
if secure {
cfg.TLSInfo = tlsInfo
}
p := NewServer(cfg)
<-p.Ready()
defer p.Close()
data := []byte("Hello World!")
now := time.Now()
send(t, data, scheme, srcAddr, tlsInfo)
if d := receive(t, ln); !bytes.Equal(data, d) {
t.Fatalf("expected %q, got %q", string(data), string(d))
}
took1 := time.Since(now)
t.Logf("took %v with no latency", took1)
lat, rv := 700*time.Millisecond, 10*time.Millisecond
p.DelayAccept(lat, rv)
defer p.UndelayAccept()
if err := p.ResetListener(); err != nil {
t.Fatal(err)
}
time.Sleep(200 * time.Millisecond)
now = time.Now()
send(t, data, scheme, srcAddr, tlsInfo)
if d := receive(t, ln); !bytes.Equal(data, d) {
t.Fatalf("expected %q, got %q", string(data), string(d))
}
took2 := time.Since(now)
t.Logf("took %v with latency %v±%v", took2, lat, rv)
if took1 >= took2 {
t.Fatalf("expected took1 %v < took2 %v", took1, took2)
}
}
func TestServer_PauseTx(t *testing.T) {
lg := zaptest.NewLogger(t)
scheme := "unix"
srcAddr, dstAddr := newUnixAddr(), newUnixAddr()
defer func() {
os.RemoveAll(srcAddr)
os.RemoveAll(dstAddr)
}()
ln := listen(t, scheme, dstAddr, transport.TLSInfo{})
defer ln.Close()
p := NewServer(ServerConfig{
Logger: lg,
From: url.URL{Scheme: scheme, Host: srcAddr},
To: url.URL{Scheme: scheme, Host: dstAddr},
})
<-p.Ready()
defer p.Close()
p.PauseTx()
data := []byte("Hello World!")
send(t, data, scheme, srcAddr, transport.TLSInfo{})
recvc := make(chan []byte, 1)
go func() {
recvc <- receive(t, ln)
}()
select {
case d := <-recvc:
t.Fatalf("received unexpected data %q during pause", string(d))
case <-time.After(200 * time.Millisecond):
}
p.UnpauseTx()
select {
case d := <-recvc:
if !bytes.Equal(data, d) {
t.Fatalf("expected %q, got %q", string(data), string(d))
}
case <-time.After(2 * time.Second):
t.Fatal("took too long to receive after unpause")
}
}
func TestServer_ModifyTx_corrupt(t *testing.T) {
lg := zaptest.NewLogger(t)
scheme := "unix"
srcAddr, dstAddr := newUnixAddr(), newUnixAddr()
defer func() {
os.RemoveAll(srcAddr)
os.RemoveAll(dstAddr)
}()
ln := listen(t, scheme, dstAddr, transport.TLSInfo{})
defer ln.Close()
p := NewServer(ServerConfig{
Logger: lg,
From: url.URL{Scheme: scheme, Host: srcAddr},
To: url.URL{Scheme: scheme, Host: dstAddr},
})
<-p.Ready()
defer p.Close()
p.ModifyTx(func(d []byte) []byte {
d[len(d)/2]++
return d
})
data := []byte("Hello World!")
send(t, data, scheme, srcAddr, transport.TLSInfo{})
if d := receive(t, ln); bytes.Equal(d, data) {
t.Fatalf("expected corrupted data, got %q", string(d))
}
p.UnmodifyTx()
send(t, data, scheme, srcAddr, transport.TLSInfo{})
if d := receive(t, ln); !bytes.Equal(d, data) {
t.Fatalf("expected uncorrupted data, got %q", string(d))
}
}
func TestServer_ModifyTx_packet_loss(t *testing.T) {
lg := zaptest.NewLogger(t)
scheme := "unix"
srcAddr, dstAddr := newUnixAddr(), newUnixAddr()
defer func() {
os.RemoveAll(srcAddr)
os.RemoveAll(dstAddr)
}()
ln := listen(t, scheme, dstAddr, transport.TLSInfo{})
defer ln.Close()
p := NewServer(ServerConfig{
Logger: lg,
From: url.URL{Scheme: scheme, Host: srcAddr},
To: url.URL{Scheme: scheme, Host: dstAddr},
})
<-p.Ready()
defer p.Close()
// 50% packet loss
p.ModifyTx(func(d []byte) []byte {
half := len(d) / 2
return d[:half:half]
})
data := []byte("Hello World!")
send(t, data, scheme, srcAddr, transport.TLSInfo{})
if d := receive(t, ln); bytes.Equal(d, data) {
t.Fatalf("expected corrupted data, got %q", string(d))
}
p.UnmodifyTx()
send(t, data, scheme, srcAddr, transport.TLSInfo{})
if d := receive(t, ln); !bytes.Equal(d, data) {
t.Fatalf("expected uncorrupted data, got %q", string(d))
}
}
func TestServer_BlackholeTx(t *testing.T) {
lg := zaptest.NewLogger(t)
scheme := "unix"
srcAddr, dstAddr := newUnixAddr(), newUnixAddr()
defer func() {
os.RemoveAll(srcAddr)
os.RemoveAll(dstAddr)
}()
ln := listen(t, scheme, dstAddr, transport.TLSInfo{})
defer ln.Close()
p := NewServer(ServerConfig{
Logger: lg,
From: url.URL{Scheme: scheme, Host: srcAddr},
To: url.URL{Scheme: scheme, Host: dstAddr},
})
<-p.Ready()
defer p.Close()
p.BlackholeTx()
data := []byte("Hello World!")
send(t, data, scheme, srcAddr, transport.TLSInfo{})
recvc := make(chan []byte, 1)
go func() {
recvc <- receive(t, ln)
}()
select {
case d := <-recvc:
t.Fatalf("unexpected data receive %q during blackhole", string(d))
case <-time.After(200 * time.Millisecond):
}
p.UnblackholeTx()
// expect different data, old data dropped
data[0]++
send(t, data, scheme, srcAddr, transport.TLSInfo{})
select {
case d := <-recvc:
if !bytes.Equal(data, d) {
t.Fatalf("expected %q, got %q", string(data), string(d))
}
case <-time.After(2 * time.Second):
t.Fatal("took too long to receive after unblackhole")
}
}
func TestServer_Shutdown(t *testing.T) {
lg := zaptest.NewLogger(t)
scheme := "unix"
srcAddr, dstAddr := newUnixAddr(), newUnixAddr()
defer func() {
os.RemoveAll(srcAddr)
os.RemoveAll(dstAddr)
}()
ln := listen(t, scheme, dstAddr, transport.TLSInfo{})
defer ln.Close()
p := NewServer(ServerConfig{
Logger: lg,
From: url.URL{Scheme: scheme, Host: srcAddr},
To: url.URL{Scheme: scheme, Host: dstAddr},
})
<-p.Ready()
defer p.Close()
s, _ := p.(*server)
s.listener.Close()
time.Sleep(200 * time.Millisecond)
data := []byte("Hello World!")
send(t, data, scheme, srcAddr, transport.TLSInfo{})
if d := receive(t, ln); !bytes.Equal(d, data) {
t.Fatalf("expected %q, got %q", string(data), string(d))
}
}
func TestServer_ShutdownListener(t *testing.T) {
lg := zaptest.NewLogger(t)
scheme := "unix"
srcAddr, dstAddr := newUnixAddr(), newUnixAddr()
defer func() {
os.RemoveAll(srcAddr)
os.RemoveAll(dstAddr)
}()
ln := listen(t, scheme, dstAddr, transport.TLSInfo{})
defer ln.Close()
p := NewServer(ServerConfig{
Logger: lg,
From: url.URL{Scheme: scheme, Host: srcAddr},
To: url.URL{Scheme: scheme, Host: dstAddr},
})
<-p.Ready()
defer p.Close()
// shut down destination
ln.Close()
time.Sleep(200 * time.Millisecond)
ln = listen(t, scheme, dstAddr, transport.TLSInfo{})
defer ln.Close()
data := []byte("Hello World!")
send(t, data, scheme, srcAddr, transport.TLSInfo{})
if d := receive(t, ln); !bytes.Equal(d, data) {
t.Fatalf("expected %q, got %q", string(data), string(d))
}
}
func TestServerHTTP_Insecure_DelayTx(t *testing.T) { testServerHTTP(t, false, true) }
func TestServerHTTP_Secure_DelayTx(t *testing.T) { testServerHTTP(t, true, true) }
func TestServerHTTP_Insecure_DelayRx(t *testing.T) { testServerHTTP(t, false, false) }
func TestServerHTTP_Secure_DelayRx(t *testing.T) { testServerHTTP(t, true, false) }
func testServerHTTP(t *testing.T, secure, delayTx bool) {
lg := zaptest.NewLogger(t)
scheme := "tcp"
ln1, ln2 := listen(t, scheme, "localhost:0", transport.TLSInfo{}), listen(t, scheme, "localhost:0", transport.TLSInfo{})
srcAddr, dstAddr := ln1.Addr().String(), ln2.Addr().String()
ln1.Close()
ln2.Close()
mux := http.NewServeMux()
mux.HandleFunc("/hello", func(w http.ResponseWriter, req *http.Request) {
d, err := io.ReadAll(req.Body)
req.Body.Close()
if err != nil {
t.Fatal(err)
}
if _, err = w.Write([]byte(fmt.Sprintf("%q(confirmed)", string(d)))); err != nil {
t.Fatal(err)
}
})
tlsInfo := createTLSInfo(lg, secure)
var tlsConfig *tls.Config
if secure {
_, err := tlsInfo.ServerConfig()
if err != nil {
t.Fatal(err)
}
}
srv := &http.Server{
Addr: dstAddr,
Handler: mux,
TLSConfig: tlsConfig,
ErrorLog: log.New(io.Discard, "net/http", 0),
}
donec := make(chan struct{})
defer func() {
srv.Close()
<-donec
}()
go func() {
if !secure {
srv.ListenAndServe()
} else {
srv.ListenAndServeTLS(tlsInfo.CertFile, tlsInfo.KeyFile)
}
defer close(donec)
}()
time.Sleep(200 * time.Millisecond)
cfg := ServerConfig{
Logger: lg,
From: url.URL{Scheme: scheme, Host: srcAddr},
To: url.URL{Scheme: scheme, Host: dstAddr},
}
if secure {
cfg.TLSInfo = tlsInfo
}
p := NewServer(cfg)
<-p.Ready()
defer func() {
lg.Info("closing Proxy server...")
p.Close()
lg.Info("closed Proxy server.")
}()
data := "Hello World!"
var resp *http.Response
var err error
now := time.Now()
if secure {
tp, terr := transport.NewTransport(tlsInfo, 3*time.Second)
assert.NoError(t, terr)
cli := &http.Client{Transport: tp}
resp, err = cli.Post("https://"+srcAddr+"/hello", "", strings.NewReader(data))
defer cli.CloseIdleConnections()
defer tp.CloseIdleConnections()
} else {
resp, err = http.Post("http://"+srcAddr+"/hello", "", strings.NewReader(data))
defer http.DefaultClient.CloseIdleConnections()
}
assert.NoError(t, err)
d, err := io.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
took1 := time.Since(now)
t.Logf("took %v with no latency", took1)
rs1 := string(d)
exp := fmt.Sprintf("%q(confirmed)", data)
if rs1 != exp {
t.Fatalf("got %q, expected %q", rs1, exp)
}
lat, rv := 100*time.Millisecond, 10*time.Millisecond
if delayTx {
p.DelayTx(lat, rv)
defer p.UndelayTx()
} else {
p.DelayRx(lat, rv)
defer p.UndelayRx()
}
now = time.Now()
if secure {
tp, terr := transport.NewTransport(tlsInfo, 3*time.Second)
if terr != nil {
t.Fatal(terr)
}
cli := &http.Client{Transport: tp}
resp, err = cli.Post("https://"+srcAddr+"/hello", "", strings.NewReader(data))
defer cli.CloseIdleConnections()
defer tp.CloseIdleConnections()
} else {
resp, err = http.Post("http://"+srcAddr+"/hello", "", strings.NewReader(data))
defer http.DefaultClient.CloseIdleConnections()
}
if err != nil {
t.Fatal(err)
}
d, err = io.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
took2 := time.Since(now)
t.Logf("took %v with latency %v±%v", took2, lat, rv)
rs2 := string(d)
if rs2 != exp {
t.Fatalf("got %q, expected %q", rs2, exp)
}
if took1 > took2 {
t.Fatalf("expected took1 %v < took2 %v", took1, took2)
}
}
func newUnixAddr() string {
now := time.Now().UnixNano()
rand.Seed(now)
addr := fmt.Sprintf("%X%X.unix-conn", now, rand.Intn(35000))
os.RemoveAll(addr)
return addr
}
func listen(t *testing.T, scheme, addr string, tlsInfo transport.TLSInfo) (ln net.Listener) {
var err error
if !tlsInfo.Empty() {
ln, err = transport.NewListener(addr, scheme, &tlsInfo)
} else {
ln, err = net.Listen(scheme, addr)
}
if err != nil {
t.Fatal(err)
}
return ln
}
func send(t *testing.T, data []byte, scheme, addr string, tlsInfo transport.TLSInfo) {
var out net.Conn
var err error
if !tlsInfo.Empty() {
tp, terr := transport.NewTransport(tlsInfo, 3*time.Second)
if terr != nil {
t.Fatal(terr)
}
out, err = tp.DialContext(context.Background(), scheme, addr)
} else {
out, err = net.Dial(scheme, addr)
}
if err != nil {
t.Fatal(err)
}
if _, err = out.Write(data); err != nil {
t.Fatal(err)
}
if err = out.Close(); err != nil {
t.Fatal(err)
}
}
func receive(t *testing.T, ln net.Listener) (data []byte) {
buf := bytes.NewBuffer(make([]byte, 0, 1024))
for {
in, err := ln.Accept()
if err != nil {
t.Fatal(err)
}
var n int64
n, err = buf.ReadFrom(in)
if err != nil {
t.Fatal(err)
}
if n > 0 {
break
}
}
return buf.Bytes()
}
| pkg/proxy/server_test.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.9631308913230896,
0.015402008779346943,
0.00016548162966500968,
0.00018216467287857085,
0.115863136947155
] |
{
"id": 9,
"code_window": [
"\n",
"\terr := <-errc\n",
"\tif err != nil {\n",
"\t\treturn err\n",
"\t}\n",
"\n",
"\twc := s.SyncUpdates(ctx)\n",
"\n",
"\tfor wr := range wc {\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\terr := <-errc\n",
"\t\tif err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 163
} | // Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package command
import (
"context"
"errors"
"fmt"
"strings"
"sync/atomic"
"time"
"github.com/bgentry/speakeasy"
"go.etcd.io/etcd/pkg/v3/cobrautl"
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/mirror"
"github.com/spf13/cobra"
)
var (
mminsecureTr bool
mmcert string
mmkey string
mmcacert string
mmprefix string
mmdestprefix string
mmuser string
mmpassword string
mmnodestprefix bool
)
// NewMakeMirrorCommand returns the cobra command for "makeMirror".
func NewMakeMirrorCommand() *cobra.Command {
c := &cobra.Command{
Use: "make-mirror [options] <destination>",
Short: "Makes a mirror at the destination etcd cluster",
Run: makeMirrorCommandFunc,
}
c.Flags().StringVar(&mmprefix, "prefix", "", "Key-value prefix to mirror")
c.Flags().StringVar(&mmdestprefix, "dest-prefix", "", "destination prefix to mirror a prefix to a different prefix in the destination cluster")
c.Flags().BoolVar(&mmnodestprefix, "no-dest-prefix", false, "mirror key-values to the root of the destination cluster")
c.Flags().StringVar(&mmcert, "dest-cert", "", "Identify secure client using this TLS certificate file for the destination cluster")
c.Flags().StringVar(&mmkey, "dest-key", "", "Identify secure client using this TLS key file")
c.Flags().StringVar(&mmcacert, "dest-cacert", "", "Verify certificates of TLS enabled secure servers using this CA bundle")
// TODO: secure by default when etcd enables secure gRPC by default.
c.Flags().BoolVar(&mminsecureTr, "dest-insecure-transport", true, "Disable transport security for client connections")
c.Flags().StringVar(&mmuser, "dest-user", "", "Destination username[:password] for authentication (prompt if password is not supplied)")
c.Flags().StringVar(&mmpassword, "dest-password", "", "Destination password for authentication (if this option is used, --user option shouldn't include password)")
return c
}
func authDestCfg() *authCfg {
if mmuser == "" {
return nil
}
var cfg authCfg
if mmpassword == "" {
splitted := strings.SplitN(mmuser, ":", 2)
if len(splitted) < 2 {
var err error
cfg.username = mmuser
cfg.password, err = speakeasy.Ask("Destination Password: ")
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
} else {
cfg.username = splitted[0]
cfg.password = splitted[1]
}
} else {
cfg.username = mmuser
cfg.password = mmpassword
}
return &cfg
}
func makeMirrorCommandFunc(cmd *cobra.Command, args []string) {
if len(args) != 1 {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("make-mirror takes one destination argument"))
}
dialTimeout := dialTimeoutFromCmd(cmd)
keepAliveTime := keepAliveTimeFromCmd(cmd)
keepAliveTimeout := keepAliveTimeoutFromCmd(cmd)
sec := &secureCfg{
cert: mmcert,
key: mmkey,
cacert: mmcacert,
insecureTransport: mminsecureTr,
}
auth := authDestCfg()
cc := &clientConfig{
endpoints: []string{args[0]},
dialTimeout: dialTimeout,
keepAliveTime: keepAliveTime,
keepAliveTimeout: keepAliveTimeout,
scfg: sec,
acfg: auth,
}
dc := cc.mustClient()
c := mustClientFromCmd(cmd)
err := makeMirror(context.TODO(), c, dc)
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
func makeMirror(ctx context.Context, c *clientv3.Client, dc *clientv3.Client) error {
total := int64(0)
// if destination prefix is specified and remove destination prefix is true return error
if mmnodestprefix && len(mmdestprefix) > 0 {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("`--dest-prefix` and `--no-dest-prefix` cannot be set at the same time, choose one"))
}
go func() {
for {
time.Sleep(30 * time.Second)
fmt.Println(atomic.LoadInt64(&total))
}
}()
s := mirror.NewSyncer(c, mmprefix, 0)
rc, errc := s.SyncBase(ctx)
// if remove destination prefix is false and destination prefix is empty set the value of destination prefix same as prefix
if !mmnodestprefix && len(mmdestprefix) == 0 {
mmdestprefix = mmprefix
}
for r := range rc {
for _, kv := range r.Kvs {
_, err := dc.Put(ctx, modifyPrefix(string(kv.Key)), string(kv.Value))
if err != nil {
return err
}
atomic.AddInt64(&total, 1)
}
}
err := <-errc
if err != nil {
return err
}
wc := s.SyncUpdates(ctx)
for wr := range wc {
if wr.CompactRevision != 0 {
return rpctypes.ErrCompacted
}
var lastRev int64
ops := []clientv3.Op{}
for _, ev := range wr.Events {
nextRev := ev.Kv.ModRevision
if lastRev != 0 && nextRev > lastRev {
_, err := dc.Txn(ctx).Then(ops...).Commit()
if err != nil {
return err
}
ops = []clientv3.Op{}
}
lastRev = nextRev
switch ev.Type {
case mvccpb.PUT:
ops = append(ops, clientv3.OpPut(modifyPrefix(string(ev.Kv.Key)), string(ev.Kv.Value)))
atomic.AddInt64(&total, 1)
case mvccpb.DELETE:
ops = append(ops, clientv3.OpDelete(modifyPrefix(string(ev.Kv.Key))))
atomic.AddInt64(&total, 1)
default:
panic("unexpected event type")
}
}
if len(ops) != 0 {
_, err := dc.Txn(ctx).Then(ops...).Commit()
if err != nil {
return err
}
}
}
return nil
}
func modifyPrefix(key string) string {
return strings.Replace(key, mmprefix, mmdestprefix, 1)
}
| etcdctl/ctlv3/command/make_mirror_command.go | 1 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.9972359538078308,
0.09099552780389786,
0.00016207300359383225,
0.0001754870027070865,
0.2863484025001526
] |
{
"id": 9,
"code_window": [
"\n",
"\terr := <-errc\n",
"\tif err != nil {\n",
"\t\treturn err\n",
"\t}\n",
"\n",
"\twc := s.SyncUpdates(ctx)\n",
"\n",
"\tfor wr := range wc {\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\terr := <-errc\n",
"\t\tif err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 163
} | // Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package v3lock provides a v3 locking service from an etcdserver.
package v3lock
| server/etcdserver/api/v3lock/doc.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.0001784194028005004,
0.00017780956113711,
0.0001771997194737196,
0.00017780956113711,
6.09841663390398e-7
] |
{
"id": 9,
"code_window": [
"\n",
"\terr := <-errc\n",
"\tif err != nil {\n",
"\t\treturn err\n",
"\t}\n",
"\n",
"\twc := s.SyncUpdates(ctx)\n",
"\n",
"\tfor wr := range wc {\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\terr := <-errc\n",
"\t\tif err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 163
} | // Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package idutil implements utility functions for generating unique,
// randomized ids.
package idutil
import (
"math"
"sync/atomic"
"time"
)
const (
tsLen = 5 * 8
cntLen = 8
suffixLen = tsLen + cntLen
)
// Generator generates unique identifiers based on counters, timestamps, and
// a node member ID.
//
// The initial id is in this format:
// High order 2 bytes are from memberID, next 5 bytes are from timestamp,
// and low order one byte is a counter.
// | prefix | suffix |
// | 2 bytes | 5 bytes | 1 byte |
// | memberID | timestamp | cnt |
//
// The timestamp 5 bytes is different when the machine is restart
// after 1 ms and before 35 years.
//
// It increases suffix to generate the next id.
// The count field may overflow to timestamp field, which is intentional.
// It helps to extend the event window to 2^56. This doesn't break that
// id generated after restart is unique because etcd throughput is <<
// 256req/ms(250k reqs/second).
type Generator struct {
// high order 2 bytes
prefix uint64
// low order 6 bytes
suffix uint64
}
func NewGenerator(memberID uint16, now time.Time) *Generator {
prefix := uint64(memberID) << suffixLen
unixMilli := uint64(now.UnixNano()) / uint64(time.Millisecond/time.Nanosecond)
suffix := lowbit(unixMilli, tsLen) << cntLen
return &Generator{
prefix: prefix,
suffix: suffix,
}
}
// Next generates a id that is unique.
func (g *Generator) Next() uint64 {
suffix := atomic.AddUint64(&g.suffix, 1)
id := g.prefix | lowbit(suffix, suffixLen)
return id
}
func lowbit(x uint64, n uint) uint64 {
return x & (math.MaxUint64 >> (64 - n))
}
| pkg/idutil/id.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.00017844136164058,
0.00017073258641175926,
0.000165905526955612,
0.00016956086619757116,
0.0000041860803321469575
] |
{
"id": 9,
"code_window": [
"\n",
"\terr := <-errc\n",
"\tif err != nil {\n",
"\t\treturn err\n",
"\t}\n",
"\n",
"\twc := s.SyncUpdates(ctx)\n",
"\n",
"\tfor wr := range wc {\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\terr := <-errc\n",
"\t\tif err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n"
],
"file_path": "etcdctl/ctlv3/command/make_mirror_command.go",
"type": "replace",
"edit_start_line_idx": 163
} | // Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v2http
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path"
"sort"
"strings"
"testing"
"go.etcd.io/etcd/server/v3/etcdserver/api"
"go.etcd.io/etcd/server/v3/etcdserver/api/v2auth"
"go.uber.org/zap"
)
const goodPassword = "good"
func mustJSONRequest(t *testing.T, method string, p string, body string) *http.Request {
req, err := http.NewRequest(method, path.Join(authPrefix, p), strings.NewReader(body))
if err != nil {
t.Fatalf("Error making JSON request: %s %s %s\n", method, p, body)
}
req.Header.Set("Content-Type", "application/json")
return req
}
type mockAuthStore struct {
users map[string]*v2auth.User
roles map[string]*v2auth.Role
err error
enabled bool
}
func (s *mockAuthStore) AllUsers() ([]string, error) {
var us []string
for u := range s.users {
us = append(us, u)
}
sort.Strings(us)
return us, s.err
}
func (s *mockAuthStore) GetUser(name string) (v2auth.User, error) {
u, ok := s.users[name]
if !ok {
return v2auth.User{}, s.err
}
return *u, s.err
}
func (s *mockAuthStore) CreateOrUpdateUser(user v2auth.User) (out v2auth.User, created bool, err error) {
if s.users == nil {
out, err = s.CreateUser(user)
return out, true, err
}
out, err = s.UpdateUser(user)
return out, false, err
}
func (s *mockAuthStore) CreateUser(user v2auth.User) (v2auth.User, error) { return user, s.err }
func (s *mockAuthStore) DeleteUser(name string) error { return s.err }
func (s *mockAuthStore) UpdateUser(user v2auth.User) (v2auth.User, error) {
return *s.users[user.User], s.err
}
func (s *mockAuthStore) AllRoles() ([]string, error) {
return []string{"awesome", "guest", "root"}, s.err
}
func (s *mockAuthStore) GetRole(name string) (v2auth.Role, error) {
r, ok := s.roles[name]
if ok {
return *r, s.err
}
return v2auth.Role{}, fmt.Errorf("%q does not exist (%v)", name, s.err)
}
func (s *mockAuthStore) CreateRole(role v2auth.Role) error { return s.err }
func (s *mockAuthStore) DeleteRole(name string) error { return s.err }
func (s *mockAuthStore) UpdateRole(role v2auth.Role) (v2auth.Role, error) {
return *s.roles[role.Role], s.err
}
func (s *mockAuthStore) AuthEnabled() bool { return s.enabled }
func (s *mockAuthStore) EnableAuth() error { return s.err }
func (s *mockAuthStore) DisableAuth() error { return s.err }
func (s *mockAuthStore) CheckPassword(user v2auth.User, password string) bool {
return user.Password == password
}
func (s *mockAuthStore) HashPassword(password string) (string, error) {
return password, nil
}
func TestAuthFlow(t *testing.T) {
api.EnableCapability(api.AuthCapability)
var testCases = []struct {
req *http.Request
store mockAuthStore
wcode int
wbody string
}{
{
req: mustJSONRequest(t, "PUT", "users/alice", `{{{{{{{`),
store: mockAuthStore{},
wcode: http.StatusBadRequest,
wbody: `{"message":"Invalid JSON in request body."}`,
},
{
req: mustJSONRequest(t, "PUT", "users/alice", `{"user": "alice", "password": "goodpassword"}`),
store: mockAuthStore{enabled: true},
wcode: http.StatusUnauthorized,
wbody: `{"message":"Insufficient credentials"}`,
},
// Users
{
req: mustJSONRequest(t, "GET", "users", ""),
store: mockAuthStore{
users: map[string]*v2auth.User{
"alice": {
User: "alice",
Roles: []string{"alicerole", "guest"},
Password: "wheeee",
},
"bob": {
User: "bob",
Roles: []string{"guest"},
Password: "wheeee",
},
"root": {
User: "root",
Roles: []string{"root"},
Password: "wheeee",
},
},
roles: map[string]*v2auth.Role{
"alicerole": {
Role: "alicerole",
},
"guest": {
Role: "guest",
},
"root": {
Role: "root",
},
},
},
wcode: http.StatusOK,
wbody: `{"users":[` +
`{"user":"alice","roles":[` +
`{"role":"alicerole","permissions":{"kv":{"read":null,"write":null}}},` +
`{"role":"guest","permissions":{"kv":{"read":null,"write":null}}}` +
`]},` +
`{"user":"bob","roles":[{"role":"guest","permissions":{"kv":{"read":null,"write":null}}}]},` +
`{"user":"root","roles":[{"role":"root","permissions":{"kv":{"read":null,"write":null}}}]}]}`,
},
{
req: mustJSONRequest(t, "GET", "users/alice", ""),
store: mockAuthStore{
users: map[string]*v2auth.User{
"alice": {
User: "alice",
Roles: []string{"alicerole"},
Password: "wheeee",
},
},
roles: map[string]*v2auth.Role{
"alicerole": {
Role: "alicerole",
},
},
},
wcode: http.StatusOK,
wbody: `{"user":"alice","roles":[{"role":"alicerole","permissions":{"kv":{"read":null,"write":null}}}]}`,
},
{
req: mustJSONRequest(t, "PUT", "users/alice", `{"user": "alice", "password": "goodpassword"}`),
store: mockAuthStore{},
wcode: http.StatusCreated,
wbody: `{"user":"alice","roles":null}`,
},
{
req: mustJSONRequest(t, "DELETE", "users/alice", ``),
store: mockAuthStore{},
wcode: http.StatusOK,
wbody: ``,
},
{
req: mustJSONRequest(t, "PUT", "users/alice", `{"user": "alice", "password": "goodpassword"}`),
store: mockAuthStore{
users: map[string]*v2auth.User{
"alice": {
User: "alice",
Roles: []string{"alicerole", "guest"},
Password: "wheeee",
},
},
},
wcode: http.StatusOK,
wbody: `{"user":"alice","roles":["alicerole","guest"]}`,
},
{
req: mustJSONRequest(t, "PUT", "users/alice", `{"user": "alice", "grant": ["alicerole"]}`),
store: mockAuthStore{
users: map[string]*v2auth.User{
"alice": {
User: "alice",
Roles: []string{"alicerole", "guest"},
Password: "wheeee",
},
},
},
wcode: http.StatusOK,
wbody: `{"user":"alice","roles":["alicerole","guest"]}`,
},
{
req: mustJSONRequest(t, "GET", "users/alice", ``),
store: mockAuthStore{
users: map[string]*v2auth.User{},
err: v2auth.Error{Status: http.StatusNotFound, Errmsg: "auth: User alice doesn't exist."},
},
wcode: http.StatusNotFound,
wbody: `{"message":"auth: User alice doesn't exist."}`,
},
{
req: mustJSONRequest(t, "GET", "roles/manager", ""),
store: mockAuthStore{
roles: map[string]*v2auth.Role{
"manager": {
Role: "manager",
},
},
},
wcode: http.StatusOK,
wbody: `{"role":"manager","permissions":{"kv":{"read":null,"write":null}}}`,
},
{
req: mustJSONRequest(t, "DELETE", "roles/manager", ``),
store: mockAuthStore{},
wcode: http.StatusOK,
wbody: ``,
},
{
req: mustJSONRequest(t, "PUT", "roles/manager", `{"role":"manager","permissions":{"kv":{"read":[],"write":[]}}}`),
store: mockAuthStore{},
wcode: http.StatusCreated,
wbody: `{"role":"manager","permissions":{"kv":{"read":[],"write":[]}}}`,
},
{
req: mustJSONRequest(t, "PUT", "roles/manager", `{"role":"manager","revoke":{"kv":{"read":["foo"],"write":[]}}}`),
store: mockAuthStore{
roles: map[string]*v2auth.Role{
"manager": {
Role: "manager",
},
},
},
wcode: http.StatusOK,
wbody: `{"role":"manager","permissions":{"kv":{"read":null,"write":null}}}`,
},
{
req: mustJSONRequest(t, "GET", "roles", ""),
store: mockAuthStore{
roles: map[string]*v2auth.Role{
"awesome": {
Role: "awesome",
},
"guest": {
Role: "guest",
},
"root": {
Role: "root",
},
},
},
wcode: http.StatusOK,
wbody: `{"roles":[{"role":"awesome","permissions":{"kv":{"read":null,"write":null}}},` +
`{"role":"guest","permissions":{"kv":{"read":null,"write":null}}},` +
`{"role":"root","permissions":{"kv":{"read":null,"write":null}}}]}`,
},
{
req: mustJSONRequest(t, "GET", "enable", ""),
store: mockAuthStore{
enabled: true,
},
wcode: http.StatusOK,
wbody: `{"enabled":true}`,
},
{
req: mustJSONRequest(t, "PUT", "enable", ""),
store: mockAuthStore{
enabled: false,
},
wcode: http.StatusOK,
wbody: ``,
},
{
req: (func() *http.Request {
req := mustJSONRequest(t, "DELETE", "enable", "")
req.SetBasicAuth("root", "good")
return req
})(),
store: mockAuthStore{
enabled: true,
users: map[string]*v2auth.User{
"root": {
User: "root",
Password: goodPassword,
Roles: []string{"root"},
},
},
roles: map[string]*v2auth.Role{
"root": {
Role: "root",
},
},
},
wcode: http.StatusOK,
wbody: ``,
},
{
req: (func() *http.Request {
req := mustJSONRequest(t, "DELETE", "enable", "")
req.SetBasicAuth("root", "bad")
return req
})(),
store: mockAuthStore{
enabled: true,
users: map[string]*v2auth.User{
"root": {
User: "root",
Password: goodPassword,
Roles: []string{"root"},
},
},
roles: map[string]*v2auth.Role{
"root": {
Role: "guest",
},
},
},
wcode: http.StatusUnauthorized,
wbody: `{"message":"Insufficient credentials"}`,
},
}
for i, tt := range testCases {
mux := http.NewServeMux()
h := &authHandler{
lg: zap.NewExample(),
sec: &tt.store,
cluster: &fakeCluster{id: 1},
}
handleAuth(mux, h)
rw := httptest.NewRecorder()
mux.ServeHTTP(rw, tt.req)
if rw.Code != tt.wcode {
t.Errorf("#%d: got code=%d, want %d", i, rw.Code, tt.wcode)
}
g := rw.Body.String()
g = strings.TrimSpace(g)
if g != tt.wbody {
t.Errorf("#%d: got body=%s, want %s", i, g, tt.wbody)
}
}
}
func TestGetUserGrantedWithNonexistingRole(t *testing.T) {
sh := &authHandler{
sec: &mockAuthStore{
users: map[string]*v2auth.User{
"root": {
User: "root",
Roles: []string{"root", "foo"},
},
},
roles: map[string]*v2auth.Role{
"root": {
Role: "root",
},
},
},
cluster: &fakeCluster{id: 1},
}
srv := httptest.NewServer(http.HandlerFunc(sh.baseUsers))
defer srv.Close()
req, err := http.NewRequest("GET", "", nil)
if err != nil {
t.Fatal(err)
}
req.URL, err = url.Parse(srv.URL)
if err != nil {
t.Fatal(err)
}
req.Header.Set("Content-Type", "application/json")
cli := http.DefaultClient
resp, err := cli.Do(req)
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
var uc usersCollections
if err := json.NewDecoder(resp.Body).Decode(&uc); err != nil {
t.Fatal(err)
}
if len(uc.Users) != 1 {
t.Fatalf("expected 1 user, got %+v", uc.Users)
}
if uc.Users[0].User != "root" {
t.Fatalf("expected 'root', got %q", uc.Users[0].User)
}
if len(uc.Users[0].Roles) != 1 {
t.Fatalf("expected 1 role, got %+v", uc.Users[0].Roles)
}
if uc.Users[0].Roles[0].Role != "root" {
t.Fatalf("expected 'root', got %q", uc.Users[0].Roles[0].Role)
}
}
func mustAuthRequest(username, password string) *http.Request {
req, err := http.NewRequest(http.MethodGet, "path", strings.NewReader(""))
if err != nil {
panic("Cannot make auth request: " + err.Error())
}
req.SetBasicAuth(username, password)
return req
}
func unauthedRequest() *http.Request {
req, err := http.NewRequest(http.MethodGet, "path", strings.NewReader(""))
if err != nil {
panic("Cannot make request: " + err.Error())
}
return req
}
func tlsAuthedRequest(req *http.Request, certname string) *http.Request {
bytes, err := os.ReadFile(fmt.Sprintf("testdata/%s.pem", certname))
if err != nil {
panic(err)
}
block, _ := pem.Decode(bytes)
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
panic(err)
}
req.TLS = &tls.ConnectionState{
VerifiedChains: [][]*x509.Certificate{{cert}},
}
return req
}
func TestPrefixAccess(t *testing.T) {
var table = []struct {
key string
req *http.Request
store *mockAuthStore
hasRoot bool
hasKeyPrefixAccess bool
hasRecursiveAccess bool
}{
{
key: "/foo",
req: mustAuthRequest("root", "good"),
store: &mockAuthStore{
users: map[string]*v2auth.User{
"root": {
User: "root",
Password: goodPassword,
Roles: []string{"root"},
},
},
roles: map[string]*v2auth.Role{
"root": {
Role: "root",
},
},
enabled: true,
},
hasRoot: true,
hasKeyPrefixAccess: true,
hasRecursiveAccess: true,
},
{
key: "/foo",
req: mustAuthRequest("user", "good"),
store: &mockAuthStore{
users: map[string]*v2auth.User{
"user": {
User: "user",
Password: goodPassword,
Roles: []string{"foorole"},
},
},
roles: map[string]*v2auth.Role{
"foorole": {
Role: "foorole",
Permissions: v2auth.Permissions{
KV: v2auth.RWPermission{
Read: []string{"/foo"},
Write: []string{"/foo"},
},
},
},
},
enabled: true,
},
hasRoot: false,
hasKeyPrefixAccess: true,
hasRecursiveAccess: false,
},
{
key: "/foo",
req: mustAuthRequest("user", "good"),
store: &mockAuthStore{
users: map[string]*v2auth.User{
"user": {
User: "user",
Password: goodPassword,
Roles: []string{"foorole"},
},
},
roles: map[string]*v2auth.Role{
"foorole": {
Role: "foorole",
Permissions: v2auth.Permissions{
KV: v2auth.RWPermission{
Read: []string{"/foo*"},
Write: []string{"/foo*"},
},
},
},
},
enabled: true,
},
hasRoot: false,
hasKeyPrefixAccess: true,
hasRecursiveAccess: true,
},
{
key: "/foo",
req: mustAuthRequest("user", "bad"),
store: &mockAuthStore{
users: map[string]*v2auth.User{
"user": {
User: "user",
Password: goodPassword,
Roles: []string{"foorole"},
},
},
roles: map[string]*v2auth.Role{
"foorole": {
Role: "foorole",
Permissions: v2auth.Permissions{
KV: v2auth.RWPermission{
Read: []string{"/foo*"},
Write: []string{"/foo*"},
},
},
},
},
enabled: true,
},
hasRoot: false,
hasKeyPrefixAccess: false,
hasRecursiveAccess: false,
},
{
key: "/foo",
req: mustAuthRequest("user", "good"),
store: &mockAuthStore{
users: map[string]*v2auth.User{},
err: errors.New("Not the user"),
enabled: true,
},
hasRoot: false,
hasKeyPrefixAccess: false,
hasRecursiveAccess: false,
},
{
key: "/foo",
req: mustJSONRequest(t, "GET", "somepath", ""),
store: &mockAuthStore{
users: map[string]*v2auth.User{
"user": {
User: "user",
Password: goodPassword,
Roles: []string{"foorole"},
},
},
roles: map[string]*v2auth.Role{
"guest": {
Role: "guest",
Permissions: v2auth.Permissions{
KV: v2auth.RWPermission{
Read: []string{"/foo*"},
Write: []string{"/foo*"},
},
},
},
},
enabled: true,
},
hasRoot: false,
hasKeyPrefixAccess: true,
hasRecursiveAccess: true,
},
{
key: "/bar",
req: mustJSONRequest(t, "GET", "somepath", ""),
store: &mockAuthStore{
users: map[string]*v2auth.User{
"user": {
User: "user",
Password: goodPassword,
Roles: []string{"foorole"},
},
},
roles: map[string]*v2auth.Role{
"guest": {
Role: "guest",
Permissions: v2auth.Permissions{
KV: v2auth.RWPermission{
Read: []string{"/foo*"},
Write: []string{"/foo*"},
},
},
},
},
enabled: true,
},
hasRoot: false,
hasKeyPrefixAccess: false,
hasRecursiveAccess: false,
},
// check access for multiple roles
{
key: "/foo",
req: mustAuthRequest("user", "good"),
store: &mockAuthStore{
users: map[string]*v2auth.User{
"user": {
User: "user",
Password: goodPassword,
Roles: []string{"role1", "role2"},
},
},
roles: map[string]*v2auth.Role{
"role1": {
Role: "role1",
},
"role2": {
Role: "role2",
Permissions: v2auth.Permissions{
KV: v2auth.RWPermission{
Read: []string{"/foo"},
Write: []string{"/foo"},
},
},
},
},
enabled: true,
},
hasRoot: false,
hasKeyPrefixAccess: true,
hasRecursiveAccess: false,
},
{
key: "/foo",
req: (func() *http.Request {
req := mustJSONRequest(t, "GET", "somepath", "")
req.Header.Set("Authorization", "malformedencoding")
return req
})(),
store: &mockAuthStore{
enabled: true,
users: map[string]*v2auth.User{
"root": {
User: "root",
Password: goodPassword,
Roles: []string{"root"},
},
},
roles: map[string]*v2auth.Role{
"guest": {
Role: "guest",
Permissions: v2auth.Permissions{
KV: v2auth.RWPermission{
Read: []string{"/foo*"},
Write: []string{"/foo*"},
},
},
},
},
},
hasRoot: false,
hasKeyPrefixAccess: false,
hasRecursiveAccess: false,
},
{ // guest access in non-TLS mode
key: "/foo",
req: (func() *http.Request {
return mustJSONRequest(t, "GET", "somepath", "")
})(),
store: &mockAuthStore{
enabled: true,
users: map[string]*v2auth.User{
"root": {
User: "root",
Password: goodPassword,
Roles: []string{"root"},
},
},
roles: map[string]*v2auth.Role{
"guest": {
Role: "guest",
Permissions: v2auth.Permissions{
KV: v2auth.RWPermission{
Read: []string{"/foo*"},
Write: []string{"/foo*"},
},
},
},
},
},
hasRoot: false,
hasKeyPrefixAccess: true,
hasRecursiveAccess: true,
},
}
for i, tt := range table {
if tt.hasRoot != hasRootAccess(zap.NewExample(), tt.store, tt.req, true) {
t.Errorf("#%d: hasRoot doesn't match (expected %v)", i, tt.hasRoot)
}
if tt.hasKeyPrefixAccess != hasKeyPrefixAccess(zap.NewExample(), tt.store, tt.req, tt.key, false, true) {
t.Errorf("#%d: hasKeyPrefixAccess doesn't match (expected %v)", i, tt.hasRoot)
}
if tt.hasRecursiveAccess != hasKeyPrefixAccess(zap.NewExample(), tt.store, tt.req, tt.key, true, true) {
t.Errorf("#%d: hasRecursiveAccess doesn't match (expected %v)", i, tt.hasRoot)
}
}
}
// TestUserFromClientCertificate verifies that userFromClientCertificate
// resolves a user from the TLS client certificate's common name: a non-TLS
// request resolves no user, a CN naming an existing user resolves that
// user, and a CN with no matching store entry resolves nil.
func TestUserFromClientCertificate(t *testing.T) {
	// Store whose user lookups return an error; used in cases where no
	// user must be resolved.
	witherror := &mockAuthStore{
		users: map[string]*v2auth.User{
			"user": {
				User:     "user",
				Roles:    []string{"root"},
				Password: "password",
			},
			"basicauth": {
				User:     "basicauth",
				Roles:    []string{"root"},
				Password: "password",
			},
		},
		roles: map[string]*v2auth.Role{
			"root": {
				Role: "root",
			},
		},
		err: errors.New(""),
	}
	// Store whose user lookups succeed.
	noerror := &mockAuthStore{
		users: map[string]*v2auth.User{
			"user": {
				User:     "user",
				Roles:    []string{"root"},
				Password: "password",
			},
			"basicauth": {
				User:     "basicauth",
				Roles:    []string{"root"},
				Password: "password",
			},
		},
		roles: map[string]*v2auth.Role{
			"root": {
				Role: "root",
			},
		},
	}
	var table = []struct {
		req        *http.Request
		userExists bool         // whether a non-nil user is expected
		store      v2auth.Store // auth store backing the lookup
		username   string       // expected user name when userExists
	}{
		{
			// non tls request
			req:        unauthedRequest(),
			userExists: false,
			store:      witherror,
		},
		{
			// cert with cn of existing user
			req:        tlsAuthedRequest(unauthedRequest(), "user"),
			userExists: true,
			username:   "user",
			store:      noerror,
		},
		{
			// cert with cn of non-existing user
			req:        tlsAuthedRequest(unauthedRequest(), "otheruser"),
			userExists: false,
			store:      witherror,
		},
	}

	for i, tt := range table {
		user := userFromClientCertificate(zap.NewExample(), tt.store, tt.req)
		userExists := user != nil
		if tt.userExists != userExists {
			t.Errorf("#%d: userFromClientCertificate doesn't match (expected %v)", i, tt.userExists)
		}
		if user != nil && (tt.username != user.User) {
			t.Errorf("#%d: userFromClientCertificate username doesn't match (expected %s, got %s)", i, tt.username, user.User)
		}
	}
}
// TestUserFromBasicAuth verifies that userFromBasicAuth resolves a user
// from the request's HTTP Basic Auth header only when both the username
// exists in the store and the supplied password matches.
func TestUserFromBasicAuth(t *testing.T) {
	sec := &mockAuthStore{
		users: map[string]*v2auth.User{
			"user": {
				User:     "user",
				Roles:    []string{"root"},
				Password: "password",
			},
		},
		roles: map[string]*v2auth.Role{
			"root": {
				Role: "root",
			},
		},
	}

	var table = []struct {
		username   string // expected resolved user name
		req        *http.Request
		userExists bool // whether a non-nil user is expected
	}{
		{
			// valid user, valid pass
			username:   "user",
			req:        mustAuthRequest("user", "password"),
			userExists: true,
		},
		{
			// valid user, bad pass
			username:   "user",
			req:        mustAuthRequest("user", "badpass"),
			userExists: false,
		},
		{
			// valid user, no pass
			username:   "user",
			req:        mustAuthRequest("user", ""),
			userExists: false,
		},
		{
			// missing user
			username:   "missing",
			req:        mustAuthRequest("missing", "badpass"),
			userExists: false,
		},
		{
			// no basic auth
			req:        unauthedRequest(),
			userExists: false,
		},
	}

	for i, tt := range table {
		user := userFromBasicAuth(zap.NewExample(), sec, tt.req)
		userExists := user != nil
		if tt.userExists != userExists {
			t.Errorf("#%d: userFromBasicAuth doesn't match (expected %v)", i, tt.userExists)
		}
		if user != nil && (tt.username != user.User) {
			t.Errorf("#%d: userFromBasicAuth username doesn't match (expected %s, got %s)", i, tt.username, user.User)
		}
	}
}
| server/etcdserver/api/v2http/client_auth_test.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.0024162346962839365,
0.00020281928300391883,
0.0001648636389290914,
0.00016968735144473612,
0.00023581752611789852
] |
{
"id": 10,
"code_window": [
"func TestCtlV3MakeMirror(t *testing.T) { testCtl(t, makeMirrorTest) }\n",
"func TestCtlV3MakeMirrorModifyDestPrefix(t *testing.T) { testCtl(t, makeMirrorModifyDestPrefixTest) }\n",
"func TestCtlV3MakeMirrorNoDestPrefix(t *testing.T) { testCtl(t, makeMirrorNoDestPrefixTest) }\n",
"\n",
"func makeMirrorTest(cx ctlCtx) {\n",
"\tvar (\n",
"\t\tflags = []string{}\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func TestCtlV3MakeMirrorWithWatchRev(t *testing.T) { testCtl(t, makeMirrorWithWatchRev) }\n"
],
"file_path": "tests/e2e/ctl_v3_make_mirror_test.go",
"type": "add",
"edit_start_line_idx": 27
} | // Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package command
import (
"context"
"errors"
"fmt"
"strings"
"sync/atomic"
"time"
"github.com/bgentry/speakeasy"
"go.etcd.io/etcd/pkg/v3/cobrautl"
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/mirror"
"github.com/spf13/cobra"
)
// Flag storage for the make-mirror command. All flags describe how to
// reach and authenticate against the DESTINATION cluster, plus how keys
// are prefixed when copied.
var (
	mminsecureTr   bool   // disable transport security to the destination
	mmcert         string // destination client TLS certificate file
	mmkey          string // destination client TLS key file
	mmcacert       string // destination CA bundle file
	mmprefix       string // source key prefix to mirror
	mmdestprefix   string // prefix keys are rewritten to in the destination
	mmuser         string // destination username[:password]
	mmpassword     string // destination password (alternative to user:pass form)
	mmnodestprefix bool   // mirror to the destination root (strip the prefix)
)
// NewMakeMirrorCommand returns the cobra command for "makeMirror".
// The returned command mirrors keys from the cluster addressed by the
// global flags into the destination cluster given as the sole positional
// argument, using the dest-* flags registered below.
func NewMakeMirrorCommand() *cobra.Command {
	c := &cobra.Command{
		Use:   "make-mirror [options] <destination>",
		Short: "Makes a mirror at the destination etcd cluster",
		Run:   makeMirrorCommandFunc,
	}

	c.Flags().StringVar(&mmprefix, "prefix", "", "Key-value prefix to mirror")
	c.Flags().StringVar(&mmdestprefix, "dest-prefix", "", "destination prefix to mirror a prefix to a different prefix in the destination cluster")
	c.Flags().BoolVar(&mmnodestprefix, "no-dest-prefix", false, "mirror key-values to the root of the destination cluster")
	c.Flags().StringVar(&mmcert, "dest-cert", "", "Identify secure client using this TLS certificate file for the destination cluster")
	c.Flags().StringVar(&mmkey, "dest-key", "", "Identify secure client using this TLS key file")
	c.Flags().StringVar(&mmcacert, "dest-cacert", "", "Verify certificates of TLS enabled secure servers using this CA bundle")
	// TODO: secure by default when etcd enables secure gRPC by default.
	c.Flags().BoolVar(&mminsecureTr, "dest-insecure-transport", true, "Disable transport security for client connections")
	c.Flags().StringVar(&mmuser, "dest-user", "", "Destination username[:password] for authentication (prompt if password is not supplied)")
	c.Flags().StringVar(&mmpassword, "dest-password", "", "Destination password for authentication (if this option is used, --user option shouldn't include password)")

	return c
}
// authDestCfg derives the destination cluster's authentication config from
// the --dest-user/--dest-password flags. It returns nil when no user is
// set. When only --dest-user is given it accepts the "user:pass" form, and
// otherwise prompts interactively for the password (exiting on prompt
// failure).
func authDestCfg() *authCfg {
	if mmuser == "" {
		return nil
	}

	cfg := authCfg{username: mmuser, password: mmpassword}
	if mmpassword != "" {
		// explicit --dest-password; --dest-user is the bare username
		return &cfg
	}

	parts := strings.SplitN(mmuser, ":", 2)
	if len(parts) == 2 {
		// "user:pass" packed into --dest-user
		cfg.username, cfg.password = parts[0], parts[1]
		return &cfg
	}

	// no password anywhere: ask on the terminal
	pw, err := speakeasy.Ask("Destination Password: ")
	if err != nil {
		cobrautl.ExitWithError(cobrautl.ExitError, err)
	}
	cfg.password = pw
	return &cfg
}
// makeMirrorCommandFunc is the cobra Run handler for "make-mirror".
// It builds a dedicated client for the destination cluster from the
// dest-* flags, reuses the global command flags for the source cluster,
// and then runs the mirror loop.
func makeMirrorCommandFunc(cmd *cobra.Command, args []string) {
	if len(args) != 1 {
		cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("make-mirror takes one destination argument"))
	}

	dialTimeout := dialTimeoutFromCmd(cmd)
	keepAliveTime := keepAliveTimeFromCmd(cmd)
	keepAliveTimeout := keepAliveTimeoutFromCmd(cmd)
	sec := &secureCfg{
		cert:              mmcert,
		key:               mmkey,
		cacert:            mmcacert,
		insecureTransport: mminsecureTr,
	}

	auth := authDestCfg()

	cc := &clientConfig{
		endpoints:        []string{args[0]},
		dialTimeout:      dialTimeout,
		keepAliveTime:    keepAliveTime,
		keepAliveTimeout: keepAliveTimeout,
		scfg:             sec,
		acfg:             auth,
	}
	dc := cc.mustClient()
	c := mustClientFromCmd(cmd)

	// makeMirror normally runs until an error occurs, so when it returns
	// the command always exits with an error status carrying that error.
	err := makeMirror(context.TODO(), c, dc)
	cobrautl.ExitWithError(cobrautl.ExitError, err)
}
// makeMirror copies all keys under mmprefix from the source cluster c into
// the destination cluster dc, then follows the source's watch stream and
// replays every subsequent put/delete. It returns only on error or when
// the watch channel closes; a compaction on the source surfaces as
// rpctypes.ErrCompacted.
func makeMirror(ctx context.Context, c *clientv3.Client, dc *clientv3.Client) error {
	total := int64(0)

	// if destination prefix is specified and remove destination prefix is true return error
	if mmnodestprefix && len(mmdestprefix) > 0 {
		cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("`--dest-prefix` and `--no-dest-prefix` cannot be set at the same time, choose one"))
	}

	// Progress reporter: prints the cumulative key count every 30s.
	// NOTE(review): this goroutine is never stopped; acceptable for a
	// process-lifetime CLI command, but it would leak in library use.
	go func() {
		for {
			time.Sleep(30 * time.Second)
			fmt.Println(atomic.LoadInt64(&total))
		}
	}()

	s := mirror.NewSyncer(c, mmprefix, 0)

	rc, errc := s.SyncBase(ctx)

	// if remove destination prefix is false and destination prefix is empty set the value of destination prefix same as prefix
	if !mmnodestprefix && len(mmdestprefix) == 0 {
		mmdestprefix = mmprefix
	}

	// Phase 1: copy the existing keyspace snapshot.
	for r := range rc {
		for _, kv := range r.Kvs {
			_, err := dc.Put(ctx, modifyPrefix(string(kv.Key)), string(kv.Value))
			if err != nil {
				return err
			}
			atomic.AddInt64(&total, 1)
		}
	}

	err := <-errc
	if err != nil {
		return err
	}

	// Phase 2: stream live updates and replay them transactionally,
	// batching all events that share a ModRevision into one Txn.
	wc := s.SyncUpdates(ctx)

	for wr := range wc {
		if wr.CompactRevision != 0 {
			return rpctypes.ErrCompacted
		}

		var lastRev int64
		ops := []clientv3.Op{}

		for _, ev := range wr.Events {
			nextRev := ev.Kv.ModRevision
			// Revision advanced: flush the previous revision's batch.
			if lastRev != 0 && nextRev > lastRev {
				_, err := dc.Txn(ctx).Then(ops...).Commit()
				if err != nil {
					return err
				}
				ops = []clientv3.Op{}
			}
			lastRev = nextRev
			switch ev.Type {
			case mvccpb.PUT:
				ops = append(ops, clientv3.OpPut(modifyPrefix(string(ev.Kv.Key)), string(ev.Kv.Value)))
				atomic.AddInt64(&total, 1)
			case mvccpb.DELETE:
				ops = append(ops, clientv3.OpDelete(modifyPrefix(string(ev.Kv.Key))))
				atomic.AddInt64(&total, 1)
			default:
				panic("unexpected event type")
			}
		}

		// Flush the trailing batch of the watch response.
		if len(ops) != 0 {
			_, err := dc.Txn(ctx).Then(ops...).Commit()
			if err != nil {
				return err
			}
		}
	}

	return nil
}
// modifyPrefix rewrites the first occurrence of the source prefix in key
// with the destination prefix; keys without the prefix pass through
// unchanged (same semantics as strings.Replace with n = 1).
func modifyPrefix(key string) string {
	i := strings.Index(key, mmprefix)
	if i < 0 {
		return key
	}
	return key[:i] + mmdestprefix + key[i+len(mmprefix):]
}
| etcdctl/ctlv3/command/make_mirror_command.go | 1 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.016690392047166824,
0.0011736633023247123,
0.00016425874491687864,
0.00017104909056797624,
0.0035186512395739555
] |
{
"id": 10,
"code_window": [
"func TestCtlV3MakeMirror(t *testing.T) { testCtl(t, makeMirrorTest) }\n",
"func TestCtlV3MakeMirrorModifyDestPrefix(t *testing.T) { testCtl(t, makeMirrorModifyDestPrefixTest) }\n",
"func TestCtlV3MakeMirrorNoDestPrefix(t *testing.T) { testCtl(t, makeMirrorNoDestPrefixTest) }\n",
"\n",
"func makeMirrorTest(cx ctlCtx) {\n",
"\tvar (\n",
"\t\tflags = []string{}\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func TestCtlV3MakeMirrorWithWatchRev(t *testing.T) { testCtl(t, makeMirrorWithWatchRev) }\n"
],
"file_path": "tests/e2e/ctl_v3_make_mirror_test.go",
"type": "add",
"edit_start_line_idx": 27
} | // Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build linux
// +build linux
package main
import (
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"go.etcd.io/etcd/client/pkg/v3/fileutil"
)
const downloadURL = `https://storage.googleapis.com/etcd/%s/etcd-%s-linux-amd64.tar.gz`
// install downloads the release tarball for etcd version ver into dir,
// extracts it there, and returns the path of the extracted etcd binary.
// It fails if the download endpoint does not answer 200 OK (previously an
// HTML error page could be saved and fed to tar), and it streams the body
// to disk instead of buffering the whole tarball in memory.
func install(ver, dir string) (string, error) {
	ep := fmt.Sprintf(downloadURL, ver, ver)

	resp, err := http.Get(ep)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("unexpected status %s downloading %q", resp.Status, ep)
	}

	tarPath := filepath.Join(dir, "etcd.tar.gz")
	f, err := os.OpenFile(tarPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fileutil.PrivateFileMode)
	if err != nil {
		return "", err
	}
	if _, err = io.Copy(f, resp.Body); err != nil {
		f.Close()
		return "", err
	}
	if err = f.Close(); err != nil {
		return "", err
	}

	// fixed argument list prevents attackers from injecting arbitrary OS commands
	if err = exec.Command("tar", "xzvf", tarPath, "-C", dir, "--strip-components=1").Run(); err != nil {
		return "", err
	}
	return filepath.Join(dir, "etcd"), nil
}
| tools/etcd-dump-metrics/install_linux.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.00017747230594977736,
0.0001715111284283921,
0.00016711327771190554,
0.00017102647689171135,
0.000003585244485293515
] |
{
"id": 10,
"code_window": [
"func TestCtlV3MakeMirror(t *testing.T) { testCtl(t, makeMirrorTest) }\n",
"func TestCtlV3MakeMirrorModifyDestPrefix(t *testing.T) { testCtl(t, makeMirrorModifyDestPrefixTest) }\n",
"func TestCtlV3MakeMirrorNoDestPrefix(t *testing.T) { testCtl(t, makeMirrorNoDestPrefixTest) }\n",
"\n",
"func makeMirrorTest(cx ctlCtx) {\n",
"\tvar (\n",
"\t\tflags = []string{}\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func TestCtlV3MakeMirrorWithWatchRev(t *testing.T) { testCtl(t, makeMirrorWithWatchRev) }\n"
],
"file_path": "tests/e2e/ctl_v3_make_mirror_test.go",
"type": "add",
"edit_start_line_idx": 27
} | // Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tester
import (
"context"
"errors"
"fmt"
"io"
"log"
"math/rand"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
"sync"
"time"
"go.etcd.io/etcd/client/pkg/v3/fileutil"
"go.etcd.io/etcd/pkg/v3/debugutil"
"go.etcd.io/etcd/tests/v3/functional/rpcpb"
"github.com/prometheus/client_golang/prometheus/promhttp"
"go.uber.org/zap"
"golang.org/x/time/rate"
"google.golang.org/grpc"
)
// Cluster defines tester cluster. It owns one gRPC connection/stream per
// agent (indexed in parallel with Members), the tester HTTP server, and
// the stressers/checkers that drive and validate the cluster under test.
type Cluster struct {
	lg *zap.Logger

	// per-agent transport state; element i corresponds to Members[i]
	agentConns    []*grpc.ClientConn
	agentClients  []rpcpb.TransportClient
	agentStreams  []rpcpb.Transport_TransportClient
	agentRequests []*rpcpb.Request

	testerHTTPServer *http.Server // serves /metrics (and pprof when enabled)

	Members []*rpcpb.Member `yaml:"agent-configs"`
	Tester  *rpcpb.Tester   `yaml:"tester-config"`

	cases []Case // failure cases scheduled for this run

	rateLimiter *rate.Limiter
	stresser    Stresser
	checkers    []Checker

	currentRevision int64
	rd              int // current round
	cs              int // current case index within the round
}
// dialOpts are the gRPC options used for every tester-to-agent connection:
// plaintext transport and a blocking dial bounded by a 5-second timeout.
var dialOpts = []grpc.DialOption{
	grpc.WithInsecure(),
	grpc.WithTimeout(5 * time.Second),
	grpc.WithBlock(),
}
// NewCluster creates a cluster from a tester configuration.
// It reads the YAML config at fpath, dials every agent listed there,
// opens a transport stream per agent, starts the tester HTTP server
// (metrics and optional pprof), and installs the configured stressers
// and checkers. The returned cluster is ready for Send_INITIAL_START_ETCD.
func NewCluster(lg *zap.Logger, fpath string) (*Cluster, error) {
	clus, err := read(lg, fpath)
	if err != nil {
		return nil, err
	}

	clus.agentConns = make([]*grpc.ClientConn, len(clus.Members))
	clus.agentClients = make([]rpcpb.TransportClient, len(clus.Members))
	clus.agentStreams = make([]rpcpb.Transport_TransportClient, len(clus.Members))
	clus.agentRequests = make([]*rpcpb.Request, len(clus.Members))
	clus.cases = make([]Case, 0)

	lg.Info("creating members")
	for i, ap := range clus.Members {
		var err error
		clus.agentConns[i], err = grpc.Dial(ap.AgentAddr, dialOpts...)
		if err != nil {
			return nil, fmt.Errorf("cannot dial agent %v: %w", ap.AgentAddr, err)
		}
		clus.agentClients[i] = rpcpb.NewTransportClient(clus.agentConns[i])
		lg.Info("connected", zap.String("agent-address", ap.AgentAddr))

		clus.agentStreams[i], err = clus.agentClients[i].Transport(context.Background())
		if err != nil {
			return nil, err
		}
		lg.Info("created stream", zap.String("agent-address", ap.AgentAddr))
	}
	lg.Info("agents configured.")

	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.Handler())
	if clus.Tester.EnablePprof {
		for p, h := range debugutil.PProfHandlers() {
			mux.Handle(p, h)
		}
	}
	clus.testerHTTPServer = &http.Server{
		Addr:     clus.Tester.Addr,
		Handler:  mux,
		ErrorLog: log.New(io.Discard, "net/http", 0), // suppress noisy net/http logging
	}
	go clus.serveTesterServer()
	lg.Info("tester server started")

	clus.rateLimiter = rate.NewLimiter(
		rate.Limit(int(clus.Tester.StressQPS)),
		int(clus.Tester.StressQPS),
	)

	clus.setStresserChecker()

	return clus, nil
}
// EtcdClientEndpoints returns all etcd client endpoints, one per member,
// in member order.
func (clus *Cluster) EtcdClientEndpoints() (css []string) {
	css = make([]string, 0, len(clus.Members))
	for _, member := range clus.Members {
		css = append(css, member.EtcdClientEndpoint)
	}
	return css
}
// serveTesterServer runs the tester HTTP server (metrics/pprof) until it
// stops. It is meant to run on its own goroutine; any error other than
// http.ErrServerClosed (the normal Shutdown result) is fatal.
func (clus *Cluster) serveTesterServer() {
	clus.lg.Info(
		"started tester HTTP server",
		zap.String("tester-address", clus.Tester.Addr),
	)
	err := clus.testerHTTPServer.ListenAndServe()
	clus.lg.Info(
		"tester HTTP server returned",
		zap.String("tester-address", clus.Tester.Addr),
		zap.Error(err),
	)
	if err != nil && err != http.ErrServerClosed {
		clus.lg.Fatal("tester HTTP errored", zap.Error(err))
	}
}
// updateCases translates the case names configured in Tester.Cases into
// concrete Case implementations and appends them to clus.cases. The
// RANDOM_ variants pass random=true to their constructors; FAILPOINTS*
// expand to one case per discovered failpoint. Unrecognized names fall
// through the switch and are silently ignored.
func (clus *Cluster) updateCases() {
	for _, cs := range clus.Tester.Cases {
		switch cs {
		case "SIGTERM_ONE_FOLLOWER":
			clus.cases = append(clus.cases,
				new_Case_SIGTERM_ONE_FOLLOWER(clus))
		case "SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
			clus.cases = append(clus.cases,
				new_Case_SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus))
		case "SIGTERM_LEADER":
			clus.cases = append(clus.cases,
				new_Case_SIGTERM_LEADER(clus))
		case "SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT":
			clus.cases = append(clus.cases,
				new_Case_SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus))
		case "SIGTERM_QUORUM":
			clus.cases = append(clus.cases,
				new_Case_SIGTERM_QUORUM(clus))
		case "SIGTERM_ALL":
			clus.cases = append(clus.cases,
				new_Case_SIGTERM_ALL(clus))
		case "SIGQUIT_AND_REMOVE_ONE_FOLLOWER":
			clus.cases = append(clus.cases,
				new_Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER(clus))
		case "SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
			clus.cases = append(clus.cases,
				new_Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus))
		case "SIGQUIT_AND_REMOVE_LEADER":
			clus.cases = append(clus.cases,
				new_Case_SIGQUIT_AND_REMOVE_LEADER(clus))
		case "SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT":
			clus.cases = append(clus.cases,
				new_Case_SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus))
		case "SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH":
			clus.cases = append(clus.cases,
				new_Case_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH(clus))
		case "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER":
			clus.cases = append(clus.cases,
				new_Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER(clus))
		case "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
			clus.cases = append(clus.cases,
				new_Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT())
		case "BLACKHOLE_PEER_PORT_TX_RX_LEADER":
			clus.cases = append(clus.cases,
				new_Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER(clus))
		case "BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT":
			clus.cases = append(clus.cases,
				new_Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT())
		case "BLACKHOLE_PEER_PORT_TX_RX_QUORUM":
			clus.cases = append(clus.cases,
				new_Case_BLACKHOLE_PEER_PORT_TX_RX_QUORUM(clus))
		case "BLACKHOLE_PEER_PORT_TX_RX_ALL":
			clus.cases = append(clus.cases,
				new_Case_BLACKHOLE_PEER_PORT_TX_RX_ALL(clus))
		case "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER":
			clus.cases = append(clus.cases,
				new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER(clus, false))
		case "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER":
			clus.cases = append(clus.cases,
				new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER(clus, true))
		case "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
			clus.cases = append(clus.cases,
				new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus, false))
		case "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":
			clus.cases = append(clus.cases,
				new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus, true))
		case "DELAY_PEER_PORT_TX_RX_LEADER":
			clus.cases = append(clus.cases,
				new_Case_DELAY_PEER_PORT_TX_RX_LEADER(clus, false))
		case "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER":
			clus.cases = append(clus.cases,
				new_Case_DELAY_PEER_PORT_TX_RX_LEADER(clus, true))
		case "DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT":
			clus.cases = append(clus.cases,
				new_Case_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus, false))
		case "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT":
			clus.cases = append(clus.cases,
				new_Case_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus, true))
		case "DELAY_PEER_PORT_TX_RX_QUORUM":
			clus.cases = append(clus.cases,
				new_Case_DELAY_PEER_PORT_TX_RX_QUORUM(clus, false))
		case "RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM":
			clus.cases = append(clus.cases,
				new_Case_DELAY_PEER_PORT_TX_RX_QUORUM(clus, true))
		case "DELAY_PEER_PORT_TX_RX_ALL":
			clus.cases = append(clus.cases,
				new_Case_DELAY_PEER_PORT_TX_RX_ALL(clus, false))
		case "RANDOM_DELAY_PEER_PORT_TX_RX_ALL":
			clus.cases = append(clus.cases,
				new_Case_DELAY_PEER_PORT_TX_RX_ALL(clus, true))
		case "NO_FAIL_WITH_STRESS":
			clus.cases = append(clus.cases,
				new_Case_NO_FAIL_WITH_STRESS(clus))
		case "NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS":
			clus.cases = append(clus.cases,
				new_Case_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS(clus))
		case "EXTERNAL":
			clus.cases = append(clus.cases,
				new_Case_EXTERNAL(clus.Tester.ExternalExecPath))
		case "FAILPOINTS":
			fpFailures, fperr := failpointFailures(clus)
			if len(fpFailures) == 0 {
				clus.lg.Info("no failpoints found!", zap.Error(fperr))
			}
			clus.cases = append(clus.cases,
				fpFailures...)
		case "FAILPOINTS_WITH_DISK_IO_LATENCY":
			fpFailures, fperr := failpointDiskIOFailures(clus)
			if len(fpFailures) == 0 {
				clus.lg.Info("no failpoints found!", zap.Error(fperr))
			}
			clus.cases = append(clus.cases,
				fpFailures...)
		}
	}
}
// listCases returns the human-readable description of every scheduled
// case, in schedule order.
func (clus *Cluster) listCases() (css []string) {
	css = make([]string, 0, len(clus.cases))
	for _, c := range clus.cases {
		css = append(css, c.Desc())
	}
	return css
}
// UpdateDelayLatencyMs updates delay latency with random value
// within election timeout, and bumps it above the configured jitter
// (DelayLatencyMsRv plus 20%) so the randomized latency stays meaningful.
func (clus *Cluster) UpdateDelayLatencyMs() {
	// NOTE(review): rand.Seed is deprecated since Go 1.20 — confirm the
	// module's Go version before migrating to math/rand/v2.
	rand.Seed(time.Now().UnixNano())
	clus.Tester.UpdatedDelayLatencyMs = uint32(rand.Int63n(clus.Members[0].Etcd.ElectionTimeoutMs))

	minLatRv := clus.Tester.DelayLatencyMsRv + clus.Tester.DelayLatencyMsRv/5
	if clus.Tester.UpdatedDelayLatencyMs <= minLatRv {
		clus.Tester.UpdatedDelayLatencyMs += minLatRv
	}
}
// setStresserChecker builds one composite stresser per member, records
// the lease and runner stressers it finds (some checkers are bound to
// them), and then installs the checkers named in Tester.Checkers.
// Fixes a copy-paste bug where adding a runner stresser logged
// "added lease stresser".
func (clus *Cluster) setStresserChecker() {
	css := &compositeStresser{}
	var (
		lss []*leaseStresser
		rss []*runnerStresser
	)
	for _, m := range clus.Members {
		sss := newStresser(clus, m)
		css.stressers = append(css.stressers, &compositeStresser{sss})
		for _, s := range sss {
			if v, ok := s.(*leaseStresser); ok {
				lss = append(lss, v)
				clus.lg.Info("added lease stresser", zap.String("endpoint", m.EtcdClientEndpoint))
			}
			if v, ok := s.(*runnerStresser); ok {
				rss = append(rss, v)
				clus.lg.Info("added runner stresser", zap.String("endpoint", m.EtcdClientEndpoint))
			}
		}
	}
	clus.stresser = css

	for _, cs := range clus.Tester.Checkers {
		switch cs {
		case "KV_HASH":
			clus.checkers = append(clus.checkers, newKVHashChecker(clus))

		case "LEASE_EXPIRE":
			for _, ls := range lss {
				clus.checkers = append(clus.checkers, newLeaseExpireChecker(ls))
			}

		case "RUNNER":
			for _, rs := range rss {
				clus.checkers = append(clus.checkers, newRunnerChecker(rs.etcdClientEndpoint, rs.errc))
			}

		case "NO_CHECK":
			clus.checkers = append(clus.checkers, newNoChecker())

		case "SHORT_TTL_LEASE_EXPIRE":
			for _, ls := range lss {
				clus.checkers = append(clus.checkers, newShortTTLLeaseExpireChecker(ls))
			}
		}
	}
	clus.lg.Info("updated stressers")
}
// runCheckers runs every installed consistency checker in order. Checker
// types listed in exceptions may fail without aborting the run (the
// failure is logged as SKIP FAIL); any other failure is returned
// immediately. When all checks pass, the deferred hook refreshes the
// cluster's current revision.
func (clus *Cluster) runCheckers(exceptions ...rpcpb.Checker) (err error) {
	defer func() {
		// only update the revision after a fully successful pass
		if err != nil {
			return
		}
		if err = clus.updateRevision(); err != nil {
			clus.lg.Warn(
				"updateRevision failed",
				zap.Error(err),
			)
			return
		}
	}()

	exs := make(map[rpcpb.Checker]struct{})
	for _, e := range exceptions {
		exs[e] = struct{}{}
	}
	for _, chk := range clus.checkers {
		clus.lg.Warn(
			"consistency check START",
			zap.String("checker", chk.Type().String()),
			zap.Strings("client-endpoints", chk.EtcdClientEndpoints()),
		)
		err = chk.Check()
		clus.lg.Warn(
			"consistency check END",
			zap.String("checker", chk.Type().String()),
			zap.Strings("client-endpoints", chk.EtcdClientEndpoints()),
			zap.Error(err),
		)
		if err != nil {
			_, ok := exs[chk.Type()]
			if !ok {
				return err
			}
			clus.lg.Warn(
				"consistency check SKIP FAIL",
				zap.String("checker", chk.Type().String()),
				zap.Strings("client-endpoints", chk.EtcdClientEndpoints()),
				zap.Error(err),
			)
		}
	}
	return nil
}
// Send_INITIAL_START_ETCD bootstraps etcd cluster the very first time.
// After this, just continue to call kill/restart.
func (clus *Cluster) Send_INITIAL_START_ETCD() error {
	// this is the only time that creates request from scratch
	return clus.broadcast(rpcpb.Operation_INITIAL_START_ETCD)
}
// send_SIGQUIT_ETCD_AND_ARCHIVE_DATA sends "send_SIGQUIT_ETCD_AND_ARCHIVE_DATA"
// operation to every agent: stop etcd and archive its data directory.
func (clus *Cluster) send_SIGQUIT_ETCD_AND_ARCHIVE_DATA() error {
	return clus.broadcast(rpcpb.Operation_SIGQUIT_ETCD_AND_ARCHIVE_DATA)
}
// send_RESTART_ETCD sends restart operation to every agent, restarting
// each etcd member with its existing configuration.
func (clus *Cluster) send_RESTART_ETCD() error {
	return clus.broadcast(rpcpb.Operation_RESTART_ETCD)
}
// broadcast sends op to every agent concurrently and waits for all of
// them. Errors are aggregated into a single comma-joined error; teardown
// errors that are expected during agent shutdown (EOF, closing transport,
// already-finished process) are discarded for the stop-agent operation.
// Change: removes a dead nested `if err != nil` (the nil case was already
// skipped by `continue`) and uses a nil error slice instead of an empty
// literal.
func (clus *Cluster) broadcast(op rpcpb.Operation) error {
	var wg sync.WaitGroup
	wg.Add(len(clus.agentStreams))

	errc := make(chan error, len(clus.agentStreams))
	for i := range clus.agentStreams {
		go func(idx int, o rpcpb.Operation) {
			defer wg.Done()
			errc <- clus.sendOp(idx, o)
		}(i, op)
	}
	wg.Wait()
	close(errc)

	var errs []string
	for err := range errc {
		if err == nil {
			continue
		}

		destroyed := false
		if op == rpcpb.Operation_SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT {
			if err == io.EOF {
				destroyed = true
			}
			if strings.Contains(err.Error(),
				"rpc error: code = Unavailable desc = transport is closing") {
				// agent server has already closed;
				// so this error is expected
				destroyed = true
			}
			if strings.Contains(err.Error(),
				"desc = os: process already finished") {
				destroyed = true
			}
		}
		if !destroyed {
			errs = append(errs, err.Error())
		}
	}

	if len(errs) == 0 {
		return nil
	}
	return errors.New(strings.Join(errs, ", "))
}
// sendOp sends op to the agent at index idx, discarding the response and
// returning only the error.
func (clus *Cluster) sendOp(idx int, op rpcpb.Operation) error {
	if _, err := clus.sendOpWithResp(idx, op); err != nil {
		return err
	}
	return nil
}
// sendOpWithResp sends op to the agent at index idx over its transport
// stream and returns the agent's response. An unsuccessful response is
// converted into an error carrying its status. For secure members being
// (re)started, the TLS client assets returned by the agent are persisted
// under the tester data dir and the member record is replaced with the
// agent's updated copy (which carries the on-disk paths).
func (clus *Cluster) sendOpWithResp(idx int, op rpcpb.Operation) (*rpcpb.Response, error) {
	// maintain the initial member object
	// throughout the test time
	clus.agentRequests[idx] = &rpcpb.Request{
		Operation: op,
		Member:    clus.Members[idx],
		Tester:    clus.Tester,
	}

	err := clus.agentStreams[idx].Send(clus.agentRequests[idx])
	clus.lg.Info(
		"sent request",
		zap.String("operation", op.String()),
		zap.String("to", clus.Members[idx].EtcdClientEndpoint),
		zap.Error(err),
	)
	if err != nil {
		return nil, err
	}

	resp, err := clus.agentStreams[idx].Recv()
	if resp != nil {
		clus.lg.Info(
			"received response",
			zap.String("operation", op.String()),
			zap.String("from", clus.Members[idx].EtcdClientEndpoint),
			zap.Bool("success", resp.Success),
			zap.String("status", resp.Status),
			zap.Error(err),
		)
	} else {
		clus.lg.Info(
			"received empty response",
			zap.String("operation", op.String()),
			zap.String("from", clus.Members[idx].EtcdClientEndpoint),
			zap.Error(err),
		)
	}
	if err != nil {
		return nil, err
	}

	if !resp.Success {
		return nil, errors.New(resp.Status)
	}

	// secure if any advertised client URL is https
	m, secure := clus.Members[idx], false
	for _, cu := range m.Etcd.AdvertiseClientURLs {
		u, perr := url.Parse(cu)
		if perr != nil {
			return nil, perr
		}
		if u.Scheme == "https" { // TODO: handle unix
			secure = true
		}
	}

	// store TLS assets from agents/servers onto disk
	if secure && (op == rpcpb.Operation_INITIAL_START_ETCD || op == rpcpb.Operation_RESTART_ETCD) {
		dirClient := filepath.Join(
			clus.Tester.DataDir,
			clus.Members[idx].Etcd.Name,
			"fixtures",
			"client",
		)
		if err = fileutil.TouchDirAll(clus.lg, dirClient); err != nil {
			return nil, err
		}

		clientCertData := []byte(resp.Member.ClientCertData)
		if len(clientCertData) == 0 {
			return nil, fmt.Errorf("got empty client cert from %q", m.EtcdClientEndpoint)
		}
		clientCertPath := filepath.Join(dirClient, "cert.pem")
		if err = os.WriteFile(clientCertPath, clientCertData, 0644); err != nil { // overwrite if exists
			return nil, err
		}
		resp.Member.ClientCertPath = clientCertPath
		clus.lg.Info(
			"saved client cert file",
			zap.String("path", clientCertPath),
		)

		clientKeyData := []byte(resp.Member.ClientKeyData)
		if len(clientKeyData) == 0 {
			return nil, fmt.Errorf("got empty client key from %q", m.EtcdClientEndpoint)
		}
		clientKeyPath := filepath.Join(dirClient, "key.pem")
		if err = os.WriteFile(clientKeyPath, clientKeyData, 0644); err != nil { // overwrite if exists
			return nil, err
		}
		resp.Member.ClientKeyPath = clientKeyPath
		clus.lg.Info(
			"saved client key file",
			zap.String("path", clientKeyPath),
		)

		clientTrustedCAData := []byte(resp.Member.ClientTrustedCAData)
		if len(clientTrustedCAData) != 0 {
			// TODO: disable this when auto TLS is deprecated
			clientTrustedCAPath := filepath.Join(dirClient, "ca.pem")
			if err = os.WriteFile(clientTrustedCAPath, clientTrustedCAData, 0644); err != nil { // overwrite if exists
				return nil, err
			}
			resp.Member.ClientTrustedCAPath = clientTrustedCAPath
			clus.lg.Info(
				"saved client trusted CA file",
				zap.String("path", clientTrustedCAPath),
			)
		}

		// no need to store peer certs for tester clients

		clus.Members[idx] = resp.Member
	}

	return resp, nil
}
// Send_SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT terminates all tester connections to agents and etcd servers.
// Failures are logged rather than returned because this is final-teardown
// cleanup; the tester HTTP server is shut down with a 3-second grace period.
func (clus *Cluster) Send_SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT() {
	err := clus.broadcast(rpcpb.Operation_SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT)
	if err != nil {
		clus.lg.Warn("destroying etcd/agents FAIL", zap.Error(err))
	} else {
		clus.lg.Info("destroying etcd/agents PASS")
	}

	for i, conn := range clus.agentConns {
		err := conn.Close()
		clus.lg.Info("closed connection to agent", zap.String("agent-address", clus.Members[i].AgentAddr), zap.Error(err))
	}

	if clus.testerHTTPServer != nil {
		ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
		err := clus.testerHTTPServer.Shutdown(ctx)
		cancel()
		clus.lg.Info("closed tester HTTP server", zap.String("tester-address", clus.Tester.Addr), zap.Error(err))
	}
}
// WaitHealth ensures all members are healthy
// by writing a test key to etcd cluster.
// It retries once per second for up to 60 attempts and returns the last
// write error if the cluster never becomes fully healthy.
func (clus *Cluster) WaitHealth() error {
	var err error
	// wait 60s to check cluster health.
	// TODO: set it to a reasonable value. It is set that high because
	// follower may use long time to catch up the leader when reboot under
	// reasonable workload (https://github.com/etcd-io/etcd/issues/2698)
	for i := 0; i < 60; i++ {
		for _, m := range clus.Members {
			if err = m.WriteHealthKey(); err != nil {
				clus.lg.Warn(
					"health check FAIL",
					zap.Int("retries", i),
					zap.String("endpoint", m.EtcdClientEndpoint),
					zap.Error(err),
				)
				// one unhealthy member fails the whole attempt
				break
			}
			clus.lg.Info(
				"health check PASS",
				zap.Int("retries", i),
				zap.String("endpoint", m.EtcdClientEndpoint),
			)
		}
		if err == nil {
			clus.lg.Info("health check ALL PASS")
			return nil
		}
		time.Sleep(time.Second)
	}
	return err
}
// GetLeader returns the index of leader and error if any.
func (clus *Cluster) GetLeader() (int, error) {
for i, m := range clus.Members {
isLeader, err := m.IsLeader()
if isLeader || err != nil {
return i, err
}
}
return 0, fmt.Errorf("no leader found")
}
// maxRev returns the maximum revision found on the cluster.
func (clus *Cluster) maxRev() (rev int64, err error) {
ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
defer cancel()
revc, errc := make(chan int64, len(clus.Members)), make(chan error, len(clus.Members))
for i := range clus.Members {
go func(m *rpcpb.Member) {
mrev, merr := m.Rev(ctx)
revc <- mrev
errc <- merr
}(clus.Members[i])
}
for i := 0; i < len(clus.Members); i++ {
if merr := <-errc; merr != nil {
err = merr
}
if mrev := <-revc; mrev > rev {
rev = mrev
}
}
return rev, err
}
func (clus *Cluster) getRevisionHash() (map[string]int64, map[string]int64, error) {
revs := make(map[string]int64)
hashes := make(map[string]int64)
for _, m := range clus.Members {
rev, hash, err := m.RevHash()
if err != nil {
return nil, nil, err
}
revs[m.EtcdClientEndpoint] = rev
hashes[m.EtcdClientEndpoint] = hash
}
return revs, hashes, nil
}
func (clus *Cluster) compactKV(rev int64, timeout time.Duration) (err error) {
if rev <= 0 {
return nil
}
for i, m := range clus.Members {
clus.lg.Info(
"compact START",
zap.String("endpoint", m.EtcdClientEndpoint),
zap.Int64("compact-revision", rev),
zap.Duration("timeout", timeout),
)
now := time.Now()
cerr := m.Compact(rev, timeout)
succeed := true
if cerr != nil {
if strings.Contains(cerr.Error(), "required revision has been compacted") && i > 0 {
clus.lg.Info(
"compact error is ignored",
zap.String("endpoint", m.EtcdClientEndpoint),
zap.Int64("compact-revision", rev),
zap.String("expected-error-msg", cerr.Error()),
)
} else {
clus.lg.Warn(
"compact FAIL",
zap.String("endpoint", m.EtcdClientEndpoint),
zap.Int64("compact-revision", rev),
zap.Error(cerr),
)
err = cerr
succeed = false
}
}
if succeed {
clus.lg.Info(
"compact PASS",
zap.String("endpoint", m.EtcdClientEndpoint),
zap.Int64("compact-revision", rev),
zap.Duration("timeout", timeout),
zap.Duration("took", time.Since(now)),
)
}
}
return err
}
func (clus *Cluster) checkCompact(rev int64) error {
if rev == 0 {
return nil
}
for _, m := range clus.Members {
if err := m.CheckCompact(rev); err != nil {
return err
}
}
return nil
}
func (clus *Cluster) defrag() error {
for _, m := range clus.Members {
if err := m.Defrag(); err != nil {
clus.lg.Warn(
"defrag FAIL",
zap.String("endpoint", m.EtcdClientEndpoint),
zap.Error(err),
)
return err
}
clus.lg.Info(
"defrag PASS",
zap.String("endpoint", m.EtcdClientEndpoint),
)
}
clus.lg.Info(
"defrag ALL PASS",
zap.Int("round", clus.rd),
zap.Int("case", clus.cs),
zap.Int("case-total", len(clus.cases)),
)
return nil
}
// GetCaseDelayDuration computes failure delay duration.
func (clus *Cluster) GetCaseDelayDuration() time.Duration {
return time.Duration(clus.Tester.CaseDelayMs) * time.Millisecond
}
// Report reports the number of modified keys.
func (clus *Cluster) Report() int64 {
return clus.stresser.ModifiedKeys()
}
| tests/functional/tester/cluster.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.0008959386614151299,
0.0001793885021470487,
0.00016379421867895871,
0.0001705024333205074,
0.00008171318040695041
] |
{
"id": 10,
"code_window": [
"func TestCtlV3MakeMirror(t *testing.T) { testCtl(t, makeMirrorTest) }\n",
"func TestCtlV3MakeMirrorModifyDestPrefix(t *testing.T) { testCtl(t, makeMirrorModifyDestPrefixTest) }\n",
"func TestCtlV3MakeMirrorNoDestPrefix(t *testing.T) { testCtl(t, makeMirrorNoDestPrefixTest) }\n",
"\n",
"func makeMirrorTest(cx ctlCtx) {\n",
"\tvar (\n",
"\t\tflags = []string{}\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func TestCtlV3MakeMirrorWithWatchRev(t *testing.T) { testCtl(t, makeMirrorWithWatchRev) }\n"
],
"file_path": "tests/e2e/ctl_v3_make_mirror_test.go",
"type": "add",
"edit_start_line_idx": 27
} | // Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mvcc
import (
"context"
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/storage/backend"
)
type RangeOptions struct {
Limit int64
Rev int64
Count bool
}
type RangeResult struct {
KVs []mvccpb.KeyValue
Rev int64
Count int
}
type ReadView interface {
// FirstRev returns the first KV revision at the time of opening the txn.
// After a compaction, the first revision increases to the compaction
// revision.
FirstRev() int64
// Rev returns the revision of the KV at the time of opening the txn.
Rev() int64
// Range gets the keys in the range at rangeRev.
// The returned rev is the current revision of the KV when the operation is executed.
// If rangeRev <=0, range gets the keys at currentRev.
// If `end` is nil, the request returns the key.
// If `end` is not nil and not empty, it gets the keys in range [key, range_end).
// If `end` is not nil and empty, it gets the keys greater than or equal to key.
// Limit limits the number of keys returned.
// If the required rev is compacted, ErrCompacted will be returned.
Range(ctx context.Context, key, end []byte, ro RangeOptions) (r *RangeResult, err error)
}
// TxnRead represents a read-only transaction with operations that will not
// block other read transactions.
type TxnRead interface {
ReadView
// End marks the transaction is complete and ready to commit.
End()
}
type WriteView interface {
// DeleteRange deletes the given range from the store.
// A deleteRange increases the rev of the store if any key in the range exists.
// The number of key deleted will be returned.
// The returned rev is the current revision of the KV when the operation is executed.
// It also generates one event for each key delete in the event history.
// if the `end` is nil, deleteRange deletes the key.
// if the `end` is not nil, deleteRange deletes the keys in range [key, range_end).
DeleteRange(key, end []byte) (n, rev int64)
// Put puts the given key, value into the store. Put also takes additional argument lease to
// attach a lease to a key-value pair as meta-data. KV implementation does not validate the lease
// id.
// A put also increases the rev of the store, and generates one event in the event history.
// The returned rev is the current revision of the KV when the operation is executed.
Put(key, value []byte, lease lease.LeaseID) (rev int64)
}
// TxnWrite represents a transaction that can modify the store.
type TxnWrite interface {
TxnRead
WriteView
// Changes gets the changes made since opening the write txn.
Changes() []mvccpb.KeyValue
}
// txnReadWrite coerces a read txn to a write, panicking on any write operation.
type txnReadWrite struct{ TxnRead }
func (trw *txnReadWrite) DeleteRange(key, end []byte) (n, rev int64) { panic("unexpected DeleteRange") }
func (trw *txnReadWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) {
panic("unexpected Put")
}
func (trw *txnReadWrite) Changes() []mvccpb.KeyValue { return nil }
func NewReadOnlyTxnWrite(txn TxnRead) TxnWrite { return &txnReadWrite{txn} }
type ReadTxMode uint32
const (
// Use ConcurrentReadTx and the txReadBuffer is copied
ConcurrentReadTxMode = ReadTxMode(1)
// Use backend ReadTx and txReadBuffer is not copied
SharedBufReadTxMode = ReadTxMode(2)
)
type KV interface {
ReadView
WriteView
// Read creates a read transaction.
Read(mode ReadTxMode, trace *traceutil.Trace) TxnRead
// Write creates a write transaction.
Write(trace *traceutil.Trace) TxnWrite
// Hash computes the hash of the KV's backend.
Hash() (hash uint32, revision int64, err error)
// HashByRev computes the hash of all MVCC revisions up to a given revision.
HashByRev(rev int64) (hash uint32, revision int64, compactRev int64, err error)
// Compact frees all superseded keys with revisions less than rev.
Compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, error)
// Commit commits outstanding txns into the underlying backend.
Commit()
// Restore restores the KV store from a backend.
Restore(b backend.Backend) error
Close() error
}
// WatchableKV is a KV that can be watched.
type WatchableKV interface {
KV
Watchable
}
// Watchable is the interface that wraps the NewWatchStream function.
type Watchable interface {
// NewWatchStream returns a WatchStream that can be used to
// watch events happened or happening on the KV.
NewWatchStream() WatchStream
}
| server/storage/mvcc/kv.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.0001866707461886108,
0.00016863230848684907,
0.00015891992370598018,
0.00016723595035728067,
0.0000066105994847021066
] |
{
"id": 11,
"code_window": [
"\n",
"\ttestMirrorCommand(cx, flags, kvs, kvs2, srcprefix, destprefix)\n",
"}\n",
"\n",
"func testMirrorCommand(cx ctlCtx, flags []string, sourcekvs []kv, destkvs []kvExec, srcprefix, destprefix string) {\n",
"\t// set up another cluster to mirror with\n",
"\tmirrorcfg := e2e.NewConfigAutoTLS()\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"func makeMirrorWithWatchRev(cx ctlCtx) {\n",
"\tvar (\n",
"\t\tflags = []string{\"--prefix\", \"o_\", \"--no-dest-prefix\", \"--rev\", \"4\"}\n",
"\t\tkvs = []kv{{\"o_key1\", \"val1\"}, {\"o_key2\", \"val2\"}, {\"o_key3\", \"val3\"}, {\"o_key4\", \"val4\"}}\n",
"\t\tkvs2 = []kvExec{{key: \"key3\", val: \"val3\"}, {key: \"key4\", val: \"val4\"}}\n",
"\t\tsrcprefix = \"o_\"\n",
"\t\tdestprefix = \"key\"\n",
"\t)\n",
"\n",
"\ttestMirrorCommand(cx, flags, kvs, kvs2, srcprefix, destprefix)\n",
"}\n",
"\n"
],
"file_path": "tests/e2e/ctl_v3_make_mirror_test.go",
"type": "add",
"edit_start_line_idx": 61
} | // Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package command
import (
"context"
"errors"
"fmt"
"strings"
"sync/atomic"
"time"
"github.com/bgentry/speakeasy"
"go.etcd.io/etcd/pkg/v3/cobrautl"
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/mirror"
"github.com/spf13/cobra"
)
var (
mminsecureTr bool
mmcert string
mmkey string
mmcacert string
mmprefix string
mmdestprefix string
mmuser string
mmpassword string
mmnodestprefix bool
)
// NewMakeMirrorCommand returns the cobra command for "makeMirror".
func NewMakeMirrorCommand() *cobra.Command {
c := &cobra.Command{
Use: "make-mirror [options] <destination>",
Short: "Makes a mirror at the destination etcd cluster",
Run: makeMirrorCommandFunc,
}
c.Flags().StringVar(&mmprefix, "prefix", "", "Key-value prefix to mirror")
c.Flags().StringVar(&mmdestprefix, "dest-prefix", "", "destination prefix to mirror a prefix to a different prefix in the destination cluster")
c.Flags().BoolVar(&mmnodestprefix, "no-dest-prefix", false, "mirror key-values to the root of the destination cluster")
c.Flags().StringVar(&mmcert, "dest-cert", "", "Identify secure client using this TLS certificate file for the destination cluster")
c.Flags().StringVar(&mmkey, "dest-key", "", "Identify secure client using this TLS key file")
c.Flags().StringVar(&mmcacert, "dest-cacert", "", "Verify certificates of TLS enabled secure servers using this CA bundle")
// TODO: secure by default when etcd enables secure gRPC by default.
c.Flags().BoolVar(&mminsecureTr, "dest-insecure-transport", true, "Disable transport security for client connections")
c.Flags().StringVar(&mmuser, "dest-user", "", "Destination username[:password] for authentication (prompt if password is not supplied)")
c.Flags().StringVar(&mmpassword, "dest-password", "", "Destination password for authentication (if this option is used, --user option shouldn't include password)")
return c
}
func authDestCfg() *authCfg {
if mmuser == "" {
return nil
}
var cfg authCfg
if mmpassword == "" {
splitted := strings.SplitN(mmuser, ":", 2)
if len(splitted) < 2 {
var err error
cfg.username = mmuser
cfg.password, err = speakeasy.Ask("Destination Password: ")
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
} else {
cfg.username = splitted[0]
cfg.password = splitted[1]
}
} else {
cfg.username = mmuser
cfg.password = mmpassword
}
return &cfg
}
func makeMirrorCommandFunc(cmd *cobra.Command, args []string) {
if len(args) != 1 {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("make-mirror takes one destination argument"))
}
dialTimeout := dialTimeoutFromCmd(cmd)
keepAliveTime := keepAliveTimeFromCmd(cmd)
keepAliveTimeout := keepAliveTimeoutFromCmd(cmd)
sec := &secureCfg{
cert: mmcert,
key: mmkey,
cacert: mmcacert,
insecureTransport: mminsecureTr,
}
auth := authDestCfg()
cc := &clientConfig{
endpoints: []string{args[0]},
dialTimeout: dialTimeout,
keepAliveTime: keepAliveTime,
keepAliveTimeout: keepAliveTimeout,
scfg: sec,
acfg: auth,
}
dc := cc.mustClient()
c := mustClientFromCmd(cmd)
err := makeMirror(context.TODO(), c, dc)
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
func makeMirror(ctx context.Context, c *clientv3.Client, dc *clientv3.Client) error {
total := int64(0)
// if destination prefix is specified and remove destination prefix is true return error
if mmnodestprefix && len(mmdestprefix) > 0 {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("`--dest-prefix` and `--no-dest-prefix` cannot be set at the same time, choose one"))
}
go func() {
for {
time.Sleep(30 * time.Second)
fmt.Println(atomic.LoadInt64(&total))
}
}()
s := mirror.NewSyncer(c, mmprefix, 0)
rc, errc := s.SyncBase(ctx)
// if remove destination prefix is false and destination prefix is empty set the value of destination prefix same as prefix
if !mmnodestprefix && len(mmdestprefix) == 0 {
mmdestprefix = mmprefix
}
for r := range rc {
for _, kv := range r.Kvs {
_, err := dc.Put(ctx, modifyPrefix(string(kv.Key)), string(kv.Value))
if err != nil {
return err
}
atomic.AddInt64(&total, 1)
}
}
err := <-errc
if err != nil {
return err
}
wc := s.SyncUpdates(ctx)
for wr := range wc {
if wr.CompactRevision != 0 {
return rpctypes.ErrCompacted
}
var lastRev int64
ops := []clientv3.Op{}
for _, ev := range wr.Events {
nextRev := ev.Kv.ModRevision
if lastRev != 0 && nextRev > lastRev {
_, err := dc.Txn(ctx).Then(ops...).Commit()
if err != nil {
return err
}
ops = []clientv3.Op{}
}
lastRev = nextRev
switch ev.Type {
case mvccpb.PUT:
ops = append(ops, clientv3.OpPut(modifyPrefix(string(ev.Kv.Key)), string(ev.Kv.Value)))
atomic.AddInt64(&total, 1)
case mvccpb.DELETE:
ops = append(ops, clientv3.OpDelete(modifyPrefix(string(ev.Kv.Key))))
atomic.AddInt64(&total, 1)
default:
panic("unexpected event type")
}
}
if len(ops) != 0 {
_, err := dc.Txn(ctx).Then(ops...).Commit()
if err != nil {
return err
}
}
}
return nil
}
func modifyPrefix(key string) string {
return strings.Replace(key, mmprefix, mmdestprefix, 1)
}
| etcdctl/ctlv3/command/make_mirror_command.go | 1 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.07556100189685822,
0.006273801438510418,
0.00016591581515967846,
0.00017038198711816221,
0.01721971109509468
] |
{
"id": 11,
"code_window": [
"\n",
"\ttestMirrorCommand(cx, flags, kvs, kvs2, srcprefix, destprefix)\n",
"}\n",
"\n",
"func testMirrorCommand(cx ctlCtx, flags []string, sourcekvs []kv, destkvs []kvExec, srcprefix, destprefix string) {\n",
"\t// set up another cluster to mirror with\n",
"\tmirrorcfg := e2e.NewConfigAutoTLS()\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"func makeMirrorWithWatchRev(cx ctlCtx) {\n",
"\tvar (\n",
"\t\tflags = []string{\"--prefix\", \"o_\", \"--no-dest-prefix\", \"--rev\", \"4\"}\n",
"\t\tkvs = []kv{{\"o_key1\", \"val1\"}, {\"o_key2\", \"val2\"}, {\"o_key3\", \"val3\"}, {\"o_key4\", \"val4\"}}\n",
"\t\tkvs2 = []kvExec{{key: \"key3\", val: \"val3\"}, {key: \"key4\", val: \"val4\"}}\n",
"\t\tsrcprefix = \"o_\"\n",
"\t\tdestprefix = \"key\"\n",
"\t)\n",
"\n",
"\ttestMirrorCommand(cx, flags, kvs, kvs2, srcprefix, destprefix)\n",
"}\n",
"\n"
],
"file_path": "tests/e2e/ctl_v3_make_mirror_test.go",
"type": "add",
"edit_start_line_idx": 61
} | Snapshot:
empty
Start dumping log entries from snapshot.
WAL metadata:
nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0
WAL entries:
lastIndex=34
term index type data
8 14 norm ID:9 compaction:<physical:true >
Entry types (IRRCompaction) count is : 1
| tools/etcd-dump-logs/expectedoutput/listIRRCompaction.output | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.00017235777340829372,
0.00016864050121512264,
0.00016492322902195156,
0.00016864050121512264,
0.000003717272193171084
] |
{
"id": 11,
"code_window": [
"\n",
"\ttestMirrorCommand(cx, flags, kvs, kvs2, srcprefix, destprefix)\n",
"}\n",
"\n",
"func testMirrorCommand(cx ctlCtx, flags []string, sourcekvs []kv, destkvs []kvExec, srcprefix, destprefix string) {\n",
"\t// set up another cluster to mirror with\n",
"\tmirrorcfg := e2e.NewConfigAutoTLS()\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"func makeMirrorWithWatchRev(cx ctlCtx) {\n",
"\tvar (\n",
"\t\tflags = []string{\"--prefix\", \"o_\", \"--no-dest-prefix\", \"--rev\", \"4\"}\n",
"\t\tkvs = []kv{{\"o_key1\", \"val1\"}, {\"o_key2\", \"val2\"}, {\"o_key3\", \"val3\"}, {\"o_key4\", \"val4\"}}\n",
"\t\tkvs2 = []kvExec{{key: \"key3\", val: \"val3\"}, {key: \"key4\", val: \"val4\"}}\n",
"\t\tsrcprefix = \"o_\"\n",
"\t\tdestprefix = \"key\"\n",
"\t)\n",
"\n",
"\ttestMirrorCommand(cx, flags, kvs, kvs2, srcprefix, destprefix)\n",
"}\n",
"\n"
],
"file_path": "tests/e2e/ctl_v3_make_mirror_test.go",
"type": "add",
"edit_start_line_idx": 61
} | // Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wal
import "github.com/prometheus/client_golang/prometheus"
var (
walFsyncSec = prometheus.NewHistogram(prometheus.HistogramOpts{
Namespace: "etcd",
Subsystem: "disk",
Name: "wal_fsync_duration_seconds",
Help: "The latency distributions of fsync called by WAL.",
// lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
// highest bucket start of 0.001 sec * 2^13 == 8.192 sec
Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
})
walWriteBytes = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "etcd",
Subsystem: "disk",
Name: "wal_write_bytes_total",
Help: "Total number of bytes written in WAL.",
})
)
func init() {
prometheus.MustRegister(walFsyncSec)
prometheus.MustRegister(walWriteBytes)
}
| server/storage/wal/metrics.go | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.00017494088388048112,
0.00017088730237446725,
0.00016645585128571838,
0.00017110233602579683,
0.0000027973719625151716
] |
{
"id": 11,
"code_window": [
"\n",
"\ttestMirrorCommand(cx, flags, kvs, kvs2, srcprefix, destprefix)\n",
"}\n",
"\n",
"func testMirrorCommand(cx ctlCtx, flags []string, sourcekvs []kv, destkvs []kvExec, srcprefix, destprefix string) {\n",
"\t// set up another cluster to mirror with\n",
"\tmirrorcfg := e2e.NewConfigAutoTLS()\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"func makeMirrorWithWatchRev(cx ctlCtx) {\n",
"\tvar (\n",
"\t\tflags = []string{\"--prefix\", \"o_\", \"--no-dest-prefix\", \"--rev\", \"4\"}\n",
"\t\tkvs = []kv{{\"o_key1\", \"val1\"}, {\"o_key2\", \"val2\"}, {\"o_key3\", \"val3\"}, {\"o_key4\", \"val4\"}}\n",
"\t\tkvs2 = []kvExec{{key: \"key3\", val: \"val3\"}, {key: \"key4\", val: \"val4\"}}\n",
"\t\tsrcprefix = \"o_\"\n",
"\t\tdestprefix = \"key\"\n",
"\t)\n",
"\n",
"\ttestMirrorCommand(cx, flags, kvs, kvs2, srcprefix, destprefix)\n",
"}\n",
"\n"
],
"file_path": "tests/e2e/ctl_v3_make_mirror_test.go",
"type": "add",
"edit_start_line_idx": 61
} | # Prometheus Monitoring Mixin for etcd
> NOTE: This project is *alpha* stage. Flags, configuration, behaviour and design may change significantly in following releases.
A set of customisable Prometheus alerts for etcd.
Instructions for use are the same as the [kubernetes-mixin](https://github.com/kubernetes-monitoring/kubernetes-mixin).
## Background
* For more information about monitoring mixins, see this [design doc](https://docs.google.com/document/d/1A9xvzwqnFVSOZ5fD3blKODXfsat5fg6ZhnKu9LK3lB4/edit#).
## Testing alerts
Make sure to have [jsonnet](https://jsonnet.org/) and [gojsontoyaml](https://github.com/brancz/gojsontoyaml) installed.
First compile the mixin to a YAML file, which the promtool will read:
```
jsonnet -e '(import "mixin.libsonnet").prometheusAlerts' | gojsontoyaml > mixin.yaml
```
Then run the unit test:
```
promtool test rules test.yaml
```
| contrib/mixin/README.md | 0 | https://github.com/etcd-io/etcd/commit/661e0a91ef115fc5ebcb3bfc717161082f924525 | [
0.0001712010707706213,
0.00016861158655956388,
0.0001667393953539431,
0.00016789429355412722,
0.000001890770363388583
] |
{
"id": 0,
"code_window": [
"\t\t\t\tRequestHeaderAllowedNames: s.Authentication.RequestHeader.AllowedNames,\n",
"\t\t\t},\n",
"\n",
"\t\t\tAPIResourceConfigSource: storageFactory.APIResourceConfigSource,\n",
"\t\t\tStorageFactory: storageFactory,\n",
"\t\t\tEnableCoreControllers: true,\n",
"\t\t\tEventTTL: s.EventTTL,\n",
"\t\t\tKubeletClientConfig: s.KubeletConfig,\n",
"\t\t\tEnableLogsSupport: s.EnableLogsHandler,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/kube-apiserver/app/server.go",
"type": "replace",
"edit_start_line_idx": 359
} | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"net"
"net/http"
"net/http/httptest"
"path"
"time"
"github.com/go-openapi/spec"
"github.com/golang/glog"
"github.com/pborman/uuid"
apps "k8s.io/api/apps/v1beta1"
autoscaling "k8s.io/api/autoscaling/v1"
certificates "k8s.io/api/certificates/v1beta1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
rbac "k8s.io/api/rbac/v1alpha1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
authauthenticator "k8s.io/apiserver/pkg/authentication/authenticator"
"k8s.io/apiserver/pkg/authentication/authenticatorfactory"
authenticatorunion "k8s.io/apiserver/pkg/authentication/request/union"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/apiserver/pkg/authorization/authorizerfactory"
authorizerunion "k8s.io/apiserver/pkg/authorization/union"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/options"
serverstorage "k8s.io/apiserver/pkg/server/storage"
"k8s.io/apiserver/pkg/storage/storagebackend"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/apis/batch"
policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
"k8s.io/kubernetes/pkg/generated/openapi"
kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
"k8s.io/kubernetes/pkg/master"
"k8s.io/kubernetes/pkg/version"
)
// Config is a struct of configuration directives for NewMasterComponents.
type Config struct {
// If nil, a default is used, partially filled configs will not get populated.
MasterConfig *master.Config
StartReplicationManager bool
// Client throttling qps
QPS float32
// Client burst qps, also burst replicas allowed in rc manager
Burst int
// TODO: Add configs for endpoints controller, scheduler etc
}
// alwaysAllow always allows an action
type alwaysAllow struct{}
func (alwaysAllow) Authorize(requestAttributes authorizer.Attributes) (authorizer.Decision, string, error) {
return authorizer.DecisionAllow, "always allow", nil
}
// alwaysEmpty simulates "no authentication" for old tests
func alwaysEmpty(req *http.Request) (user.Info, bool, error) {
return &user.DefaultInfo{
Name: "",
}, true, nil
}
// MasterReceiver can be used to provide the master to a custom incoming server function
type MasterReceiver interface {
SetMaster(m *master.Master)
}
// MasterHolder implements
type MasterHolder struct {
Initialized chan struct{}
M *master.Master
}
func (h *MasterHolder) SetMaster(m *master.Master) {
h.M = m
close(h.Initialized)
}
// startMasterOrDie starts a kubernetes master and an httpserver to handle api requests
func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Server, masterReceiver MasterReceiver) (*master.Master, *httptest.Server, CloseFunc) {
var m *master.Master
var s *httptest.Server
if incomingServer != nil {
s = incomingServer
} else {
s = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
m.GenericAPIServer.Handler.ServeHTTP(w, req)
}))
}
stopCh := make(chan struct{})
closeFn := func() {
close(stopCh)
s.Close()
}
if masterConfig == nil {
masterConfig = NewMasterConfig()
masterConfig.GenericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(openapi.GetOpenAPIDefinitions, legacyscheme.Scheme)
masterConfig.GenericConfig.OpenAPIConfig.Info = &spec.Info{
InfoProps: spec.InfoProps{
Title: "Kubernetes",
Version: "unversioned",
},
}
masterConfig.GenericConfig.OpenAPIConfig.DefaultResponse = &spec.Response{
ResponseProps: spec.ResponseProps{
Description: "Default Response.",
},
}
masterConfig.GenericConfig.OpenAPIConfig.GetDefinitions = openapi.GetOpenAPIDefinitions
masterConfig.GenericConfig.SwaggerConfig = genericapiserver.DefaultSwaggerConfig()
}
// set the loopback client config
if masterConfig.GenericConfig.LoopbackClientConfig == nil {
masterConfig.GenericConfig.LoopbackClientConfig = &restclient.Config{QPS: 50, Burst: 100, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs}}
}
masterConfig.GenericConfig.LoopbackClientConfig.Host = s.URL
privilegedLoopbackToken := uuid.NewRandom().String()
// wrap any available authorizer
tokens := make(map[string]*user.DefaultInfo)
tokens[privilegedLoopbackToken] = &user.DefaultInfo{
Name: user.APIServerUser,
UID: uuid.NewRandom().String(),
Groups: []string{user.SystemPrivilegedGroup},
}
tokenAuthenticator := authenticatorfactory.NewFromTokens(tokens)
if masterConfig.GenericConfig.Authentication.Authenticator == nil {
masterConfig.GenericConfig.Authentication.Authenticator = authenticatorunion.New(tokenAuthenticator, authauthenticator.RequestFunc(alwaysEmpty))
} else {
masterConfig.GenericConfig.Authentication.Authenticator = authenticatorunion.New(tokenAuthenticator, masterConfig.GenericConfig.Authentication.Authenticator)
}
if masterConfig.GenericConfig.Authorization.Authorizer != nil {
tokenAuthorizer := authorizerfactory.NewPrivilegedGroups(user.SystemPrivilegedGroup)
masterConfig.GenericConfig.Authorization.Authorizer = authorizerunion.New(tokenAuthorizer, masterConfig.GenericConfig.Authorization.Authorizer)
} else {
masterConfig.GenericConfig.Authorization.Authorizer = alwaysAllow{}
}
masterConfig.GenericConfig.LoopbackClientConfig.BearerToken = privilegedLoopbackToken
clientset, err := clientset.NewForConfig(masterConfig.GenericConfig.LoopbackClientConfig)
if err != nil {
glog.Fatal(err)
}
sharedInformers := informers.NewSharedInformerFactory(clientset, masterConfig.GenericConfig.LoopbackClientConfig.Timeout)
m, err = masterConfig.Complete(sharedInformers).New(genericapiserver.EmptyDelegate)
if err != nil {
closeFn()
glog.Fatalf("error in bringing up the master: %v", err)
}
if masterReceiver != nil {
masterReceiver.SetMaster(m)
}
// TODO have this start method actually use the normal start sequence for the API server
// this method never actually calls the `Run` method for the API server
// fire the post hooks ourselves
m.GenericAPIServer.PrepareRun()
m.GenericAPIServer.RunPostStartHooks(stopCh)
cfg := *masterConfig.GenericConfig.LoopbackClientConfig
cfg.ContentConfig.GroupVersion = &schema.GroupVersion{}
privilegedClient, err := restclient.RESTClientFor(&cfg)
if err != nil {
closeFn()
glog.Fatal(err)
}
err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
result := privilegedClient.Get().AbsPath("/healthz").Do()
status := 0
result.StatusCode(&status)
if status == 200 {
return true, nil
}
return false, nil
})
if err != nil {
closeFn()
glog.Fatal(err)
}
return m, s, closeFn
}
// Returns the master config appropriate for most integration tests.
func NewIntegrationTestMasterConfig() *master.Config {
masterConfig := NewMasterConfig()
masterConfig.ExtraConfig.EnableCoreControllers = true
masterConfig.GenericConfig.PublicAddress = net.ParseIP("192.168.10.4")
masterConfig.ExtraConfig.APIResourceConfigSource = master.DefaultAPIResourceConfigSource()
return masterConfig
}
// Returns a basic master config.
func NewMasterConfig() *master.Config {
// This causes the integration tests to exercise the etcd
// prefix code, so please don't change without ensuring
// sufficient coverage in other ways.
etcdOptions := options.NewEtcdOptions(storagebackend.NewDefaultConfig(uuid.New(), nil))
etcdOptions.StorageConfig.ServerList = []string{GetEtcdURL()}
info, _ := runtime.SerializerInfoForMediaType(legacyscheme.Codecs.SupportedMediaTypes(), runtime.ContentTypeJSON)
ns := NewSingleContentTypeSerializer(legacyscheme.Scheme, info)
resourceEncoding := serverstorage.NewDefaultResourceEncodingConfig(legacyscheme.Registry)
// FIXME (soltysh): this GroupVersionResource override should be configurable
// we need to set both for the whole group and for cronjobs, separately
resourceEncoding.SetVersionEncoding(batch.GroupName, *testapi.Batch.GroupVersion(), schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal})
resourceEncoding.SetResourceEncoding(schema.GroupResource{Group: batch.GroupName, Resource: "cronjobs"}, schema.GroupVersion{Group: batch.GroupName, Version: "v1beta1"}, schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal})
// we also need to set both for the storage group and for volumeattachments, separately
resourceEncoding.SetVersionEncoding(storage.GroupName, *testapi.Storage.GroupVersion(), schema.GroupVersion{Group: storage.GroupName, Version: runtime.APIVersionInternal})
resourceEncoding.SetResourceEncoding(schema.GroupResource{Group: storage.GroupName, Resource: "volumeattachments"}, schema.GroupVersion{Group: storage.GroupName, Version: "v1beta1"}, schema.GroupVersion{Group: storage.GroupName, Version: runtime.APIVersionInternal})
storageFactory := serverstorage.NewDefaultStorageFactory(etcdOptions.StorageConfig, runtime.ContentTypeJSON, ns, resourceEncoding, master.DefaultAPIResourceConfigSource(), nil)
storageFactory.SetSerializer(
schema.GroupResource{Group: v1.GroupName, Resource: serverstorage.AllResources},
"",
ns)
storageFactory.SetSerializer(
schema.GroupResource{Group: autoscaling.GroupName, Resource: serverstorage.AllResources},
"",
ns)
storageFactory.SetSerializer(
schema.GroupResource{Group: batch.GroupName, Resource: serverstorage.AllResources},
"",
ns)
storageFactory.SetSerializer(
schema.GroupResource{Group: apps.GroupName, Resource: serverstorage.AllResources},
"",
ns)
storageFactory.SetSerializer(
schema.GroupResource{Group: extensions.GroupName, Resource: serverstorage.AllResources},
"",
ns)
storageFactory.SetSerializer(
schema.GroupResource{Group: policy.GroupName, Resource: serverstorage.AllResources},
"",
ns)
storageFactory.SetSerializer(
schema.GroupResource{Group: rbac.GroupName, Resource: serverstorage.AllResources},
"",
ns)
storageFactory.SetSerializer(
schema.GroupResource{Group: certificates.GroupName, Resource: serverstorage.AllResources},
"",
ns)
storageFactory.SetSerializer(
schema.GroupResource{Group: storage.GroupName, Resource: serverstorage.AllResources},
"",
ns)
genericConfig := genericapiserver.NewConfig(legacyscheme.Codecs)
kubeVersion := version.Get()
genericConfig.Version = &kubeVersion
genericConfig.Authorization.Authorizer = authorizerfactory.NewAlwaysAllowAuthorizer()
err := etcdOptions.ApplyWithStorageFactoryTo(storageFactory, genericConfig)
if err != nil {
panic(err)
}
return &master.Config{
GenericConfig: genericConfig,
ExtraConfig: master.ExtraConfig{
APIResourceConfigSource: master.DefaultAPIResourceConfigSource(),
StorageFactory: storageFactory,
EnableCoreControllers: true,
KubeletClientConfig: kubeletclient.KubeletClientConfig{Port: 10250},
APIServerServicePort: 443,
MasterCount: 1,
},
}
}
// CloseFunc can be called to cleanup the master
type CloseFunc func()
func RunAMaster(masterConfig *master.Config) (*master.Master, *httptest.Server, CloseFunc) {
if masterConfig == nil {
masterConfig = NewMasterConfig()
masterConfig.GenericConfig.EnableProfiling = true
}
return startMasterOrDie(masterConfig, nil, nil)
}
func RunAMasterUsingServer(masterConfig *master.Config, s *httptest.Server, masterReceiver MasterReceiver) (*master.Master, *httptest.Server, CloseFunc) {
return startMasterOrDie(masterConfig, s, masterReceiver)
}
// SharedEtcd creates a storage config for a shared etcd instance, with a unique prefix.
func SharedEtcd() *storagebackend.Config {
cfg := storagebackend.NewDefaultConfig(path.Join(uuid.New(), "registry"), nil)
cfg.ServerList = []string{GetEtcdURL()}
return cfg
}
| test/integration/framework/master_utils.go | 1 | https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b | [
0.007039761636406183,
0.0005410918965935707,
0.00016375519044231623,
0.0001691649085842073,
0.0013251855270937085
] |
{
"id": 0,
"code_window": [
"\t\t\t\tRequestHeaderAllowedNames: s.Authentication.RequestHeader.AllowedNames,\n",
"\t\t\t},\n",
"\n",
"\t\t\tAPIResourceConfigSource: storageFactory.APIResourceConfigSource,\n",
"\t\t\tStorageFactory: storageFactory,\n",
"\t\t\tEnableCoreControllers: true,\n",
"\t\t\tEventTTL: s.EventTTL,\n",
"\t\t\tKubeletClientConfig: s.KubeletConfig,\n",
"\t\t\tEnableLogsSupport: s.EnableLogsHandler,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/kube-apiserver/app/server.go",
"type": "replace",
"edit_start_line_idx": 359
} | // Copyright © 2014 Steve Francia <[email protected]>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"errors"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"time"
)
type httpDir struct {
basePath string
fs HttpFs
}
func (d httpDir) Open(name string) (http.File, error) {
if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
strings.Contains(name, "\x00") {
return nil, errors.New("http: invalid character in file path")
}
dir := string(d.basePath)
if dir == "" {
dir = "."
}
f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name))))
if err != nil {
return nil, err
}
return f, nil
}
type HttpFs struct {
source Fs
}
func NewHttpFs(source Fs) *HttpFs {
return &HttpFs{source: source}
}
func (h HttpFs) Dir(s string) *httpDir {
return &httpDir{basePath: s, fs: h}
}
func (h HttpFs) Name() string { return "h HttpFs" }
func (h HttpFs) Create(name string) (File, error) {
return h.source.Create(name)
}
func (h HttpFs) Chmod(name string, mode os.FileMode) error {
return h.source.Chmod(name, mode)
}
func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
return h.source.Chtimes(name, atime, mtime)
}
func (h HttpFs) Mkdir(name string, perm os.FileMode) error {
return h.source.Mkdir(name, perm)
}
func (h HttpFs) MkdirAll(path string, perm os.FileMode) error {
return h.source.MkdirAll(path, perm)
}
func (h HttpFs) Open(name string) (http.File, error) {
f, err := h.source.Open(name)
if err == nil {
if httpfile, ok := f.(http.File); ok {
return httpfile, nil
}
}
return nil, err
}
func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
return h.source.OpenFile(name, flag, perm)
}
func (h HttpFs) Remove(name string) error {
return h.source.Remove(name)
}
func (h HttpFs) RemoveAll(path string) error {
return h.source.RemoveAll(path)
}
func (h HttpFs) Rename(oldname, newname string) error {
return h.source.Rename(oldname, newname)
}
func (h HttpFs) Stat(name string) (os.FileInfo, error) {
return h.source.Stat(name)
}
| vendor/github.com/spf13/afero/httpFs.go | 0 | https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b | [
0.0001753518736222759,
0.0001674211089266464,
0.00015869717753957957,
0.00016710074851289392,
0.000004454849658941384
] |
{
"id": 0,
"code_window": [
"\t\t\t\tRequestHeaderAllowedNames: s.Authentication.RequestHeader.AllowedNames,\n",
"\t\t\t},\n",
"\n",
"\t\t\tAPIResourceConfigSource: storageFactory.APIResourceConfigSource,\n",
"\t\t\tStorageFactory: storageFactory,\n",
"\t\t\tEnableCoreControllers: true,\n",
"\t\t\tEventTTL: s.EventTTL,\n",
"\t\t\tKubeletClientConfig: s.KubeletConfig,\n",
"\t\t\tEnableLogsSupport: s.EnableLogsHandler,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/kube-apiserver/app/server.go",
"type": "replace",
"edit_start_line_idx": 359
} | // Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/subtle"
"errors"
"io"
"math/big"
"golang.org/x/crypto/curve25519"
)
const (
kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1"
kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1"
kexAlgoECDH256 = "ecdh-sha2-nistp256"
kexAlgoECDH384 = "ecdh-sha2-nistp384"
kexAlgoECDH521 = "ecdh-sha2-nistp521"
kexAlgoCurve25519SHA256 = "[email protected]"
)
// kexResult captures the outcome of a key exchange.
type kexResult struct {
// Session hash. See also RFC 4253, section 8.
H []byte
// Shared secret. See also RFC 4253, section 8.
K []byte
// Host key as hashed into H.
HostKey []byte
// Signature of H.
Signature []byte
// A cryptographic hash function that matches the security
// level of the key exchange algorithm. It is used for
// calculating H, and for deriving keys from H and K.
Hash crypto.Hash
// The session ID, which is the first H computed. This is used
// to derive key material inside the transport.
SessionID []byte
}
// handshakeMagics contains data that is always included in the
// session hash.
type handshakeMagics struct {
clientVersion, serverVersion []byte
clientKexInit, serverKexInit []byte
}
func (m *handshakeMagics) write(w io.Writer) {
writeString(w, m.clientVersion)
writeString(w, m.serverVersion)
writeString(w, m.clientKexInit)
writeString(w, m.serverKexInit)
}
// kexAlgorithm abstracts different key exchange algorithms.
type kexAlgorithm interface {
// Server runs server-side key agreement, signing the result
// with a hostkey.
Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error)
// Client runs the client-side key agreement. Caller is
// responsible for verifying the host key signature.
Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error)
}
// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement.
type dhGroup struct {
g, p, pMinus1 *big.Int
}
func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) {
if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 {
return nil, errors.New("ssh: DH parameter out of bounds")
}
return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil
}
func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) {
hashFunc := crypto.SHA1
var x *big.Int
for {
var err error
if x, err = rand.Int(randSource, group.pMinus1); err != nil {
return nil, err
}
if x.Sign() > 0 {
break
}
}
X := new(big.Int).Exp(group.g, x, group.p)
kexDHInit := kexDHInitMsg{
X: X,
}
if err := c.writePacket(Marshal(&kexDHInit)); err != nil {
return nil, err
}
packet, err := c.readPacket()
if err != nil {
return nil, err
}
var kexDHReply kexDHReplyMsg
if err = Unmarshal(packet, &kexDHReply); err != nil {
return nil, err
}
ki, err := group.diffieHellman(kexDHReply.Y, x)
if err != nil {
return nil, err
}
h := hashFunc.New()
magics.write(h)
writeString(h, kexDHReply.HostKey)
writeInt(h, X)
writeInt(h, kexDHReply.Y)
K := make([]byte, intLength(ki))
marshalInt(K, ki)
h.Write(K)
return &kexResult{
H: h.Sum(nil),
K: K,
HostKey: kexDHReply.HostKey,
Signature: kexDHReply.Signature,
Hash: crypto.SHA1,
}, nil
}
func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
hashFunc := crypto.SHA1
packet, err := c.readPacket()
if err != nil {
return
}
var kexDHInit kexDHInitMsg
if err = Unmarshal(packet, &kexDHInit); err != nil {
return
}
var y *big.Int
for {
if y, err = rand.Int(randSource, group.pMinus1); err != nil {
return
}
if y.Sign() > 0 {
break
}
}
Y := new(big.Int).Exp(group.g, y, group.p)
ki, err := group.diffieHellman(kexDHInit.X, y)
if err != nil {
return nil, err
}
hostKeyBytes := priv.PublicKey().Marshal()
h := hashFunc.New()
magics.write(h)
writeString(h, hostKeyBytes)
writeInt(h, kexDHInit.X)
writeInt(h, Y)
K := make([]byte, intLength(ki))
marshalInt(K, ki)
h.Write(K)
H := h.Sum(nil)
// H is already a hash, but the hostkey signing will apply its
// own key-specific hash algorithm.
sig, err := signAndMarshal(priv, randSource, H)
if err != nil {
return nil, err
}
kexDHReply := kexDHReplyMsg{
HostKey: hostKeyBytes,
Y: Y,
Signature: sig,
}
packet = Marshal(&kexDHReply)
err = c.writePacket(packet)
return &kexResult{
H: H,
K: K,
HostKey: hostKeyBytes,
Signature: sig,
Hash: crypto.SHA1,
}, nil
}
// ecdh performs Elliptic Curve Diffie-Hellman key exchange as
// described in RFC 5656, section 4.
type ecdh struct {
curve elliptic.Curve
}
func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) {
ephKey, err := ecdsa.GenerateKey(kex.curve, rand)
if err != nil {
return nil, err
}
kexInit := kexECDHInitMsg{
ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y),
}
serialized := Marshal(&kexInit)
if err := c.writePacket(serialized); err != nil {
return nil, err
}
packet, err := c.readPacket()
if err != nil {
return nil, err
}
var reply kexECDHReplyMsg
if err = Unmarshal(packet, &reply); err != nil {
return nil, err
}
x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey)
if err != nil {
return nil, err
}
// generate shared secret
secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes())
h := ecHash(kex.curve).New()
magics.write(h)
writeString(h, reply.HostKey)
writeString(h, kexInit.ClientPubKey)
writeString(h, reply.EphemeralPubKey)
K := make([]byte, intLength(secret))
marshalInt(K, secret)
h.Write(K)
return &kexResult{
H: h.Sum(nil),
K: K,
HostKey: reply.HostKey,
Signature: reply.Signature,
Hash: ecHash(kex.curve),
}, nil
}
// unmarshalECKey parses and checks an EC key.
func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) {
x, y = elliptic.Unmarshal(curve, pubkey)
if x == nil {
return nil, nil, errors.New("ssh: elliptic.Unmarshal failure")
}
if !validateECPublicKey(curve, x, y) {
return nil, nil, errors.New("ssh: public key not on curve")
}
return x, y, nil
}
// validateECPublicKey checks that the point is a valid public key for
// the given curve. See [SEC1], 3.2.2
func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool {
if x.Sign() == 0 && y.Sign() == 0 {
return false
}
if x.Cmp(curve.Params().P) >= 0 {
return false
}
if y.Cmp(curve.Params().P) >= 0 {
return false
}
if !curve.IsOnCurve(x, y) {
return false
}
// We don't check if N * PubKey == 0, since
//
// - the NIST curves have cofactor = 1, so this is implicit.
// (We don't foresee an implementation that supports non NIST
// curves)
//
// - for ephemeral keys, we don't need to worry about small
// subgroup attacks.
return true
}
func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
packet, err := c.readPacket()
if err != nil {
return nil, err
}
var kexECDHInit kexECDHInitMsg
if err = Unmarshal(packet, &kexECDHInit); err != nil {
return nil, err
}
clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey)
if err != nil {
return nil, err
}
// We could cache this key across multiple users/multiple
// connection attempts, but the benefit is small. OpenSSH
// generates a new key for each incoming connection.
ephKey, err := ecdsa.GenerateKey(kex.curve, rand)
if err != nil {
return nil, err
}
hostKeyBytes := priv.PublicKey().Marshal()
serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y)
// generate shared secret
secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes())
h := ecHash(kex.curve).New()
magics.write(h)
writeString(h, hostKeyBytes)
writeString(h, kexECDHInit.ClientPubKey)
writeString(h, serializedEphKey)
K := make([]byte, intLength(secret))
marshalInt(K, secret)
h.Write(K)
H := h.Sum(nil)
// H is already a hash, but the hostkey signing will apply its
// own key-specific hash algorithm.
sig, err := signAndMarshal(priv, rand, H)
if err != nil {
return nil, err
}
reply := kexECDHReplyMsg{
EphemeralPubKey: serializedEphKey,
HostKey: hostKeyBytes,
Signature: sig,
}
serialized := Marshal(&reply)
if err := c.writePacket(serialized); err != nil {
return nil, err
}
return &kexResult{
H: H,
K: K,
HostKey: reply.HostKey,
Signature: sig,
Hash: ecHash(kex.curve),
}, nil
}
var kexAlgoMap = map[string]kexAlgorithm{}
func init() {
// This is the group called diffie-hellman-group1-sha1 in RFC
// 4253 and Oakley Group 2 in RFC 2409.
p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16)
kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{
g: new(big.Int).SetInt64(2),
p: p,
pMinus1: new(big.Int).Sub(p, bigOne),
}
// This is the group called diffie-hellman-group14-sha1 in RFC
// 4253 and Oakley Group 14 in RFC 3526.
p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16)
kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{
g: new(big.Int).SetInt64(2),
p: p,
pMinus1: new(big.Int).Sub(p, bigOne),
}
kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()}
kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()}
kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()}
kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{}
}
// curve25519sha256 implements the [email protected] key
// agreement protocol, as described in
// https://git.libssh.org/projects/libssh.git/tree/doc/[email protected]
type curve25519sha256 struct{}
type curve25519KeyPair struct {
priv [32]byte
pub [32]byte
}
func (kp *curve25519KeyPair) generate(rand io.Reader) error {
if _, err := io.ReadFull(rand, kp.priv[:]); err != nil {
return err
}
curve25519.ScalarBaseMult(&kp.pub, &kp.priv)
return nil
}
// curve25519Zeros is just an array of 32 zero bytes so that we have something
// convenient to compare against in order to reject curve25519 points with the
// wrong order.
var curve25519Zeros [32]byte
func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) {
var kp curve25519KeyPair
if err := kp.generate(rand); err != nil {
return nil, err
}
if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil {
return nil, err
}
packet, err := c.readPacket()
if err != nil {
return nil, err
}
var reply kexECDHReplyMsg
if err = Unmarshal(packet, &reply); err != nil {
return nil, err
}
if len(reply.EphemeralPubKey) != 32 {
return nil, errors.New("ssh: peer's curve25519 public value has wrong length")
}
var servPub, secret [32]byte
copy(servPub[:], reply.EphemeralPubKey)
curve25519.ScalarMult(&secret, &kp.priv, &servPub)
if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
return nil, errors.New("ssh: peer's curve25519 public value has wrong order")
}
h := crypto.SHA256.New()
magics.write(h)
writeString(h, reply.HostKey)
writeString(h, kp.pub[:])
writeString(h, reply.EphemeralPubKey)
ki := new(big.Int).SetBytes(secret[:])
K := make([]byte, intLength(ki))
marshalInt(K, ki)
h.Write(K)
return &kexResult{
H: h.Sum(nil),
K: K,
HostKey: reply.HostKey,
Signature: reply.Signature,
Hash: crypto.SHA256,
}, nil
}
func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
packet, err := c.readPacket()
if err != nil {
return
}
var kexInit kexECDHInitMsg
if err = Unmarshal(packet, &kexInit); err != nil {
return
}
if len(kexInit.ClientPubKey) != 32 {
return nil, errors.New("ssh: peer's curve25519 public value has wrong length")
}
var kp curve25519KeyPair
if err := kp.generate(rand); err != nil {
return nil, err
}
var clientPub, secret [32]byte
copy(clientPub[:], kexInit.ClientPubKey)
curve25519.ScalarMult(&secret, &kp.priv, &clientPub)
if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
return nil, errors.New("ssh: peer's curve25519 public value has wrong order")
}
hostKeyBytes := priv.PublicKey().Marshal()
h := crypto.SHA256.New()
magics.write(h)
writeString(h, hostKeyBytes)
writeString(h, kexInit.ClientPubKey)
writeString(h, kp.pub[:])
ki := new(big.Int).SetBytes(secret[:])
K := make([]byte, intLength(ki))
marshalInt(K, ki)
h.Write(K)
H := h.Sum(nil)
sig, err := signAndMarshal(priv, rand, H)
if err != nil {
return nil, err
}
reply := kexECDHReplyMsg{
EphemeralPubKey: kp.pub[:],
HostKey: hostKeyBytes,
Signature: sig,
}
if err := c.writePacket(Marshal(&reply)); err != nil {
return nil, err
}
return &kexResult{
H: H,
K: K,
HostKey: hostKeyBytes,
Signature: sig,
Hash: crypto.SHA256,
}, nil
}
| vendor/golang.org/x/crypto/ssh/kex.go | 0 | https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b | [
0.0010821950854733586,
0.000189433412742801,
0.00016114380559884012,
0.00017125999147538096,
0.00012337445514276624
] |
{
"id": 0,
"code_window": [
"\t\t\t\tRequestHeaderAllowedNames: s.Authentication.RequestHeader.AllowedNames,\n",
"\t\t\t},\n",
"\n",
"\t\t\tAPIResourceConfigSource: storageFactory.APIResourceConfigSource,\n",
"\t\t\tStorageFactory: storageFactory,\n",
"\t\t\tEnableCoreControllers: true,\n",
"\t\t\tEventTTL: s.EventTTL,\n",
"\t\t\tKubeletClientConfig: s.KubeletConfig,\n",
"\t\t\tEnableLogsSupport: s.EnableLogsHandler,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/kube-apiserver/app/server.go",
"type": "replace",
"edit_start_line_idx": 359
} | package types
type GomegaFailHandler func(message string, callerSkip ...int)
//A simple *testing.T interface wrapper
type GomegaTestingT interface {
Errorf(format string, args ...interface{})
}
//All Gomega matchers must implement the GomegaMatcher interface
//
//For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding_your_own_matchers
type GomegaMatcher interface {
Match(actual interface{}) (success bool, err error)
FailureMessage(actual interface{}) (message string)
NegatedFailureMessage(actual interface{}) (message string)
}
| vendor/github.com/onsi/gomega/types/types.go | 0 | https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b | [
0.00016974481695797294,
0.00016582982789259404,
0.00016191483882721514,
0.00016582982789259404,
0.000003914989065378904
] |
{
"id": 1,
"code_window": [
"\n",
"\tAPIResourceConfigSource serverstorage.APIResourceConfigSource\n",
"\tStorageFactory serverstorage.StorageFactory\n",
"\tEnableCoreControllers bool\n",
"\tEndpointReconcilerConfig EndpointReconcilerConfig\n",
"\tEventTTL time.Duration\n",
"\tKubeletClientConfig kubeletclient.KubeletClientConfig\n",
"\n",
"\t// Used to start and monitor tunneling\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/master/master.go",
"type": "replace",
"edit_start_line_idx": 105
} | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package master
import (
"fmt"
"net"
"net/http"
"reflect"
"strconv"
"time"
admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
appsv1 "k8s.io/api/apps/v1"
appsv1beta1 "k8s.io/api/apps/v1beta1"
appsv1beta2 "k8s.io/api/apps/v1beta2"
authenticationv1 "k8s.io/api/authentication/v1"
authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
authorizationapiv1 "k8s.io/api/authorization/v1"
authorizationapiv1beta1 "k8s.io/api/authorization/v1beta1"
autoscalingapiv1 "k8s.io/api/autoscaling/v1"
autoscalingapiv2beta1 "k8s.io/api/autoscaling/v2beta1"
batchapiv1 "k8s.io/api/batch/v1"
batchapiv1beta1 "k8s.io/api/batch/v1beta1"
certificatesapiv1beta1 "k8s.io/api/certificates/v1beta1"
apiv1 "k8s.io/api/core/v1"
eventsv1beta1 "k8s.io/api/events/v1beta1"
extensionsapiv1beta1 "k8s.io/api/extensions/v1beta1"
networkingapiv1 "k8s.io/api/networking/v1"
policyapiv1beta1 "k8s.io/api/policy/v1beta1"
rbacv1 "k8s.io/api/rbac/v1"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
storageapiv1 "k8s.io/api/storage/v1"
storageapiv1beta1 "k8s.io/api/storage/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apiserver/pkg/endpoints/discovery"
"k8s.io/apiserver/pkg/registry/generic"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/healthz"
serverstorage "k8s.io/apiserver/pkg/server/storage"
storagefactory "k8s.io/apiserver/pkg/storage/storagebackend/factory"
"k8s.io/client-go/informers"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
api "k8s.io/kubernetes/pkg/apis/core"
coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
kubeoptions "k8s.io/kubernetes/pkg/kubeapiserver/options"
kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
"k8s.io/kubernetes/pkg/master/reconcilers"
"k8s.io/kubernetes/pkg/master/tunneler"
"k8s.io/kubernetes/pkg/registry/core/endpoint"
endpointsstorage "k8s.io/kubernetes/pkg/registry/core/endpoint/storage"
"k8s.io/kubernetes/pkg/routes"
"k8s.io/kubernetes/pkg/serviceaccount"
nodeutil "k8s.io/kubernetes/pkg/util/node"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
// RESTStorage installers
admissionregistrationrest "k8s.io/kubernetes/pkg/registry/admissionregistration/rest"
appsrest "k8s.io/kubernetes/pkg/registry/apps/rest"
authenticationrest "k8s.io/kubernetes/pkg/registry/authentication/rest"
authorizationrest "k8s.io/kubernetes/pkg/registry/authorization/rest"
autoscalingrest "k8s.io/kubernetes/pkg/registry/autoscaling/rest"
batchrest "k8s.io/kubernetes/pkg/registry/batch/rest"
certificatesrest "k8s.io/kubernetes/pkg/registry/certificates/rest"
corerest "k8s.io/kubernetes/pkg/registry/core/rest"
eventsrest "k8s.io/kubernetes/pkg/registry/events/rest"
extensionsrest "k8s.io/kubernetes/pkg/registry/extensions/rest"
networkingrest "k8s.io/kubernetes/pkg/registry/networking/rest"
policyrest "k8s.io/kubernetes/pkg/registry/policy/rest"
rbacrest "k8s.io/kubernetes/pkg/registry/rbac/rest"
schedulingrest "k8s.io/kubernetes/pkg/registry/scheduling/rest"
settingsrest "k8s.io/kubernetes/pkg/registry/settings/rest"
storagerest "k8s.io/kubernetes/pkg/registry/storage/rest"
)
const (
// DefaultEndpointReconcilerInterval is the default amount of time for how often the endpoints for
// the kubernetes Service are reconciled.
DefaultEndpointReconcilerInterval = 10 * time.Second
// DefaultEndpointReconcilerTTL is the default TTL timeout for the storage layer
DefaultEndpointReconcilerTTL = 15 * time.Second
)
type ExtraConfig struct {
ClientCARegistrationHook ClientCARegistrationHook
APIResourceConfigSource serverstorage.APIResourceConfigSource
StorageFactory serverstorage.StorageFactory
EnableCoreControllers bool
EndpointReconcilerConfig EndpointReconcilerConfig
EventTTL time.Duration
KubeletClientConfig kubeletclient.KubeletClientConfig
// Used to start and monitor tunneling
Tunneler tunneler.Tunneler
EnableLogsSupport bool
ProxyTransport http.RoundTripper
// Values to build the IP addresses used by discovery
// The range of IPs to be assigned to services with type=ClusterIP or greater
ServiceIPRange net.IPNet
// The IP address for the GenericAPIServer service (must be inside ServiceIPRange)
APIServerServiceIP net.IP
// Port for the apiserver service.
APIServerServicePort int
// TODO, we can probably group service related items into a substruct to make it easier to configure
// the API server items and `Extra*` fields likely fit nicely together.
// The range of ports to be assigned to services with type=NodePort or greater
ServiceNodePortRange utilnet.PortRange
// Additional ports to be exposed on the GenericAPIServer service
// extraServicePorts is injectable in the event that more ports
// (other than the default 443/tcp) are exposed on the GenericAPIServer
// and those ports need to be load balanced by the GenericAPIServer
// service because this pkg is linked by out-of-tree projects
// like openshift which want to use the GenericAPIServer but also do
// more stuff.
ExtraServicePorts []api.ServicePort
// Additional ports to be exposed on the GenericAPIServer endpoints
// Port names should align with ports defined in ExtraServicePorts
ExtraEndpointPorts []api.EndpointPort
// If non-zero, the "kubernetes" services uses this port as NodePort.
KubernetesServiceNodePort int
// Number of masters running; all masters must be started with the
// same value for this field. (Numbers > 1 currently untested.)
MasterCount int
// MasterEndpointReconcileTTL sets the time to live in seconds of an
// endpoint record recorded by each master. The endpoints are checked at an
// interval that is 2/3 of this value and this value defaults to 15s if
// unset. In very large clusters, this value may be increased to reduce the
// possibility that the master endpoint record expires (due to other load
// on the etcd server) and causes masters to drop in and out of the
// kubernetes service record. It is not recommended to set this value below
// 15s.
MasterEndpointReconcileTTL time.Duration
// Selects which reconciler to use
EndpointReconcilerType reconcilers.Type
ServiceAccountIssuer serviceaccount.TokenGenerator
ServiceAccountAPIAudiences []string
}
type Config struct {
GenericConfig *genericapiserver.Config
ExtraConfig ExtraConfig
}
type completedConfig struct {
GenericConfig genericapiserver.CompletedConfig
ExtraConfig *ExtraConfig
}
type CompletedConfig struct {
// Embed a private pointer that cannot be instantiated outside of this package.
*completedConfig
}
// EndpointReconcilerConfig holds the endpoint reconciler and endpoint reconciliation interval to be
// used by the master.
type EndpointReconcilerConfig struct {
Reconciler reconcilers.EndpointReconciler
Interval time.Duration
}
// Master contains state for a Kubernetes cluster master/api server.
type Master struct {
GenericAPIServer *genericapiserver.GenericAPIServer
ClientCARegistrationHook ClientCARegistrationHook
}
func (c *Config) createMasterCountReconciler() reconcilers.EndpointReconciler {
endpointClient := coreclient.NewForConfigOrDie(c.GenericConfig.LoopbackClientConfig)
return reconcilers.NewMasterCountEndpointReconciler(c.ExtraConfig.MasterCount, endpointClient)
}
func (c *Config) createNoneReconciler() reconcilers.EndpointReconciler {
return reconcilers.NewNoneEndpointReconciler()
}
func (c *Config) createLeaseReconciler() reconcilers.EndpointReconciler {
ttl := c.ExtraConfig.MasterEndpointReconcileTTL
config, err := c.ExtraConfig.StorageFactory.NewConfig(api.Resource("apiServerIPInfo"))
if err != nil {
glog.Fatalf("Error determining service IP ranges: %v", err)
}
leaseStorage, _, err := storagefactory.Create(*config)
if err != nil {
glog.Fatalf("Error creating storage factory: %v", err)
}
endpointConfig, err := c.ExtraConfig.StorageFactory.NewConfig(api.Resource("endpoints"))
if err != nil {
glog.Fatalf("Error getting storage config: %v", err)
}
endpointsStorage := endpointsstorage.NewREST(generic.RESTOptions{
StorageConfig: endpointConfig,
Decorator: generic.UndecoratedStorage,
DeleteCollectionWorkers: 0,
ResourcePrefix: c.ExtraConfig.StorageFactory.ResourcePrefix(api.Resource("endpoints")),
})
endpointRegistry := endpoint.NewRegistry(endpointsStorage)
masterLeases := reconcilers.NewLeases(leaseStorage, "/masterleases/", ttl)
return reconcilers.NewLeaseEndpointReconciler(endpointRegistry, masterLeases)
}
func (c *Config) createEndpointReconciler() reconcilers.EndpointReconciler {
glog.Infof("Using reconciler: %v", c.ExtraConfig.EndpointReconcilerType)
switch c.ExtraConfig.EndpointReconcilerType {
// there are numerous test dependencies that depend on a default controller
case "", reconcilers.MasterCountReconcilerType:
return c.createMasterCountReconciler()
case reconcilers.LeaseEndpointReconcilerType:
return c.createLeaseReconciler()
case reconcilers.NoneEndpointReconcilerType:
return c.createNoneReconciler()
default:
glog.Fatalf("Reconciler not implemented: %v", c.ExtraConfig.EndpointReconcilerType)
}
return nil
}
// Complete fills in any fields not set that are required to have valid data. It's mutating the receiver.
func (cfg *Config) Complete(informers informers.SharedInformerFactory) CompletedConfig {
c := completedConfig{
cfg.GenericConfig.Complete(informers),
&cfg.ExtraConfig,
}
serviceIPRange, apiServerServiceIP, err := DefaultServiceIPRange(c.ExtraConfig.ServiceIPRange)
if err != nil {
glog.Fatalf("Error determining service IP ranges: %v", err)
}
if c.ExtraConfig.ServiceIPRange.IP == nil {
c.ExtraConfig.ServiceIPRange = serviceIPRange
}
if c.ExtraConfig.APIServerServiceIP == nil {
c.ExtraConfig.APIServerServiceIP = apiServerServiceIP
}
discoveryAddresses := discovery.DefaultAddresses{DefaultAddress: c.GenericConfig.ExternalAddress}
discoveryAddresses.CIDRRules = append(discoveryAddresses.CIDRRules,
discovery.CIDRRule{IPRange: c.ExtraConfig.ServiceIPRange, Address: net.JoinHostPort(c.ExtraConfig.APIServerServiceIP.String(), strconv.Itoa(c.ExtraConfig.APIServerServicePort))})
c.GenericConfig.DiscoveryAddresses = discoveryAddresses
if c.ExtraConfig.ServiceNodePortRange.Size == 0 {
// TODO: Currently no way to specify an empty range (do we need to allow this?)
// We should probably allow this for clouds that don't require NodePort to do load-balancing (GCE)
// but then that breaks the strict nestedness of ServiceType.
// Review post-v1
c.ExtraConfig.ServiceNodePortRange = kubeoptions.DefaultServiceNodePortRange
glog.Infof("Node port range unspecified. Defaulting to %v.", c.ExtraConfig.ServiceNodePortRange)
}
if c.ExtraConfig.EndpointReconcilerConfig.Interval == 0 {
c.ExtraConfig.EndpointReconcilerConfig.Interval = DefaultEndpointReconcilerInterval
}
if c.ExtraConfig.MasterEndpointReconcileTTL == 0 {
c.ExtraConfig.MasterEndpointReconcileTTL = DefaultEndpointReconcilerTTL
}
if c.ExtraConfig.EndpointReconcilerConfig.Reconciler == nil {
c.ExtraConfig.EndpointReconcilerConfig.Reconciler = cfg.createEndpointReconciler()
}
return CompletedConfig{&c}
}
// New returns a new instance of Master from the given config.
// Certain config fields will be set to a default value if unset.
// Certain config fields must be specified, including:
// KubeletClientConfig
func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) (*Master, error) {
if reflect.DeepEqual(c.ExtraConfig.KubeletClientConfig, kubeletclient.KubeletClientConfig{}) {
return nil, fmt.Errorf("Master.New() called with empty config.KubeletClientConfig")
}
s, err := c.GenericConfig.New("kube-apiserver", delegationTarget)
if err != nil {
return nil, err
}
if c.ExtraConfig.EnableLogsSupport {
routes.Logs{}.Install(s.Handler.GoRestfulContainer)
}
m := &Master{
GenericAPIServer: s,
}
// install legacy rest storage
if c.ExtraConfig.APIResourceConfigSource.VersionEnabled(apiv1.SchemeGroupVersion) {
legacyRESTStorageProvider := corerest.LegacyRESTStorageProvider{
StorageFactory: c.ExtraConfig.StorageFactory,
ProxyTransport: c.ExtraConfig.ProxyTransport,
KubeletClientConfig: c.ExtraConfig.KubeletClientConfig,
EventTTL: c.ExtraConfig.EventTTL,
ServiceIPRange: c.ExtraConfig.ServiceIPRange,
ServiceNodePortRange: c.ExtraConfig.ServiceNodePortRange,
LoopbackClientConfig: c.GenericConfig.LoopbackClientConfig,
ServiceAccountIssuer: c.ExtraConfig.ServiceAccountIssuer,
ServiceAccountAPIAudiences: c.ExtraConfig.ServiceAccountAPIAudiences,
}
m.InstallLegacyAPI(&c, c.GenericConfig.RESTOptionsGetter, legacyRESTStorageProvider)
}
// The order here is preserved in discovery.
// If resources with identical names exist in more than one of these groups (e.g. "deployments.apps"" and "deployments.extensions"),
// the order of this list determines which group an unqualified resource name (e.g. "deployments") should prefer.
// This priority order is used for local discovery, but it ends up aggregated in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go
// with specific priorities.
// TODO: describe the priority all the way down in the RESTStorageProviders and plumb it back through the various discovery
// handlers that we have.
restStorageProviders := []RESTStorageProvider{
authenticationrest.RESTStorageProvider{Authenticator: c.GenericConfig.Authentication.Authenticator},
authorizationrest.RESTStorageProvider{Authorizer: c.GenericConfig.Authorization.Authorizer, RuleResolver: c.GenericConfig.RuleResolver},
autoscalingrest.RESTStorageProvider{},
batchrest.RESTStorageProvider{},
certificatesrest.RESTStorageProvider{},
extensionsrest.RESTStorageProvider{},
networkingrest.RESTStorageProvider{},
policyrest.RESTStorageProvider{},
rbacrest.RESTStorageProvider{Authorizer: c.GenericConfig.Authorization.Authorizer},
schedulingrest.RESTStorageProvider{},
settingsrest.RESTStorageProvider{},
storagerest.RESTStorageProvider{},
// keep apps after extensions so legacy clients resolve the extensions versions of shared resource names.
// See https://github.com/kubernetes/kubernetes/issues/42392
appsrest.RESTStorageProvider{},
admissionregistrationrest.RESTStorageProvider{},
eventsrest.RESTStorageProvider{TTL: c.ExtraConfig.EventTTL},
}
m.InstallAPIs(c.ExtraConfig.APIResourceConfigSource, c.GenericConfig.RESTOptionsGetter, restStorageProviders...)
if c.ExtraConfig.Tunneler != nil {
m.installTunneler(c.ExtraConfig.Tunneler, corev1client.NewForConfigOrDie(c.GenericConfig.LoopbackClientConfig).Nodes())
}
m.GenericAPIServer.AddPostStartHookOrDie("ca-registration", c.ExtraConfig.ClientCARegistrationHook.PostStartHook)
return m, nil
}
func (m *Master) InstallLegacyAPI(c *completedConfig, restOptionsGetter generic.RESTOptionsGetter, legacyRESTStorageProvider corerest.LegacyRESTStorageProvider) {
legacyRESTStorage, apiGroupInfo, err := legacyRESTStorageProvider.NewLegacyRESTStorage(restOptionsGetter)
if err != nil {
glog.Fatalf("Error building core storage: %v", err)
}
if c.ExtraConfig.EnableCoreControllers {
controllerName := "bootstrap-controller"
coreClient := coreclient.NewForConfigOrDie(c.GenericConfig.LoopbackClientConfig)
bootstrapController := c.NewBootstrapController(legacyRESTStorage, coreClient, coreClient, coreClient)
m.GenericAPIServer.AddPostStartHookOrDie(controllerName, bootstrapController.PostStartHook)
m.GenericAPIServer.AddPreShutdownHookOrDie(controllerName, bootstrapController.PreShutdownHook)
}
if err := m.GenericAPIServer.InstallLegacyAPIGroup(genericapiserver.DefaultLegacyAPIPrefix, &apiGroupInfo); err != nil {
glog.Fatalf("Error in registering group versions: %v", err)
}
}
func (m *Master) installTunneler(nodeTunneler tunneler.Tunneler, nodeClient corev1client.NodeInterface) {
nodeTunneler.Run(nodeAddressProvider{nodeClient}.externalAddresses)
m.GenericAPIServer.AddHealthzChecks(healthz.NamedCheck("SSH Tunnel Check", tunneler.TunnelSyncHealthChecker(nodeTunneler)))
prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Name: "apiserver_proxy_tunnel_sync_latency_secs",
Help: "The time since the last successful synchronization of the SSH tunnels for proxy requests.",
}, func() float64 { return float64(nodeTunneler.SecondsSinceSync()) })
}
// RESTStorageProvider is a factory type for REST storage.
type RESTStorageProvider interface {
GroupName() string
NewRESTStorage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, bool)
}
// InstallAPIs will install the APIs for the restStorageProviders if they are enabled.
func (m *Master) InstallAPIs(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter, restStorageProviders ...RESTStorageProvider) {
apiGroupsInfo := []genericapiserver.APIGroupInfo{}
for _, restStorageBuilder := range restStorageProviders {
groupName := restStorageBuilder.GroupName()
if !apiResourceConfigSource.AnyVersionForGroupEnabled(groupName) {
glog.V(1).Infof("Skipping disabled API group %q.", groupName)
continue
}
apiGroupInfo, enabled := restStorageBuilder.NewRESTStorage(apiResourceConfigSource, restOptionsGetter)
if !enabled {
glog.Warningf("Problem initializing API group %q, skipping.", groupName)
continue
}
glog.V(1).Infof("Enabling API group %q.", groupName)
if postHookProvider, ok := restStorageBuilder.(genericapiserver.PostStartHookProvider); ok {
name, hook, err := postHookProvider.PostStartHook()
if err != nil {
glog.Fatalf("Error building PostStartHook: %v", err)
}
m.GenericAPIServer.AddPostStartHookOrDie(name, hook)
}
apiGroupsInfo = append(apiGroupsInfo, apiGroupInfo)
}
for i := range apiGroupsInfo {
if err := m.GenericAPIServer.InstallAPIGroup(&apiGroupsInfo[i]); err != nil {
glog.Fatalf("Error in registering group versions: %v", err)
}
}
}
type nodeAddressProvider struct {
nodeClient corev1client.NodeInterface
}
func (n nodeAddressProvider) externalAddresses() ([]string, error) {
preferredAddressTypes := []apiv1.NodeAddressType{
apiv1.NodeExternalIP,
}
nodes, err := n.nodeClient.List(metav1.ListOptions{})
if err != nil {
return nil, err
}
addrs := []string{}
for ix := range nodes.Items {
node := &nodes.Items[ix]
addr, err := nodeutil.GetPreferredNodeAddress(node, preferredAddressTypes)
if err != nil {
return nil, err
}
addrs = append(addrs, addr)
}
return addrs, nil
}
func DefaultAPIResourceConfigSource() *serverstorage.ResourceConfig {
ret := serverstorage.NewResourceConfig()
// NOTE: GroupVersions listed here will be enabled by default. Don't put alpha versions in the list.
ret.EnableVersions(
apiv1.SchemeGroupVersion,
extensionsapiv1beta1.SchemeGroupVersion,
batchapiv1.SchemeGroupVersion,
batchapiv1beta1.SchemeGroupVersion,
authenticationv1.SchemeGroupVersion,
authenticationv1beta1.SchemeGroupVersion,
autoscalingapiv1.SchemeGroupVersion,
autoscalingapiv2beta1.SchemeGroupVersion,
appsv1beta1.SchemeGroupVersion,
appsv1beta2.SchemeGroupVersion,
appsv1.SchemeGroupVersion,
policyapiv1beta1.SchemeGroupVersion,
rbacv1.SchemeGroupVersion,
rbacv1beta1.SchemeGroupVersion,
storageapiv1.SchemeGroupVersion,
storageapiv1beta1.SchemeGroupVersion,
certificatesapiv1beta1.SchemeGroupVersion,
authorizationapiv1.SchemeGroupVersion,
authorizationapiv1beta1.SchemeGroupVersion,
networkingapiv1.SchemeGroupVersion,
eventsv1beta1.SchemeGroupVersion,
admissionregistrationv1beta1.SchemeGroupVersion,
)
return ret
}
| pkg/master/master.go | 1 | https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b | [
0.9854119420051575,
0.05402832105755806,
0.0001625512377358973,
0.0001766061468515545,
0.1969805210828781
] |
{
"id": 1,
"code_window": [
"\n",
"\tAPIResourceConfigSource serverstorage.APIResourceConfigSource\n",
"\tStorageFactory serverstorage.StorageFactory\n",
"\tEnableCoreControllers bool\n",
"\tEndpointReconcilerConfig EndpointReconcilerConfig\n",
"\tEventTTL time.Duration\n",
"\tKubeletClientConfig kubeletclient.KubeletClientConfig\n",
"\n",
"\t// Used to start and monitor tunneling\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/master/master.go",
"type": "replace",
"edit_start_line_idx": 105
} | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
rest "k8s.io/client-go/rest"
)
// TokenReviewsGetter has a method to return a TokenReviewInterface.
// A group's client should implement this interface.
type TokenReviewsGetter interface {
TokenReviews() TokenReviewInterface
}
// TokenReviewInterface has methods to work with TokenReview resources.
type TokenReviewInterface interface {
TokenReviewExpansion
}
// tokenReviews implements TokenReviewInterface
type tokenReviews struct {
client rest.Interface
}
// newTokenReviews returns a TokenReviews
func newTokenReviews(c *AuthenticationV1Client) *tokenReviews {
return &tokenReviews{
client: c.RESTClient(),
}
}
| staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go | 0 | https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b | [
0.00018620250921230763,
0.00017960899276658893,
0.00017352790746372193,
0.0001775217242538929,
0.000004697243639384396
] |
{
"id": 1,
"code_window": [
"\n",
"\tAPIResourceConfigSource serverstorage.APIResourceConfigSource\n",
"\tStorageFactory serverstorage.StorageFactory\n",
"\tEnableCoreControllers bool\n",
"\tEndpointReconcilerConfig EndpointReconcilerConfig\n",
"\tEventTTL time.Duration\n",
"\tKubeletClientConfig kubeletclient.KubeletClientConfig\n",
"\n",
"\t// Used to start and monitor tunneling\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/master/master.go",
"type": "replace",
"edit_start_line_idx": 105
} | package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"fake_node_status_updater.go",
"node_status_updater.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater",
deps = [
"//pkg/controller/volume/attachdetach/cache:go_default_library",
"//pkg/util/node:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
| pkg/controller/volume/attachdetach/statusupdater/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b | [
0.0001749419025145471,
0.00017399442731402814,
0.00017208780627697706,
0.00017447397112846375,
0.0000011327645097480854
] |
{
"id": 1,
"code_window": [
"\n",
"\tAPIResourceConfigSource serverstorage.APIResourceConfigSource\n",
"\tStorageFactory serverstorage.StorageFactory\n",
"\tEnableCoreControllers bool\n",
"\tEndpointReconcilerConfig EndpointReconcilerConfig\n",
"\tEventTTL time.Duration\n",
"\tKubeletClientConfig kubeletclient.KubeletClientConfig\n",
"\n",
"\t// Used to start and monitor tunneling\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/master/master.go",
"type": "replace",
"edit_start_line_idx": 105
} | #!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
: ${KUBECTL:=${KUBE_ROOT}/cluster/kubectl.sh}
: ${KUBE_CONFIG_FILE:="config-test.sh"}
export KUBECTL KUBE_CONFIG_FILE
source "${KUBE_ROOT}/cluster/kube-util.sh"
prepare-e2e
test-teardown
| hack/e2e-internal/e2e-down.sh | 0 | https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b | [
0.00017532880883663893,
0.00017179800488520414,
0.00016665674047544599,
0.00017260323511436582,
0.0000035624709653347963
] |
{
"id": 2,
"code_window": [
"\t\tglog.Fatalf(\"Error building core storage: %v\", err)\n",
"\t}\n",
"\n",
"\tif c.ExtraConfig.EnableCoreControllers {\n",
"\t\tcontrollerName := \"bootstrap-controller\"\n",
"\t\tcoreClient := coreclient.NewForConfigOrDie(c.GenericConfig.LoopbackClientConfig)\n",
"\t\tbootstrapController := c.NewBootstrapController(legacyRESTStorage, coreClient, coreClient, coreClient)\n",
"\t\tm.GenericAPIServer.AddPostStartHookOrDie(controllerName, bootstrapController.PostStartHook)\n",
"\t\tm.GenericAPIServer.AddPreShutdownHookOrDie(controllerName, bootstrapController.PreShutdownHook)\n",
"\t}\n",
"\n",
"\tif err := m.GenericAPIServer.InstallLegacyAPIGroup(genericapiserver.DefaultLegacyAPIPrefix, &apiGroupInfo); err != nil {\n",
"\t\tglog.Fatalf(\"Error in registering group versions: %v\", err)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcontrollerName := \"bootstrap-controller\"\n",
"\tcoreClient := coreclient.NewForConfigOrDie(c.GenericConfig.LoopbackClientConfig)\n",
"\tbootstrapController := c.NewBootstrapController(legacyRESTStorage, coreClient, coreClient, coreClient)\n",
"\tm.GenericAPIServer.AddPostStartHookOrDie(controllerName, bootstrapController.PostStartHook)\n",
"\tm.GenericAPIServer.AddPreShutdownHookOrDie(controllerName, bootstrapController.PreShutdownHook)\n"
],
"file_path": "pkg/master/master.go",
"type": "replace",
"edit_start_line_idx": 370
} | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"net"
"net/http"
"net/http/httptest"
"path"
"time"
"github.com/go-openapi/spec"
"github.com/golang/glog"
"github.com/pborman/uuid"
apps "k8s.io/api/apps/v1beta1"
autoscaling "k8s.io/api/autoscaling/v1"
certificates "k8s.io/api/certificates/v1beta1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
rbac "k8s.io/api/rbac/v1alpha1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
authauthenticator "k8s.io/apiserver/pkg/authentication/authenticator"
"k8s.io/apiserver/pkg/authentication/authenticatorfactory"
authenticatorunion "k8s.io/apiserver/pkg/authentication/request/union"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/apiserver/pkg/authorization/authorizerfactory"
authorizerunion "k8s.io/apiserver/pkg/authorization/union"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/options"
serverstorage "k8s.io/apiserver/pkg/server/storage"
"k8s.io/apiserver/pkg/storage/storagebackend"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/apis/batch"
policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
"k8s.io/kubernetes/pkg/generated/openapi"
kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
"k8s.io/kubernetes/pkg/master"
"k8s.io/kubernetes/pkg/version"
)
// Config is a struct of configuration directives for NewMasterComponents.
type Config struct {
// If nil, a default is used, partially filled configs will not get populated.
MasterConfig *master.Config
StartReplicationManager bool
// Client throttling qps
QPS float32
// Client burst qps, also burst replicas allowed in rc manager
Burst int
// TODO: Add configs for endpoints controller, scheduler etc
}
// alwaysAllow always allows an action
type alwaysAllow struct{}
func (alwaysAllow) Authorize(requestAttributes authorizer.Attributes) (authorizer.Decision, string, error) {
return authorizer.DecisionAllow, "always allow", nil
}
// alwaysEmpty simulates "no authentication" for old tests
func alwaysEmpty(req *http.Request) (user.Info, bool, error) {
return &user.DefaultInfo{
Name: "",
}, true, nil
}
// MasterReceiver can be used to provide the master to a custom incoming server function
type MasterReceiver interface {
SetMaster(m *master.Master)
}
// MasterHolder implements
type MasterHolder struct {
Initialized chan struct{}
M *master.Master
}
func (h *MasterHolder) SetMaster(m *master.Master) {
h.M = m
close(h.Initialized)
}
// startMasterOrDie starts a kubernetes master and an httpserver to handle api requests
func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Server, masterReceiver MasterReceiver) (*master.Master, *httptest.Server, CloseFunc) {
var m *master.Master
var s *httptest.Server
if incomingServer != nil {
s = incomingServer
} else {
s = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
m.GenericAPIServer.Handler.ServeHTTP(w, req)
}))
}
stopCh := make(chan struct{})
closeFn := func() {
close(stopCh)
s.Close()
}
if masterConfig == nil {
masterConfig = NewMasterConfig()
masterConfig.GenericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(openapi.GetOpenAPIDefinitions, legacyscheme.Scheme)
masterConfig.GenericConfig.OpenAPIConfig.Info = &spec.Info{
InfoProps: spec.InfoProps{
Title: "Kubernetes",
Version: "unversioned",
},
}
masterConfig.GenericConfig.OpenAPIConfig.DefaultResponse = &spec.Response{
ResponseProps: spec.ResponseProps{
Description: "Default Response.",
},
}
masterConfig.GenericConfig.OpenAPIConfig.GetDefinitions = openapi.GetOpenAPIDefinitions
masterConfig.GenericConfig.SwaggerConfig = genericapiserver.DefaultSwaggerConfig()
}
// set the loopback client config
if masterConfig.GenericConfig.LoopbackClientConfig == nil {
masterConfig.GenericConfig.LoopbackClientConfig = &restclient.Config{QPS: 50, Burst: 100, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs}}
}
masterConfig.GenericConfig.LoopbackClientConfig.Host = s.URL
privilegedLoopbackToken := uuid.NewRandom().String()
// wrap any available authorizer
tokens := make(map[string]*user.DefaultInfo)
tokens[privilegedLoopbackToken] = &user.DefaultInfo{
Name: user.APIServerUser,
UID: uuid.NewRandom().String(),
Groups: []string{user.SystemPrivilegedGroup},
}
tokenAuthenticator := authenticatorfactory.NewFromTokens(tokens)
if masterConfig.GenericConfig.Authentication.Authenticator == nil {
masterConfig.GenericConfig.Authentication.Authenticator = authenticatorunion.New(tokenAuthenticator, authauthenticator.RequestFunc(alwaysEmpty))
} else {
masterConfig.GenericConfig.Authentication.Authenticator = authenticatorunion.New(tokenAuthenticator, masterConfig.GenericConfig.Authentication.Authenticator)
}
if masterConfig.GenericConfig.Authorization.Authorizer != nil {
tokenAuthorizer := authorizerfactory.NewPrivilegedGroups(user.SystemPrivilegedGroup)
masterConfig.GenericConfig.Authorization.Authorizer = authorizerunion.New(tokenAuthorizer, masterConfig.GenericConfig.Authorization.Authorizer)
} else {
masterConfig.GenericConfig.Authorization.Authorizer = alwaysAllow{}
}
masterConfig.GenericConfig.LoopbackClientConfig.BearerToken = privilegedLoopbackToken
clientset, err := clientset.NewForConfig(masterConfig.GenericConfig.LoopbackClientConfig)
if err != nil {
glog.Fatal(err)
}
sharedInformers := informers.NewSharedInformerFactory(clientset, masterConfig.GenericConfig.LoopbackClientConfig.Timeout)
m, err = masterConfig.Complete(sharedInformers).New(genericapiserver.EmptyDelegate)
if err != nil {
closeFn()
glog.Fatalf("error in bringing up the master: %v", err)
}
if masterReceiver != nil {
masterReceiver.SetMaster(m)
}
// TODO have this start method actually use the normal start sequence for the API server
// this method never actually calls the `Run` method for the API server
// fire the post hooks ourselves
m.GenericAPIServer.PrepareRun()
m.GenericAPIServer.RunPostStartHooks(stopCh)
cfg := *masterConfig.GenericConfig.LoopbackClientConfig
cfg.ContentConfig.GroupVersion = &schema.GroupVersion{}
privilegedClient, err := restclient.RESTClientFor(&cfg)
if err != nil {
closeFn()
glog.Fatal(err)
}
err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
result := privilegedClient.Get().AbsPath("/healthz").Do()
status := 0
result.StatusCode(&status)
if status == 200 {
return true, nil
}
return false, nil
})
if err != nil {
closeFn()
glog.Fatal(err)
}
return m, s, closeFn
}
// Returns the master config appropriate for most integration tests.
func NewIntegrationTestMasterConfig() *master.Config {
masterConfig := NewMasterConfig()
masterConfig.ExtraConfig.EnableCoreControllers = true
masterConfig.GenericConfig.PublicAddress = net.ParseIP("192.168.10.4")
masterConfig.ExtraConfig.APIResourceConfigSource = master.DefaultAPIResourceConfigSource()
return masterConfig
}
// Returns a basic master config.
func NewMasterConfig() *master.Config {
// This causes the integration tests to exercise the etcd
// prefix code, so please don't change without ensuring
// sufficient coverage in other ways.
etcdOptions := options.NewEtcdOptions(storagebackend.NewDefaultConfig(uuid.New(), nil))
etcdOptions.StorageConfig.ServerList = []string{GetEtcdURL()}
info, _ := runtime.SerializerInfoForMediaType(legacyscheme.Codecs.SupportedMediaTypes(), runtime.ContentTypeJSON)
ns := NewSingleContentTypeSerializer(legacyscheme.Scheme, info)
resourceEncoding := serverstorage.NewDefaultResourceEncodingConfig(legacyscheme.Registry)
// FIXME (soltysh): this GroupVersionResource override should be configurable
// we need to set both for the whole group and for cronjobs, separately
resourceEncoding.SetVersionEncoding(batch.GroupName, *testapi.Batch.GroupVersion(), schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal})
resourceEncoding.SetResourceEncoding(schema.GroupResource{Group: batch.GroupName, Resource: "cronjobs"}, schema.GroupVersion{Group: batch.GroupName, Version: "v1beta1"}, schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal})
// we also need to set both for the storage group and for volumeattachments, separately
resourceEncoding.SetVersionEncoding(storage.GroupName, *testapi.Storage.GroupVersion(), schema.GroupVersion{Group: storage.GroupName, Version: runtime.APIVersionInternal})
resourceEncoding.SetResourceEncoding(schema.GroupResource{Group: storage.GroupName, Resource: "volumeattachments"}, schema.GroupVersion{Group: storage.GroupName, Version: "v1beta1"}, schema.GroupVersion{Group: storage.GroupName, Version: runtime.APIVersionInternal})
storageFactory := serverstorage.NewDefaultStorageFactory(etcdOptions.StorageConfig, runtime.ContentTypeJSON, ns, resourceEncoding, master.DefaultAPIResourceConfigSource(), nil)
storageFactory.SetSerializer(
schema.GroupResource{Group: v1.GroupName, Resource: serverstorage.AllResources},
"",
ns)
storageFactory.SetSerializer(
schema.GroupResource{Group: autoscaling.GroupName, Resource: serverstorage.AllResources},
"",
ns)
storageFactory.SetSerializer(
schema.GroupResource{Group: batch.GroupName, Resource: serverstorage.AllResources},
"",
ns)
storageFactory.SetSerializer(
schema.GroupResource{Group: apps.GroupName, Resource: serverstorage.AllResources},
"",
ns)
storageFactory.SetSerializer(
schema.GroupResource{Group: extensions.GroupName, Resource: serverstorage.AllResources},
"",
ns)
storageFactory.SetSerializer(
schema.GroupResource{Group: policy.GroupName, Resource: serverstorage.AllResources},
"",
ns)
storageFactory.SetSerializer(
schema.GroupResource{Group: rbac.GroupName, Resource: serverstorage.AllResources},
"",
ns)
storageFactory.SetSerializer(
schema.GroupResource{Group: certificates.GroupName, Resource: serverstorage.AllResources},
"",
ns)
storageFactory.SetSerializer(
schema.GroupResource{Group: storage.GroupName, Resource: serverstorage.AllResources},
"",
ns)
genericConfig := genericapiserver.NewConfig(legacyscheme.Codecs)
kubeVersion := version.Get()
genericConfig.Version = &kubeVersion
genericConfig.Authorization.Authorizer = authorizerfactory.NewAlwaysAllowAuthorizer()
err := etcdOptions.ApplyWithStorageFactoryTo(storageFactory, genericConfig)
if err != nil {
panic(err)
}
return &master.Config{
GenericConfig: genericConfig,
ExtraConfig: master.ExtraConfig{
APIResourceConfigSource: master.DefaultAPIResourceConfigSource(),
StorageFactory: storageFactory,
EnableCoreControllers: true,
KubeletClientConfig: kubeletclient.KubeletClientConfig{Port: 10250},
APIServerServicePort: 443,
MasterCount: 1,
},
}
}
// CloseFunc can be called to cleanup the master
type CloseFunc func()
func RunAMaster(masterConfig *master.Config) (*master.Master, *httptest.Server, CloseFunc) {
if masterConfig == nil {
masterConfig = NewMasterConfig()
masterConfig.GenericConfig.EnableProfiling = true
}
return startMasterOrDie(masterConfig, nil, nil)
}
func RunAMasterUsingServer(masterConfig *master.Config, s *httptest.Server, masterReceiver MasterReceiver) (*master.Master, *httptest.Server, CloseFunc) {
return startMasterOrDie(masterConfig, s, masterReceiver)
}
// SharedEtcd creates a storage config for a shared etcd instance, with a unique prefix.
func SharedEtcd() *storagebackend.Config {
cfg := storagebackend.NewDefaultConfig(path.Join(uuid.New(), "registry"), nil)
cfg.ServerList = []string{GetEtcdURL()}
return cfg
}
| test/integration/framework/master_utils.go | 1 | https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b | [
0.009200090542435646,
0.0006516317371279001,
0.00015996942238416523,
0.00017355356249026954,
0.0016062695067375898
] |
{
"id": 2,
"code_window": [
"\t\tglog.Fatalf(\"Error building core storage: %v\", err)\n",
"\t}\n",
"\n",
"\tif c.ExtraConfig.EnableCoreControllers {\n",
"\t\tcontrollerName := \"bootstrap-controller\"\n",
"\t\tcoreClient := coreclient.NewForConfigOrDie(c.GenericConfig.LoopbackClientConfig)\n",
"\t\tbootstrapController := c.NewBootstrapController(legacyRESTStorage, coreClient, coreClient, coreClient)\n",
"\t\tm.GenericAPIServer.AddPostStartHookOrDie(controllerName, bootstrapController.PostStartHook)\n",
"\t\tm.GenericAPIServer.AddPreShutdownHookOrDie(controllerName, bootstrapController.PreShutdownHook)\n",
"\t}\n",
"\n",
"\tif err := m.GenericAPIServer.InstallLegacyAPIGroup(genericapiserver.DefaultLegacyAPIPrefix, &apiGroupInfo); err != nil {\n",
"\t\tglog.Fatalf(\"Error in registering group versions: %v\", err)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcontrollerName := \"bootstrap-controller\"\n",
"\tcoreClient := coreclient.NewForConfigOrDie(c.GenericConfig.LoopbackClientConfig)\n",
"\tbootstrapController := c.NewBootstrapController(legacyRESTStorage, coreClient, coreClient, coreClient)\n",
"\tm.GenericAPIServer.AddPostStartHookOrDie(controllerName, bootstrapController.PostStartHook)\n",
"\tm.GenericAPIServer.AddPreShutdownHookOrDie(controllerName, bootstrapController.PreShutdownHook)\n"
],
"file_path": "pkg/master/master.go",
"type": "replace",
"edit_start_line_idx": 370
} | package negroni
import (
"fmt"
"log"
"net/http"
"os"
"runtime"
)
// Recovery is a Negroni middleware that recovers from any panics and writes a 500 if there was one.
type Recovery struct {
Logger *log.Logger
PrintStack bool
StackAll bool
StackSize int
}
// NewRecovery returns a new instance of Recovery
func NewRecovery() *Recovery {
return &Recovery{
Logger: log.New(os.Stdout, "[negroni] ", 0),
PrintStack: true,
StackAll: false,
StackSize: 1024 * 8,
}
}
func (rec *Recovery) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
defer func() {
if err := recover(); err != nil {
rw.WriteHeader(http.StatusInternalServerError)
stack := make([]byte, rec.StackSize)
stack = stack[:runtime.Stack(stack, rec.StackAll)]
f := "PANIC: %s\n%s"
rec.Logger.Printf(f, err, stack)
if rec.PrintStack {
fmt.Fprintf(rw, f, err, stack)
}
}
}()
next(rw, r)
}
| vendor/github.com/codegangsta/negroni/recovery.go | 0 | https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b | [
0.00017265946371480823,
0.00016934612358454615,
0.0001652328355703503,
0.0001695738174021244,
0.0000025999029276135843
] |
{
"id": 2,
"code_window": [
"\t\tglog.Fatalf(\"Error building core storage: %v\", err)\n",
"\t}\n",
"\n",
"\tif c.ExtraConfig.EnableCoreControllers {\n",
"\t\tcontrollerName := \"bootstrap-controller\"\n",
"\t\tcoreClient := coreclient.NewForConfigOrDie(c.GenericConfig.LoopbackClientConfig)\n",
"\t\tbootstrapController := c.NewBootstrapController(legacyRESTStorage, coreClient, coreClient, coreClient)\n",
"\t\tm.GenericAPIServer.AddPostStartHookOrDie(controllerName, bootstrapController.PostStartHook)\n",
"\t\tm.GenericAPIServer.AddPreShutdownHookOrDie(controllerName, bootstrapController.PreShutdownHook)\n",
"\t}\n",
"\n",
"\tif err := m.GenericAPIServer.InstallLegacyAPIGroup(genericapiserver.DefaultLegacyAPIPrefix, &apiGroupInfo); err != nil {\n",
"\t\tglog.Fatalf(\"Error in registering group versions: %v\", err)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcontrollerName := \"bootstrap-controller\"\n",
"\tcoreClient := coreclient.NewForConfigOrDie(c.GenericConfig.LoopbackClientConfig)\n",
"\tbootstrapController := c.NewBootstrapController(legacyRESTStorage, coreClient, coreClient, coreClient)\n",
"\tm.GenericAPIServer.AddPostStartHookOrDie(controllerName, bootstrapController.PostStartHook)\n",
"\tm.GenericAPIServer.AddPreShutdownHookOrDie(controllerName, bootstrapController.PreShutdownHook)\n"
],
"file_path": "pkg/master/master.go",
"type": "replace",
"edit_start_line_idx": 370
} | // Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package runes provide transforms for UTF-8 encoded text.
package runes
import (
"unicode"
"unicode/utf8"
"golang.org/x/text/transform"
)
// A Set is a collection of runes.
type Set interface {
// Contains returns true if r is contained in the set.
Contains(r rune) bool
}
type setFunc func(rune) bool
func (s setFunc) Contains(r rune) bool {
return s(r)
}
// Note: using funcs here instead of wrapping types result in cleaner
// documentation and a smaller API.
// In creates a Set with a Contains method that returns true for all runes in
// the given RangeTable.
func In(rt *unicode.RangeTable) Set {
return setFunc(func(r rune) bool { return unicode.Is(rt, r) })
}
// In creates a Set with a Contains method that returns true for all runes not
// in the given RangeTable.
func NotIn(rt *unicode.RangeTable) Set {
return setFunc(func(r rune) bool { return !unicode.Is(rt, r) })
}
// Predicate creates a Set with a Contains method that returns f(r).
func Predicate(f func(rune) bool) Set {
return setFunc(f)
}
// Transformer implements the transform.Transformer interface.
type Transformer struct {
t transform.SpanningTransformer
}
func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
return t.t.Transform(dst, src, atEOF)
}
func (t Transformer) Span(b []byte, atEOF bool) (n int, err error) {
return t.t.Span(b, atEOF)
}
func (t Transformer) Reset() { t.t.Reset() }
// Bytes returns a new byte slice with the result of converting b using t. It
// calls Reset on t. It returns nil if any error was found. This can only happen
// if an error-producing Transformer is passed to If.
func (t Transformer) Bytes(b []byte) []byte {
b, _, err := transform.Bytes(t, b)
if err != nil {
return nil
}
return b
}
// String returns a string with the result of converting s using t. It calls
// Reset on t. It returns the empty string if any error was found. This can only
// happen if an error-producing Transformer is passed to If.
func (t Transformer) String(s string) string {
s, _, err := transform.String(t, s)
if err != nil {
return ""
}
return s
}
// TODO:
// - Copy: copying strings and bytes in whole-rune units.
// - Validation (maybe)
// - Well-formed-ness (maybe)
const runeErrorString = string(utf8.RuneError)
// Remove returns a Transformer that removes runes r for which s.Contains(r).
// Illegal input bytes are replaced by RuneError before being passed to f.
func Remove(s Set) Transformer {
if f, ok := s.(setFunc); ok {
// This little trick cuts the running time of BenchmarkRemove for sets
// created by Predicate roughly in half.
// TODO: special-case RangeTables as well.
return Transformer{remove(f)}
}
return Transformer{remove(s.Contains)}
}
// TODO: remove transform.RemoveFunc.
type remove func(r rune) bool
func (remove) Reset() {}
// Span implements transform.Spanner.
func (t remove) Span(src []byte, atEOF bool) (n int, err error) {
for r, size := rune(0), 0; n < len(src); {
if r = rune(src[n]); r < utf8.RuneSelf {
size = 1
} else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
// Invalid rune.
if !atEOF && !utf8.FullRune(src[n:]) {
err = transform.ErrShortSrc
} else {
err = transform.ErrEndOfSpan
}
break
}
if t(r) {
err = transform.ErrEndOfSpan
break
}
n += size
}
return
}
// Transform implements transform.Transformer.
func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
for r, size := rune(0), 0; nSrc < len(src); {
if r = rune(src[nSrc]); r < utf8.RuneSelf {
size = 1
} else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 {
// Invalid rune.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break
}
// We replace illegal bytes with RuneError. Not doing so might
// otherwise turn a sequence of invalid UTF-8 into valid UTF-8.
// The resulting byte sequence may subsequently contain runes
// for which t(r) is true that were passed unnoticed.
if !t(utf8.RuneError) {
if nDst+3 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+0] = runeErrorString[0]
dst[nDst+1] = runeErrorString[1]
dst[nDst+2] = runeErrorString[2]
nDst += 3
}
nSrc++
continue
}
if t(r) {
nSrc += size
continue
}
if nDst+size > len(dst) {
err = transform.ErrShortDst
break
}
for i := 0; i < size; i++ {
dst[nDst] = src[nSrc]
nDst++
nSrc++
}
}
return
}
// Map returns a Transformer that maps the runes in the input using the given
// mapping. Illegal bytes in the input are converted to utf8.RuneError before
// being passed to the mapping func.
func Map(mapping func(rune) rune) Transformer {
return Transformer{mapper(mapping)}
}
type mapper func(rune) rune
func (mapper) Reset() {}
// Span implements transform.Spanner.
func (t mapper) Span(src []byte, atEOF bool) (n int, err error) {
for r, size := rune(0), 0; n < len(src); n += size {
if r = rune(src[n]); r < utf8.RuneSelf {
size = 1
} else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
// Invalid rune.
if !atEOF && !utf8.FullRune(src[n:]) {
err = transform.ErrShortSrc
} else {
err = transform.ErrEndOfSpan
}
break
}
if t(r) != r {
err = transform.ErrEndOfSpan
break
}
}
return n, err
}
// Transform implements transform.Transformer.
func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
var replacement rune
var b [utf8.UTFMax]byte
for r, size := rune(0), 0; nSrc < len(src); {
if r = rune(src[nSrc]); r < utf8.RuneSelf {
if replacement = t(r); replacement < utf8.RuneSelf {
if nDst == len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst] = byte(replacement)
nDst++
nSrc++
continue
}
size = 1
} else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 {
// Invalid rune.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break
}
if replacement = t(utf8.RuneError); replacement == utf8.RuneError {
if nDst+3 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+0] = runeErrorString[0]
dst[nDst+1] = runeErrorString[1]
dst[nDst+2] = runeErrorString[2]
nDst += 3
nSrc++
continue
}
} else if replacement = t(r); replacement == r {
if nDst+size > len(dst) {
err = transform.ErrShortDst
break
}
for i := 0; i < size; i++ {
dst[nDst] = src[nSrc]
nDst++
nSrc++
}
continue
}
n := utf8.EncodeRune(b[:], replacement)
if nDst+n > len(dst) {
err = transform.ErrShortDst
break
}
for i := 0; i < n; i++ {
dst[nDst] = b[i]
nDst++
}
nSrc += size
}
return
}
// ReplaceIllFormed returns a transformer that replaces all input bytes that are
// not part of a well-formed UTF-8 code sequence with utf8.RuneError.
func ReplaceIllFormed() Transformer {
return Transformer{&replaceIllFormed{}}
}
type replaceIllFormed struct{ transform.NopResetter }
func (t replaceIllFormed) Span(src []byte, atEOF bool) (n int, err error) {
for n < len(src) {
// ASCII fast path.
if src[n] < utf8.RuneSelf {
n++
continue
}
r, size := utf8.DecodeRune(src[n:])
// Look for a valid non-ASCII rune.
if r != utf8.RuneError || size != 1 {
n += size
continue
}
// Look for short source data.
if !atEOF && !utf8.FullRune(src[n:]) {
err = transform.ErrShortSrc
break
}
// We have an invalid rune.
err = transform.ErrEndOfSpan
break
}
return n, err
}
func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
for nSrc < len(src) {
// ASCII fast path.
if r := src[nSrc]; r < utf8.RuneSelf {
if nDst == len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst] = r
nDst++
nSrc++
continue
}
// Look for a valid non-ASCII rune.
if _, size := utf8.DecodeRune(src[nSrc:]); size != 1 {
if size != copy(dst[nDst:], src[nSrc:nSrc+size]) {
err = transform.ErrShortDst
break
}
nDst += size
nSrc += size
continue
}
// Look for short source data.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break
}
// We have an invalid rune.
if nDst+3 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+0] = runeErrorString[0]
dst[nDst+1] = runeErrorString[1]
dst[nDst+2] = runeErrorString[2]
nDst += 3
nSrc++
}
return nDst, nSrc, err
}
| vendor/golang.org/x/text/runes/runes.go | 0 | https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b | [
0.0002681009063962847,
0.00017161716823466122,
0.00015666383842471987,
0.0001699634303804487,
0.000017710484826238826
] |
{
"id": 2,
"code_window": [
"\t\tglog.Fatalf(\"Error building core storage: %v\", err)\n",
"\t}\n",
"\n",
"\tif c.ExtraConfig.EnableCoreControllers {\n",
"\t\tcontrollerName := \"bootstrap-controller\"\n",
"\t\tcoreClient := coreclient.NewForConfigOrDie(c.GenericConfig.LoopbackClientConfig)\n",
"\t\tbootstrapController := c.NewBootstrapController(legacyRESTStorage, coreClient, coreClient, coreClient)\n",
"\t\tm.GenericAPIServer.AddPostStartHookOrDie(controllerName, bootstrapController.PostStartHook)\n",
"\t\tm.GenericAPIServer.AddPreShutdownHookOrDie(controllerName, bootstrapController.PreShutdownHook)\n",
"\t}\n",
"\n",
"\tif err := m.GenericAPIServer.InstallLegacyAPIGroup(genericapiserver.DefaultLegacyAPIPrefix, &apiGroupInfo); err != nil {\n",
"\t\tglog.Fatalf(\"Error in registering group versions: %v\", err)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcontrollerName := \"bootstrap-controller\"\n",
"\tcoreClient := coreclient.NewForConfigOrDie(c.GenericConfig.LoopbackClientConfig)\n",
"\tbootstrapController := c.NewBootstrapController(legacyRESTStorage, coreClient, coreClient, coreClient)\n",
"\tm.GenericAPIServer.AddPostStartHookOrDie(controllerName, bootstrapController.PostStartHook)\n",
"\tm.GenericAPIServer.AddPreShutdownHookOrDie(controllerName, bootstrapController.PreShutdownHook)\n"
],
"file_path": "pkg/master/master.go",
"type": "replace",
"edit_start_line_idx": 370
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"fmt"
"strings"
"github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/admission/initializer"
admissionmetrics "k8s.io/apiserver/pkg/admission/metrics"
"k8s.io/apiserver/pkg/admission/plugin/initialization"
"k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle"
mutatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating"
validatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/validating"
apiserverapi "k8s.io/apiserver/pkg/apis/apiserver"
apiserverapiv1alpha1 "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1"
"k8s.io/apiserver/pkg/server"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
var configScheme = runtime.NewScheme()
func init() {
apiserverapi.AddToScheme(configScheme)
apiserverapiv1alpha1.AddToScheme(configScheme)
}
// AdmissionOptions holds the admission options
type AdmissionOptions struct {
// RecommendedPluginOrder holds an ordered list of plugin names we recommend to use by default
RecommendedPluginOrder []string
// DefaultOffPlugins is a set of plugin names that is disabled by default
DefaultOffPlugins sets.String
// EnablePlugins indicates plugins to be enabled passed through `--enable-admission-plugins`.
EnablePlugins []string
// DisablePlugins indicates plugins to be disabled passed through `--disable-admission-plugins`.
DisablePlugins []string
// ConfigFile is the file path with admission control configuration.
ConfigFile string
// Plugins contains all registered plugins.
Plugins *admission.Plugins
}
// NewAdmissionOptions creates a new instance of AdmissionOptions
// Note:
// In addition it calls RegisterAllAdmissionPlugins to register
// all generic admission plugins.
//
// Provides the list of RecommendedPluginOrder that holds sane values
// that can be used by servers that don't care about admission chain.
// Servers that do care can overwrite/append that field after creation.
func NewAdmissionOptions() *AdmissionOptions {
options := &AdmissionOptions{
Plugins: admission.NewPlugins(),
// This list is mix of mutating admission plugins and validating
// admission plugins. The apiserver always runs the validating ones
// after all the mutating ones, so their relative order in this list
// doesn't matter.
RecommendedPluginOrder: []string{lifecycle.PluginName, initialization.PluginName, mutatingwebhook.PluginName, validatingwebhook.PluginName},
DefaultOffPlugins: sets.NewString(initialization.PluginName),
}
server.RegisterAllAdmissionPlugins(options.Plugins)
return options
}
// AddFlags adds flags related to admission for a specific APIServer to the specified FlagSet
func (a *AdmissionOptions) AddFlags(fs *pflag.FlagSet) {
fs.StringSliceVar(&a.EnablePlugins, "enable-admission-plugins", a.EnablePlugins, ""+
"admission plugins that should be enabled in addition to default enabled ones. "+
"Comma-delimited list of admission plugins: "+strings.Join(a.Plugins.Registered(), ", ")+". "+
"The order of plugins in this flag does not matter.")
fs.StringSliceVar(&a.DisablePlugins, "disable-admission-plugins", a.DisablePlugins, ""+
"admission plugins that should be disabled although they are in the default enabled plugins list. "+
"Comma-delimited list of admission plugins: "+strings.Join(a.Plugins.Registered(), ", ")+". "+
"The order of plugins in this flag does not matter.")
fs.StringVar(&a.ConfigFile, "admission-control-config-file", a.ConfigFile,
"File with admission control configuration.")
}
// ApplyTo adds the admission chain to the server configuration.
// In case admission plugin names were not provided by a custer-admin they will be prepared from the recommended/default values.
// In addition the method lazily initializes a generic plugin that is appended to the list of pluginInitializers
// note this method uses:
// genericconfig.Authorizer
func (a *AdmissionOptions) ApplyTo(
c *server.Config,
informers informers.SharedInformerFactory,
kubeAPIServerClientConfig *rest.Config,
scheme *runtime.Scheme,
pluginInitializers ...admission.PluginInitializer,
) error {
if a == nil {
return nil
}
// Admission need scheme to construct admission initializer.
if scheme == nil {
return fmt.Errorf("admission depends on a scheme, it cannot be nil")
}
// Admission depends on CoreAPI to set SharedInformerFactory and ClientConfig.
if informers == nil {
return fmt.Errorf("admission depends on a Kubernetes core API shared informer, it cannot be nil")
}
pluginNames := a.enabledPluginNames()
pluginsConfigProvider, err := admission.ReadAdmissionConfiguration(pluginNames, a.ConfigFile, configScheme)
if err != nil {
return fmt.Errorf("failed to read plugin config: %v", err)
}
clientset, err := kubernetes.NewForConfig(kubeAPIServerClientConfig)
if err != nil {
return err
}
genericInitializer := initializer.New(clientset, informers, c.Authorization.Authorizer, scheme)
initializersChain := admission.PluginInitializers{}
pluginInitializers = append(pluginInitializers, genericInitializer)
initializersChain = append(initializersChain, pluginInitializers...)
admissionChain, err := a.Plugins.NewFromPlugins(pluginNames, pluginsConfigProvider, initializersChain, admission.DecoratorFunc(admissionmetrics.WithControllerMetrics))
if err != nil {
return err
}
c.AdmissionControl = admissionmetrics.WithStepMetrics(admissionChain)
return nil
}
// Validate verifies flags passed to AdmissionOptions.
func (a *AdmissionOptions) Validate() []error {
if a == nil {
return nil
}
errs := []error{}
registeredPlugins := sets.NewString(a.Plugins.Registered()...)
for _, name := range a.EnablePlugins {
if !registeredPlugins.Has(name) {
errs = append(errs, fmt.Errorf("enable-admission-plugins plugin %q is unknown", name))
}
}
for _, name := range a.DisablePlugins {
if !registeredPlugins.Has(name) {
errs = append(errs, fmt.Errorf("disable-admission-plugins plugin %q is unknown", name))
}
}
enablePlugins := sets.NewString(a.EnablePlugins...)
disablePlugins := sets.NewString(a.DisablePlugins...)
if len(enablePlugins.Intersection(disablePlugins).List()) > 0 {
errs = append(errs, fmt.Errorf("%v in enable-admission-plugins and disable-admission-plugins "+
"overlapped", enablePlugins.Intersection(disablePlugins).List()))
}
// Verify RecommendedPluginOrder.
recommendPlugins := sets.NewString(a.RecommendedPluginOrder...)
intersections := registeredPlugins.Intersection(recommendPlugins)
if !intersections.Equal(recommendPlugins) {
// Developer error, this should never run in.
errs = append(errs, fmt.Errorf("plugins %v in RecommendedPluginOrder are not registered",
recommendPlugins.Difference(intersections).List()))
}
if !intersections.Equal(registeredPlugins) {
// Developer error, this should never run in.
errs = append(errs, fmt.Errorf("plugins %v registered are not in RecommendedPluginOrder",
registeredPlugins.Difference(intersections).List()))
}
return errs
}
// enabledPluginNames makes use of RecommendedPluginOrder, DefaultOffPlugins,
// EnablePlugins, DisablePlugins fields
// to prepare a list of ordered plugin names that are enabled.
func (a *AdmissionOptions) enabledPluginNames() []string {
allOffPlugins := append(a.DefaultOffPlugins.List(), a.DisablePlugins...)
disabledPlugins := sets.NewString(allOffPlugins...)
enabledPlugins := sets.NewString(a.EnablePlugins...)
disabledPlugins = disabledPlugins.Difference(enabledPlugins)
orderedPlugins := []string{}
for _, plugin := range a.RecommendedPluginOrder {
if !disabledPlugins.Has(plugin) {
orderedPlugins = append(orderedPlugins, plugin)
}
}
return orderedPlugins
}
| staging/src/k8s.io/apiserver/pkg/server/options/admission.go | 0 | https://github.com/kubernetes/kubernetes/commit/ee5bc39c4c94bf361ffc5870ae71971f152b0a2b | [
0.0007499236962758005,
0.00021760509116575122,
0.00016045414668042213,
0.0001690268109086901,
0.0001394457503920421
] |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.