hunk (dict) | file (string, length 0–11.8M) | file_path (string, length 2–234) | label (int64, 0–1) | commit_url (string, length 74–103) | dependency_score (sequence, length 5)
---|---|---|---|---|---|
{
"id": 2,
"code_window": [
"}\n",
"\n",
"// Update statistics from http request and response data\n",
"func (st *HTTPStats) updateStats(api string, r *http.Request, w *logger.ResponseWriter, durationSecs float64) {\n",
"\t// A successful request has a 2xx response code\n",
"\tsuccessReq := (w.StatusCode >= 200 && w.StatusCode < 300)\n",
"\n",
"\tif !strings.HasSuffix(r.URL.Path, prometheusMetricsPath) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (st *HTTPStats) updateStats(api string, r *http.Request, w *logger.ResponseWriter) {\n"
],
"file_path": "cmd/http-stats.go",
"type": "replace",
"edit_start_line_idx": 163
} | /*
* MinIO Cloud Storage, (C) 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"net/http"
"strings"
"sync"
"sync/atomic"
"github.com/minio/minio/cmd/logger"
"github.com/prometheus/client_golang/prometheus"
)
// ConnStats - Network statistics
// Count total input/output transferred bytes during
// the server's life.
type ConnStats struct {
totalInputBytes uint64
totalOutputBytes uint64
s3InputBytes uint64
s3OutputBytes uint64
}
// Increase total input bytes
func (s *ConnStats) incInputBytes(n int) {
atomic.AddUint64(&s.totalInputBytes, uint64(n))
}
// Increase total output bytes
func (s *ConnStats) incOutputBytes(n int) {
atomic.AddUint64(&s.totalOutputBytes, uint64(n))
}
// Return total input bytes
func (s *ConnStats) getTotalInputBytes() uint64 {
return atomic.LoadUint64(&s.totalInputBytes)
}
// Return total output bytes
func (s *ConnStats) getTotalOutputBytes() uint64 {
return atomic.LoadUint64(&s.totalOutputBytes)
}
// Increase outbound input bytes
func (s *ConnStats) incS3InputBytes(n int) {
atomic.AddUint64(&s.s3InputBytes, uint64(n))
}
// Increase outbound output bytes
func (s *ConnStats) incS3OutputBytes(n int) {
atomic.AddUint64(&s.s3OutputBytes, uint64(n))
}
// Return outbound input bytes
func (s *ConnStats) getS3InputBytes() uint64 {
return atomic.LoadUint64(&s.s3InputBytes)
}
// Return outbound output bytes
func (s *ConnStats) getS3OutputBytes() uint64 {
return atomic.LoadUint64(&s.s3OutputBytes)
}
// Return connection stats (total input/output bytes and total s3 input/output bytes)
func (s *ConnStats) toServerConnStats() ServerConnStats {
return ServerConnStats{
TotalInputBytes: s.getTotalInputBytes(),
TotalOutputBytes: s.getTotalOutputBytes(),
S3InputBytes: s.getS3InputBytes(),
S3OutputBytes: s.getS3OutputBytes(),
}
}
// Prepare new ConnStats structure
func newConnStats() *ConnStats {
return &ConnStats{}
}
// HTTPAPIStats holds statistics information about
// a given API in the requests.
type HTTPAPIStats struct {
apiStats map[string]int
sync.RWMutex
}
// Inc increments the api stats counter.
func (stats *HTTPAPIStats) Inc(api string) {
if stats == nil {
return
}
stats.Lock()
defer stats.Unlock()
if stats.apiStats == nil {
stats.apiStats = make(map[string]int)
}
stats.apiStats[api]++
}
// Dec decrements the api stats counter.
func (stats *HTTPAPIStats) Dec(api string) {
if stats == nil {
return
}
stats.Lock()
defer stats.Unlock()
if val, ok := stats.apiStats[api]; ok && val > 0 {
stats.apiStats[api]--
}
}
// Load returns the recorded stats.
func (stats *HTTPAPIStats) Load() map[string]int {
stats.Lock()
defer stats.Unlock()
var apiStats = make(map[string]int, len(stats.apiStats))
for k, v := range stats.apiStats {
apiStats[k] = v
}
return apiStats
}
// HTTPStats holds statistics information about
// HTTP requests made by all clients
type HTTPStats struct {
currentS3Requests HTTPAPIStats
totalS3Requests HTTPAPIStats
totalS3Errors HTTPAPIStats
}
// Converts http stats into struct to be sent back to the client.
func (st *HTTPStats) toServerHTTPStats() ServerHTTPStats {
serverStats := ServerHTTPStats{}
serverStats.CurrentS3Requests = ServerHTTPAPIStats{
APIStats: st.currentS3Requests.Load(),
}
serverStats.TotalS3Requests = ServerHTTPAPIStats{
APIStats: st.totalS3Requests.Load(),
}
serverStats.TotalS3Errors = ServerHTTPAPIStats{
APIStats: st.totalS3Errors.Load(),
}
return serverStats
}
// Update statistics from http request and response data
func (st *HTTPStats) updateStats(api string, r *http.Request, w *logger.ResponseWriter, durationSecs float64) {
// A successful request has a 2xx response code
successReq := (w.StatusCode >= 200 && w.StatusCode < 300)
if !strings.HasSuffix(r.URL.Path, prometheusMetricsPath) {
st.totalS3Requests.Inc(api)
if !successReq && w.StatusCode != 0 {
st.totalS3Errors.Inc(api)
}
}
if r.Method == http.MethodGet {
// Increment the prometheus http request response histogram with appropriate label
httpRequestsDuration.With(prometheus.Labels{"api": api}).Observe(durationSecs)
}
}
// Prepare new HTTPStats structure
func newHTTPStats() *HTTPStats {
return &HTTPStats{}
}
| cmd/http-stats.go | 1 | https://github.com/minio/minio/commit/3a0082f0f18abc158e4ee6d4ce02d012a36a0901 | [
0.9983731508255005,
0.2665843963623047,
0.00017292141274083406,
0.027087830007076263,
0.3889082074165344
] |
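Stitching together the `after_edit` hunks for this commit (id 2 above and the id 3 hunk shown with later rows), the post-edit `updateStats` in `cmd/http-stats.go` would read roughly as follows. This is a sketch, not the verbatim commit: the dropped `durationSecs` parameter and the unconditional histogram update come straight from the hunks, but the assumption that `logger.ResponseWriter` exposes `TimeToFirstByte` as a `time.Duration` is inferred.

```
// Update statistics from http request and response data
func (st *HTTPStats) updateStats(api string, r *http.Request, w *logger.ResponseWriter) {
	// A successful request has a 2xx response code
	successReq := (w.StatusCode >= 200 && w.StatusCode < 300)

	if !strings.HasSuffix(r.URL.Path, prometheusMetricsPath) {
		st.totalS3Requests.Inc(api)
		if !successReq && w.StatusCode != 0 {
			st.totalS3Errors.Inc(api)
		}
	}

	// Increment the prometheus http request response histogram with appropriate label
	httpRequestsDuration.With(prometheus.Labels{"api": api}).Observe(w.TimeToFirstByte.Seconds())
}
```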
{
"id": 2,
"code_window": [
"}\n",
"\n",
"// Update statistics from http request and response data\n",
"func (st *HTTPStats) updateStats(api string, r *http.Request, w *logger.ResponseWriter, durationSecs float64) {\n",
"\t// A successful request has a 2xx response code\n",
"\tsuccessReq := (w.StatusCode >= 200 && w.StatusCode < 300)\n",
"\n",
"\tif !strings.HasSuffix(r.URL.Path, prometheusMetricsPath) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (st *HTTPStats) updateStats(api string, r *http.Request, w *logger.ResponseWriter) {\n"
],
"file_path": "cmd/http-stats.go",
"type": "replace",
"edit_start_line_idx": 163
} | // +build linux
/*
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package certs
import "github.com/rjeczalik/notify"
var (
// eventWrite contains the notify events that will cause a write
eventWrite = []notify.Event{notify.InCloseWrite}
)
| pkg/certs/event_linux.go | 0 | https://github.com/minio/minio/commit/3a0082f0f18abc158e4ee6d4ce02d012a36a0901 | [
0.00017458594811614603,
0.00017099380784202367,
0.0001677949185250327,
0.0001706005714368075,
0.000002786335699056508
] |
{
"id": 2,
"code_window": [
"}\n",
"\n",
"// Update statistics from http request and response data\n",
"func (st *HTTPStats) updateStats(api string, r *http.Request, w *logger.ResponseWriter, durationSecs float64) {\n",
"\t// A successful request has a 2xx response code\n",
"\tsuccessReq := (w.StatusCode >= 200 && w.StatusCode < 300)\n",
"\n",
"\tif !strings.HasSuffix(r.URL.Path, prometheusMetricsPath) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (st *HTTPStats) updateStats(api string, r *http.Request, w *logger.ResponseWriter) {\n"
],
"file_path": "cmd/http-stats.go",
"type": "replace",
"edit_start_line_idx": 163
} | # AssumeRoleWithClientGrants [](https://slack.min.io)
**Table of Contents**
- [Introduction](#introduction)
- [API Request Parameters](#api-request-parameters)
- [Token](#token)
- [Version](#version)
- [DurationSeconds](#durationseconds)
- [Policy](#policy)
- [Response Elements](#response-elements)
- [Errors](#errors)
- [Sample `POST` Request](#sample-post-request)
- [Sample Response](#sample-response)
- [Using ClientGrants API](#using-clientgrants-api)
- [Explore Further](#explore-further)
## Introduction
Returns a set of temporary security credentials for applications/clients that have been authenticated through client credential grants provided by an identity provider. Example providers include Keycloak, Okta, etc.
Calling AssumeRoleWithClientGrants does not require the use of MinIO default credentials. A client application can therefore be distributed that requests temporary security credentials without embedding MinIO default credentials. Instead, the identity of the caller is validated by using a JWT access token from the identity provider. The temporary security credentials returned by this API consist of an access key, a secret key, and a security token. Applications can use these temporary security credentials to sign calls to MinIO API operations.
By default, the temporary security credentials created by AssumeRoleWithClientGrants last for one hour. However, use the optional DurationSeconds parameter to specify the duration of the credentials. This value can range from 900 seconds (15 minutes) up to the maximum session duration of 7 days.
## API Request Parameters
### Token
The OAuth 2.0 access token provided by the identity provider. The application must obtain this token by authenticating with the identity provider using client credential grants before making an AssumeRoleWithClientGrants call.
| Params | Value |
| :-- | :-- |
| *Type* | *String* |
| *Length Constraints* | *Minimum length of 4. Maximum length of 2048.* |
| *Required* | *Yes* |
### Version
Indicates STS API version information, the only supported value is '2011-06-15'. This value is borrowed from AWS STS API documentation for compatibility reasons.
| Params | Value |
| :-- | :-- |
| *Type* | *String* |
| *Required* | *Yes* |
### DurationSeconds
The duration, in seconds. The value can range from 900 seconds (15 minutes) up to 7 days. If the value is higher than this maximum, the operation fails. By default, the value is set to 3600 seconds. If no *DurationSeconds* is specified, the expiry is taken from the *Token* itself.
| Params | Value |
| :-- | :-- |
| *Type* | *Integer* |
| *Valid Range* | *Minimum value of 900. Maximum value of 604800.* |
| *Required* | *No* |
### Policy
An IAM policy in JSON format that you want to use as an inline session policy. This parameter is optional. Passing a policy to this operation returns new temporary credentials whose permissions are the intersection of the canned policy being assumed and the policy set here. You cannot use this parameter to grant more permissions than those allowed by the canned policy being assumed.
| Params | Value |
| :-- | :-- |
| *Type* | *String* |
| *Valid Range* | *Minimum length of 1. Maximum length of 2048.* |
| *Required* | *No* |
### Response Elements
XML response for this API is similar to [AWS STS AssumeRoleWithWebIdentity](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html#API_AssumeRoleWithWebIdentity_ResponseElements)
### Errors
XML error response for this API is similar to [AWS STS AssumeRoleWithWebIdentity](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html#API_AssumeRoleWithWebIdentity_Errors)
## Sample `POST` Request
```
http://minio.cluster:9000?Action=AssumeRoleWithClientGrants&DurationSeconds=3600&Token=eyJ4NXQiOiJOVEF4Wm1NeE5ETXlaRGczTVRVMVpHTTBNekV6T0RKaFpXSTRORE5sWkRVMU9HRmtOakZpTVEiLCJraWQiOiJOVEF4Wm1NeE5ETXlaRGczTVRVMVpHTTBNekV6T0RKaFpXSTRORE5sWkRVMU9HRmtOakZpTVEiLCJhbGciOiJSUzI1NiJ9.eyJhdWQiOiJQb0VnWFA2dVZPNDVJc0VOUm5nRFhqNUF1NVlhIiwiYXpwIjoiUG9FZ1hQNnVWTzQ1SXNFTlJuZ0RYajVBdTVZYSIsImlzcyI6Imh0dHBzOlwvXC9sb2NhbGhvc3Q6OTQ0M1wvb2F1dGgyXC90b2tlbiIsImV4cCI6MTU0MTgwOTU4MiwiaWF0IjoxNTQxODA1OTgyLCJqdGkiOiI2Y2YyMGIwZS1lNGZmLTQzZmQtYTdiYS1kYTc3YTE3YzM2MzYifQ.Jm29jPliRvrK6Os34nSK3rhzIYLFjE__zdVGNng3uGKXGKzP3We_i6NPnhA0szJXMOKglXzUF1UgSz8MctbaxFS8XDusQPVe4LkB_45hwBm6TmBxzui911nt-1RbBLN_jZIlvl2lPrbTUH5hSn9kEkph6seWanTNQpz9tNEoVa6R_OX3kpJqxe8tLQUWw453A1JTwFNhdHa6-f1K8_Q_eEZ_4gOYINQ9t_fhTibdbkXZkJQFLop-Jwoybi9s4nwQU_dATocgcufq5eCeNItQeleT-23lGxIz0X7CiJrJynYLdd-ER0F77SumqEb5iCxhxuf4H7dovwd1kAmyKzLxpw&Version=2011-06-15
```
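For readers who prefer code to a raw URL, here is a minimal Go sketch that issues the same request. The action, version, and parameter names come directly from the sample above; the endpoint and `accessToken` are placeholders, and response parsing is omitted.

```
package main

import (
	"net/http"
	"net/url"
)

// assumeRoleWithClientGrants POSTs the documented STS query to a MinIO endpoint.
func assumeRoleWithClientGrants(stsEndpoint, accessToken string) (*http.Response, error) {
	v := url.Values{}
	v.Set("Action", "AssumeRoleWithClientGrants")
	v.Set("Version", "2011-06-15")
	v.Set("DurationSeconds", "3600")
	v.Set("Token", accessToken)
	// As in the sample request above, the parameters travel in the query string.
	return http.Post(stsEndpoint+"?"+v.Encode(), "application/x-www-form-urlencoded", nil)
}
```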
## Sample Response
```
<?xml version="1.0" encoding="UTF-8"?>
<AssumeRoleWithClientGrantsResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
<AssumeRoleWithClientGrantsResult>
<AssumedRoleUser>
<Arn/>
<AssumeRoleId/>
</AssumedRoleUser>
<Credentials>
<AccessKeyId>Y4RJU1RNFGK48LGO9I2S</AccessKeyId>
<SecretAccessKey>sYLRKS1Z7hSjluf6gEbb9066hnx315wHTiACPAjg</SecretAccessKey>
<Expiration>2019-08-08T20:26:12Z</Expiration>
<SessionToken>eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJhY2Nlc3NLZXkiOiJZNFJKVTFSTkZHSzQ4TEdPOUkyUyIsImF1ZCI6IlBvRWdYUDZ1Vk80NUlzRU5SbmdEWGo1QXU1WWEiLCJhenAiOiJQb0VnWFA2dVZPNDVJc0VOUm5nRFhqNUF1NVlhIiwiZXhwIjoxNTQxODExMDcxLCJpYXQiOjE1NDE4MDc0NzEsImlzcyI6Imh0dHBzOi8vbG9jYWxob3N0Ojk0NDMvb2F1dGgyL3Rva2VuIiwianRpIjoiYTBiMjc2MjktZWUxYS00M2JmLTg3MzktZjMzNzRhNGNkYmMwIn0.ewHqKVFTaP-j_kgZrcOEKroNUjk10GEp8bqQjxBbYVovV0nHO985VnRESFbcT6XMDDKHZiWqN2vi_ETX_u3Q-w</SessionToken>
</Credentials>
</AssumeRoleWithClientGrantsResult>
<ResponseMetadata/>
</AssumeRoleWithClientGrantsResponse>
```
## Using ClientGrants API
```
export MINIO_ACCESS_KEY=minio
export MINIO_SECRET_KEY=minio123
export MINIO_IDENTITY_OPENID_CONFIG_URL=http://localhost:8080/auth/realms/demo/.well-known/openid-configuration
export MINIO_IDENTITY_OPENID_CLIENT_ID="843351d4-1080-11ea-aa20-271ecba3924a"
minio server /mnt/export
```
Testing with an example
> To obtain the client ID and secret, follow the [Keycloak configuration documentation](https://github.com/minio/minio/blob/master/docs/sts/keycloak.md)
```
$ go run client-grants.go -cid PoEgXP6uVO45IsENRngDXj5Au5Ya -csec eKsw6z8CtOJVBtrOWvhRWL4TUCga
##### Credentials
{
"accessKey": "NUIBORZYTV2HG2BMRSXR",
"secretKey": "qQlP5O7CFPc5m5IXf1vYhuVTFj7BRVJqh0FqZ86S",
"expiration": "2018-08-21T17:10:29-07:00",
"sessionToken": "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJhY2Nlc3NLZXkiOiJOVUlCT1JaWVRWMkhHMkJNUlNYUiIsImF1ZCI6IlBvRWdYUDZ1Vk80NUlzRU5SbmdEWGo1QXU1WWEiLCJhenAiOiJQb0VnWFA2dVZPNDVJc0VOUm5nRFhqNUF1NVlhIiwiZXhwIjoxNTM0ODk2NjI5LCJpYXQiOjE1MzQ4OTMwMjksImlzcyI6Imh0dHBzOi8vbG9jYWxob3N0Ojk0NDMvb2F1dGgyL3Rva2VuIiwianRpIjoiNjY2OTZjZTctN2U1Ny00ZjU5LWI0MWQtM2E1YTMzZGZiNjA4In0.eJONnVaSVHypiXKEARSMnSKgr-2mlC2Sr4fEGJitLcJF_at3LeNdTHv0_oHsv6ZZA3zueVGgFlVXMlREgr9LXA"
}
```
## Explore Further
- [MinIO Admin Complete Guide](https://docs.min.io/docs/minio-admin-complete-guide.html)
- [The MinIO documentation website](https://docs.min.io)
| docs/sts/client-grants.md | 0 | https://github.com/minio/minio/commit/3a0082f0f18abc158e4ee6d4ce02d012a36a0901 | [
0.0006486541824415326,
0.00022175272169988602,
0.00016511707508470863,
0.00016866052465047687,
0.00013163431140128523
] |
{
"id": 2,
"code_window": [
"}\n",
"\n",
"// Update statistics from http request and response data\n",
"func (st *HTTPStats) updateStats(api string, r *http.Request, w *logger.ResponseWriter, durationSecs float64) {\n",
"\t// A successful request has a 2xx response code\n",
"\tsuccessReq := (w.StatusCode >= 200 && w.StatusCode < 300)\n",
"\n",
"\tif !strings.HasSuffix(r.URL.Path, prometheusMetricsPath) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (st *HTTPStats) updateStats(api string, r *http.Request, w *logger.ResponseWriter) {\n"
],
"file_path": "cmd/http-stats.go",
"type": "replace",
"edit_start_line_idx": 163
} | /*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package condition
// JWT claims supported substitutions.
// https://www.iana.org/assignments/jwt/jwt.xhtml#claims
const (
// JWTSub - JWT subject claim substitution.
JWTSub Key = "jwt:sub"
// JWTIss issuer claim substitution.
JWTIss Key = "jwt:iss"
// JWTAud audience claim substitution.
JWTAud Key = "jwt:aud"
// JWTJti JWT unique identifier claim substitution.
JWTJti Key = "jwt:jti"
JWTUpn Key = "jwt:upn"
JWTName Key = "jwt:name"
JWTGroups Key = "jwt:groups"
JWTGivenName Key = "jwt:given_name"
JWTFamilyName Key = "jwt:family_name"
JWTMiddleName Key = "jwt:middle_name"
JWTNickName Key = "jwt:nickname"
JWTPrefUsername Key = "jwt:preferred_username"
JWTProfile Key = "jwt:profile"
JWTPicture Key = "jwt:picture"
JWTWebsite Key = "jwt:website"
JWTEmail Key = "jwt:email"
JWTGender Key = "jwt:gender"
JWTBirthdate Key = "jwt:birthdate"
JWTPhoneNumber Key = "jwt:phone_number"
JWTAddress Key = "jwt:address"
JWTScope Key = "jwt:scope"
JWTClientID Key = "jwt:client_id"
)
// JWTKeys - Supported JWT keys; a non-exhaustive list, please
// expand as new claims are standardized.
var JWTKeys = []Key{
JWTSub,
JWTIss,
JWTAud,
JWTJti,
JWTName,
JWTUpn,
JWTGroups,
JWTGivenName,
JWTFamilyName,
JWTMiddleName,
JWTNickName,
JWTPrefUsername,
JWTProfile,
JWTPicture,
JWTWebsite,
JWTEmail,
JWTGender,
JWTBirthdate,
JWTPhoneNumber,
JWTAddress,
JWTScope,
JWTClientID,
}
| pkg/bucket/policy/condition/jwt.go | 0 | https://github.com/minio/minio/commit/3a0082f0f18abc158e4ee6d4ce02d012a36a0901 | [
0.00017281690088566393,
0.00017041372484527528,
0.00016764570318628103,
0.00017047670553438365,
0.0000017205245512741385
] |
{
"id": 3,
"code_window": [
"\t\t\tst.totalS3Errors.Inc(api)\n",
"\t\t}\n",
"\t}\n",
"\n",
"\tif r.Method == http.MethodGet {\n",
"\t\t// Increment the prometheus http request response histogram with appropriate label\n",
"\t\thttpRequestsDuration.With(prometheus.Labels{\"api\": api}).Observe(durationSecs)\n",
"\t}\n",
"}\n",
"\n",
"// Prepare new HTTPStats structure\n",
"func newHTTPStats() *HTTPStats {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// Increment the prometheus http request response histogram with appropriate label\n",
"\thttpRequestsDuration.With(prometheus.Labels{\"api\": api}).Observe(w.TimeToFirstByte.Seconds())\n"
],
"file_path": "cmd/http-stats.go",
"type": "replace",
"edit_start_line_idx": 174
} | /*
* MinIO Cloud Storage, (C) 2015-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net"
"net/http"
"net/url"
"regexp"
"strings"
"time"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/handlers"
"github.com/minio/minio/pkg/madmin"
)
const (
copyDirective = "COPY"
replaceDirective = "REPLACE"
)
// Parses location constraint from the incoming reader.
func parseLocationConstraint(r *http.Request) (location string, s3Error APIErrorCode) {
// If the request has no body with content-length set to 0,
// we do not have to validate location constraint. Bucket will
// be created at default region.
locationConstraint := createBucketLocationConfiguration{}
err := xmlDecoder(r.Body, &locationConstraint, r.ContentLength)
if err != nil && r.ContentLength != 0 {
logger.LogIf(GlobalContext, err)
// Treat all other failures as XML parsing errors.
return "", ErrMalformedXML
} // else for both err as nil or io.EOF
location = locationConstraint.Location
if location == "" {
location = globalServerRegion
}
return location, ErrNone
}
// Validates input location is same as configured region
// of MinIO server.
func isValidLocation(location string) bool {
return globalServerRegion == "" || globalServerRegion == location
}
// Supported headers that need to be extracted.
var supportedHeaders = []string{
"content-type",
"cache-control",
"content-language",
"content-encoding",
"content-disposition",
xhttp.AmzStorageClass,
xhttp.AmzObjectTagging,
"expires",
xhttp.AmzBucketReplicationStatus,
// Add more supported headers here.
}
// isDirectiveValid - check if tagging-directive is valid.
func isDirectiveValid(v string) bool {
// Check if set metadata-directive is valid.
return isDirectiveCopy(v) || isDirectiveReplace(v)
}
// Check if the directive COPY is requested.
func isDirectiveCopy(value string) bool {
// By default if directive is not set we
// treat it as 'COPY' this function returns true.
return value == copyDirective || value == ""
}
// Check if the directive REPLACE is requested.
func isDirectiveReplace(value string) bool {
return value == replaceDirective
}
// userMetadataKeyPrefixes contains the prefixes of user-defined metadata keys.
// All values stored with a key starting with one of the following prefixes
// must be extracted from the header.
var userMetadataKeyPrefixes = []string{
"X-Amz-Meta-",
"X-Minio-Meta-",
"x-amz-meta-",
"x-minio-meta-",
}
// extractMetadata extracts metadata from HTTP header and HTTP queryString.
func extractMetadata(ctx context.Context, r *http.Request) (metadata map[string]string, err error) {
query := r.URL.Query()
header := r.Header
metadata = make(map[string]string)
// Extract all query values.
err = extractMetadataFromMap(ctx, query, metadata)
if err != nil {
return nil, err
}
// Extract all header values.
err = extractMetadataFromMap(ctx, header, metadata)
if err != nil {
return nil, err
}
// Set content-type to default value if it is not set.
if _, ok := metadata[strings.ToLower(xhttp.ContentType)]; !ok {
metadata[strings.ToLower(xhttp.ContentType)] = "application/octet-stream"
}
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
for k := range metadata {
if strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentLength) || strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentMD5) {
delete(metadata, k)
}
}
if contentEncoding, ok := metadata[strings.ToLower(xhttp.ContentEncoding)]; ok {
contentEncoding = trimAwsChunkedContentEncoding(contentEncoding)
if contentEncoding != "" {
// Make sure to trim and save the content-encoding
// parameter for a streaming signature which is set
// to a custom value for example: "aws-chunked,gzip".
metadata[strings.ToLower(xhttp.ContentEncoding)] = contentEncoding
} else {
// Trimmed content encoding is empty when the header
// value is set to "aws-chunked" only.
// Make sure to delete the content-encoding parameter
// for a streaming signature which is set to value
// for example: "aws-chunked"
delete(metadata, strings.ToLower(xhttp.ContentEncoding))
}
}
// Success.
return metadata, nil
}
// extractMetadataFromMap extracts metadata from map values.
func extractMetadataFromMap(ctx context.Context, v map[string][]string, m map[string]string) error {
if v == nil {
logger.LogIf(ctx, errInvalidArgument)
return errInvalidArgument
}
// Save all supported headers.
for _, supportedHeader := range supportedHeaders {
if value, ok := v[http.CanonicalHeaderKey(supportedHeader)]; ok {
m[supportedHeader] = value[0]
} else if value, ok := v[supportedHeader]; ok {
m[supportedHeader] = value[0]
}
}
for key := range v {
for _, prefix := range userMetadataKeyPrefixes {
if !strings.HasPrefix(strings.ToLower(key), strings.ToLower(prefix)) {
continue
}
value, ok := v[key]
if ok {
m[key] = strings.Join(value, ",")
break
}
}
}
return nil
}
// The Query string for the redirect URL the client is
// redirected on successful upload.
func getRedirectPostRawQuery(objInfo ObjectInfo) string {
redirectValues := make(url.Values)
redirectValues.Set("bucket", objInfo.Bucket)
redirectValues.Set("key", objInfo.Name)
redirectValues.Set("etag", "\""+objInfo.ETag+"\"")
return redirectValues.Encode()
}
// Returns access credentials in the request Authorization header.
func getReqAccessCred(r *http.Request, region string) (cred auth.Credentials) {
cred, _, _ = getReqAccessKeyV4(r, region, serviceS3)
if cred.AccessKey == "" {
cred, _, _ = getReqAccessKeyV2(r)
}
if cred.AccessKey == "" {
claims, owner, _ := webRequestAuthenticate(r)
if owner {
return globalActiveCred
}
if claims != nil {
cred, _ = globalIAMSys.GetUser(claims.AccessKey)
}
}
return cred
}
// Extract request params to be sent with event notification.
func extractReqParams(r *http.Request) map[string]string {
if r == nil {
return nil
}
region := globalServerRegion
cred := getReqAccessCred(r, region)
// Success.
return map[string]string{
"region": region,
"accessKey": cred.AccessKey,
"sourceIPAddress": handlers.GetSourceIP(r),
// Add more fields here.
}
}
// Extract response elements to be sent with event notification.
func extractRespElements(w http.ResponseWriter) map[string]string {
if w == nil {
return map[string]string{}
}
return map[string]string{
"requestId": w.Header().Get(xhttp.AmzRequestID),
"content-length": w.Header().Get(xhttp.ContentLength),
// Add more fields here.
}
}
// Trims away `aws-chunked` from the content-encoding header if present.
// Streaming signature clients can have custom content-encoding such as
// `aws-chunked,gzip` here we need to only save `gzip`.
// For more refer http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
func trimAwsChunkedContentEncoding(contentEnc string) (trimmedContentEnc string) {
if contentEnc == "" {
return contentEnc
}
var newEncs []string
for _, enc := range strings.Split(contentEnc, ",") {
if enc != streamingContentEncoding {
newEncs = append(newEncs, enc)
}
}
return strings.Join(newEncs, ",")
}
// Validate form field size for s3 specification requirement.
func validateFormFieldSize(ctx context.Context, formValues http.Header) error {
// Iterate over form values
for k := range formValues {
// Check if value's field exceeds S3 limit
if int64(len(formValues.Get(k))) > maxFormFieldSize {
logger.LogIf(ctx, errSizeUnexpected)
return errSizeUnexpected
}
}
// Success.
return nil
}
// Extract form fields and file data from an HTTP POST Policy
func extractPostPolicyFormValues(ctx context.Context, form *multipart.Form) (filePart io.ReadCloser, fileName string, fileSize int64, formValues http.Header, err error) {
/// HTML Form values
fileName = ""
// Canonicalize the form values into http.Header.
formValues = make(http.Header)
for k, v := range form.Value {
formValues[http.CanonicalHeaderKey(k)] = v
}
// Validate form values.
if err = validateFormFieldSize(ctx, formValues); err != nil {
return nil, "", 0, nil, err
}
// this means that filename="" was not specified for file key and Go has
// an ugly way of handling this situation. Refer here
// https://golang.org/src/mime/multipart/formdata.go#L61
if len(form.File) == 0 {
var b = &bytes.Buffer{}
for _, v := range formValues["File"] {
b.WriteString(v)
}
fileSize = int64(b.Len())
filePart = ioutil.NopCloser(b)
return filePart, fileName, fileSize, formValues, nil
}
// Iterate until we find a valid File field and break
for k, v := range form.File {
canonicalFormName := http.CanonicalHeaderKey(k)
if canonicalFormName == "File" {
if len(v) == 0 {
logger.LogIf(ctx, errInvalidArgument)
return nil, "", 0, nil, errInvalidArgument
}
// Fetch fileHeader which has the uploaded file information
fileHeader := v[0]
// Set filename
fileName = fileHeader.Filename
// Open the uploaded part
filePart, err = fileHeader.Open()
if err != nil {
logger.LogIf(ctx, err)
return nil, "", 0, nil, err
}
// Compute file size
fileSize, err = filePart.(io.Seeker).Seek(0, 2)
if err != nil {
logger.LogIf(ctx, err)
return nil, "", 0, nil, err
}
// Reset Seek to the beginning
_, err = filePart.(io.Seeker).Seek(0, 0)
if err != nil {
logger.LogIf(ctx, err)
return nil, "", 0, nil, err
}
// File found and ready for reading
break
}
}
return filePart, fileName, fileSize, formValues, nil
}
// Log headers and body.
func httpTraceAll(f http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if !globalHTTPTrace.HasSubscribers() {
f.ServeHTTP(w, r)
return
}
trace := Trace(f, true, w, r)
globalHTTPTrace.Publish(trace)
}
}
// Log only the headers.
func httpTraceHdrs(f http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if !globalHTTPTrace.HasSubscribers() {
f.ServeHTTP(w, r)
return
}
trace := Trace(f, false, w, r)
globalHTTPTrace.Publish(trace)
}
}
func collectAPIStats(api string, f http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
globalHTTPStats.currentS3Requests.Inc(api)
defer globalHTTPStats.currentS3Requests.Dec(api)
statsWriter := logger.NewResponseWriter(w)
f.ServeHTTP(statsWriter, r)
// Time duration in seconds since the call started. We don't need
// nanosecond precision here simply because it is not human readable.
durationSecs := time.Since(statsWriter.StartTime).Seconds()
globalHTTPStats.updateStats(api, r, statsWriter, durationSecs)
}
}
// Returns "/bucketName/objectName" for path-style or virtual-host-style requests.
func getResource(path string, host string, domains []string) (string, error) {
if len(domains) == 0 {
return path, nil
}
// If virtual-host-style is enabled construct the "resource" properly.
if strings.Contains(host, ":") {
// In bucket.mydomain.com:9000, strip out :9000
var err error
if host, _, err = net.SplitHostPort(host); err != nil {
reqInfo := (&logger.ReqInfo{}).AppendTags("host", host)
reqInfo.AppendTags("path", path)
ctx := logger.SetReqInfo(GlobalContext, reqInfo)
logger.LogIf(ctx, err)
return "", err
}
}
for _, domain := range domains {
if host == minioReservedBucket+"."+domain {
continue
}
if !strings.HasSuffix(host, "."+domain) {
continue
}
bucket := strings.TrimSuffix(host, "."+domain)
return SlashSeparator + pathJoin(bucket, path), nil
}
return path, nil
}
var regexVersion = regexp.MustCompile(`(\w\d+)`)
func extractAPIVersion(r *http.Request) string {
return regexVersion.FindString(r.URL.Path)
}
func methodNotAllowedHandler(api string) func(w http.ResponseWriter, r *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
code := "XMinio" + api + "VersionMismatch"
writeErrorResponseString(r.Context(), w, APIError{
Code: code,
Description: "Not allowed (" + r.Method + " " + r.URL.String() + " on " + api + " API)",
HTTPStatusCode: http.StatusMethodNotAllowed,
}, r.URL)
}
}
// If none of the http routes match respond with appropriate errors
func errorResponseHandler(w http.ResponseWriter, r *http.Request) {
if r.Method == http.MethodOptions {
return
}
version := extractAPIVersion(r)
switch {
case strings.HasPrefix(r.URL.Path, peerRESTPrefix):
desc := fmt.Sprintf("Expected 'peer' API version '%s', instead found '%s', please upgrade the servers",
peerRESTVersion, version)
writeErrorResponseString(r.Context(), w, APIError{
Code: "XMinioPeerVersionMismatch",
Description: desc,
HTTPStatusCode: http.StatusUpgradeRequired,
}, r.URL)
case strings.HasPrefix(r.URL.Path, storageRESTPrefix):
desc := fmt.Sprintf("Expected 'storage' API version '%s', instead found '%s', please upgrade the servers",
storageRESTVersion, version)
writeErrorResponseString(r.Context(), w, APIError{
Code: "XMinioStorageVersionMismatch",
Description: desc,
HTTPStatusCode: http.StatusUpgradeRequired,
}, r.URL)
case strings.HasPrefix(r.URL.Path, lockRESTPrefix):
desc := fmt.Sprintf("Expected 'lock' API version '%s', instead found '%s', please upgrade the servers",
lockRESTVersion, version)
writeErrorResponseString(r.Context(), w, APIError{
Code: "XMinioLockVersionMismatch",
Description: desc,
HTTPStatusCode: http.StatusUpgradeRequired,
}, r.URL)
case strings.HasPrefix(r.URL.Path, adminPathPrefix):
var desc string
if version == "v1" {
desc = fmt.Sprintf("Server expects client requests with 'admin' API version '%s', found '%s', please upgrade the client to latest releases", madmin.AdminAPIVersion, version)
} else if version == madmin.AdminAPIVersion {
desc = fmt.Sprintf("This 'admin' API is not supported by server in '%s'", getMinioMode())
} else {
desc = fmt.Sprintf("Unexpected client 'admin' API version found '%s', expected '%s', please downgrade the client to older releases", version, madmin.AdminAPIVersion)
}
writeErrorResponseJSON(r.Context(), w, APIError{
Code: "XMinioAdminVersionMismatch",
Description: desc,
HTTPStatusCode: http.StatusUpgradeRequired,
}, r.URL)
default:
desc := fmt.Sprintf("Unknown API request at %s", r.URL.Path)
writeErrorResponse(r.Context(), w, APIError{
Code: "XMinioUnknownAPIRequest",
Description: desc,
HTTPStatusCode: http.StatusBadRequest,
}, r.URL, guessIsBrowserReq(r))
}
}
// gets host name for current node
func getHostName(r *http.Request) (hostName string) {
if globalIsDistErasure {
hostName = GetLocalPeer(globalEndpoints)
} else {
hostName = r.Host
}
return
}
// Proxy any request to an endpoint.
func proxyRequest(ctx context.Context, w http.ResponseWriter, r *http.Request, ep ProxyEndpoint) (success bool) {
success = true
// Make sure we remove any existing headers before
// proxying the request to another node.
for k := range w.Header() {
w.Header().Del(k)
}
f := handlers.NewForwarder(&handlers.Forwarder{
PassHost: true,
RoundTripper: ep.Transport,
ErrorHandler: func(w http.ResponseWriter, r *http.Request, err error) {
success = false
if err != nil && !errors.Is(err, context.Canceled) {
logger.LogIf(GlobalContext, err)
}
},
})
r.URL.Scheme = "http"
if globalIsSSL {
r.URL.Scheme = "https"
}
r.URL.Host = ep.Host
f.ServeHTTP(w, r)
return
}
| cmd/handler-utils.go | 1 | https://github.com/minio/minio/commit/3a0082f0f18abc158e4ee6d4ce02d012a36a0901 | [
0.13857105374336243,
0.0029320253524929285,
0.00016393093392252922,
0.0001743669854477048,
0.018645700067281723
] |
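The id 3 hunk removes the `durationSecs` parameter from `updateStats`, so its call site in `collectAPIStats` (see the file above) would presumably lose the duration computation as well. The dataset does not include that companion hunk, so the following is only an inferred sketch:

```
func collectAPIStats(api string, f http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		globalHTTPStats.currentS3Requests.Inc(api)
		defer globalHTTPStats.currentS3Requests.Dec(api)

		statsWriter := logger.NewResponseWriter(w)
		f.ServeHTTP(statsWriter, r)

		// The response writer is assumed to track time-to-first-byte itself,
		// so no duration needs to be computed here.
		globalHTTPStats.updateStats(api, r, statsWriter)
	}
}
```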
{
"id": 3,
"code_window": [
"\t\t\tst.totalS3Errors.Inc(api)\n",
"\t\t}\n",
"\t}\n",
"\n",
"\tif r.Method == http.MethodGet {\n",
"\t\t// Increment the prometheus http request response histogram with appropriate label\n",
"\t\thttpRequestsDuration.With(prometheus.Labels{\"api\": api}).Observe(durationSecs)\n",
"\t}\n",
"}\n",
"\n",
"// Prepare new HTTPStats structure\n",
"func newHTTPStats() *HTTPStats {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// Increment the prometheus http request response histogram with appropriate label\n",
"\thttpRequestsDuration.With(prometheus.Labels{\"api\": api}).Observe(w.TimeToFirstByte.Seconds())\n"
],
"file_path": "cmd/http-stats.go",
"type": "replace",
"edit_start_line_idx": 174
} | /*
* MinIO Cloud Storage, (C) 2017, 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package http
import (
"crypto/tls"
"errors"
"io/ioutil"
"net/http"
"runtime/pprof"
"sync"
"sync/atomic"
"time"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/config/api"
"github.com/minio/minio/pkg/certs"
"github.com/minio/minio/pkg/env"
)
const (
serverShutdownPoll = 500 * time.Millisecond
// DefaultShutdownTimeout - default shutdown timeout used for graceful http server shutdown.
DefaultShutdownTimeout = 5 * time.Second
// DefaultMaxHeaderBytes - default maximum HTTP header size in bytes.
DefaultMaxHeaderBytes = 1 * humanize.MiByte
)
// Server - extended http.Server supports multiple addresses to serve and enhanced connection handling.
type Server struct {
http.Server
Addrs []string // addresses on which the server listens for new connection.
ShutdownTimeout time.Duration // timeout used for graceful server shutdown.
listenerMutex sync.Mutex // to guard 'listener' field.
listener *httpListener // HTTP listener for all 'Addrs' field.
inShutdown uint32 // indicates whether the server is in shutdown or not
requestCount int32 // counter holds no. of request in progress.
}
// GetRequestCount - returns number of request in progress.
func (srv *Server) GetRequestCount() int {
return int(atomic.LoadInt32(&srv.requestCount))
}
// Start - start HTTP server
func (srv *Server) Start() (err error) {
// Take a copy of server fields.
var tlsConfig *tls.Config
if srv.TLSConfig != nil {
tlsConfig = srv.TLSConfig.Clone()
}
handler := srv.Handler // if srv.Handler holds non-synced state -> possible data race
addrs := set.CreateStringSet(srv.Addrs...).ToSlice() // copy and remove duplicates
// Create new HTTP listener.
var listener *httpListener
listener, err = newHTTPListener(
addrs,
)
if err != nil {
return err
}
// Wrap the given handler to additionally:
// * return 403 (Forbidden) with "Connection: close" if the server is in shutdown.
wrappedHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// If server is in shutdown.
if atomic.LoadUint32(&srv.inShutdown) != 0 {
// To indicate disable keep-alives
w.Header().Set("Connection", "close")
w.WriteHeader(http.StatusForbidden)
w.Write([]byte(http.ErrServerClosed.Error()))
w.(http.Flusher).Flush()
return
}
atomic.AddInt32(&srv.requestCount, 1)
defer atomic.AddInt32(&srv.requestCount, -1)
// Handle request using passed handler.
handler.ServeHTTP(w, r)
})
srv.listenerMutex.Lock()
srv.Handler = wrappedHandler
srv.listener = listener
srv.listenerMutex.Unlock()
// Start servicing with listener.
if tlsConfig != nil {
return srv.Server.Serve(tls.NewListener(listener, tlsConfig))
}
return srv.Server.Serve(listener)
}
// Shutdown - shuts down HTTP server.
func (srv *Server) Shutdown() error {
srv.listenerMutex.Lock()
if srv.listener == nil {
srv.listenerMutex.Unlock()
return http.ErrServerClosed
}
srv.listenerMutex.Unlock()
if atomic.AddUint32(&srv.inShutdown, 1) > 1 {
// shutdown in progress
return http.ErrServerClosed
}
// Close the underlying HTTP listener.
srv.listenerMutex.Lock()
err := srv.listener.Close()
srv.listenerMutex.Unlock()
if err != nil {
return err
}
// Wait for opened connection to be closed up to Shutdown timeout.
shutdownTimeout := srv.ShutdownTimeout
shutdownTimer := time.NewTimer(shutdownTimeout)
ticker := time.NewTicker(serverShutdownPoll)
defer ticker.Stop()
for {
select {
case <-shutdownTimer.C:
// Write all running goroutines.
tmp, err := ioutil.TempFile("", "minio-goroutines-*.txt")
if err == nil {
_ = pprof.Lookup("goroutine").WriteTo(tmp, 1)
tmp.Close()
return errors.New("timed out. some connections are still active. goroutines written to " + tmp.Name())
}
return errors.New("timed out. some connections are still active")
case <-ticker.C:
if atomic.LoadInt32(&srv.requestCount) <= 0 {
return nil
}
}
}
}
// Secure Go implementations of modern TLS ciphers
// The following ciphers are excluded because:
// - RC4 ciphers: RC4 is broken
// - 3DES ciphers: Because of the 64 bit blocksize of DES (Sweet32)
// - CBC-SHA256 ciphers: No countermeasures against Lucky13 timing attack
// - CBC-SHA ciphers: Legacy ciphers (SHA-1) and non-constant time
// implementation of CBC.
// (CBC-SHA ciphers can be enabled again if required)
// - RSA key exchange ciphers: Disabled because of dangerous PKCS1-v1.5 RSA
// padding scheme. See Bleichenbacher attacks.
var secureCipherSuites = []uint16{
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
}
// Go only provides constant-time implementations of Curve25519 and NIST P-256 curve.
var secureCurves = []tls.CurveID{tls.X25519, tls.CurveP256}
// NewServer - creates new HTTP server using given arguments.
func NewServer(addrs []string, handler http.Handler, getCert certs.GetCertificateFunc) *Server {
secureCiphers := env.Get(api.EnvAPISecureCiphers, config.EnableOn) == config.EnableOn
var tlsConfig *tls.Config
if getCert != nil {
tlsConfig = &tls.Config{
// TLS hardening
PreferServerCipherSuites: true,
MinVersion: tls.VersionTLS12,
NextProtos: []string{"h2", "http/1.1"},
}
tlsConfig.GetCertificate = getCert
}
if secureCiphers && tlsConfig != nil {
tlsConfig.CipherSuites = secureCipherSuites
tlsConfig.CurvePreferences = secureCurves
}
httpServer := &Server{
Addrs: addrs,
ShutdownTimeout: DefaultShutdownTimeout,
}
httpServer.Handler = handler
httpServer.TLSConfig = tlsConfig
httpServer.MaxHeaderBytes = DefaultMaxHeaderBytes
return httpServer
}
| cmd/http/server.go | 0 | https://github.com/minio/minio/commit/3a0082f0f18abc158e4ee6d4ce02d012a36a0901 | [
0.0015661438228562474,
0.0002838204673025757,
0.0001603889832040295,
0.0001742123713484034,
0.0003245199332013726
] |
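As a usage sketch of the `Server` type above (the address and handler are placeholders; passing `nil` for `getCert` disables TLS):

```
package main

import (
	"log"
	"net/http"

	xhttp "github.com/minio/minio/cmd/http"
)

func main() {
	mux := http.NewServeMux()
	srv := xhttp.NewServer([]string{":9000"}, mux, nil)

	go func() {
		// Start blocks; it returns an error once the listener is closed.
		if err := srv.Start(); err != nil {
			log.Println("server stopped:", err)
		}
	}()

	// In practice this would be triggered by a shutdown signal.
	if err := srv.Shutdown(); err != nil {
		log.Println("shutdown:", err)
	}
}
```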
{
"id": 3,
"code_window": [
"\t\t\tst.totalS3Errors.Inc(api)\n",
"\t\t}\n",
"\t}\n",
"\n",
"\tif r.Method == http.MethodGet {\n",
"\t\t// Increment the prometheus http request response histogram with appropriate label\n",
"\t\thttpRequestsDuration.With(prometheus.Labels{\"api\": api}).Observe(durationSecs)\n",
"\t}\n",
"}\n",
"\n",
"// Prepare new HTTPStats structure\n",
"func newHTTPStats() *HTTPStats {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// Increment the prometheus http request response histogram with appropriate label\n",
"\thttpRequestsDuration.With(prometheus.Labels{\"api\": api}).Observe(w.TimeToFirstByte.Seconds())\n"
],
"file_path": "cmd/http-stats.go",
"type": "replace",
"edit_start_line_idx": 174
} | /*
* MinIO Cloud Storage, (C) 2018-2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package openid
import (
"errors"
"fmt"
"sync"
)
// ID - holds identification name authentication validator target.
type ID string
// Validator interface describes basic implementation
// requirements of various authentication providers.
type Validator interface {
// Validate is a custom validator function for this provider,
// each validation is authenticationType or provider specific.
Validate(token string, duration string) (map[string]interface{}, error)
// ID returns provider name of this provider.
ID() ID
}
// ErrTokenExpired - error token expired
var (
ErrTokenExpired = errors.New("token expired")
)
// Validators - holds list of providers indexed by provider id.
type Validators struct {
sync.RWMutex
providers map[ID]Validator
}
// Add - adds unique provider to provider list.
func (list *Validators) Add(provider Validator) error {
list.Lock()
defer list.Unlock()
if _, ok := list.providers[provider.ID()]; ok {
return fmt.Errorf("provider %v already exists", provider.ID())
}
list.providers[provider.ID()] = provider
return nil
}
// List - returns available provider IDs.
func (list *Validators) List() []ID {
list.RLock()
defer list.RUnlock()
keys := []ID{}
for k := range list.providers {
keys = append(keys, k)
}
return keys
}
// Get - returns the provider for the given providerID, if not found
// returns an error.
func (list *Validators) Get(id ID) (p Validator, err error) {
list.RLock()
defer list.RUnlock()
var ok bool
if p, ok = list.providers[id]; !ok {
return nil, fmt.Errorf("provider %v doesn't exist", id)
}
return p, nil
}
// NewValidators - creates Validators.
func NewValidators() *Validators {
return &Validators{providers: make(map[ID]Validator)}
}
| cmd/config/identity/openid/validators.go | 0 | https://github.com/minio/minio/commit/3a0082f0f18abc158e4ee6d4ce02d012a36a0901 | [
0.00017920814570970833,
0.00016998047067318112,
0.00015999154129531235,
0.0001710403012111783,
0.000005574042006628588
] |
{
"id": 3,
"code_window": [
"\t\t\tst.totalS3Errors.Inc(api)\n",
"\t\t}\n",
"\t}\n",
"\n",
"\tif r.Method == http.MethodGet {\n",
"\t\t// Increment the prometheus http request response histogram with appropriate label\n",
"\t\thttpRequestsDuration.With(prometheus.Labels{\"api\": api}).Observe(durationSecs)\n",
"\t}\n",
"}\n",
"\n",
"// Prepare new HTTPStats structure\n",
"func newHTTPStats() *HTTPStats {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// Increment the prometheus http request response histogram with appropriate label\n",
"\thttpRequestsDuration.With(prometheus.Labels{\"api\": api}).Observe(w.TimeToFirstByte.Seconds())\n"
],
"file_path": "cmd/http-stats.go",
"type": "replace",
"edit_start_line_idx": 174
} | .alert {
border: 0;
position: fixed;
max-width: 500px;
margin: 0;
box-shadow: 0 4px 5px rgba(0, 0, 0, 0.1);
color: @white;
width: 100%;
right: 20px;
border-radius: 3px;
padding: 17px 50px 17px 17px;
z-index: 10010;
.animation-duration(800ms);
.animation-fill-mode(both);
&:not(.progress) {
top: 20px;
@media(min-width: (@screen-sm-min)) {
left: 50%;
margin-left: -250px;
}
}
&.progress {
bottom: 20px;
right: 20px;
}
&.alert-danger {
background: @red;
}
&.alert-success {
background: @green;
}
&.alert-info {
background: @blue;
}
@media(max-width: (@screen-xs-max)) {
left: 20px;
width: ~"calc(100% - 40px)";
max-width: 100%;
}
.progress {
margin: 10px 10px 8px 0;
height: 5px;
box-shadow: none;
background-color: @blue;
border-radius: 2px;
overflow: hidden;
}
.progress-bar {
box-shadow: none;
background-color: @white;
height: 100%;
}
.close {
position: absolute;
top: 15px;
}
} | browser/app/less/inc/alert.less | 0 | https://github.com/minio/minio/commit/3a0082f0f18abc158e4ee6d4ce02d012a36a0901 | [
0.00018160638865083456,
0.00017857736384030432,
0.00017693433619569987,
0.0001787737855920568,
0.0000015803116184542887
] |
{
"id": 1,
"code_window": [
"\n",
"\tvar prober volume.DynamicPluginProber = nil // TODO (#51147) inject mock\n",
"\tkubelet.volumePluginMgr, err =\n",
"\t\tNewInitializedVolumePluginMgr(kubelet, kubelet.secretManager, kubelet.configMapManager, token.NewManager(kubelet.kubeClient.CoreV1()), allPlugins, prober)\n",
"\trequire.NoError(t, err, \"Failed to initialize VolumePluginMgr\")\n",
"\n",
"\tkubelet.mounter = &mount.FakeMounter{}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tNewInitializedVolumePluginMgr(kubelet, kubelet.secretManager, kubelet.configMapManager, token.NewManager(kubelet.kubeClient), allPlugins, prober)\n"
],
"file_path": "pkg/kubelet/kubelet_test.go",
"type": "replace",
"edit_start_line_idx": 328
} | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package token implements a manager of serviceaccount tokens for pods running
// on the node.
package token
import (
"fmt"
"sync"
"time"
"github.com/golang/glog"
authenticationv1 "k8s.io/api/authentication/v1"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/wait"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
)
const (
maxTTL = 24 * time.Hour
gcPeriod = time.Minute
)
// NewManager returns a new token manager.
func NewManager(c corev1.CoreV1Interface) *Manager {
m := &Manager{
getToken: func(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
return c.ServiceAccounts(namespace).CreateToken(name, tr)
},
cache: make(map[string]*authenticationv1.TokenRequest),
clock: clock.RealClock{},
}
go wait.Forever(m.cleanup, gcPeriod)
return m
}
// Manager manages service account tokens for pods.
type Manager struct {
// cacheMutex guards the cache
cacheMutex sync.RWMutex
cache map[string]*authenticationv1.TokenRequest
// mocked for testing
getToken func(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error)
clock clock.Clock
}
// GetServiceAccountToken gets a service account token for a pod from cache or
// from the TokenRequest API. This process is as follows:
// * Check the cache for the current token request.
// * If the token exists and does not require a refresh, return the current token.
// * Attempt to refresh the token.
// * If the token is refreshed successfully, save it in the cache and return the token.
// * If refresh fails and the old token is still valid, log an error and return the old token.
// * If refresh fails and the old token is no longer valid, return an error
func (m *Manager) GetServiceAccountToken(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
key := keyFunc(name, namespace, tr)
ctr, ok := m.get(key)
if ok && !m.requiresRefresh(ctr) {
return ctr, nil
}
tr, err := m.getToken(name, namespace, tr)
if err != nil {
switch {
case !ok:
return nil, fmt.Errorf("failed to fetch token: %v", err)
case m.expired(ctr):
return nil, fmt.Errorf("token %s expired and refresh failed: %v", key, err)
default:
glog.Errorf("couldn't update token %s: %v", key, err)
return ctr, nil
}
}
m.set(key, tr)
return tr, nil
}
func (m *Manager) cleanup() {
m.cacheMutex.Lock()
defer m.cacheMutex.Unlock()
for k, tr := range m.cache {
if m.expired(tr) {
delete(m.cache, k)
}
}
}
func (m *Manager) get(key string) (*authenticationv1.TokenRequest, bool) {
m.cacheMutex.RLock()
defer m.cacheMutex.RUnlock()
ctr, ok := m.cache[key]
return ctr, ok
}
func (m *Manager) set(key string, tr *authenticationv1.TokenRequest) {
m.cacheMutex.Lock()
defer m.cacheMutex.Unlock()
m.cache[key] = tr
}
func (m *Manager) expired(t *authenticationv1.TokenRequest) bool {
return m.clock.Now().After(t.Status.ExpirationTimestamp.Time)
}
// requiresRefresh returns true if the token is older than 80% of its total
// ttl, or if the token is older than 24 hours.
func (m *Manager) requiresRefresh(tr *authenticationv1.TokenRequest) bool {
if tr.Spec.ExpirationSeconds == nil {
glog.Errorf("expiration seconds was nil for tr: %#v", tr)
return false
}
now := m.clock.Now()
exp := tr.Status.ExpirationTimestamp.Time
iat := exp.Add(-1 * time.Duration(*tr.Spec.ExpirationSeconds) * time.Second)
if now.After(iat.Add(maxTTL)) {
return true
}
// Require a refresh if within 20% of the TTL from the expiration time.
if now.After(exp.Add(-1 * time.Duration((*tr.Spec.ExpirationSeconds*20)/100) * time.Second)) {
return true
}
return false
}
// keys should be nonconfidential and safe to log
func keyFunc(name, namespace string, tr *authenticationv1.TokenRequest) string {
return fmt.Sprintf("%q/%q/%#v", name, namespace, tr.Spec)
}
| pkg/kubelet/token/token_manager.go | 1 | https://github.com/kubernetes/kubernetes/commit/90ba15ee742743fee911ac635fef7c40a406fd5b | [
0.0009131134720519185,
0.00023360500927083194,
0.00016239350952673703,
0.00017002665845211595,
0.0001908183330669999
] |
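The id 1 hunk passes `kubelet.kubeClient` (a full clientset) to `token.NewManager` in place of `kubelet.kubeClient.CoreV1()`, which implies the constructor's parameter widens from `corev1.CoreV1Interface` to the clientset interface. A sketch of the adjusted constructor, inferred from the hunk rather than copied from the commit (`clientset` stands for `k8s.io/client-go/kubernetes`):

```
// NewManager returns a new token manager.
func NewManager(c clientset.Interface) *Manager {
	m := &Manager{
		getToken: func(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
			// Reach through the clientset to the same CoreV1 call as before.
			return c.CoreV1().ServiceAccounts(namespace).CreateToken(name, tr)
		},
		cache: make(map[string]*authenticationv1.TokenRequest),
		clock: clock.RealClock{},
	}
	go wait.Forever(m.cleanup, gcPeriod)
	return m
}
```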
{
"id": 1,
"code_window": [
"\n",
"\tvar prober volume.DynamicPluginProber = nil // TODO (#51147) inject mock\n",
"\tkubelet.volumePluginMgr, err =\n",
"\t\tNewInitializedVolumePluginMgr(kubelet, kubelet.secretManager, kubelet.configMapManager, token.NewManager(kubelet.kubeClient.CoreV1()), allPlugins, prober)\n",
"\trequire.NoError(t, err, \"Failed to initialize VolumePluginMgr\")\n",
"\n",
"\tkubelet.mounter = &mount.FakeMounter{}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tNewInitializedVolumePluginMgr(kubelet, kubelet.secretManager, kubelet.configMapManager, token.NewManager(kubelet.kubeClient), allPlugins, prober)\n"
],
"file_path": "pkg/kubelet/kubelet_test.go",
"type": "replace",
"edit_start_line_idx": 328
} | // Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ChaCha20 implements the core ChaCha20 function as specified in https://tools.ietf.org/html/rfc7539#section-2.3.
package chacha20
import "encoding/binary"
const rounds = 20
// core applies the ChaCha20 core function to 16-byte input in, 32-byte key k,
// and 16-byte constant c, and puts the result into 64-byte array out.
func core(out *[64]byte, in *[16]byte, k *[32]byte) {
j0 := uint32(0x61707865)
j1 := uint32(0x3320646e)
j2 := uint32(0x79622d32)
j3 := uint32(0x6b206574)
j4 := binary.LittleEndian.Uint32(k[0:4])
j5 := binary.LittleEndian.Uint32(k[4:8])
j6 := binary.LittleEndian.Uint32(k[8:12])
j7 := binary.LittleEndian.Uint32(k[12:16])
j8 := binary.LittleEndian.Uint32(k[16:20])
j9 := binary.LittleEndian.Uint32(k[20:24])
j10 := binary.LittleEndian.Uint32(k[24:28])
j11 := binary.LittleEndian.Uint32(k[28:32])
j12 := binary.LittleEndian.Uint32(in[0:4])
j13 := binary.LittleEndian.Uint32(in[4:8])
j14 := binary.LittleEndian.Uint32(in[8:12])
j15 := binary.LittleEndian.Uint32(in[12:16])
x0, x1, x2, x3, x4, x5, x6, x7 := j0, j1, j2, j3, j4, j5, j6, j7
x8, x9, x10, x11, x12, x13, x14, x15 := j8, j9, j10, j11, j12, j13, j14, j15
for i := 0; i < rounds; i += 2 {
x0 += x4
x12 ^= x0
x12 = (x12 << 16) | (x12 >> (16))
x8 += x12
x4 ^= x8
x4 = (x4 << 12) | (x4 >> (20))
x0 += x4
x12 ^= x0
x12 = (x12 << 8) | (x12 >> (24))
x8 += x12
x4 ^= x8
x4 = (x4 << 7) | (x4 >> (25))
x1 += x5
x13 ^= x1
x13 = (x13 << 16) | (x13 >> 16)
x9 += x13
x5 ^= x9
x5 = (x5 << 12) | (x5 >> 20)
x1 += x5
x13 ^= x1
x13 = (x13 << 8) | (x13 >> 24)
x9 += x13
x5 ^= x9
x5 = (x5 << 7) | (x5 >> 25)
x2 += x6
x14 ^= x2
x14 = (x14 << 16) | (x14 >> 16)
x10 += x14
x6 ^= x10
x6 = (x6 << 12) | (x6 >> 20)
x2 += x6
x14 ^= x2
x14 = (x14 << 8) | (x14 >> 24)
x10 += x14
x6 ^= x10
x6 = (x6 << 7) | (x6 >> 25)
x3 += x7
x15 ^= x3
x15 = (x15 << 16) | (x15 >> 16)
x11 += x15
x7 ^= x11
x7 = (x7 << 12) | (x7 >> 20)
x3 += x7
x15 ^= x3
x15 = (x15 << 8) | (x15 >> 24)
x11 += x15
x7 ^= x11
x7 = (x7 << 7) | (x7 >> 25)
x0 += x5
x15 ^= x0
x15 = (x15 << 16) | (x15 >> 16)
x10 += x15
x5 ^= x10
x5 = (x5 << 12) | (x5 >> 20)
x0 += x5
x15 ^= x0
x15 = (x15 << 8) | (x15 >> 24)
x10 += x15
x5 ^= x10
x5 = (x5 << 7) | (x5 >> 25)
x1 += x6
x12 ^= x1
x12 = (x12 << 16) | (x12 >> 16)
x11 += x12
x6 ^= x11
x6 = (x6 << 12) | (x6 >> 20)
x1 += x6
x12 ^= x1
x12 = (x12 << 8) | (x12 >> 24)
x11 += x12
x6 ^= x11
x6 = (x6 << 7) | (x6 >> 25)
x2 += x7
x13 ^= x2
x13 = (x13 << 16) | (x13 >> 16)
x8 += x13
x7 ^= x8
x7 = (x7 << 12) | (x7 >> 20)
x2 += x7
x13 ^= x2
x13 = (x13 << 8) | (x13 >> 24)
x8 += x13
x7 ^= x8
x7 = (x7 << 7) | (x7 >> 25)
x3 += x4
x14 ^= x3
x14 = (x14 << 16) | (x14 >> 16)
x9 += x14
x4 ^= x9
x4 = (x4 << 12) | (x4 >> 20)
x3 += x4
x14 ^= x3
x14 = (x14 << 8) | (x14 >> 24)
x9 += x14
x4 ^= x9
x4 = (x4 << 7) | (x4 >> 25)
}
x0 += j0
x1 += j1
x2 += j2
x3 += j3
x4 += j4
x5 += j5
x6 += j6
x7 += j7
x8 += j8
x9 += j9
x10 += j10
x11 += j11
x12 += j12
x13 += j13
x14 += j14
x15 += j15
binary.LittleEndian.PutUint32(out[0:4], x0)
binary.LittleEndian.PutUint32(out[4:8], x1)
binary.LittleEndian.PutUint32(out[8:12], x2)
binary.LittleEndian.PutUint32(out[12:16], x3)
binary.LittleEndian.PutUint32(out[16:20], x4)
binary.LittleEndian.PutUint32(out[20:24], x5)
binary.LittleEndian.PutUint32(out[24:28], x6)
binary.LittleEndian.PutUint32(out[28:32], x7)
binary.LittleEndian.PutUint32(out[32:36], x8)
binary.LittleEndian.PutUint32(out[36:40], x9)
binary.LittleEndian.PutUint32(out[40:44], x10)
binary.LittleEndian.PutUint32(out[44:48], x11)
binary.LittleEndian.PutUint32(out[48:52], x12)
binary.LittleEndian.PutUint32(out[52:56], x13)
binary.LittleEndian.PutUint32(out[56:60], x14)
binary.LittleEndian.PutUint32(out[60:64], x15)
}
// XORKeyStream XORs bytes from in with the ChaCha20 key stream and writes
// them to out, which must be at least as long as in. In and out must
// overlap entirely or not at all. Counter contains the raw ChaCha20 counter
// bytes: a 4-byte little-endian block counter followed by the 12-byte nonce.
func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
var block [64]byte
var counterCopy [16]byte
copy(counterCopy[:], counter[:])
for len(in) >= 64 {
core(&block, &counterCopy, key)
for i, x := range block {
out[i] = in[i] ^ x
}
u := uint32(1)
for i := 0; i < 4; i++ {
u += uint32(counterCopy[i])
counterCopy[i] = byte(u)
u >>= 8
}
in = in[64:]
out = out[64:]
}
if len(in) > 0 {
core(&block, &counterCopy, key)
for i, v := range in {
out[i] = v ^ block[i]
}
}
}
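// exampleRoundTrip is an illustrative sketch added for exposition (it is
// not part of the upstream file): because encryption and decryption are
// the same XOR with the key stream, applying XORKeyStream twice with the
// same key and counter recovers the plaintext. The zero-valued key and
// counter here are placeholders, not real secrets.
func exampleRoundTrip(msg []byte) bool {
	var key [32]byte     // in practice: 32 bytes of random key material
	var counter [16]byte // 4-byte little-endian block counter || 12-byte nonce (RFC 7539)
	ct := make([]byte, len(msg))
	XORKeyStream(ct, msg, &counter, &key)
	pt := make([]byte, len(ct))
	XORKeyStream(pt, ct, &counter, &key)
	for i := range msg {
		if pt[i] != msg[i] {
			return false
		}
	}
	return true // always true: XORKeyStream is its own inverse
}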
| vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go | 0 | https://github.com/kubernetes/kubernetes/commit/90ba15ee742743fee911ac635fef7c40a406fd5b | [
0.00017829917487688363,
0.0001734893157845363,
0.00016298875561915338,
0.00017454964108765125,
0.0000036577184800989926
] |
{
"id": 1,
"code_window": [
"\n",
"\tvar prober volume.DynamicPluginProber = nil // TODO (#51147) inject mock\n",
"\tkubelet.volumePluginMgr, err =\n",
"\t\tNewInitializedVolumePluginMgr(kubelet, kubelet.secretManager, kubelet.configMapManager, token.NewManager(kubelet.kubeClient.CoreV1()), allPlugins, prober)\n",
"\trequire.NoError(t, err, \"Failed to initialize VolumePluginMgr\")\n",
"\n",
"\tkubelet.mounter = &mount.FakeMounter{}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tNewInitializedVolumePluginMgr(kubelet, kubelet.secretManager, kubelet.configMapManager, token.NewManager(kubelet.kubeClient), allPlugins, prober)\n"
],
"file_path": "pkg/kubelet/kubelet_test.go",
"type": "replace",
"edit_start_line_idx": 328
} | package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
)
load("//pkg/version:def.bzl", "version_x_defs")
go_binary(
name = "kubeadm",
embed = [":go_default_library"],
pure = "on",
x_defs = version_x_defs(),
)
go_library(
name = "go_default_library",
srcs = ["kubeadm.go"],
importpath = "k8s.io/kubernetes/cmd/kubeadm",
deps = ["//cmd/kubeadm/app:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//cmd/kubeadm/app:all-srcs",
"//cmd/kubeadm/test:all-srcs",
],
tags = ["automanaged"],
)
| cmd/kubeadm/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/90ba15ee742743fee911ac635fef7c40a406fd5b | [
0.00017541577108204365,
0.000174808781594038,
0.00017404525715392083,
0.00017488707089796662,
6.155626124382252e-7
] |
{
"id": 1,
"code_window": [
"\n",
"\tvar prober volume.DynamicPluginProber = nil // TODO (#51147) inject mock\n",
"\tkubelet.volumePluginMgr, err =\n",
"\t\tNewInitializedVolumePluginMgr(kubelet, kubelet.secretManager, kubelet.configMapManager, token.NewManager(kubelet.kubeClient.CoreV1()), allPlugins, prober)\n",
"\trequire.NoError(t, err, \"Failed to initialize VolumePluginMgr\")\n",
"\n",
"\tkubelet.mounter = &mount.FakeMounter{}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tNewInitializedVolumePluginMgr(kubelet, kubelet.secretManager, kubelet.configMapManager, token.NewManager(kubelet.kubeClient), allPlugins, prober)\n"
],
"file_path": "pkg/kubelet/kubelet_test.go",
"type": "replace",
"edit_start_line_idx": 328
} | apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: gce:podsecuritypolicy:kube-proxy
labels:
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/cluster-service: "true"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: gce:podsecuritypolicy:privileged
subjects:
- kind: ServiceAccount
name: kube-proxy
namespace: kube-system
| cluster/gce/addons/podsecuritypolicies/kube-proxy-binding.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/90ba15ee742743fee911ac635fef7c40a406fd5b | [
0.0001753737888066098,
0.00017483200645074248,
0.00017429022409487516,
0.00017483200645074248,
5.417823558673263e-7
] |
{
"id": 2,
"code_window": [
" \"//vendor/k8s.io/api/authentication/v1:go_default_library\",\n",
" \"//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library\",\n",
" \"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library\",\n",
" \"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library\",\n",
" ],\n",
")\n",
"\n",
"go_test(\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"//vendor/k8s.io/client-go/kubernetes:go_default_library\",\n"
],
"file_path": "pkg/kubelet/token/BUILD",
"type": "replace",
"edit_start_line_idx": 26
} | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package token implements a manager of serviceaccount tokens for pods running
// on the node.
package token
import (
"fmt"
"sync"
"time"
"github.com/golang/glog"
authenticationv1 "k8s.io/api/authentication/v1"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/wait"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
)
const (
maxTTL = 24 * time.Hour
gcPeriod = time.Minute
)
// NewManager returns a new token manager.
func NewManager(c corev1.CoreV1Interface) *Manager {
m := &Manager{
getToken: func(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
return c.ServiceAccounts(namespace).CreateToken(name, tr)
},
cache: make(map[string]*authenticationv1.TokenRequest),
clock: clock.RealClock{},
}
go wait.Forever(m.cleanup, gcPeriod)
return m
}
// Manager manages service account tokens for pods.
type Manager struct {
// cacheMutex guards the cache
cacheMutex sync.RWMutex
cache map[string]*authenticationv1.TokenRequest
// mocked for testing
getToken func(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error)
clock clock.Clock
}
// GetServiceAccountToken gets a service account token for a pod from cache or
// from the TokenRequest API. This process is as follows:
// * Check the cache for the current token request.
// * If the token exists and does not require a refresh, return the current token.
// * Attempt to refresh the token.
// * If the token is refreshed successfully, save it in the cache and return the token.
// * If refresh fails and the old token is still valid, log an error and return the old token.
// * If refresh fails and the old token is no longer valid, return an error.
func (m *Manager) GetServiceAccountToken(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
key := keyFunc(name, namespace, tr)
ctr, ok := m.get(key)
if ok && !m.requiresRefresh(ctr) {
return ctr, nil
}
tr, err := m.getToken(name, namespace, tr)
if err != nil {
switch {
case !ok:
return nil, fmt.Errorf("failed to fetch token: %v", err)
case m.expired(ctr):
return nil, fmt.Errorf("token %s expired and refresh failed: %v", key, err)
default:
glog.Errorf("couldn't update token %s: %v", key, err)
return ctr, nil
}
}
m.set(key, tr)
return tr, nil
}
func (m *Manager) cleanup() {
m.cacheMutex.Lock()
defer m.cacheMutex.Unlock()
for k, tr := range m.cache {
if m.expired(tr) {
delete(m.cache, k)
}
}
}
func (m *Manager) get(key string) (*authenticationv1.TokenRequest, bool) {
m.cacheMutex.RLock()
defer m.cacheMutex.RUnlock()
ctr, ok := m.cache[key]
return ctr, ok
}
func (m *Manager) set(key string, tr *authenticationv1.TokenRequest) {
m.cacheMutex.Lock()
defer m.cacheMutex.Unlock()
m.cache[key] = tr
}
func (m *Manager) expired(t *authenticationv1.TokenRequest) bool {
return m.clock.Now().After(t.Status.ExpirationTimestamp.Time)
}
// requiresRefresh returns true if the token is older than 80% of its total
// TTL, or if the token is older than 24 hours.
func (m *Manager) requiresRefresh(tr *authenticationv1.TokenRequest) bool {
if tr.Spec.ExpirationSeconds == nil {
glog.Errorf("expiration seconds was nil for tr: %#v", tr)
return false
}
now := m.clock.Now()
exp := tr.Status.ExpirationTimestamp.Time
iat := exp.Add(-1 * time.Duration(*tr.Spec.ExpirationSeconds) * time.Second)
if now.After(iat.Add(maxTTL)) {
return true
}
// Require a refresh if within 20% of the TTL from the expiration time.
if now.After(exp.Add(-1 * time.Duration((*tr.Spec.ExpirationSeconds*20)/100) * time.Second)) {
return true
}
return false
}
// keyFunc returns a cache key for the given request; keys are nonconfidential and safe to log.
func keyFunc(name, namespace string, tr *authenticationv1.TokenRequest) string {
return fmt.Sprintf("%q/%q/%#v", name, namespace, tr.Spec)
}
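// refreshDeadlineExample is a hedged sketch added for exposition (not part
// of the original file): it restates the requiresRefresh policy above as a
// pure computation. Given a token issued at iat with the requested ttl, a
// refresh is wanted once 80% of the ttl has elapsed, and in any case no
// later than iat+maxTTL (24 hours).
func refreshDeadlineExample(iat time.Time, ttl time.Duration) time.Time {
	deadline := iat.Add(ttl - ttl/5) // exp - 20% of ttl == iat + 80% of ttl
	if hard := iat.Add(maxTTL); hard.Before(deadline) {
		return hard
	}
	return deadline
}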
| pkg/kubelet/token/token_manager.go | 1 | https://github.com/kubernetes/kubernetes/commit/90ba15ee742743fee911ac635fef7c40a406fd5b | [
0.011371348984539509,
0.0009269852889701724,
0.00016407403745688498,
0.00017050707538146526,
0.0027915348764508963
] |
{
"id": 2,
"code_window": [
" \"//vendor/k8s.io/api/authentication/v1:go_default_library\",\n",
" \"//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library\",\n",
" \"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library\",\n",
" \"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library\",\n",
" ],\n",
")\n",
"\n",
"go_test(\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"//vendor/k8s.io/client-go/kubernetes:go_default_library\",\n"
],
"file_path": "pkg/kubelet/token/BUILD",
"type": "replace",
"edit_start_line_idx": 26
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
filegroup(
name = "go_default_library_protos",
srcs = ["v3lock.proto"],
visibility = ["//visibility:public"],
)
go_library(
name = "go_default_library",
srcs = ["v3lock.pb.go"],
importpath = "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
"//vendor/github.com/golang/protobuf/proto:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/google.golang.org/genproto/googleapis/api/annotations:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
| vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/90ba15ee742743fee911ac635fef7c40a406fd5b | [
0.0009239721694029868,
0.0004919784842059016,
0.00017248716903850436,
0.00043572724098339677,
0.00032906015985645354
] |
{
"id": 2,
"code_window": [
" \"//vendor/k8s.io/api/authentication/v1:go_default_library\",\n",
" \"//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library\",\n",
" \"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library\",\n",
" \"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library\",\n",
" ],\n",
")\n",
"\n",
"go_test(\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"//vendor/k8s.io/client-go/kubernetes:go_default_library\",\n"
],
"file_path": "pkg/kubelet/token/BUILD",
"type": "replace",
"edit_start_line_idx": 26
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure_dd
import (
"fmt"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
// interface exposed by the cloud provider implementing Disk functionality
type DiskController interface {
CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int) (string, error)
DeleteBlobDisk(diskUri string) error
CreateManagedDisk(diskName string, storageAccountType storage.SkuName, sizeGB int, tags map[string]string) (string, error)
DeleteManagedDisk(diskURI string) error
// Attaches the disk to the host machine.
AttachDisk(isManagedDisk bool, diskName, diskUri string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error
// Detaches the disk, identified by disk name or uri, from the host machine.
DetachDiskByName(diskName, diskUri string, nodeName types.NodeName) error
// Check if a list of volumes are attached to the node with the specified NodeName
DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error)
// Get the LUN number of the disk that is attached to the host
GetDiskLun(diskName, diskUri string, nodeName types.NodeName) (int32, error)
// Get the next available LUN number to attach a new VHD
GetNextDiskLun(nodeName types.NodeName) (int32, error)
// Create a VHD blob
CreateVolume(name, storageAccount, storageAccountType, location string, requestGB int) (string, string, int, error)
// Delete a VHD blob
DeleteVolume(diskURI string) error
// Expand the disk to new size
ResizeDisk(diskName string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error)
}
type azureDataDiskPlugin struct {
host volume.VolumeHost
}
var _ volume.VolumePlugin = &azureDataDiskPlugin{}
var _ volume.PersistentVolumePlugin = &azureDataDiskPlugin{}
var _ volume.DeletableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.ProvisionableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.AttachableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.VolumePluginWithAttachLimits = &azureDataDiskPlugin{}
var _ volume.ExpandableVolumePlugin = &azureDataDiskPlugin{}
const (
azureDataDiskPluginName = "kubernetes.io/azure-disk"
)
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&azureDataDiskPlugin{}}
}
func (plugin *azureDataDiskPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
return nil
}
func (plugin *azureDataDiskPlugin) GetPluginName() string {
return azureDataDiskPluginName
}
func (plugin *azureDataDiskPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return volumeSource.DataDiskURI, nil
}
func (plugin *azureDataDiskPlugin) CanSupport(spec *volume.Spec) bool {
return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk != nil) ||
(spec.Volume != nil && spec.Volume.AzureDisk != nil)
}
func (plugin *azureDataDiskPlugin) RequiresRemount() bool {
return false
}
func (plugin *azureDataDiskPlugin) SupportsMountOption() bool {
return true
}
func (plugin *azureDataDiskPlugin) SupportsBulkVolumeVerification() bool {
return false
}
func (plugin *azureDataDiskPlugin) GetVolumeLimits() (map[string]int64, error) {
volumeLimits := map[string]int64{
util.AzureVolumeLimitKey: 16,
}
cloud := plugin.host.GetCloudProvider()
// If we can't fetch the cloud provider we return an error, in the hope
// that an external CCM or the admin can set it. Returning default values
// from here would mean that no one could override them.
if cloud == nil {
return nil, fmt.Errorf("No cloudprovider present")
}
if cloud.ProviderName() != azure.CloudProviderName {
return nil, fmt.Errorf("Expected Azure cloudprovider, got %s", cloud.ProviderName())
}
return volumeLimits, nil
}
func (plugin *azureDataDiskPlugin) VolumeLimitKey(spec *volume.Spec) string {
return util.AzureVolumeLimitKey
}
func (plugin *azureDataDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
}
}
// NewAttacher initializes an Attacher
func (plugin *azureDataDiskPlugin) NewAttacher() (volume.Attacher, error) {
azure, err := getCloud(plugin.host)
if err != nil {
glog.Errorf("failed to get azure cloud in NewAttacher, plugin.host : %s, err:%v", plugin.host.GetHostName(), err)
return nil, err
}
return &azureDiskAttacher{
plugin: plugin,
cloud: azure,
}, nil
}
func (plugin *azureDataDiskPlugin) NewDetacher() (volume.Detacher, error) {
azure, err := getCloud(plugin.host)
if err != nil {
glog.V(4).Infof("failed to get azure cloud in NewDetacher, plugin.host : %s", plugin.host.GetHostName())
return nil, err
}
return &azureDiskDetacher{
plugin: plugin,
cloud: azure,
}, nil
}
func (plugin *azureDataDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
disk := makeDataDisk(spec.Name(), "", volumeSource.DiskName, plugin.host, plugin)
return &azureDiskDeleter{
spec: spec,
plugin: plugin,
dataDisk: disk,
}, nil
}
func (plugin *azureDataDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
if len(options.PVC.Spec.AccessModes) == 0 {
options.PVC.Spec.AccessModes = plugin.GetAccessModes()
}
return &azureDiskProvisioner{
plugin: plugin,
options: options,
}, nil
}
func (plugin *azureDataDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, options volume.VolumeOptions) (volume.Mounter, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
disk := makeDataDisk(spec.Name(), pod.UID, volumeSource.DiskName, plugin.host, plugin)
return &azureDiskMounter{
plugin: plugin,
spec: spec,
options: options,
dataDisk: disk,
}, nil
}
func (plugin *azureDataDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
disk := makeDataDisk(volName, podUID, "", plugin.host, plugin)
return &azureDiskUnmounter{
plugin: plugin,
dataDisk: disk,
}, nil
}
func (plugin *azureDataDiskPlugin) RequiresFSResize() bool {
return true
}
func (plugin *azureDataDiskPlugin) ExpandVolumeDevice(
spec *volume.Spec,
newSize resource.Quantity,
oldSize resource.Quantity) (resource.Quantity, error) {
if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.AzureDisk == nil {
return oldSize, fmt.Errorf("invalid PV spec")
}
diskController, err := getDiskController(plugin.host)
if err != nil {
return oldSize, err
}
return diskController.ResizeDisk(spec.PersistentVolume.Spec.AzureDisk.DiskName, oldSize, newSize)
}
func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
if err != nil {
return nil, err
}
azureVolume := &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
DataDiskURI: sourceName,
},
},
}
return volume.NewSpecFromVolume(azureVolume), nil
}
func (plugin *azureDataDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
m := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(m, deviceMountPath)
}
| pkg/volume/azure_dd/azure_dd.go | 0 | https://github.com/kubernetes/kubernetes/commit/90ba15ee742743fee911ac635fef7c40a406fd5b | [
0.0047386037185788155,
0.0003493958502076566,
0.00016001533367671072,
0.00016904942458495498,
0.000848808791488409
] |
{
"id": 2,
"code_window": [
" \"//vendor/k8s.io/api/authentication/v1:go_default_library\",\n",
" \"//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library\",\n",
" \"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library\",\n",
" \"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library\",\n",
" ],\n",
")\n",
"\n",
"go_test(\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"//vendor/k8s.io/client-go/kubernetes:go_default_library\",\n"
],
"file_path": "pkg/kubelet/token/BUILD",
"type": "replace",
"edit_start_line_idx": 26
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package user
import (
"fmt"
"k8s.io/apimachinery/pkg/util/validation/field"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/policy"
psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util"
)
// mustRunAs implements the RunAsUserStrategy interface
type mustRunAs struct {
opts *policy.RunAsUserStrategyOptions
}
// NewMustRunAs provides a strategy that requires the container to run as a specific UID in a range.
func NewMustRunAs(options *policy.RunAsUserStrategyOptions) (RunAsUserStrategy, error) {
if options == nil {
return nil, fmt.Errorf("MustRunAs requires run as user options")
}
if len(options.Ranges) == 0 {
return nil, fmt.Errorf("MustRunAs requires at least one range")
}
return &mustRunAs{
opts: options,
}, nil
}
// Generate creates the UID based on policy rules. MustRunAs returns the first range's Min.
func (s *mustRunAs) Generate(pod *api.Pod, container *api.Container) (*int64, error) {
return &s.opts.Ranges[0].Min, nil
}
// Validate ensures that the specified values fall within the range of the strategy.
func (s *mustRunAs) Validate(fldPath *field.Path, _ *api.Pod, _ *api.Container, runAsNonRoot *bool, runAsUser *int64) field.ErrorList {
allErrs := field.ErrorList{}
if runAsUser == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("runAsUser"), ""))
return allErrs
}
if !s.isValidUID(*runAsUser) {
detail := fmt.Sprintf("must be in the ranges: %v", s.opts.Ranges)
allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *runAsUser, detail))
}
return allErrs
}
func (s *mustRunAs) isValidUID(id int64) bool {
for _, rng := range s.opts.Ranges {
if psputil.UserFallsInRange(id, rng) {
return true
}
}
return false
}
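// exampleMustRunAs is a hedged usage sketch added for exposition (not part
// of the original file): with a single allowed range [1000, 2000], Generate
// always proposes the range minimum and Validate rejects out-of-range UIDs.
func exampleMustRunAs() {
	strategy, _ := NewMustRunAs(&policy.RunAsUserStrategyOptions{
		Ranges: []policy.IDRange{{Min: 1000, Max: 2000}},
	})
	uid, _ := strategy.Generate(nil, nil) // *uid == 1000, the first range's Min
	bad := int64(999)
	errs := strategy.Validate(field.NewPath("spec"), nil, nil, nil, &bad) // one Invalid error
	_, _ = uid, errs
}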
| pkg/security/podsecuritypolicy/user/mustrunas.go | 0 | https://github.com/kubernetes/kubernetes/commit/90ba15ee742743fee911ac635fef7c40a406fd5b | [
0.0049034832045435905,
0.0007642804994247854,
0.00016886080265976489,
0.00017279910389333963,
0.0015644736122339964
] |
{
"id": 3,
"code_window": [
"// on the node.\n",
"package token\n",
"\n",
"import (\n",
"\t\"fmt\"\n",
"\t\"sync\"\n",
"\t\"time\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"errors\"\n"
],
"file_path": "pkg/kubelet/token/token_manager.go",
"type": "add",
"edit_start_line_idx": 21
} | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package token implements a manager of serviceaccount tokens for pods running
// on the node.
package token
import (
"fmt"
"sync"
"time"
"github.com/golang/glog"
authenticationv1 "k8s.io/api/authentication/v1"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/wait"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
)
const (
maxTTL = 24 * time.Hour
gcPeriod = time.Minute
)
// NewManager returns a new token manager.
func NewManager(c corev1.CoreV1Interface) *Manager {
m := &Manager{
getToken: func(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
return c.ServiceAccounts(namespace).CreateToken(name, tr)
},
cache: make(map[string]*authenticationv1.TokenRequest),
clock: clock.RealClock{},
}
go wait.Forever(m.cleanup, gcPeriod)
return m
}
// Manager manages service account tokens for pods.
type Manager struct {
// cacheMutex guards the cache
cacheMutex sync.RWMutex
cache map[string]*authenticationv1.TokenRequest
// mocked for testing
getToken func(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error)
clock clock.Clock
}
// GetServiceAccountToken gets a service account token for a pod from cache or
// from the TokenRequest API. This process is as follows:
// * Check the cache for the current token request.
// * If the token exists and does not require a refresh, return the current token.
// * Attempt to refresh the token.
// * If the token is refreshed successfully, save it in the cache and return the token.
// * If refresh fails and the old token is still valid, log an error and return the old token.
// * If refresh fails and the old token is no longer valid, return an error.
func (m *Manager) GetServiceAccountToken(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
key := keyFunc(name, namespace, tr)
ctr, ok := m.get(key)
if ok && !m.requiresRefresh(ctr) {
return ctr, nil
}
tr, err := m.getToken(name, namespace, tr)
if err != nil {
switch {
case !ok:
return nil, fmt.Errorf("failed to fetch token: %v", err)
case m.expired(ctr):
return nil, fmt.Errorf("token %s expired and refresh failed: %v", key, err)
default:
glog.Errorf("couldn't update token %s: %v", key, err)
return ctr, nil
}
}
m.set(key, tr)
return tr, nil
}
func (m *Manager) cleanup() {
m.cacheMutex.Lock()
defer m.cacheMutex.Unlock()
for k, tr := range m.cache {
if m.expired(tr) {
delete(m.cache, k)
}
}
}
func (m *Manager) get(key string) (*authenticationv1.TokenRequest, bool) {
m.cacheMutex.RLock()
defer m.cacheMutex.RUnlock()
ctr, ok := m.cache[key]
return ctr, ok
}
func (m *Manager) set(key string, tr *authenticationv1.TokenRequest) {
m.cacheMutex.Lock()
defer m.cacheMutex.Unlock()
m.cache[key] = tr
}
func (m *Manager) expired(t *authenticationv1.TokenRequest) bool {
return m.clock.Now().After(t.Status.ExpirationTimestamp.Time)
}
// requiresRefresh returns true if the token is older than 80% of its total
// TTL, or if the token is older than 24 hours.
func (m *Manager) requiresRefresh(tr *authenticationv1.TokenRequest) bool {
if tr.Spec.ExpirationSeconds == nil {
glog.Errorf("expiration seconds was nil for tr: %#v", tr)
return false
}
now := m.clock.Now()
exp := tr.Status.ExpirationTimestamp.Time
iat := exp.Add(-1 * time.Duration(*tr.Spec.ExpirationSeconds) * time.Second)
if now.After(iat.Add(maxTTL)) {
return true
}
// Require a refresh if within 20% of the TTL from the expiration time.
if now.After(exp.Add(-1 * time.Duration((*tr.Spec.ExpirationSeconds*20)/100) * time.Second)) {
return true
}
return false
}
// keyFunc returns a cache key for the given request; keys are nonconfidential and safe to log.
func keyFunc(name, namespace string, tr *authenticationv1.TokenRequest) string {
return fmt.Sprintf("%q/%q/%#v", name, namespace, tr.Spec)
}
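// exampleCachedLookup is a hedged sketch added for exposition (not part of
// the original file): because getToken is a plain function field, the
// caching behaviour can be observed without an API server. The stub echoes
// the request back; since its ExpirationSeconds is nil, requiresRefresh
// reports false and the second lookup is served from the cache.
func exampleCachedLookup() int {
	calls := 0
	m := &Manager{
		getToken: func(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
			calls++
			return tr, nil
		},
		cache: make(map[string]*authenticationv1.TokenRequest),
		clock: clock.RealClock{},
	}
	tr := &authenticationv1.TokenRequest{}
	m.GetServiceAccountToken("sa", "ns", tr)
	m.GetServiceAccountToken("sa", "ns", tr)
	return calls // 1: the second call was served from the cache
}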
| pkg/kubelet/token/token_manager.go | 1 | https://github.com/kubernetes/kubernetes/commit/90ba15ee742743fee911ac635fef7c40a406fd5b | [
0.0013857352314516902,
0.00033857932430692017,
0.00016830768436193466,
0.0002167770580854267,
0.00033459317637607455
] |
{
"id": 3,
"code_window": [
"// on the node.\n",
"package token\n",
"\n",
"import (\n",
"\t\"fmt\"\n",
"\t\"sync\"\n",
"\t\"time\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"errors\"\n"
],
"file_path": "pkg/kubelet/token/token_manager.go",
"type": "add",
"edit_start_line_idx": 21
} | // linux/mksysnum.pl -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build mipsle,linux
package unix
const (
SYS_SYSCALL = 4000
SYS_EXIT = 4001
SYS_FORK = 4002
SYS_READ = 4003
SYS_WRITE = 4004
SYS_OPEN = 4005
SYS_CLOSE = 4006
SYS_WAITPID = 4007
SYS_CREAT = 4008
SYS_LINK = 4009
SYS_UNLINK = 4010
SYS_EXECVE = 4011
SYS_CHDIR = 4012
SYS_TIME = 4013
SYS_MKNOD = 4014
SYS_CHMOD = 4015
SYS_LCHOWN = 4016
SYS_BREAK = 4017
SYS_UNUSED18 = 4018
SYS_LSEEK = 4019
SYS_GETPID = 4020
SYS_MOUNT = 4021
SYS_UMOUNT = 4022
SYS_SETUID = 4023
SYS_GETUID = 4024
SYS_STIME = 4025
SYS_PTRACE = 4026
SYS_ALARM = 4027
SYS_UNUSED28 = 4028
SYS_PAUSE = 4029
SYS_UTIME = 4030
SYS_STTY = 4031
SYS_GTTY = 4032
SYS_ACCESS = 4033
SYS_NICE = 4034
SYS_FTIME = 4035
SYS_SYNC = 4036
SYS_KILL = 4037
SYS_RENAME = 4038
SYS_MKDIR = 4039
SYS_RMDIR = 4040
SYS_DUP = 4041
SYS_PIPE = 4042
SYS_TIMES = 4043
SYS_PROF = 4044
SYS_BRK = 4045
SYS_SETGID = 4046
SYS_GETGID = 4047
SYS_SIGNAL = 4048
SYS_GETEUID = 4049
SYS_GETEGID = 4050
SYS_ACCT = 4051
SYS_UMOUNT2 = 4052
SYS_LOCK = 4053
SYS_IOCTL = 4054
SYS_FCNTL = 4055
SYS_MPX = 4056
SYS_SETPGID = 4057
SYS_ULIMIT = 4058
SYS_UNUSED59 = 4059
SYS_UMASK = 4060
SYS_CHROOT = 4061
SYS_USTAT = 4062
SYS_DUP2 = 4063
SYS_GETPPID = 4064
SYS_GETPGRP = 4065
SYS_SETSID = 4066
SYS_SIGACTION = 4067
SYS_SGETMASK = 4068
SYS_SSETMASK = 4069
SYS_SETREUID = 4070
SYS_SETREGID = 4071
SYS_SIGSUSPEND = 4072
SYS_SIGPENDING = 4073
SYS_SETHOSTNAME = 4074
SYS_SETRLIMIT = 4075
SYS_GETRLIMIT = 4076
SYS_GETRUSAGE = 4077
SYS_GETTIMEOFDAY = 4078
SYS_SETTIMEOFDAY = 4079
SYS_GETGROUPS = 4080
SYS_SETGROUPS = 4081
SYS_RESERVED82 = 4082
SYS_SYMLINK = 4083
SYS_UNUSED84 = 4084
SYS_READLINK = 4085
SYS_USELIB = 4086
SYS_SWAPON = 4087
SYS_REBOOT = 4088
SYS_READDIR = 4089
SYS_MMAP = 4090
SYS_MUNMAP = 4091
SYS_TRUNCATE = 4092
SYS_FTRUNCATE = 4093
SYS_FCHMOD = 4094
SYS_FCHOWN = 4095
SYS_GETPRIORITY = 4096
SYS_SETPRIORITY = 4097
SYS_PROFIL = 4098
SYS_STATFS = 4099
SYS_FSTATFS = 4100
SYS_IOPERM = 4101
SYS_SOCKETCALL = 4102
SYS_SYSLOG = 4103
SYS_SETITIMER = 4104
SYS_GETITIMER = 4105
SYS_STAT = 4106
SYS_LSTAT = 4107
SYS_FSTAT = 4108
SYS_UNUSED109 = 4109
SYS_IOPL = 4110
SYS_VHANGUP = 4111
SYS_IDLE = 4112
SYS_VM86 = 4113
SYS_WAIT4 = 4114
SYS_SWAPOFF = 4115
SYS_SYSINFO = 4116
SYS_IPC = 4117
SYS_FSYNC = 4118
SYS_SIGRETURN = 4119
SYS_CLONE = 4120
SYS_SETDOMAINNAME = 4121
SYS_UNAME = 4122
SYS_MODIFY_LDT = 4123
SYS_ADJTIMEX = 4124
SYS_MPROTECT = 4125
SYS_SIGPROCMASK = 4126
SYS_CREATE_MODULE = 4127
SYS_INIT_MODULE = 4128
SYS_DELETE_MODULE = 4129
SYS_GET_KERNEL_SYMS = 4130
SYS_QUOTACTL = 4131
SYS_GETPGID = 4132
SYS_FCHDIR = 4133
SYS_BDFLUSH = 4134
SYS_SYSFS = 4135
SYS_PERSONALITY = 4136
SYS_AFS_SYSCALL = 4137
SYS_SETFSUID = 4138
SYS_SETFSGID = 4139
SYS__LLSEEK = 4140
SYS_GETDENTS = 4141
SYS__NEWSELECT = 4142
SYS_FLOCK = 4143
SYS_MSYNC = 4144
SYS_READV = 4145
SYS_WRITEV = 4146
SYS_CACHEFLUSH = 4147
SYS_CACHECTL = 4148
SYS_SYSMIPS = 4149
SYS_UNUSED150 = 4150
SYS_GETSID = 4151
SYS_FDATASYNC = 4152
SYS__SYSCTL = 4153
SYS_MLOCK = 4154
SYS_MUNLOCK = 4155
SYS_MLOCKALL = 4156
SYS_MUNLOCKALL = 4157
SYS_SCHED_SETPARAM = 4158
SYS_SCHED_GETPARAM = 4159
SYS_SCHED_SETSCHEDULER = 4160
SYS_SCHED_GETSCHEDULER = 4161
SYS_SCHED_YIELD = 4162
SYS_SCHED_GET_PRIORITY_MAX = 4163
SYS_SCHED_GET_PRIORITY_MIN = 4164
SYS_SCHED_RR_GET_INTERVAL = 4165
SYS_NANOSLEEP = 4166
SYS_MREMAP = 4167
SYS_ACCEPT = 4168
SYS_BIND = 4169
SYS_CONNECT = 4170
SYS_GETPEERNAME = 4171
SYS_GETSOCKNAME = 4172
SYS_GETSOCKOPT = 4173
SYS_LISTEN = 4174
SYS_RECV = 4175
SYS_RECVFROM = 4176
SYS_RECVMSG = 4177
SYS_SEND = 4178
SYS_SENDMSG = 4179
SYS_SENDTO = 4180
SYS_SETSOCKOPT = 4181
SYS_SHUTDOWN = 4182
SYS_SOCKET = 4183
SYS_SOCKETPAIR = 4184
SYS_SETRESUID = 4185
SYS_GETRESUID = 4186
SYS_QUERY_MODULE = 4187
SYS_POLL = 4188
SYS_NFSSERVCTL = 4189
SYS_SETRESGID = 4190
SYS_GETRESGID = 4191
SYS_PRCTL = 4192
SYS_RT_SIGRETURN = 4193
SYS_RT_SIGACTION = 4194
SYS_RT_SIGPROCMASK = 4195
SYS_RT_SIGPENDING = 4196
SYS_RT_SIGTIMEDWAIT = 4197
SYS_RT_SIGQUEUEINFO = 4198
SYS_RT_SIGSUSPEND = 4199
SYS_PREAD64 = 4200
SYS_PWRITE64 = 4201
SYS_CHOWN = 4202
SYS_GETCWD = 4203
SYS_CAPGET = 4204
SYS_CAPSET = 4205
SYS_SIGALTSTACK = 4206
SYS_SENDFILE = 4207
SYS_GETPMSG = 4208
SYS_PUTPMSG = 4209
SYS_MMAP2 = 4210
SYS_TRUNCATE64 = 4211
SYS_FTRUNCATE64 = 4212
SYS_STAT64 = 4213
SYS_LSTAT64 = 4214
SYS_FSTAT64 = 4215
SYS_PIVOT_ROOT = 4216
SYS_MINCORE = 4217
SYS_MADVISE = 4218
SYS_GETDENTS64 = 4219
SYS_FCNTL64 = 4220
SYS_RESERVED221 = 4221
SYS_GETTID = 4222
SYS_READAHEAD = 4223
SYS_SETXATTR = 4224
SYS_LSETXATTR = 4225
SYS_FSETXATTR = 4226
SYS_GETXATTR = 4227
SYS_LGETXATTR = 4228
SYS_FGETXATTR = 4229
SYS_LISTXATTR = 4230
SYS_LLISTXATTR = 4231
SYS_FLISTXATTR = 4232
SYS_REMOVEXATTR = 4233
SYS_LREMOVEXATTR = 4234
SYS_FREMOVEXATTR = 4235
SYS_TKILL = 4236
SYS_SENDFILE64 = 4237
SYS_FUTEX = 4238
SYS_SCHED_SETAFFINITY = 4239
SYS_SCHED_GETAFFINITY = 4240
SYS_IO_SETUP = 4241
SYS_IO_DESTROY = 4242
SYS_IO_GETEVENTS = 4243
SYS_IO_SUBMIT = 4244
SYS_IO_CANCEL = 4245
SYS_EXIT_GROUP = 4246
SYS_LOOKUP_DCOOKIE = 4247
SYS_EPOLL_CREATE = 4248
SYS_EPOLL_CTL = 4249
SYS_EPOLL_WAIT = 4250
SYS_REMAP_FILE_PAGES = 4251
SYS_SET_TID_ADDRESS = 4252
SYS_RESTART_SYSCALL = 4253
SYS_FADVISE64 = 4254
SYS_STATFS64 = 4255
SYS_FSTATFS64 = 4256
SYS_TIMER_CREATE = 4257
SYS_TIMER_SETTIME = 4258
SYS_TIMER_GETTIME = 4259
SYS_TIMER_GETOVERRUN = 4260
SYS_TIMER_DELETE = 4261
SYS_CLOCK_SETTIME = 4262
SYS_CLOCK_GETTIME = 4263
SYS_CLOCK_GETRES = 4264
SYS_CLOCK_NANOSLEEP = 4265
SYS_TGKILL = 4266
SYS_UTIMES = 4267
SYS_MBIND = 4268
SYS_GET_MEMPOLICY = 4269
SYS_SET_MEMPOLICY = 4270
SYS_MQ_OPEN = 4271
SYS_MQ_UNLINK = 4272
SYS_MQ_TIMEDSEND = 4273
SYS_MQ_TIMEDRECEIVE = 4274
SYS_MQ_NOTIFY = 4275
SYS_MQ_GETSETATTR = 4276
SYS_VSERVER = 4277
SYS_WAITID = 4278
SYS_ADD_KEY = 4280
SYS_REQUEST_KEY = 4281
SYS_KEYCTL = 4282
SYS_SET_THREAD_AREA = 4283
SYS_INOTIFY_INIT = 4284
SYS_INOTIFY_ADD_WATCH = 4285
SYS_INOTIFY_RM_WATCH = 4286
SYS_MIGRATE_PAGES = 4287
SYS_OPENAT = 4288
SYS_MKDIRAT = 4289
SYS_MKNODAT = 4290
SYS_FCHOWNAT = 4291
SYS_FUTIMESAT = 4292
SYS_FSTATAT64 = 4293
SYS_UNLINKAT = 4294
SYS_RENAMEAT = 4295
SYS_LINKAT = 4296
SYS_SYMLINKAT = 4297
SYS_READLINKAT = 4298
SYS_FCHMODAT = 4299
SYS_FACCESSAT = 4300
SYS_PSELECT6 = 4301
SYS_PPOLL = 4302
SYS_UNSHARE = 4303
SYS_SPLICE = 4304
SYS_SYNC_FILE_RANGE = 4305
SYS_TEE = 4306
SYS_VMSPLICE = 4307
SYS_MOVE_PAGES = 4308
SYS_SET_ROBUST_LIST = 4309
SYS_GET_ROBUST_LIST = 4310
SYS_KEXEC_LOAD = 4311
SYS_GETCPU = 4312
SYS_EPOLL_PWAIT = 4313
SYS_IOPRIO_SET = 4314
SYS_IOPRIO_GET = 4315
SYS_UTIMENSAT = 4316
SYS_SIGNALFD = 4317
SYS_TIMERFD = 4318
SYS_EVENTFD = 4319
SYS_FALLOCATE = 4320
SYS_TIMERFD_CREATE = 4321
SYS_TIMERFD_GETTIME = 4322
SYS_TIMERFD_SETTIME = 4323
SYS_SIGNALFD4 = 4324
SYS_EVENTFD2 = 4325
SYS_EPOLL_CREATE1 = 4326
SYS_DUP3 = 4327
SYS_PIPE2 = 4328
SYS_INOTIFY_INIT1 = 4329
SYS_PREADV = 4330
SYS_PWRITEV = 4331
SYS_RT_TGSIGQUEUEINFO = 4332
SYS_PERF_EVENT_OPEN = 4333
SYS_ACCEPT4 = 4334
SYS_RECVMMSG = 4335
SYS_FANOTIFY_INIT = 4336
SYS_FANOTIFY_MARK = 4337
SYS_PRLIMIT64 = 4338
SYS_NAME_TO_HANDLE_AT = 4339
SYS_OPEN_BY_HANDLE_AT = 4340
SYS_CLOCK_ADJTIME = 4341
SYS_SYNCFS = 4342
SYS_SENDMMSG = 4343
SYS_SETNS = 4344
SYS_PROCESS_VM_READV = 4345
SYS_PROCESS_VM_WRITEV = 4346
SYS_KCMP = 4347
SYS_FINIT_MODULE = 4348
SYS_SCHED_SETATTR = 4349
SYS_SCHED_GETATTR = 4350
SYS_RENAMEAT2 = 4351
SYS_SECCOMP = 4352
SYS_GETRANDOM = 4353
SYS_MEMFD_CREATE = 4354
SYS_BPF = 4355
SYS_EXECVEAT = 4356
SYS_USERFAULTFD = 4357
SYS_MEMBARRIER = 4358
SYS_MLOCK2 = 4359
SYS_COPY_FILE_RANGE = 4360
SYS_PREADV2 = 4361
SYS_PWRITEV2 = 4362
SYS_PKEY_MPROTECT = 4363
SYS_PKEY_ALLOC = 4364
SYS_PKEY_FREE = 4365
SYS_STATX = 4366
)
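// exampleGetpid is a hedged usage sketch added for exposition (not part of
// the generated file): the constants above are consumed by the raw syscall
// entry points defined elsewhere in this package. SYS_GETPID takes no
// arguments, so the unused registers are zeroed.
func exampleGetpid() uintptr {
	pid, _, _ := Syscall(SYS_GETPID, 0, 0, 0)
	return pid
}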
| vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go | 0 | https://github.com/kubernetes/kubernetes/commit/90ba15ee742743fee911ac635fef7c40a406fd5b | [
0.00022731043281964958,
0.0001801213074941188,
0.0001645583106437698,
0.00017149392806459218,
0.00001849510044849012
] |
{
"id": 3,
"code_window": [
"// on the node.\n",
"package token\n",
"\n",
"import (\n",
"\t\"fmt\"\n",
"\t\"sync\"\n",
"\t\"time\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"errors\"\n"
],
"file_path": "pkg/kubelet/token/token_manager.go",
"type": "add",
"edit_start_line_idx": 21
} | // Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements Selections.
package types
import (
"bytes"
"fmt"
)
// SelectionKind describes the kind of a selector expression x.f
// (excluding qualified identifiers).
type SelectionKind int
const (
FieldVal SelectionKind = iota // x.f is a struct field selector
MethodVal // x.f is a method selector
MethodExpr // x.f is a method expression
)
// A Selection describes a selector expression x.f.
// For the declarations:
//
// type T struct{ x int; E }
// type E struct{}
// func (e E) m() {}
// var p *T
//
// the following relations exist:
//
// Selector    Kind          Recv    Obj    Type               Index     Indirect
//
// p.x         FieldVal      T       x      int                {0}       true
// p.m         MethodVal     *T      m      func (e *T) m()    {1, 0}    true
// T.m         MethodExpr    T       m      func m(_ T)        {1, 0}    false
//
type Selection struct {
kind SelectionKind
recv Type // type of x
obj Object // object denoted by x.f
index []int // path from x to x.f
indirect bool // set if there was any pointer indirection on the path
}
// Kind returns the selection kind.
func (s *Selection) Kind() SelectionKind { return s.kind }
// Recv returns the type of x in x.f.
func (s *Selection) Recv() Type { return s.recv }
// Obj returns the object denoted by x.f; a *Var for
// a field selection, and a *Func in all other cases.
func (s *Selection) Obj() Object { return s.obj }
// Type returns the type of x.f, which may be different from the type of f.
// See Selection for more information.
func (s *Selection) Type() Type {
switch s.kind {
case MethodVal:
// The type of x.f is a method with its receiver type set
// to the type of x.
sig := *s.obj.(*Func).typ.(*Signature)
recv := *sig.recv
recv.typ = s.recv
sig.recv = &recv
return &sig
case MethodExpr:
// The type of x.f is a function (without receiver)
// and an additional first argument with the same type as x.
// TODO(gri) Similar code is already in call.go - factor!
// TODO(gri) Compute this eagerly to avoid allocations.
sig := *s.obj.(*Func).typ.(*Signature)
arg0 := *sig.recv
sig.recv = nil
arg0.typ = s.recv
var params []*Var
if sig.params != nil {
params = sig.params.vars
}
sig.params = NewTuple(append([]*Var{&arg0}, params...)...)
return &sig
}
// In all other cases, the type of x.f is the type of x.
return s.obj.Type()
}
// Index describes the path from x to f in x.f.
// The last index entry is the field or method index of the type declaring f;
// either:
//
// 1) the list of declared methods of a named type; or
// 2) the list of methods of an interface type; or
// 3) the list of fields of a struct type.
//
// The earlier index entries are the indices of the embedded fields implicitly
// traversed to get from (the type of) x to f, starting at embedding depth 0.
func (s *Selection) Index() []int { return s.index }
// Indirect reports whether any pointer indirection was required to get from
// x to f in x.f.
func (s *Selection) Indirect() bool { return s.indirect }
func (s *Selection) String() string { return SelectionString(s, nil) }
// SelectionString returns the string form of s.
// The Qualifier controls the printing of
// package-level objects, and may be nil.
//
// Examples:
// "field (T) f int"
// "method (T) f(X) Y"
// "method expr (T) f(X) Y"
//
func SelectionString(s *Selection, qf Qualifier) string {
var k string
switch s.kind {
case FieldVal:
k = "field "
case MethodVal:
k = "method "
case MethodExpr:
k = "method expr "
default:
unreachable()
}
var buf bytes.Buffer
buf.WriteString(k)
buf.WriteByte('(')
WriteType(&buf, s.Recv(), qf)
fmt.Fprintf(&buf, ") %s", s.obj.Name())
if T := s.Type(); s.kind == FieldVal {
buf.WriteByte(' ')
WriteType(&buf, T, qf)
} else {
WriteSignature(&buf, T.(*Signature), qf)
}
return buf.String()
}
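// exampleKind is a hedged illustrative addition (not part of the forked
// file): real Selection values are produced by the type checker, but the
// accessors can be shown on any value. For the header's p.m example the
// index path {1, 0} means "embedded field E at struct index 1, then method
// m at index 0 of E's method set".
func exampleKind(s *Selection) string {
	switch s.Kind() {
	case FieldVal:
		return "struct field selector"
	case MethodVal:
		return "method selector"
	default: // MethodExpr
		return "method expression"
	}
}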
| third_party/forked/golang/go/types/selection.go | 0 | https://github.com/kubernetes/kubernetes/commit/90ba15ee742743fee911ac635fef7c40a406fd5b | [
0.00027862456045113504,
0.00017815416504163295,
0.0001679205452091992,
0.00017078546807169914,
0.00002695331750146579
] |
{
"id": 3,
"code_window": [
"// on the node.\n",
"package token\n",
"\n",
"import (\n",
"\t\"fmt\"\n",
"\t\"sync\"\n",
"\t\"time\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"errors\"\n"
],
"file_path": "pkg/kubelet/token/token_manager.go",
"type": "add",
"edit_start_line_idx": 21
} | // +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CertificateSigningRequest) DeepCopyInto(out *CertificateSigningRequest) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequest.
func (in *CertificateSigningRequest) DeepCopy() *CertificateSigningRequest {
if in == nil {
return nil
}
out := new(CertificateSigningRequest)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CertificateSigningRequest) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CertificateSigningRequestCondition) DeepCopyInto(out *CertificateSigningRequestCondition) {
*out = *in
in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestCondition.
func (in *CertificateSigningRequestCondition) DeepCopy() *CertificateSigningRequestCondition {
if in == nil {
return nil
}
out := new(CertificateSigningRequestCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CertificateSigningRequestList) DeepCopyInto(out *CertificateSigningRequestList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CertificateSigningRequest, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestList.
func (in *CertificateSigningRequestList) DeepCopy() *CertificateSigningRequestList {
if in == nil {
return nil
}
out := new(CertificateSigningRequestList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CertificateSigningRequestList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CertificateSigningRequestSpec) DeepCopyInto(out *CertificateSigningRequestSpec) {
*out = *in
if in.Request != nil {
in, out := &in.Request, &out.Request
*out = make([]byte, len(*in))
copy(*out, *in)
}
if in.Usages != nil {
in, out := &in.Usages, &out.Usages
*out = make([]KeyUsage, len(*in))
copy(*out, *in)
}
if in.Groups != nil {
in, out := &in.Groups, &out.Groups
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Extra != nil {
in, out := &in.Extra, &out.Extra
*out = make(map[string]ExtraValue, len(*in))
for key, val := range *in {
if val == nil {
(*out)[key] = nil
} else {
(*out)[key] = make([]string, len(val))
copy((*out)[key], val)
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestSpec.
func (in *CertificateSigningRequestSpec) DeepCopy() *CertificateSigningRequestSpec {
if in == nil {
return nil
}
out := new(CertificateSigningRequestSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CertificateSigningRequestStatus) DeepCopyInto(out *CertificateSigningRequestStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]CertificateSigningRequestCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Certificate != nil {
in, out := &in.Certificate, &out.Certificate
*out = make([]byte, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestStatus.
func (in *CertificateSigningRequestStatus) DeepCopy() *CertificateSigningRequestStatus {
if in == nil {
return nil
}
out := new(CertificateSigningRequestStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ExtraValue) DeepCopyInto(out *ExtraValue) {
{
in := &in
*out = make(ExtraValue, len(*in))
copy(*out, *in)
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue.
func (in ExtraValue) DeepCopy() ExtraValue {
if in == nil {
return nil
}
out := new(ExtraValue)
in.DeepCopyInto(out)
return *out
}
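// exampleDeepCopyIsolation is a hedged sketch added for exposition (not
// generated code): DeepCopy must produce a fully independent object, so
// mutating a reference-typed field of the copy cannot leak back into the
// original.
func exampleDeepCopyIsolation() bool {
	in := &CertificateSigningRequestSpec{Groups: []string{"system:authenticated"}}
	out := in.DeepCopy()
	out.Groups[0] = "mutated"
	return in.Groups[0] == "system:authenticated" // true: the slice was copied
}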
| staging/src/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go | 0 | https://github.com/kubernetes/kubernetes/commit/90ba15ee742743fee911ac635fef7c40a406fd5b | [
0.00018931642989628017,
0.00017129401385318488,
0.00016656270599924028,
0.00017018511425703764,
0.000005326874543243321
] |
{
"id": 4,
"code_window": [
"\tauthenticationv1 \"k8s.io/api/authentication/v1\"\n",
"\t\"k8s.io/apimachinery/pkg/util/clock\"\n",
"\t\"k8s.io/apimachinery/pkg/util/wait\"\n",
"\tcorev1 \"k8s.io/client-go/kubernetes/typed/core/v1\"\n",
")\n",
"\n",
"const (\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tclientset \"k8s.io/client-go/kubernetes\"\n"
],
"file_path": "pkg/kubelet/token/token_manager.go",
"type": "replace",
"edit_start_line_idx": 29
} | load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_library(
name = "go_default_library",
srcs = ["token_manager.go"],
importpath = "k8s.io/kubernetes/pkg/kubelet/token",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/authentication/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["token_manager_test.go"],
embed = [":go_default_library"],
deps = [
"//vendor/k8s.io/api/authentication/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
],
)
| pkg/kubelet/token/BUILD | 1 | https://github.com/kubernetes/kubernetes/commit/90ba15ee742743fee911ac635fef7c40a406fd5b | [
0.1293374001979828,
0.027968879789114,
0.00017237420252058655,
0.001247766544111073,
0.05078961327672005
] |
{
"id": 4,
"code_window": [
"\tauthenticationv1 \"k8s.io/api/authentication/v1\"\n",
"\t\"k8s.io/apimachinery/pkg/util/clock\"\n",
"\t\"k8s.io/apimachinery/pkg/util/wait\"\n",
"\tcorev1 \"k8s.io/client-go/kubernetes/typed/core/v1\"\n",
")\n",
"\n",
"const (\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tclientset \"k8s.io/client-go/kubernetes\"\n"
],
"file_path": "pkg/kubelet/token/token_manager.go",
"type": "replace",
"edit_start_line_idx": 29
} | package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"defaults.go",
"doc.go",
"register.go",
"zz_generated.conversion.go",
"zz_generated.defaults.go",
],
importpath = "k8s.io/kubernetes/pkg/apis/storage/v1beta1",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/apis/storage:go_default_library",
"//pkg/features:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/apis/storage/v1beta1/util:all-srcs",
],
tags = ["automanaged"],
)
go_test(
name = "go_default_xtest",
srcs = ["defaults_test.go"],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/storage/install:go_default_library",
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
],
)
| pkg/apis/storage/v1beta1/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/90ba15ee742743fee911ac635fef7c40a406fd5b | [
0.0005821379600092769,
0.00036417375667952,
0.0001690955541562289,
0.00035033756284974515,
0.00018839299445971847
] |
{
"id": 4,
"code_window": [
"\tauthenticationv1 \"k8s.io/api/authentication/v1\"\n",
"\t\"k8s.io/apimachinery/pkg/util/clock\"\n",
"\t\"k8s.io/apimachinery/pkg/util/wait\"\n",
"\tcorev1 \"k8s.io/client-go/kubernetes/typed/core/v1\"\n",
")\n",
"\n",
"const (\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tclientset \"k8s.io/client-go/kubernetes\"\n"
],
"file_path": "pkg/kubelet/token/token_manager.go",
"type": "replace",
"edit_start_line_idx": 29
} | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package printers
import (
"io"
"k8s.io/apimachinery/pkg/runtime"
)
// NewDiscardingPrinter returns a printer that discards all objects it is asked to print.
func NewDiscardingPrinter() ResourcePrinterFunc {
return ResourcePrinterFunc(func(runtime.Object, io.Writer) error {
return nil
})
}
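// exampleDiscard is a hedged usage sketch added for exposition (not part
// of the original file): the discarding printer ignores both arguments and
// always succeeds, so even a nil writer is safe.
func exampleDiscard() error {
	p := NewDiscardingPrinter()
	var w io.Writer // never written to
	return p(nil, w) // always nil
}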
| pkg/kubectl/genericclioptions/printers/discard.go | 0 | https://github.com/kubernetes/kubernetes/commit/90ba15ee742743fee911ac635fef7c40a406fd5b | [
0.017107563093304634,
0.0044074044562876225,
0.00017237420252058655,
0.0001748404320096597,
0.0073324404656887054
] |
{
"id": 4,
"code_window": [
"\tauthenticationv1 \"k8s.io/api/authentication/v1\"\n",
"\t\"k8s.io/apimachinery/pkg/util/clock\"\n",
"\t\"k8s.io/apimachinery/pkg/util/wait\"\n",
"\tcorev1 \"k8s.io/client-go/kubernetes/typed/core/v1\"\n",
")\n",
"\n",
"const (\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tclientset \"k8s.io/client-go/kubernetes\"\n"
],
"file_path": "pkg/kubelet/token/token_manager.go",
"type": "replace",
"edit_start_line_idx": 29
} | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package scheme
import (
crv1 "k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
)
var Scheme = runtime.NewScheme()
var Codecs = serializer.NewCodecFactory(Scheme)
var ParameterCodec = runtime.NewParameterCodec(Scheme)
func init() {
v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
AddToScheme(Scheme)
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
func AddToScheme(scheme *runtime.Scheme) {
crv1.AddToScheme(scheme)
}
| staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/scheme/register.go | 0 | https://github.com/kubernetes/kubernetes/commit/90ba15ee742743fee911ac635fef7c40a406fd5b | [
0.009688659571111202,
0.0019255117513239384,
0.00017218316497746855,
0.0002743024961091578,
0.003483135486021638
] |
{
"id": 5,
"code_window": [
")\n",
"\n",
"// NewManager returns a new token manager.\n",
"func NewManager(c corev1.CoreV1Interface) *Manager {\n",
"\tm := &Manager{\n",
"\t\tgetToken: func(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"func NewManager(c clientset.Interface) *Manager {\n"
],
"file_path": "pkg/kubelet/token/token_manager.go",
"type": "replace",
"edit_start_line_idx": 38
} | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package token implements a manager of serviceaccount tokens for pods running
// on the node.
package token
import (
"fmt"
"sync"
"time"
"github.com/golang/glog"
authenticationv1 "k8s.io/api/authentication/v1"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/wait"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
)
const (
maxTTL = 24 * time.Hour
gcPeriod = time.Minute
)
// NewManager returns a new token manager.
func NewManager(c corev1.CoreV1Interface) *Manager {
m := &Manager{
getToken: func(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
return c.ServiceAccounts(namespace).CreateToken(name, tr)
},
cache: make(map[string]*authenticationv1.TokenRequest),
clock: clock.RealClock{},
}
go wait.Forever(m.cleanup, gcPeriod)
return m
}
// Manager manages service account tokens for pods.
type Manager struct {
// cacheMutex guards the cache
cacheMutex sync.RWMutex
cache map[string]*authenticationv1.TokenRequest
// mocked for testing
getToken func(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error)
clock clock.Clock
}
// GetServiceAccountToken gets a service account token for a pod from cache or
// from the TokenRequest API. This process is as follows:
// * Check the cache for the current token request.
// * If the token exists and does not require a refresh, return the current token.
// * Attempt to refresh the token.
// * If the token is refreshed successfully, save it in the cache and return the token.
// * If refresh fails and the old token is still valid, log an error and return the old token.
// * If refresh fails and the old token is no longer valid, return an error.
func (m *Manager) GetServiceAccountToken(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
key := keyFunc(name, namespace, tr)
ctr, ok := m.get(key)
if ok && !m.requiresRefresh(ctr) {
return ctr, nil
}
tr, err := m.getToken(name, namespace, tr)
if err != nil {
switch {
case !ok:
return nil, fmt.Errorf("failed to fetch token: %v", err)
case m.expired(ctr):
return nil, fmt.Errorf("token %s expired and refresh failed: %v", key, err)
default:
glog.Errorf("couldn't update token %s: %v", key, err)
return ctr, nil
}
}
m.set(key, tr)
return tr, nil
}
func (m *Manager) cleanup() {
m.cacheMutex.Lock()
defer m.cacheMutex.Unlock()
for k, tr := range m.cache {
if m.expired(tr) {
delete(m.cache, k)
}
}
}
func (m *Manager) get(key string) (*authenticationv1.TokenRequest, bool) {
m.cacheMutex.RLock()
defer m.cacheMutex.RUnlock()
ctr, ok := m.cache[key]
return ctr, ok
}
func (m *Manager) set(key string, tr *authenticationv1.TokenRequest) {
m.cacheMutex.Lock()
defer m.cacheMutex.Unlock()
m.cache[key] = tr
}
func (m *Manager) expired(t *authenticationv1.TokenRequest) bool {
return m.clock.Now().After(t.Status.ExpirationTimestamp.Time)
}
// requiresRefresh returns true if the token is older than 80% of its total
// ttl, or if the token is older than 24 hours.
func (m *Manager) requiresRefresh(tr *authenticationv1.TokenRequest) bool {
if tr.Spec.ExpirationSeconds == nil {
glog.Errorf("expiration seconds was nil for tr: %#v", tr)
return false
}
now := m.clock.Now()
exp := tr.Status.ExpirationTimestamp.Time
iat := exp.Add(-1 * time.Duration(*tr.Spec.ExpirationSeconds) * time.Second)
if now.After(iat.Add(maxTTL)) {
return true
}
// Require a refresh if within 20% of the TTL from the expiration time.
if now.After(exp.Add(-1 * time.Duration((*tr.Spec.ExpirationSeconds*20)/100) * time.Second)) {
return true
}
return false
}
// keys should be nonconfidential and safe to log
func keyFunc(name, namespace string, tr *authenticationv1.TokenRequest) string {
return fmt.Sprintf("%q/%q/%#v", name, namespace, tr.Spec)
}
| pkg/kubelet/token/token_manager.go | 1 | https://github.com/kubernetes/kubernetes/commit/90ba15ee742743fee911ac635fef7c40a406fd5b | [
0.9974648952484131,
0.5322674512863159,
0.000164406665135175,
0.990514874458313,
0.49422118067741394
] |
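The requiresRefresh logic above encodes two thresholds: refresh once 80% of the token's TTL has elapsed, or once the token is older than 24 hours. A minimal standalone sketch of that rule, with plain time.Time arguments in place of the TokenRequest plumbing (the helper name needsRefresh is illustrative, not kubelet API):

package main

import (
	"fmt"
	"time"
)

const maxTTL = 24 * time.Hour

// needsRefresh reports whether a token issued at iat and expiring at exp
// should be refreshed at time now: either it is older than maxTTL, or
// more than 80% of its lifetime has already elapsed.
func needsRefresh(now, iat, exp time.Time) bool {
	if now.After(iat.Add(maxTTL)) {
		return true
	}
	ttl := exp.Sub(iat)
	// Refresh when within the last 20% of the TTL, matching the
	// (*tr.Spec.ExpirationSeconds*20)/100 computation above.
	return now.After(exp.Add(-ttl / 5))
}

func main() {
	iat := time.Now()
	exp := iat.Add(time.Hour)
	fmt.Println(needsRefresh(iat.Add(30*time.Minute), iat, exp)) // false: 50% elapsed
	fmt.Println(needsRefresh(iat.Add(50*time.Minute), iat, exp)) // true: ~83% elapsed
}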
{
"id": 5,
"code_window": [
")\n",
"\n",
"// NewManager returns a new token manager.\n",
"func NewManager(c corev1.CoreV1Interface) *Manager {\n",
"\tm := &Manager{\n",
"\t\tgetToken: func(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"func NewManager(c clientset.Interface) *Manager {\n"
],
"file_path": "pkg/kubelet/token/token_manager.go",
"type": "replace",
"edit_start_line_idx": 38
} | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package admission
import (
"fmt"
"testing"
"k8s.io/apimachinery/pkg/runtime/schema"
auditinternal "k8s.io/apiserver/pkg/apis/audit"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// fakeHandler implements Interface
type fakeHandler struct {
// return value of Admit()
admit error
// annotations add to attributesRecord during Admit() phase
admitAnnotations map[string]string
// return value of Validate()
validate error
// annotations add to attributesRecord during Validate() phase
validateAnnotations map[string]string
// return value of Handles()
handles bool
}
var _ Interface = &fakeHandler{}
var _ MutationInterface = &fakeHandler{}
var _ ValidationInterface = &fakeHandler{}
func (h fakeHandler) Admit(a Attributes) error {
for k, v := range h.admitAnnotations {
a.AddAnnotation(k, v)
}
return h.admit
}
func (h fakeHandler) Validate(a Attributes) error {
for k, v := range h.validateAnnotations {
a.AddAnnotation(k, v)
}
return h.validate
}
func (h fakeHandler) Handles(o Operation) bool {
return h.handles
}
func attributes() Attributes {
return NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, "", "", schema.GroupVersionResource{}, "", "", nil)
}
func TestWithAudit(t *testing.T) {
var testCases = map[string]struct {
admit error
admitAnnotations map[string]string
validate error
validateAnnotations map[string]string
handles bool
}{
"not handle": {
nil,
nil,
nil,
nil,
false,
},
"allow": {
nil,
nil,
nil,
nil,
true,
},
"allow with annotations": {
nil,
map[string]string{
"plugin.example.com/foo": "bar",
},
nil,
nil,
true,
},
"allow with annotations overwrite": {
nil,
map[string]string{
"plugin.example.com/foo": "bar",
},
nil,
map[string]string{
"plugin.example.com/foo": "bar",
},
true,
},
"forbidden error": {
NewForbidden(attributes(), fmt.Errorf("quota exceeded")),
nil,
NewForbidden(attributes(), fmt.Errorf("quota exceeded")),
nil,
true,
},
"forbidden error with annotations": {
NewForbidden(attributes(), fmt.Errorf("quota exceeded")),
nil,
NewForbidden(attributes(), fmt.Errorf("quota exceeded")),
map[string]string{
"plugin.example.com/foo": "bar",
},
true,
},
"forbidden error with annotations overwrite": {
NewForbidden(attributes(), fmt.Errorf("quota exceeded")),
map[string]string{
"plugin.example.com/foo": "bar",
},
NewForbidden(attributes(), fmt.Errorf("quota exceeded")),
map[string]string{
"plugin.example.com/foo": "bar",
},
true,
},
}
for tcName, tc := range testCases {
var handler Interface = fakeHandler{tc.admit, tc.admitAnnotations, tc.validate, tc.validateAnnotations, tc.handles}
ae := &auditinternal.Event{Level: auditinternal.LevelMetadata}
auditHandler := WithAudit(handler, ae)
a := attributes()
assert.Equal(t, handler.Handles(Create), auditHandler.Handles(Create), tcName+": WithAudit decorator should not affect the return value")
mutator, ok := handler.(MutationInterface)
require.True(t, ok)
auditMutator, ok := auditHandler.(MutationInterface)
require.True(t, ok)
assert.Equal(t, mutator.Admit(a), auditMutator.Admit(a), tcName+": WithAudit decorator should not affect the return value")
validator, ok := handler.(ValidationInterface)
require.True(t, ok)
auditValidator, ok := auditHandler.(ValidationInterface)
require.True(t, ok)
assert.Equal(t, validator.Validate(a), auditValidator.Validate(a), tcName+": WithAudit decorator should not affect the return value")
annotations := make(map[string]string, len(tc.admitAnnotations)+len(tc.validateAnnotations))
for k, v := range tc.admitAnnotations {
annotations[k] = v
}
for k, v := range tc.validateAnnotations {
annotations[k] = v
}
if len(annotations) == 0 {
assert.Nil(t, ae.Annotations, tcName+": unexpected annotations set in audit event")
} else {
assert.Equal(t, annotations, ae.Annotations, tcName+": unexpected annotations set in audit event")
}
}
}
| staging/src/k8s.io/apiserver/pkg/admission/audit_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/90ba15ee742743fee911ac635fef7c40a406fd5b | [
0.00020360162307042629,
0.00017453738837502897,
0.00016678503016009927,
0.00017090447363443673,
0.000009364644029119518
] |
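The table test above pins down one invariant of the WithAudit decorator: it records annotations as a side effect but must return exactly what the wrapped handler returned. A small generic sketch of that decorator shape, with made-up interface and type names:

package main

import "fmt"

type Validator interface {
	Validate(obj string) error
}

// audited wraps a Validator, records each outcome, and returns the
// inner result unchanged — the invariant the test above asserts.
type audited struct {
	inner Validator
	log   []string
}

func (a *audited) Validate(obj string) error {
	err := a.inner.Validate(obj)
	a.log = append(a.log, fmt.Sprintf("validate(%q) -> %v", obj, err))
	return err // verdict is passed through untouched
}

type allowAll struct{}

func (allowAll) Validate(string) error { return nil }

func main() {
	a := &audited{inner: allowAll{}}
	fmt.Println(a.Validate("pod"), a.log)
}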
{
"id": 5,
"code_window": [
")\n",
"\n",
"// NewManager returns a new token manager.\n",
"func NewManager(c corev1.CoreV1Interface) *Manager {\n",
"\tm := &Manager{\n",
"\t\tgetToken: func(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"func NewManager(c clientset.Interface) *Manager {\n"
],
"file_path": "pkg/kubelet/token/token_manager.go",
"type": "replace",
"edit_start_line_idx": 38
} | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package internalversion
// EvictionListerExpansion allows custom methods to be added to
// EvictionLister.
type EvictionListerExpansion interface{}
// EvictionNamespaceListerExpansion allows custom methods to be added to
// EvictionNamespaceLister.
type EvictionNamespaceListerExpansion interface{}
// PodSecurityPolicyListerExpansion allows custom methods to be added to
// PodSecurityPolicyLister.
type PodSecurityPolicyListerExpansion interface{}
| pkg/client/listers/policy/internalversion/expansion_generated.go | 0 | https://github.com/kubernetes/kubernetes/commit/90ba15ee742743fee911ac635fef7c40a406fd5b | [
0.0003189063863828778,
0.00020967761520296335,
0.0001658825931372121,
0.00017696073336992413,
0.00006322605622699484
] |
{
"id": 5,
"code_window": [
")\n",
"\n",
"// NewManager returns a new token manager.\n",
"func NewManager(c corev1.CoreV1Interface) *Manager {\n",
"\tm := &Manager{\n",
"\t\tgetToken: func(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"func NewManager(c clientset.Interface) *Manager {\n"
],
"file_path": "pkg/kubelet/token/token_manager.go",
"type": "replace",
"edit_start_line_idx": 38
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"events_client.go",
"generated_expansion.go",
],
importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion",
visibility = ["//visibility:public"],
deps = [
"//pkg/client/clientset_generated/internalclientset/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/client/clientset_generated/internalclientset/typed/events/internalversion/fake:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
| pkg/client/clientset_generated/internalclientset/typed/events/internalversion/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/90ba15ee742743fee911ac635fef7c40a406fd5b | [
0.0001761625026119873,
0.00017368499538861215,
0.0001717563281999901,
0.0001734105753712356,
0.000001581817173246236
] |
{
"id": 0,
"code_window": [
"\t\"github.com/ethereum/go-ethereum/core/vm/runtime\"\n",
"\t\"github.com/ethereum/go-ethereum/eth/tracers/logger\"\n",
"\t\"github.com/ethereum/go-ethereum/internal/flags\"\n",
"\t\"github.com/ethereum/go-ethereum/log\"\n",
"\t\"github.com/ethereum/go-ethereum/params\"\n",
"\t\"github.com/urfave/cli/v2\"\n",
")\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/ethereum/go-ethereum/trie\"\n"
],
"file_path": "cmd/evm/runner.go",
"type": "add",
"edit_start_line_idx": 42
} | // Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"math/big"
"os"
goruntime "runtime"
"runtime/pprof"
"testing"
"time"
"github.com/ethereum/go-ethereum/cmd/evm/internal/compiler"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/core/vm/runtime"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/urfave/cli/v2"
)
var runCommand = &cli.Command{
Action: runCmd,
Name: "run",
Usage: "run arbitrary evm binary",
ArgsUsage: "<code>",
Description: `The run command runs arbitrary EVM code.`,
}
// readGenesis will read the given JSON format genesis file and return
// the initialized Genesis structure
func readGenesis(genesisPath string) *core.Genesis {
// Make sure we have a valid genesis JSON
//genesisPath := ctx.Args().First()
if len(genesisPath) == 0 {
utils.Fatalf("Must supply path to genesis JSON file")
}
file, err := os.Open(genesisPath)
if err != nil {
utils.Fatalf("Failed to read genesis file: %v", err)
}
defer file.Close()
genesis := new(core.Genesis)
if err := json.NewDecoder(file).Decode(genesis); err != nil {
utils.Fatalf("invalid genesis file: %v", err)
}
return genesis
}
type execStats struct {
time time.Duration // The execution time.
allocs int64 // The number of heap allocations during execution.
bytesAllocated int64 // The cumulative number of bytes allocated during execution.
}
func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) (output []byte, gasLeft uint64, stats execStats, err error) {
if bench {
result := testing.Benchmark(func(b *testing.B) {
for i := 0; i < b.N; i++ {
output, gasLeft, err = execFunc()
}
})
// Get the average execution time from the benchmarking result.
// There are other useful stats here that could be reported.
stats.time = time.Duration(result.NsPerOp())
stats.allocs = result.AllocsPerOp()
stats.bytesAllocated = result.AllocedBytesPerOp()
} else {
var memStatsBefore, memStatsAfter goruntime.MemStats
goruntime.ReadMemStats(&memStatsBefore)
startTime := time.Now()
output, gasLeft, err = execFunc()
stats.time = time.Since(startTime)
goruntime.ReadMemStats(&memStatsAfter)
stats.allocs = int64(memStatsAfter.Mallocs - memStatsBefore.Mallocs)
stats.bytesAllocated = int64(memStatsAfter.TotalAlloc - memStatsBefore.TotalAlloc)
}
return output, gasLeft, stats, err
}
func runCmd(ctx *cli.Context) error {
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
log.Root().SetHandler(glogger)
logconfig := &logger.Config{
EnableMemory: !ctx.Bool(DisableMemoryFlag.Name),
DisableStack: ctx.Bool(DisableStackFlag.Name),
DisableStorage: ctx.Bool(DisableStorageFlag.Name),
EnableReturnData: !ctx.Bool(DisableReturnDataFlag.Name),
Debug: ctx.Bool(DebugFlag.Name),
}
var (
tracer vm.EVMLogger
debugLogger *logger.StructLogger
statedb *state.StateDB
chainConfig *params.ChainConfig
sender = common.BytesToAddress([]byte("sender"))
receiver = common.BytesToAddress([]byte("receiver"))
genesisConfig *core.Genesis
)
if ctx.Bool(MachineFlag.Name) {
tracer = logger.NewJSONLogger(logconfig, os.Stdout)
} else if ctx.Bool(DebugFlag.Name) {
debugLogger = logger.NewStructLogger(logconfig)
tracer = debugLogger
} else {
debugLogger = logger.NewStructLogger(logconfig)
}
if ctx.String(GenesisFlag.Name) != "" {
gen := readGenesis(ctx.String(GenesisFlag.Name))
genesisConfig = gen
db := rawdb.NewMemoryDatabase()
genesis := gen.MustCommit(db)
statedb, _ = state.New(genesis.Root(), state.NewDatabase(db), nil)
chainConfig = gen.Config
} else {
statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
genesisConfig = new(core.Genesis)
}
if ctx.String(SenderFlag.Name) != "" {
sender = common.HexToAddress(ctx.String(SenderFlag.Name))
}
statedb.CreateAccount(sender)
if ctx.String(ReceiverFlag.Name) != "" {
receiver = common.HexToAddress(ctx.String(ReceiverFlag.Name))
}
var code []byte
codeFileFlag := ctx.String(CodeFileFlag.Name)
codeFlag := ctx.String(CodeFlag.Name)
// The '--code' or '--codefile' flag overrides code in state
if codeFileFlag != "" || codeFlag != "" {
var hexcode []byte
if codeFileFlag != "" {
var err error
// If - is specified, it means that code comes from stdin
if codeFileFlag == "-" {
//Try reading from stdin
if hexcode, err = io.ReadAll(os.Stdin); err != nil {
fmt.Printf("Could not load code from stdin: %v\n", err)
os.Exit(1)
}
} else {
// Codefile with hex assembly
if hexcode, err = os.ReadFile(codeFileFlag); err != nil {
fmt.Printf("Could not load code from file: %v\n", err)
os.Exit(1)
}
}
} else {
hexcode = []byte(codeFlag)
}
hexcode = bytes.TrimSpace(hexcode)
if len(hexcode)%2 != 0 {
fmt.Printf("Invalid input length for hex data (%d)\n", len(hexcode))
os.Exit(1)
}
code = common.FromHex(string(hexcode))
} else if fn := ctx.Args().First(); len(fn) > 0 {
// EASM-file to compile
src, err := os.ReadFile(fn)
if err != nil {
return err
}
bin, err := compiler.Compile(fn, src, false)
if err != nil {
return err
}
code = common.Hex2Bytes(bin)
}
initialGas := ctx.Uint64(GasFlag.Name)
if genesisConfig.GasLimit != 0 {
initialGas = genesisConfig.GasLimit
}
runtimeConfig := runtime.Config{
Origin: sender,
State: statedb,
GasLimit: initialGas,
GasPrice: flags.GlobalBig(ctx, PriceFlag.Name),
Value: flags.GlobalBig(ctx, ValueFlag.Name),
Difficulty: genesisConfig.Difficulty,
Time: genesisConfig.Timestamp,
Coinbase: genesisConfig.Coinbase,
BlockNumber: new(big.Int).SetUint64(genesisConfig.Number),
EVMConfig: vm.Config{
Tracer: tracer,
Debug: ctx.Bool(DebugFlag.Name) || ctx.Bool(MachineFlag.Name),
},
}
if cpuProfilePath := ctx.String(CPUProfileFlag.Name); cpuProfilePath != "" {
f, err := os.Create(cpuProfilePath)
if err != nil {
fmt.Println("could not create CPU profile: ", err)
os.Exit(1)
}
if err := pprof.StartCPUProfile(f); err != nil {
fmt.Println("could not start CPU profile: ", err)
os.Exit(1)
}
defer pprof.StopCPUProfile()
}
if chainConfig != nil {
runtimeConfig.ChainConfig = chainConfig
} else {
runtimeConfig.ChainConfig = params.AllEthashProtocolChanges
}
var hexInput []byte
if inputFileFlag := ctx.String(InputFileFlag.Name); inputFileFlag != "" {
var err error
if hexInput, err = os.ReadFile(inputFileFlag); err != nil {
fmt.Printf("could not load input from file: %v\n", err)
os.Exit(1)
}
} else {
hexInput = []byte(ctx.String(InputFlag.Name))
}
hexInput = bytes.TrimSpace(hexInput)
if len(hexInput)%2 != 0 {
fmt.Println("input length must be even")
os.Exit(1)
}
input := common.FromHex(string(hexInput))
var execFunc func() ([]byte, uint64, error)
if ctx.Bool(CreateFlag.Name) {
input = append(code, input...)
execFunc = func() ([]byte, uint64, error) {
output, _, gasLeft, err := runtime.Create(input, &runtimeConfig)
return output, gasLeft, err
}
} else {
if len(code) > 0 {
statedb.SetCode(receiver, code)
}
execFunc = func() ([]byte, uint64, error) {
return runtime.Call(receiver, input, &runtimeConfig)
}
}
bench := ctx.Bool(BenchFlag.Name)
output, leftOverGas, stats, err := timedExec(bench, execFunc)
if ctx.Bool(DumpFlag.Name) {
statedb.Commit(true)
statedb.IntermediateRoot(true)
fmt.Println(string(statedb.Dump(nil)))
}
if memProfilePath := ctx.String(MemProfileFlag.Name); memProfilePath != "" {
f, err := os.Create(memProfilePath)
if err != nil {
fmt.Println("could not create memory profile: ", err)
os.Exit(1)
}
if err := pprof.WriteHeapProfile(f); err != nil {
fmt.Println("could not write memory profile: ", err)
os.Exit(1)
}
f.Close()
}
if ctx.Bool(DebugFlag.Name) {
if debugLogger != nil {
fmt.Fprintln(os.Stderr, "#### TRACE ####")
logger.WriteTrace(os.Stderr, debugLogger.StructLogs())
}
fmt.Fprintln(os.Stderr, "#### LOGS ####")
logger.WriteLogs(os.Stderr, statedb.Logs())
}
if bench || ctx.Bool(StatDumpFlag.Name) {
fmt.Fprintf(os.Stderr, `EVM gas used: %d
execution time: %v
allocations: %d
allocated bytes: %d
`, initialGas-leftOverGas, stats.time, stats.allocs, stats.bytesAllocated)
}
if tracer == nil {
fmt.Printf("%#x\n", output)
if err != nil {
fmt.Printf(" error: %v\n", err)
}
}
return nil
}
| cmd/evm/runner.go | 1 | https://github.com/ethereum/go-ethereum/commit/37ecff0967bec978e0723f4861803943bd6d0e17 | [
0.11133332550525665,
0.006911322008818388,
0.00016234336362686008,
0.0001719750725897029,
0.026116136461496353
] |
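timedExec above switches between two measurement strategies. Here is a hedged, self-contained sketch of the same pattern — testing.Benchmark for averaged per-op numbers when benchmarking, a single timed run with runtime.ReadMemStats otherwise; the measure helper and its signature are invented for illustration:

package main

import (
	"fmt"
	"runtime"
	"testing"
	"time"
)

// measure runs fn once (or many times when bench is true) and reports
// wall time, heap allocations, and allocated bytes for the execution.
func measure(bench bool, fn func()) (d time.Duration, allocs, bytes int64) {
	if bench {
		// testing.Benchmark picks an iteration count and averages per op.
		res := testing.Benchmark(func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				fn()
			}
		})
		return time.Duration(res.NsPerOp()), res.AllocsPerOp(), res.AllocedBytesPerOp()
	}
	var before, after runtime.MemStats
	runtime.ReadMemStats(&before)
	start := time.Now()
	fn()
	d = time.Since(start)
	runtime.ReadMemStats(&after)
	return d, int64(after.Mallocs - before.Mallocs), int64(after.TotalAlloc - before.TotalAlloc)
}

func main() {
	d, allocs, bytes := measure(true, func() { _ = make([]byte, 1<<20) })
	fmt.Printf("time=%v allocs=%d bytes=%d\n", d, allocs, bytes)
}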
{
"id": 0,
"code_window": [
"\t\"github.com/ethereum/go-ethereum/core/vm/runtime\"\n",
"\t\"github.com/ethereum/go-ethereum/eth/tracers/logger\"\n",
"\t\"github.com/ethereum/go-ethereum/internal/flags\"\n",
"\t\"github.com/ethereum/go-ethereum/log\"\n",
"\t\"github.com/ethereum/go-ethereum/params\"\n",
"\t\"github.com/urfave/cli/v2\"\n",
")\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/ethereum/go-ethereum/trie\"\n"
],
"file_path": "cmd/evm/runner.go",
"type": "add",
"edit_start_line_idx": 42
} | // Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package tracers
import (
"bufio"
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"os"
"runtime"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
)
const (
// defaultTraceTimeout is the amount of time a single transaction can execute
// by default before being forcefully aborted.
defaultTraceTimeout = 5 * time.Second
// defaultTraceReexec is the number of blocks the tracer is willing to go back
// and reexecute to produce missing historical state necessary to run a specific
// trace.
defaultTraceReexec = uint64(128)
// defaultTracechainMemLimit is the size of the triedb, at which traceChain
// switches over and tries to use a disk-backed database instead of building
// on top of memory.
// For non-archive nodes, this limit _will_ be overblown, as disk-backed tries
// will only be found every ~15K blocks or so.
defaultTracechainMemLimit = common.StorageSize(500 * 1024 * 1024)
// maximumPendingTraceStates is the maximum number of states allowed waiting
// for tracing. The creation of trace state will be paused if the unused
// trace states exceed this limit.
maximumPendingTraceStates = 128
)
var errTxNotFound = errors.New("transaction not found")
// StateReleaseFunc is used to deallocate resources held by constructing a
// historical state for tracing purposes.
type StateReleaseFunc func()
// Backend interface provides the common API services (that are provided by
// both full and light clients) with access to necessary functions.
type Backend interface {
HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error)
HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error)
BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error)
BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error)
GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error)
RPCGasCap() uint64
ChainConfig() *params.ChainConfig
Engine() consensus.Engine
ChainDb() ethdb.Database
StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, StateReleaseFunc, error)
StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (*core.Message, vm.BlockContext, *state.StateDB, StateReleaseFunc, error)
}
// API is the collection of tracing APIs exposed over the private debugging endpoint.
type API struct {
backend Backend
}
// NewAPI creates a new API definition for the tracing methods of the Ethereum service.
func NewAPI(backend Backend) *API {
return &API{backend: backend}
}
type chainContext struct {
api *API
ctx context.Context
}
func (context *chainContext) Engine() consensus.Engine {
return context.api.backend.Engine()
}
func (context *chainContext) GetHeader(hash common.Hash, number uint64) *types.Header {
header, err := context.api.backend.HeaderByNumber(context.ctx, rpc.BlockNumber(number))
if err != nil {
return nil
}
if header.Hash() == hash {
return header
}
header, err = context.api.backend.HeaderByHash(context.ctx, hash)
if err != nil {
return nil
}
return header
}
// chainContext constructs the context reader which is used by the evm for reading
// the necessary chain context.
func (api *API) chainContext(ctx context.Context) core.ChainContext {
return &chainContext{api: api, ctx: ctx}
}
// blockByNumber is the wrapper of the chain access function offered by the backend.
// It will return an error if the block is not found.
func (api *API) blockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) {
block, err := api.backend.BlockByNumber(ctx, number)
if err != nil {
return nil, err
}
if block == nil {
return nil, fmt.Errorf("block #%d not found", number)
}
return block, nil
}
// blockByHash is the wrapper of the chain access function offered by the backend.
// It will return an error if the block is not found.
func (api *API) blockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {
block, err := api.backend.BlockByHash(ctx, hash)
if err != nil {
return nil, err
}
if block == nil {
return nil, fmt.Errorf("block %s not found", hash.Hex())
}
return block, nil
}
// blockByNumberAndHash is the wrapper of the chain access function offered by
// the backend. It will return an error if the block is not found.
//
// Note this function is friendly for the light client which can only retrieve the
// historical (before the CHT) header/block by number.
func (api *API) blockByNumberAndHash(ctx context.Context, number rpc.BlockNumber, hash common.Hash) (*types.Block, error) {
block, err := api.blockByNumber(ctx, number)
if err != nil {
return nil, err
}
if block.Hash() == hash {
return block, nil
}
return api.blockByHash(ctx, hash)
}
// TraceConfig holds extra parameters to trace functions.
type TraceConfig struct {
*logger.Config
Tracer *string
Timeout *string
Reexec *uint64
// Config specific to given tracer. Note struct logger
// config are historically embedded in main object.
TracerConfig json.RawMessage
}
// TraceCallConfig is the config for traceCall API. It holds one more
// field to override the state for tracing.
type TraceCallConfig struct {
TraceConfig
StateOverrides *ethapi.StateOverride
BlockOverrides *ethapi.BlockOverrides
}
// StdTraceConfig holds extra parameters to standard-json trace functions.
type StdTraceConfig struct {
logger.Config
Reexec *uint64
TxHash common.Hash
}
// txTraceResult is the result of a single transaction trace.
type txTraceResult struct {
Result interface{} `json:"result,omitempty"` // Trace results produced by the tracer
Error string `json:"error,omitempty"` // Trace failure produced by the tracer
}
// blockTraceTask represents a single block trace task when an entire chain is
// being traced.
type blockTraceTask struct {
statedb *state.StateDB // Intermediate state prepped for tracing
block *types.Block // Block to trace the transactions from
release StateReleaseFunc // The function to release the held resource for this task
results []*txTraceResult // Trace results produced by the task
}
// blockTraceResult represents the results of tracing a single block when an entire
// chain is being traced.
type blockTraceResult struct {
Block hexutil.Uint64 `json:"block"` // Block number corresponding to this trace
Hash common.Hash `json:"hash"` // Block hash corresponding to this trace
Traces []*txTraceResult `json:"traces"` // Trace results produced by the task
}
// txTraceTask represents a single transaction trace task when an entire block
// is being traced.
type txTraceTask struct {
statedb *state.StateDB // Intermediate state prepped for tracing
index int // Transaction offset in the block
}
// TraceChain returns the structured logs created during the execution of EVM
// between two blocks (excluding start) and returns them as a JSON object.
func (api *API) TraceChain(ctx context.Context, start, end rpc.BlockNumber, config *TraceConfig) (*rpc.Subscription, error) {
// Fetch the block interval that we want to trace
from, err := api.blockByNumber(ctx, start)
if err != nil {
return nil, err
}
to, err := api.blockByNumber(ctx, end)
if err != nil {
return nil, err
}
if from.Number().Cmp(to.Number()) >= 0 {
return nil, fmt.Errorf("end block (#%d) needs to come after start block (#%d)", end, start)
}
// Tracing a chain is a **long** operation, only do with subscriptions
notifier, supported := rpc.NotifierFromContext(ctx)
if !supported {
return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
}
sub := notifier.CreateSubscription()
resCh := api.traceChain(from, to, config, notifier.Closed())
go func() {
for result := range resCh {
notifier.Notify(sub.ID, result)
}
}()
return sub, nil
}
// traceChain configures a new tracer according to the provided configuration, and
// executes all the transactions contained within. The tracing chain range includes
// the end block but excludes the start one. The return value will be one item per
// transaction, dependent on the requested tracer.
// The tracing procedure should be aborted in case the closed signal is received.
func (api *API) traceChain(start, end *types.Block, config *TraceConfig, closed <-chan interface{}) chan *blockTraceResult {
reexec := defaultTraceReexec
if config != nil && config.Reexec != nil {
reexec = *config.Reexec
}
blocks := int(end.NumberU64() - start.NumberU64())
threads := runtime.NumCPU()
if threads > blocks {
threads = blocks
}
var (
pend = new(sync.WaitGroup)
ctx = context.Background()
taskCh = make(chan *blockTraceTask, threads)
resCh = make(chan *blockTraceTask, threads)
tracker = newStateTracker(maximumPendingTraceStates, start.NumberU64())
)
for th := 0; th < threads; th++ {
pend.Add(1)
go func() {
defer pend.Done()
// Fetch and execute the block trace taskCh
for task := range taskCh {
var (
signer = types.MakeSigner(api.backend.ChainConfig(), task.block.Number())
blockCtx = core.NewEVMBlockContext(task.block.Header(), api.chainContext(ctx), nil)
)
// Trace all the transactions contained within
for i, tx := range task.block.Transactions() {
msg, _ := core.TransactionToMessage(tx, signer, task.block.BaseFee())
txctx := &Context{
BlockHash: task.block.Hash(),
BlockNumber: task.block.Number(),
TxIndex: i,
TxHash: tx.Hash(),
}
res, err := api.traceTx(ctx, msg, txctx, blockCtx, task.statedb, config)
if err != nil {
task.results[i] = &txTraceResult{Error: err.Error()}
log.Warn("Tracing failed", "hash", tx.Hash(), "block", task.block.NumberU64(), "err", err)
break
}
// Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect
task.statedb.Finalise(api.backend.ChainConfig().IsEIP158(task.block.Number()))
task.results[i] = &txTraceResult{Result: res}
}
// Tracing state is used up, queue it for de-referencing. Note the
// state is the parent state of trace block, use block.number-1 as
// the state number.
tracker.releaseState(task.block.NumberU64()-1, task.release)
// Stream the result back to the result catcher or abort on teardown
select {
case resCh <- task:
case <-closed:
return
}
}
}()
}
// Start a goroutine to feed all the blocks into the tracers
go func() {
var (
logged time.Time
begin = time.Now()
number uint64
traced uint64
failed error
statedb *state.StateDB
release StateReleaseFunc
)
// Ensure everything is properly cleaned up on any exit path
defer func() {
close(taskCh)
pend.Wait()
// Clean out any pending release functions of trace states.
tracker.callReleases()
// Log the chain result
switch {
case failed != nil:
log.Warn("Chain tracing failed", "start", start.NumberU64(), "end", end.NumberU64(), "transactions", traced, "elapsed", time.Since(begin), "err", failed)
case number < end.NumberU64():
log.Warn("Chain tracing aborted", "start", start.NumberU64(), "end", end.NumberU64(), "abort", number, "transactions", traced, "elapsed", time.Since(begin))
default:
log.Info("Chain tracing finished", "start", start.NumberU64(), "end", end.NumberU64(), "transactions", traced, "elapsed", time.Since(begin))
}
close(resCh)
}()
// Feed all the blocks both into the tracer, as well as fast process concurrently
for number = start.NumberU64(); number < end.NumberU64(); number++ {
// Stop tracing if interruption was requested
select {
case <-closed:
return
default:
}
// Print progress logs if long enough time elapsed
if time.Since(logged) > 8*time.Second {
logged = time.Now()
log.Info("Tracing chain segment", "start", start.NumberU64(), "end", end.NumberU64(), "current", number, "transactions", traced, "elapsed", time.Since(begin))
}
// Retrieve the parent block and target block for tracing.
block, err := api.blockByNumber(ctx, rpc.BlockNumber(number))
if err != nil {
failed = err
break
}
next, err := api.blockByNumber(ctx, rpc.BlockNumber(number+1))
if err != nil {
failed = err
break
}
// Make sure the state creator doesn't go too far. Too many unprocessed
// trace states may cause the oldest state to become stale (e.g. in
// path-based scheme).
if err = tracker.wait(number); err != nil {
failed = err
break
}
// Prepare the statedb for tracing. Don't use the live database for
// tracing to avoid persisting state junk into the database. Switch
// over to `preferDisk` mode only if the memory usage exceeds the
// limit; the trie database will be reconstructed from scratch only
// if the relevant state is available on disk.
var preferDisk bool
if statedb != nil {
s1, s2 := statedb.Database().TrieDB().Size()
preferDisk = s1+s2 > defaultTracechainMemLimit
}
statedb, release, err = api.backend.StateAtBlock(ctx, block, reexec, statedb, false, preferDisk)
if err != nil {
failed = err
break
}
// Clean out any pending release functions of trace state. Note this
// step must be done after constructing tracing state, because the
// tracing state of block next depends on the parent state and construction
// may fail if we release too early.
tracker.callReleases()
// Send the block over to the concurrent tracers (if not in the fast-forward phase)
txs := next.Transactions()
select {
case taskCh <- &blockTraceTask{statedb: statedb.Copy(), block: next, release: release, results: make([]*txTraceResult, len(txs))}:
case <-closed:
tracker.releaseState(number, release)
return
}
traced += uint64(len(txs))
}
}()
// Keep reading the trace results and stream them to result channel.
retCh := make(chan *blockTraceResult)
go func() {
defer close(retCh)
var (
next = start.NumberU64() + 1
done = make(map[uint64]*blockTraceResult)
)
for res := range resCh {
// Queue up next received result
result := &blockTraceResult{
Block: hexutil.Uint64(res.block.NumberU64()),
Hash: res.block.Hash(),
Traces: res.results,
}
done[uint64(result.Block)] = result
// Stream completed traces to the result channel
for result, ok := done[next]; ok; result, ok = done[next] {
if len(result.Traces) > 0 || next == end.NumberU64() {
// It will be blocked in case the channel consumer doesn't take the
// tracing result in time(e.g. the websocket connect is not stable)
// which will eventually block the entire chain tracer. It's the
// expected behavior to not waste node resources for a non-active user.
retCh <- result
}
delete(done, next)
next++
}
}
}()
return retCh
}
// TraceBlockByNumber returns the structured logs created during the execution of
// EVM and returns them as a JSON object.
func (api *API) TraceBlockByNumber(ctx context.Context, number rpc.BlockNumber, config *TraceConfig) ([]*txTraceResult, error) {
block, err := api.blockByNumber(ctx, number)
if err != nil {
return nil, err
}
return api.traceBlock(ctx, block, config)
}
// TraceBlockByHash returns the structured logs created during the execution of
// EVM and returns them as a JSON object.
func (api *API) TraceBlockByHash(ctx context.Context, hash common.Hash, config *TraceConfig) ([]*txTraceResult, error) {
block, err := api.blockByHash(ctx, hash)
if err != nil {
return nil, err
}
return api.traceBlock(ctx, block, config)
}
// TraceBlock returns the structured logs created during the execution of EVM
// and returns them as a JSON object.
func (api *API) TraceBlock(ctx context.Context, blob hexutil.Bytes, config *TraceConfig) ([]*txTraceResult, error) {
block := new(types.Block)
if err := rlp.Decode(bytes.NewReader(blob), block); err != nil {
return nil, fmt.Errorf("could not decode block: %v", err)
}
return api.traceBlock(ctx, block, config)
}
// TraceBlockFromFile returns the structured logs created during the execution of
// EVM and returns them as a JSON object.
func (api *API) TraceBlockFromFile(ctx context.Context, file string, config *TraceConfig) ([]*txTraceResult, error) {
blob, err := os.ReadFile(file)
if err != nil {
return nil, fmt.Errorf("could not read file: %v", err)
}
return api.TraceBlock(ctx, blob, config)
}
// TraceBadBlock returns the structured logs created during the execution of
// EVM against a block pulled from the pool of bad ones and returns them as a JSON
// object.
func (api *API) TraceBadBlock(ctx context.Context, hash common.Hash, config *TraceConfig) ([]*txTraceResult, error) {
block := rawdb.ReadBadBlock(api.backend.ChainDb(), hash)
if block == nil {
return nil, fmt.Errorf("bad block %#x not found", hash)
}
return api.traceBlock(ctx, block, config)
}
// StandardTraceBlockToFile dumps the structured logs created during the
// execution of EVM to the local file system and returns a list of files
// to the caller.
func (api *API) StandardTraceBlockToFile(ctx context.Context, hash common.Hash, config *StdTraceConfig) ([]string, error) {
block, err := api.blockByHash(ctx, hash)
if err != nil {
return nil, err
}
return api.standardTraceBlockToFile(ctx, block, config)
}
// IntermediateRoots executes a block (bad- or canon- or side-), and returns a list
// of intermediate roots: the stateroot after each transaction.
func (api *API) IntermediateRoots(ctx context.Context, hash common.Hash, config *TraceConfig) ([]common.Hash, error) {
block, _ := api.blockByHash(ctx, hash)
if block == nil {
// Check in the bad blocks
block = rawdb.ReadBadBlock(api.backend.ChainDb(), hash)
}
if block == nil {
return nil, fmt.Errorf("block %#x not found", hash)
}
if block.NumberU64() == 0 {
return nil, errors.New("genesis is not traceable")
}
parent, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(block.NumberU64()-1), block.ParentHash())
if err != nil {
return nil, err
}
reexec := defaultTraceReexec
if config != nil && config.Reexec != nil {
reexec = *config.Reexec
}
statedb, release, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false)
if err != nil {
return nil, err
}
defer release()
var (
roots []common.Hash
signer = types.MakeSigner(api.backend.ChainConfig(), block.Number())
chainConfig = api.backend.ChainConfig()
vmctx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
deleteEmptyObjects = chainConfig.IsEIP158(block.Number())
)
for i, tx := range block.Transactions() {
if err := ctx.Err(); err != nil {
return nil, err
}
var (
msg, _ = core.TransactionToMessage(tx, signer, block.BaseFee())
txContext = core.NewEVMTxContext(msg)
vmenv = vm.NewEVM(vmctx, txContext, statedb, chainConfig, vm.Config{})
)
statedb.SetTxContext(tx.Hash(), i)
if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.GasLimit)); err != nil {
log.Warn("Tracing intermediate roots did not complete", "txindex", i, "txhash", tx.Hash(), "err", err)
// We intentionally don't return the error here: if we do, then the RPC server will not
// return the roots. Most likely, the caller already knows that a certain transaction fails to
// be included, but still want the intermediate roots that led to that point.
// It may happen the tx_N causes an erroneous state, which in turn causes tx_N+M to not be
// executable.
// N.B: This should never happen while tracing canon blocks, only when tracing bad blocks.
return roots, nil
}
// calling IntermediateRoot will internally call Finalize on the state
// so any modifications are written to the trie
roots = append(roots, statedb.IntermediateRoot(deleteEmptyObjects))
}
return roots, nil
}
// StandardTraceBadBlockToFile dumps the structured logs created during the
// execution of EVM against a block pulled from the pool of bad ones to the
// local file system and returns a list of files to the caller.
func (api *API) StandardTraceBadBlockToFile(ctx context.Context, hash common.Hash, config *StdTraceConfig) ([]string, error) {
block := rawdb.ReadBadBlock(api.backend.ChainDb(), hash)
if block == nil {
return nil, fmt.Errorf("bad block %#x not found", hash)
}
return api.standardTraceBlockToFile(ctx, block, config)
}
// traceBlock configures a new tracer according to the provided configuration, and
// executes all the transactions contained within. The return value will be one item
// per transaction, dependent on the requested tracer.
func (api *API) traceBlock(ctx context.Context, block *types.Block, config *TraceConfig) ([]*txTraceResult, error) {
if block.NumberU64() == 0 {
return nil, errors.New("genesis is not traceable")
}
// Prepare base state
parent, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(block.NumberU64()-1), block.ParentHash())
if err != nil {
return nil, err
}
reexec := defaultTraceReexec
if config != nil && config.Reexec != nil {
reexec = *config.Reexec
}
statedb, release, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false)
if err != nil {
return nil, err
}
defer release()
// JS tracers have high overhead. In this case run a parallel
// process that generates states in one thread and traces txes
// in separate worker threads.
if config != nil && config.Tracer != nil && *config.Tracer != "" {
if isJS := DefaultDirectory.IsJS(*config.Tracer); isJS {
return api.traceBlockParallel(ctx, block, statedb, config)
}
}
// Native tracers have low overhead
var (
txs = block.Transactions()
blockHash = block.Hash()
is158 = api.backend.ChainConfig().IsEIP158(block.Number())
blockCtx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
signer = types.MakeSigner(api.backend.ChainConfig(), block.Number())
results = make([]*txTraceResult, len(txs))
)
for i, tx := range txs {
// Generate the next state snapshot fast without tracing
msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee())
txctx := &Context{
BlockHash: blockHash,
BlockNumber: block.Number(),
TxIndex: i,
TxHash: tx.Hash(),
}
res, err := api.traceTx(ctx, msg, txctx, blockCtx, statedb, config)
if err != nil {
return nil, err
}
results[i] = &txTraceResult{Result: res}
// Finalize the state so any modifications are written to the trie
// Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect
statedb.Finalise(is158)
}
return results, nil
}
// traceBlockParallel is for tracers that have a high overhead (read JS tracers). One thread
// runs along and executes txes without tracing enabled to generate their prestate.
// Worker threads take the tasks and the prestate and trace them.
func (api *API) traceBlockParallel(ctx context.Context, block *types.Block, statedb *state.StateDB, config *TraceConfig) ([]*txTraceResult, error) {
// Execute all the transactions contained within the block concurrently
var (
txs = block.Transactions()
blockHash = block.Hash()
blockCtx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
signer = types.MakeSigner(api.backend.ChainConfig(), block.Number())
results = make([]*txTraceResult, len(txs))
pend sync.WaitGroup
)
threads := runtime.NumCPU()
if threads > len(txs) {
threads = len(txs)
}
jobs := make(chan *txTraceTask, threads)
for th := 0; th < threads; th++ {
pend.Add(1)
go func() {
defer pend.Done()
// Fetch and execute the next transaction trace tasks
for task := range jobs {
msg, _ := core.TransactionToMessage(txs[task.index], signer, block.BaseFee())
txctx := &Context{
BlockHash: blockHash,
BlockNumber: block.Number(),
TxIndex: task.index,
TxHash: txs[task.index].Hash(),
}
res, err := api.traceTx(ctx, msg, txctx, blockCtx, task.statedb, config)
if err != nil {
results[task.index] = &txTraceResult{Error: err.Error()}
continue
}
results[task.index] = &txTraceResult{Result: res}
}
}()
}
// Feed the transactions into the tracers and return
var failed error
txloop:
for i, tx := range txs {
// Send the trace task over for execution
task := &txTraceTask{statedb: statedb.Copy(), index: i}
select {
case <-ctx.Done():
failed = ctx.Err()
break txloop
case jobs <- task:
}
// Generate the next state snapshot fast without tracing
msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee())
statedb.SetTxContext(tx.Hash(), i)
vmenv := vm.NewEVM(blockCtx, core.NewEVMTxContext(msg), statedb, api.backend.ChainConfig(), vm.Config{})
if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.GasLimit)); err != nil {
failed = err
break txloop
}
// Finalize the state so any modifications are written to the trie
// Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect
statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number()))
}
close(jobs)
pend.Wait()
// If execution failed in between, abort
if failed != nil {
return nil, failed
}
return results, nil
}
// standardTraceBlockToFile configures a new tracer which uses standard JSON output,
// and traces either a full block or an individual transaction. The return value will
// be one filename per transaction traced.
func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block, config *StdTraceConfig) ([]string, error) {
// If we're tracing a single transaction, make sure it's present
if config != nil && config.TxHash != (common.Hash{}) {
if !containsTx(block, config.TxHash) {
return nil, fmt.Errorf("transaction %#x not found in block", config.TxHash)
}
}
if block.NumberU64() == 0 {
return nil, errors.New("genesis is not traceable")
}
parent, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(block.NumberU64()-1), block.ParentHash())
if err != nil {
return nil, err
}
reexec := defaultTraceReexec
if config != nil && config.Reexec != nil {
reexec = *config.Reexec
}
statedb, release, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false)
if err != nil {
return nil, err
}
defer release()
// Retrieve the tracing configurations, or use default values
var (
logConfig logger.Config
txHash common.Hash
)
if config != nil {
logConfig = config.Config
txHash = config.TxHash
}
logConfig.Debug = true
// Execute transaction, either tracing all or just the requested one
var (
dumps []string
signer = types.MakeSigner(api.backend.ChainConfig(), block.Number())
chainConfig = api.backend.ChainConfig()
vmctx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
canon = true
)
// Check if there are any overrides: the caller may wish to enable a future
// fork when executing this block. Note, such overrides are only applicable to the
// actual specified block, not any preceding blocks that we have to go through
// in order to obtain the state.
// Therefore, it's perfectly valid to specify `"futureForkBlock": 0`, to enable `futureFork`
if config != nil && config.Overrides != nil {
// Note: This copies the config, to not screw up the main config
chainConfig, canon = overrideConfig(chainConfig, config.Overrides)
}
for i, tx := range block.Transactions() {
// Prepare the transaction for un-traced execution
var (
msg, _ = core.TransactionToMessage(tx, signer, block.BaseFee())
txContext = core.NewEVMTxContext(msg)
vmConf vm.Config
dump *os.File
writer *bufio.Writer
err error
)
// If the transaction needs tracing, swap out the configs
if tx.Hash() == txHash || txHash == (common.Hash{}) {
// Generate a unique temporary file to dump it into
prefix := fmt.Sprintf("block_%#x-%d-%#x-", block.Hash().Bytes()[:4], i, tx.Hash().Bytes()[:4])
if !canon {
prefix = fmt.Sprintf("%valt-", prefix)
}
dump, err = os.CreateTemp(os.TempDir(), prefix)
if err != nil {
return nil, err
}
dumps = append(dumps, dump.Name())
// Swap out the noop logger to the standard tracer
writer = bufio.NewWriter(dump)
vmConf = vm.Config{
Debug: true,
Tracer: logger.NewJSONLogger(&logConfig, writer),
EnablePreimageRecording: true,
}
}
// Execute the transaction and flush any traces to disk
vmenv := vm.NewEVM(vmctx, txContext, statedb, chainConfig, vmConf)
statedb.SetTxContext(tx.Hash(), i)
_, err = core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.GasLimit))
if writer != nil {
writer.Flush()
}
if dump != nil {
dump.Close()
log.Info("Wrote standard trace", "file", dump.Name())
}
if err != nil {
return dumps, err
}
// Finalize the state so any modifications are written to the trie
// Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect
statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number()))
// If we've traced the transaction we were looking for, abort
if tx.Hash() == txHash {
break
}
}
return dumps, nil
}
// containsTx reports whether the transaction with a certain hash
// is contained within the specified block.
func containsTx(block *types.Block, hash common.Hash) bool {
for _, tx := range block.Transactions() {
if tx.Hash() == hash {
return true
}
}
return false
}
// TraceTransaction returns the structured logs created during the execution of EVM
// and returns them as a JSON object.
func (api *API) TraceTransaction(ctx context.Context, hash common.Hash, config *TraceConfig) (interface{}, error) {
tx, blockHash, blockNumber, index, err := api.backend.GetTransaction(ctx, hash)
if err != nil {
return nil, err
}
// Only mined txes are supported
if tx == nil {
return nil, errTxNotFound
}
// It shouldn't happen in practice.
if blockNumber == 0 {
return nil, errors.New("genesis is not traceable")
}
reexec := defaultTraceReexec
if config != nil && config.Reexec != nil {
reexec = *config.Reexec
}
block, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(blockNumber), blockHash)
if err != nil {
return nil, err
}
msg, vmctx, statedb, release, err := api.backend.StateAtTransaction(ctx, block, int(index), reexec)
if err != nil {
return nil, err
}
defer release()
txctx := &Context{
BlockHash: blockHash,
BlockNumber: block.Number(),
TxIndex: int(index),
TxHash: hash,
}
return api.traceTx(ctx, msg, txctx, vmctx, statedb, config)
}
// TraceCall lets you trace a given eth_call. It collects the structured logs
// created during the execution of EVM if the given transaction was added on
// top of the provided block and returns them as a JSON object.
func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, config *TraceCallConfig) (interface{}, error) {
// Try to retrieve the specified block
var (
err error
block *types.Block
)
if hash, ok := blockNrOrHash.Hash(); ok {
block, err = api.blockByHash(ctx, hash)
} else if number, ok := blockNrOrHash.Number(); ok {
if number == rpc.PendingBlockNumber {
// We don't have access to the miner here. For tracing 'future' transactions,
// it can be done with block- and state-overrides instead, which offers
// more flexibility and stability than trying to trace on 'pending', since
// the contents of 'pending' is unstable and probably not a true representation
// of what the next actual block is likely to contain.
return nil, errors.New("tracing on top of pending is not supported")
}
block, err = api.blockByNumber(ctx, number)
} else {
return nil, errors.New("invalid arguments; neither block nor hash specified")
}
if err != nil {
return nil, err
}
// try to recompute the state
reexec := defaultTraceReexec
if config != nil && config.Reexec != nil {
reexec = *config.Reexec
}
statedb, release, err := api.backend.StateAtBlock(ctx, block, reexec, nil, true, false)
if err != nil {
return nil, err
}
defer release()
vmctx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
// Apply the customization rules if required.
if config != nil {
if err := config.StateOverrides.Apply(statedb); err != nil {
return nil, err
}
config.BlockOverrides.Apply(&vmctx)
}
// Execute the trace
msg, err := args.ToMessage(api.backend.RPCGasCap(), block.BaseFee())
if err != nil {
return nil, err
}
var traceConfig *TraceConfig
if config != nil {
traceConfig = &config.TraceConfig
}
return api.traceTx(ctx, msg, new(Context), vmctx, statedb, traceConfig)
}
// traceTx configures a new tracer according to the provided configuration, and
// executes the given message in the provided environment. The return value will
// be tracer dependent.
func (api *API) traceTx(ctx context.Context, message *core.Message, txctx *Context, vmctx vm.BlockContext, statedb *state.StateDB, config *TraceConfig) (interface{}, error) {
var (
tracer Tracer
err error
timeout = defaultTraceTimeout
txContext = core.NewEVMTxContext(message)
)
if config == nil {
config = &TraceConfig{}
}
// Default tracer is the struct logger
tracer = logger.NewStructLogger(config.Config)
if config.Tracer != nil {
tracer, err = DefaultDirectory.New(*config.Tracer, txctx, config.TracerConfig)
if err != nil {
return nil, err
}
}
vmenv := vm.NewEVM(vmctx, txContext, statedb, api.backend.ChainConfig(), vm.Config{Debug: true, Tracer: tracer, NoBaseFee: true})
// Define a meaningful timeout of a single transaction trace
if config.Timeout != nil {
if timeout, err = time.ParseDuration(*config.Timeout); err != nil {
return nil, err
}
}
deadlineCtx, cancel := context.WithTimeout(ctx, timeout)
go func() {
<-deadlineCtx.Done()
if errors.Is(deadlineCtx.Err(), context.DeadlineExceeded) {
tracer.Stop(errors.New("execution timeout"))
// Stop evm execution. Note cancellation is not necessarily immediate.
vmenv.Cancel()
}
}()
defer cancel()
	// Call SetTxContext to clear out the statedb access list
statedb.SetTxContext(txctx.TxHash, txctx.TxIndex)
if _, err = core.ApplyMessage(vmenv, message, new(core.GasPool).AddGas(message.GasLimit)); err != nil {
return nil, fmt.Errorf("tracing failed: %w", err)
}
return tracer.GetResult()
}
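// For illustration, a caller inside this package could bound a single trace
// like so (a sketch; the five-second figure is arbitrary):
//
//	timeout := "5s"
//	res, err := api.traceTx(ctx, msg, txctx, vmctx, statedb, &TraceConfig{Timeout: &timeout})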
// APIs return the collection of RPC services the tracer package offers.
func APIs(backend Backend) []rpc.API {
// Append all the local APIs and return
return []rpc.API{
{
Namespace: "debug",
Service: NewAPI(backend),
},
}
}
// overrideConfig returns a copy of original in which any forks enabled in
// override are applied, along with a boolean that indicates whether the copy
// is canonical (equivalent to the original).
// Note: the Clique-part is _not_ deep copied
func overrideConfig(original *params.ChainConfig, override *params.ChainConfig) (*params.ChainConfig, bool) {
copy := new(params.ChainConfig)
*copy = *original
canon := true
// Apply forks (after Berlin) to the copy.
if block := override.BerlinBlock; block != nil {
copy.BerlinBlock = block
canon = false
}
if block := override.LondonBlock; block != nil {
copy.LondonBlock = block
canon = false
}
if block := override.ArrowGlacierBlock; block != nil {
copy.ArrowGlacierBlock = block
canon = false
}
if block := override.GrayGlacierBlock; block != nil {
copy.GrayGlacierBlock = block
canon = false
}
if block := override.MergeNetsplitBlock; block != nil {
copy.MergeNetsplitBlock = block
canon = false
}
if timestamp := override.ShanghaiTime; timestamp != nil {
copy.ShanghaiTime = timestamp
canon = false
}
if timestamp := override.CancunTime; timestamp != nil {
copy.CancunTime = timestamp
canon = false
}
if timestamp := override.PragueTime; timestamp != nil {
copy.PragueTime = timestamp
canon = false
}
return copy, canon
}
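// Illustratively, overriding only the Shanghai activation time would look like
// this (a sketch; the timestamp is arbitrary):
//
//	ts := uint64(1681338455)
//	cfg, canon := overrideConfig(params.MainnetChainConfig, &params.ChainConfig{ShanghaiTime: &ts})
//	// canon is false, since the copy now diverges from the original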
| eth/tracers/api.go | 0 | https://github.com/ethereum/go-ethereum/commit/37ecff0967bec978e0723f4861803943bd6d0e17 | [
0.02747834287583828,
0.0007912700530141592,
0.00016216529184021056,
0.00017084850696846843,
0.0034498395398259163
] |
{
"id": 0,
"code_window": [
"\t\"github.com/ethereum/go-ethereum/core/vm/runtime\"\n",
"\t\"github.com/ethereum/go-ethereum/eth/tracers/logger\"\n",
"\t\"github.com/ethereum/go-ethereum/internal/flags\"\n",
"\t\"github.com/ethereum/go-ethereum/log\"\n",
"\t\"github.com/ethereum/go-ethereum/params\"\n",
"\t\"github.com/urfave/cli/v2\"\n",
")\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/ethereum/go-ethereum/trie\"\n"
],
"file_path": "cmd/evm/runner.go",
"type": "add",
"edit_start_line_idx": 42
} | // Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package params
// These are the multipliers for ether denominations.
// Example: To get the wei value of an amount in 'gwei', use
//
// new(big.Int).Mul(value, big.NewInt(params.GWei))
const (
Wei = 1
GWei = 1e9
Ether = 1e18
)
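// A short sketch of the conversion described above (the amount is made up):
//
//	tip := big.NewInt(2)                                        // 2 gwei
//	tipInWei := new(big.Int).Mul(tip, big.NewInt(params.GWei)) // 2e9 wei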
| params/denomination.go | 0 | https://github.com/ethereum/go-ethereum/commit/37ecff0967bec978e0723f4861803943bd6d0e17 | [
0.00016970466822385788,
0.00016711378702893853,
0.0001640565023990348,
0.00016758019046392292,
0.000002329319386262796
] |
{
"id": 0,
"code_window": [
"\t\"github.com/ethereum/go-ethereum/core/vm/runtime\"\n",
"\t\"github.com/ethereum/go-ethereum/eth/tracers/logger\"\n",
"\t\"github.com/ethereum/go-ethereum/internal/flags\"\n",
"\t\"github.com/ethereum/go-ethereum/log\"\n",
"\t\"github.com/ethereum/go-ethereum/params\"\n",
"\t\"github.com/urfave/cli/v2\"\n",
")\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/ethereum/go-ethereum/trie\"\n"
],
"file_path": "cmd/evm/runner.go",
"type": "add",
"edit_start_line_idx": 42
} | // Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bn256
import (
"crypto/rand"
"testing"
"github.com/stretchr/testify/require"
)
func TestExamplePair(t *testing.T) {
// This implements the tripartite Diffie-Hellman algorithm from "A One
// Round Protocol for Tripartite Diffie-Hellman", A. Joux.
// http://www.springerlink.com/content/cddc57yyva0hburb/fulltext.pdf
// Each of three parties, a, b and c, generate a private value.
a, _ := rand.Int(rand.Reader, Order)
b, _ := rand.Int(rand.Reader, Order)
c, _ := rand.Int(rand.Reader, Order)
// Then each party calculates g₁ and g₂ times their private value.
pa := new(G1).ScalarBaseMult(a)
qa := new(G2).ScalarBaseMult(a)
pb := new(G1).ScalarBaseMult(b)
qb := new(G2).ScalarBaseMult(b)
pc := new(G1).ScalarBaseMult(c)
qc := new(G2).ScalarBaseMult(c)
// Now each party exchanges its public values with the other two and
// all parties can calculate the shared key.
k1 := Pair(pb, qc)
k1.ScalarMult(k1, a)
k2 := Pair(pc, qa)
k2.ScalarMult(k2, b)
k3 := Pair(pa, qb)
k3.ScalarMult(k3, c)
// k1, k2 and k3 will all be equal.
require.Equal(t, k1, k2)
require.Equal(t, k1, k3)
	require.Equal(t, len(np), 4) // Avoid gometalinter varcheck err on np
}
| crypto/bn256/cloudflare/example_test.go | 0 | https://github.com/ethereum/go-ethereum/commit/37ecff0967bec978e0723f4861803943bd6d0e17 | [
0.000176333385752514,
0.00017420364019926637,
0.00017114769434556365,
0.00017474693595431745,
0.000001675938506195962
] |
{
"id": 1,
"code_window": [
"\t\tsender = common.BytesToAddress([]byte(\"sender\"))\n",
"\t\treceiver = common.BytesToAddress([]byte(\"receiver\"))\n",
"\t\tgenesisConfig *core.Genesis\n",
"\t)\n",
"\tif ctx.Bool(MachineFlag.Name) {\n",
"\t\ttracer = logger.NewJSONLogger(logconfig, os.Stdout)\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tpreimages = ctx.Bool(DumpFlag.Name)\n"
],
"file_path": "cmd/evm/runner.go",
"type": "add",
"edit_start_line_idx": 127
} | // Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"math/big"
"os"
goruntime "runtime"
"runtime/pprof"
"testing"
"time"
"github.com/ethereum/go-ethereum/cmd/evm/internal/compiler"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/core/vm/runtime"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/urfave/cli/v2"
)
var runCommand = &cli.Command{
Action: runCmd,
Name: "run",
Usage: "run arbitrary evm binary",
ArgsUsage: "<code>",
Description: `The run command runs arbitrary EVM code.`,
}
// readGenesis will read the given JSON format genesis file and return
// the initialized Genesis structure
func readGenesis(genesisPath string) *core.Genesis {
// Make sure we have a valid genesis JSON
//genesisPath := ctx.Args().First()
if len(genesisPath) == 0 {
utils.Fatalf("Must supply path to genesis JSON file")
}
file, err := os.Open(genesisPath)
if err != nil {
utils.Fatalf("Failed to read genesis file: %v", err)
}
defer file.Close()
genesis := new(core.Genesis)
if err := json.NewDecoder(file).Decode(genesis); err != nil {
utils.Fatalf("invalid genesis file: %v", err)
}
return genesis
}
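// For reference, a minimal genesis JSON accepted here could look like this
// (a sketch; every value is a placeholder, not a recommended configuration):
//
//	{
//	  "config":     { "chainId": 1337 },
//	  "difficulty": "0x20000",
//	  "gasLimit":   "0x2fefd8",
//	  "alloc":      {}
//	}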
type execStats struct {
time time.Duration // The execution time.
allocs int64 // The number of heap allocations during execution.
bytesAllocated int64 // The cumulative number of bytes allocated during execution.
}
func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) (output []byte, gasLeft uint64, stats execStats, err error) {
if bench {
result := testing.Benchmark(func(b *testing.B) {
for i := 0; i < b.N; i++ {
output, gasLeft, err = execFunc()
}
})
// Get the average execution time from the benchmarking result.
// There are other useful stats here that could be reported.
stats.time = time.Duration(result.NsPerOp())
stats.allocs = result.AllocsPerOp()
stats.bytesAllocated = result.AllocedBytesPerOp()
} else {
var memStatsBefore, memStatsAfter goruntime.MemStats
goruntime.ReadMemStats(&memStatsBefore)
startTime := time.Now()
output, gasLeft, err = execFunc()
stats.time = time.Since(startTime)
goruntime.ReadMemStats(&memStatsAfter)
stats.allocs = int64(memStatsAfter.Mallocs - memStatsBefore.Mallocs)
stats.bytesAllocated = int64(memStatsAfter.TotalAlloc - memStatsBefore.TotalAlloc)
}
return output, gasLeft, stats, err
}
func runCmd(ctx *cli.Context) error {
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
log.Root().SetHandler(glogger)
logconfig := &logger.Config{
EnableMemory: !ctx.Bool(DisableMemoryFlag.Name),
DisableStack: ctx.Bool(DisableStackFlag.Name),
DisableStorage: ctx.Bool(DisableStorageFlag.Name),
EnableReturnData: !ctx.Bool(DisableReturnDataFlag.Name),
Debug: ctx.Bool(DebugFlag.Name),
}
var (
tracer vm.EVMLogger
debugLogger *logger.StructLogger
statedb *state.StateDB
chainConfig *params.ChainConfig
sender = common.BytesToAddress([]byte("sender"))
receiver = common.BytesToAddress([]byte("receiver"))
genesisConfig *core.Genesis
)
if ctx.Bool(MachineFlag.Name) {
tracer = logger.NewJSONLogger(logconfig, os.Stdout)
} else if ctx.Bool(DebugFlag.Name) {
debugLogger = logger.NewStructLogger(logconfig)
tracer = debugLogger
} else {
debugLogger = logger.NewStructLogger(logconfig)
}
if ctx.String(GenesisFlag.Name) != "" {
gen := readGenesis(ctx.String(GenesisFlag.Name))
genesisConfig = gen
db := rawdb.NewMemoryDatabase()
genesis := gen.MustCommit(db)
statedb, _ = state.New(genesis.Root(), state.NewDatabase(db), nil)
chainConfig = gen.Config
} else {
statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
genesisConfig = new(core.Genesis)
}
if ctx.String(SenderFlag.Name) != "" {
sender = common.HexToAddress(ctx.String(SenderFlag.Name))
}
statedb.CreateAccount(sender)
if ctx.String(ReceiverFlag.Name) != "" {
receiver = common.HexToAddress(ctx.String(ReceiverFlag.Name))
}
var code []byte
codeFileFlag := ctx.String(CodeFileFlag.Name)
codeFlag := ctx.String(CodeFlag.Name)
// The '--code' or '--codefile' flag overrides code in state
if codeFileFlag != "" || codeFlag != "" {
var hexcode []byte
if codeFileFlag != "" {
var err error
// If - is specified, it means that code comes from stdin
if codeFileFlag == "-" {
				// Try reading from stdin
if hexcode, err = io.ReadAll(os.Stdin); err != nil {
fmt.Printf("Could not load code from stdin: %v\n", err)
os.Exit(1)
}
} else {
// Codefile with hex assembly
if hexcode, err = os.ReadFile(codeFileFlag); err != nil {
fmt.Printf("Could not load code from file: %v\n", err)
os.Exit(1)
}
}
} else {
hexcode = []byte(codeFlag)
}
hexcode = bytes.TrimSpace(hexcode)
if len(hexcode)%2 != 0 {
fmt.Printf("Invalid input length for hex data (%d)\n", len(hexcode))
os.Exit(1)
}
code = common.FromHex(string(hexcode))
} else if fn := ctx.Args().First(); len(fn) > 0 {
// EASM-file to compile
src, err := os.ReadFile(fn)
if err != nil {
return err
}
bin, err := compiler.Compile(fn, src, false)
if err != nil {
return err
}
code = common.Hex2Bytes(bin)
}
initialGas := ctx.Uint64(GasFlag.Name)
if genesisConfig.GasLimit != 0 {
initialGas = genesisConfig.GasLimit
}
runtimeConfig := runtime.Config{
Origin: sender,
State: statedb,
GasLimit: initialGas,
GasPrice: flags.GlobalBig(ctx, PriceFlag.Name),
Value: flags.GlobalBig(ctx, ValueFlag.Name),
Difficulty: genesisConfig.Difficulty,
Time: genesisConfig.Timestamp,
Coinbase: genesisConfig.Coinbase,
BlockNumber: new(big.Int).SetUint64(genesisConfig.Number),
EVMConfig: vm.Config{
Tracer: tracer,
Debug: ctx.Bool(DebugFlag.Name) || ctx.Bool(MachineFlag.Name),
},
}
if cpuProfilePath := ctx.String(CPUProfileFlag.Name); cpuProfilePath != "" {
f, err := os.Create(cpuProfilePath)
if err != nil {
fmt.Println("could not create CPU profile: ", err)
os.Exit(1)
}
if err := pprof.StartCPUProfile(f); err != nil {
fmt.Println("could not start CPU profile: ", err)
os.Exit(1)
}
defer pprof.StopCPUProfile()
}
if chainConfig != nil {
runtimeConfig.ChainConfig = chainConfig
} else {
runtimeConfig.ChainConfig = params.AllEthashProtocolChanges
}
var hexInput []byte
if inputFileFlag := ctx.String(InputFileFlag.Name); inputFileFlag != "" {
var err error
if hexInput, err = os.ReadFile(inputFileFlag); err != nil {
fmt.Printf("could not load input from file: %v\n", err)
os.Exit(1)
}
} else {
hexInput = []byte(ctx.String(InputFlag.Name))
}
hexInput = bytes.TrimSpace(hexInput)
if len(hexInput)%2 != 0 {
fmt.Println("input length must be even")
os.Exit(1)
}
input := common.FromHex(string(hexInput))
var execFunc func() ([]byte, uint64, error)
if ctx.Bool(CreateFlag.Name) {
input = append(code, input...)
execFunc = func() ([]byte, uint64, error) {
output, _, gasLeft, err := runtime.Create(input, &runtimeConfig)
return output, gasLeft, err
}
} else {
if len(code) > 0 {
statedb.SetCode(receiver, code)
}
execFunc = func() ([]byte, uint64, error) {
return runtime.Call(receiver, input, &runtimeConfig)
}
}
bench := ctx.Bool(BenchFlag.Name)
output, leftOverGas, stats, err := timedExec(bench, execFunc)
if ctx.Bool(DumpFlag.Name) {
statedb.Commit(true)
statedb.IntermediateRoot(true)
fmt.Println(string(statedb.Dump(nil)))
}
if memProfilePath := ctx.String(MemProfileFlag.Name); memProfilePath != "" {
f, err := os.Create(memProfilePath)
if err != nil {
fmt.Println("could not create memory profile: ", err)
os.Exit(1)
}
if err := pprof.WriteHeapProfile(f); err != nil {
fmt.Println("could not write memory profile: ", err)
os.Exit(1)
}
f.Close()
}
if ctx.Bool(DebugFlag.Name) {
if debugLogger != nil {
fmt.Fprintln(os.Stderr, "#### TRACE ####")
logger.WriteTrace(os.Stderr, debugLogger.StructLogs())
}
fmt.Fprintln(os.Stderr, "#### LOGS ####")
logger.WriteLogs(os.Stderr, statedb.Logs())
}
if bench || ctx.Bool(StatDumpFlag.Name) {
fmt.Fprintf(os.Stderr, `EVM gas used: %d
execution time: %v
allocations: %d
allocated bytes: %d
`, initialGas-leftOverGas, stats.time, stats.allocs, stats.bytesAllocated)
}
if tracer == nil {
fmt.Printf("%#x\n", output)
if err != nil {
fmt.Printf(" error: %v\n", err)
}
}
return nil
}
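// Example invocation of the run command defined above (a sketch; the bytecode
// is an arbitrary snippet and flag placement may differ between releases):
//
//	evm --code 604260005260206000f3 --debug run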
| cmd/evm/runner.go | 1 | https://github.com/ethereum/go-ethereum/commit/37ecff0967bec978e0723f4861803943bd6d0e17 | [
0.998401939868927,
0.14900127053260803,
0.00016488705296069384,
0.00017846503760665655,
0.34119004011154175
] |
{
"id": 1,
"code_window": [
"\t\tsender = common.BytesToAddress([]byte(\"sender\"))\n",
"\t\treceiver = common.BytesToAddress([]byte(\"receiver\"))\n",
"\t\tgenesisConfig *core.Genesis\n",
"\t)\n",
"\tif ctx.Bool(MachineFlag.Name) {\n",
"\t\ttracer = logger.NewJSONLogger(logconfig, os.Stdout)\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tpreimages = ctx.Bool(DumpFlag.Name)\n"
],
"file_path": "cmd/evm/runner.go",
"type": "add",
"edit_start_line_idx": 127
} | [{"X":"0000000000000000000000000000000000000000000000000000000000000000","Y":"0000000000000000000000000000000000000000000000000000000000000000","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"0000000000000000000000000000000000000000000000000000000000000000","Y":"0000000000000000000000000000000000000000000000000000000000000001","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"0000000000000000000000000000000000000000000000000000000000000000","Y":"0000000000000000000000000000000000000000000000000000000000000005","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"0000000000000000000000000000000000000000000000000000000000000000","Y":"7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"0000000000000000000000000000000000000000000000000000000000000000","Y":"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"0000000000000000000000000000000000000000000000000000000000000000","Y":"8000000000000000000000000000000000000000000000000000000000000000","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"0000000000000000000000000000000000000000000000000000000000000000","Y":"8000000000000000000000000000000000000000000000000000000000000001","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"0000000000000000000000000000000000000000000000000000000000000000","Y":"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"0000000000000000000000000000000000000000000000000000000000000000","Y":"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"0000000000000000000000000000000000000000000000000000000000000001","Y":"0000000000000000000000000000000000000000000000000000000000000000","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"0000000000000000000000000000000000000000000000000000000000000001","Y":"0000000000000000000000000000000000000000000000000000000000000001","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"0000000000000000000000000000000000000000000000000000000000000001","Y":"0000000000000000000000000000000000000000000000000000000000000005","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"0000000000000000000000000000000000000000000000000000000000000001","Y":"7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"0000000000000000000000000000000000000000000000000000000000000001","Y":"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"0000000000000000000000000000000000000000000000000000000000000001","Y":"8000000000000000000000000000000000000000000000000000000000000000","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"0000000000000000000000000000000000000000000000000000000000000001","Y":"8000000000000000000000000000000000000000000000000000000000000001","Expected":"0000000000000000000000000000000000000000000000000000000000000000"
},{"X":"0000000000000000000000000000000000000000000000000000000000000001","Y":"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"0000000000000000000000000000000000000000000000000000000000000001","Y":"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"0000000000000000000000000000000000000000000000000000000000000005","Y":"0000000000000000000000000000000000000000000000000000000000000000","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"0000000000000000000000000000000000000000000000000000000000000005","Y":"0000000000000000000000000000000000000000000000000000000000000001","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"0000000000000000000000000000000000000000000000000000000000000005","Y":"0000000000000000000000000000000000000000000000000000000000000005","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"0000000000000000000000000000000000000000000000000000000000000005","Y":"7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"0000000000000000000000000000000000000000000000000000000000000005","Y":"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"0000000000000000000000000000000000000000000000000000000000000005","Y":"8000000000000000000000000000000000000000000000000000000000000000","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"0000000000000000000000000000000000000000000000000000000000000005","Y":"8000000000000000000000000000000000000000000000000000000000000001","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"0000000000000000000000000000000000000000000000000000000000000005","Y":"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"0000000000000000000000000000000000000000000000000000000000000005","Y":"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe","Y":"0000000000000000000000000000000000000000000000000000000000000000","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe","Y":"0000000000000000000000000000000000000000000000000000000000000001","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe","Y":"0000000000000000000000000000000000000000000000000000000000000005","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe","Y":"7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe","Y":"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{
"X":"7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe","Y":"8000000000000000000000000000000000000000000000000000000000000000","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe","Y":"8000000000000000000000000000000000000000000000000000000000000001","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe","Y":"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe","Y":"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Y":"0000000000000000000000000000000000000000000000000000000000000000","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Y":"0000000000000000000000000000000000000000000000000000000000000001","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Y":"0000000000000000000000000000000000000000000000000000000000000005","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Y":"7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Y":"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Y":"8000000000000000000000000000000000000000000000000000000000000000","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Y":"8000000000000000000000000000000000000000000000000000000000000001","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Y":"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Y":"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"8000000000000000000000000000000000000000000000000000000000000000","Y":"0000000000000000000000000000000000000000000000000000000000000000","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"8000000000000000000000000000000000000000000000000000000000000000","Y":"0000000000000000000000000000000000000000000000000000000000000001","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"8000000000000000000000000000000000000000000000000000000000000000","Y":"0000000000000000000000000000000000000000000000000000000000000005","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X"
:"8000000000000000000000000000000000000000000000000000000000000000","Y":"7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"8000000000000000000000000000000000000000000000000000000000000000","Y":"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"8000000000000000000000000000000000000000000000000000000000000000","Y":"8000000000000000000000000000000000000000000000000000000000000000","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"8000000000000000000000000000000000000000000000000000000000000000","Y":"8000000000000000000000000000000000000000000000000000000000000001","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"8000000000000000000000000000000000000000000000000000000000000000","Y":"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"8000000000000000000000000000000000000000000000000000000000000000","Y":"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"8000000000000000000000000000000000000000000000000000000000000001","Y":"0000000000000000000000000000000000000000000000000000000000000000","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"8000000000000000000000000000000000000000000000000000000000000001","Y":"0000000000000000000000000000000000000000000000000000000000000001","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"8000000000000000000000000000000000000000000000000000000000000001","Y":"0000000000000000000000000000000000000000000000000000000000000005","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"8000000000000000000000000000000000000000000000000000000000000001","Y":"7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"8000000000000000000000000000000000000000000000000000000000000001","Y":"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"8000000000000000000000000000000000000000000000000000000000000001","Y":"8000000000000000000000000000000000000000000000000000000000000000","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"8000000000000000000000000000000000000000000000000000000000000001","Y":"8000000000000000000000000000000000000000000000000000000000000001","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"8000000000000000000000000000000000000000000000000000000000000001","Y":"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"8000000000000000000000000000000000000000000000000000000000000001","Y":"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb","Y":"0000000000000000000000000000000000000000000000000000000000000000","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"f
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb","Y":"0000000000000000000000000000000000000000000000000000000000000001","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb","Y":"0000000000000000000000000000000000000000000000000000000000000005","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb","Y":"7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb","Y":"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb","Y":"8000000000000000000000000000000000000000000000000000000000000000","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb","Y":"8000000000000000000000000000000000000000000000000000000000000001","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb","Y":"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb","Y":"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Y":"0000000000000000000000000000000000000000000000000000000000000000","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Y":"0000000000000000000000000000000000000000000000000000000000000001","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Y":"0000000000000000000000000000000000000000000000000000000000000005","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Y":"7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Y":"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Expected":"0000000000000000000000000000000000000000000000000000000000000001"},{"X":"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Y":"8000000000000000000000000000000000000000000000000000000000000000","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Y":"8000000000000000000000000000000000000000000000000000000000000001","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Y":"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb","Expected":"0000000000000000000000000000000000000000000000000000000000000000"},{"X":"ffff
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Y":"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","Expected":"0000000000000000000000000000000000000000000000000000000000000000"}] | core/vm/testdata/testcases_sgt.json | 0 | https://github.com/ethereum/go-ethereum/commit/37ecff0967bec978e0723f4861803943bd6d0e17 | [
0.008087251335382462,
0.008087251335382462,
0.008087251335382462,
0.008087251335382462,
0
] |
{
"id": 1,
"code_window": [
"\t\tsender = common.BytesToAddress([]byte(\"sender\"))\n",
"\t\treceiver = common.BytesToAddress([]byte(\"receiver\"))\n",
"\t\tgenesisConfig *core.Genesis\n",
"\t)\n",
"\tif ctx.Bool(MachineFlag.Name) {\n",
"\t\ttracer = logger.NewJSONLogger(logconfig, os.Stdout)\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tpreimages = ctx.Bool(DumpFlag.Name)\n"
],
"file_path": "cmd/evm/runner.go",
"type": "add",
"edit_start_line_idx": 127
} | // Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package graphql
const schema string = `
# Bytes32 is a 32 byte binary string, represented as 0x-prefixed hexadecimal.
scalar Bytes32
# Address is a 20 byte Ethereum address, represented as 0x-prefixed hexadecimal.
scalar Address
# Bytes is an arbitrary length binary string, represented as 0x-prefixed hexadecimal.
# An empty byte string is represented as '0x'. Byte strings must have an even number of hexadecimal nybbles.
scalar Bytes
# BigInt is a large integer. Input is accepted as either a JSON number or as a string.
# Strings may be either decimal or 0x-prefixed hexadecimal. Output values are all
# 0x-prefixed hexadecimal.
scalar BigInt
# Long is a 64 bit unsigned integer.
scalar Long
schema {
query: Query
mutation: Mutation
}
# Account is an Ethereum account at a particular block.
type Account {
# Address is the address owning the account.
address: Address!
# Balance is the balance of the account, in wei.
balance: BigInt!
# TransactionCount is the number of transactions sent from this account,
# or in the case of a contract, the number of contracts created. Otherwise
# known as the nonce.
transactionCount: Long!
# Code contains the smart contract code for this account, if the account
# is a (non-self-destructed) contract.
code: Bytes!
# Storage provides access to the storage of a contract account, indexed
# by its 32 byte slot identifier.
storage(slot: Bytes32!): Bytes32!
}
# Log is an Ethereum event log.
type Log {
# Index is the index of this log in the block.
index: Int!
# Account is the account which generated this log - this will always
# be a contract account.
account(block: Long): Account!
# Topics is a list of 0-4 indexed topics for the log.
topics: [Bytes32!]!
# Data is unindexed data for this log.
data: Bytes!
# Transaction is the transaction that generated this log entry.
transaction: Transaction!
}
#EIP-2718
type AccessTuple{
address: Address!
storageKeys : [Bytes32!]!
}
# Transaction is an Ethereum transaction.
type Transaction {
# Hash is the hash of this transaction.
hash: Bytes32!
# Nonce is the nonce of the account this transaction was generated with.
nonce: Long!
# Index is the index of this transaction in the parent block. This will
# be null if the transaction has not yet been mined.
index: Int
# From is the account that sent this transaction - this will always be
# an externally owned account.
from(block: Long): Account!
# To is the account the transaction was sent to. This is null for
# contract-creating transactions.
to(block: Long): Account
# Value is the value, in wei, sent along with this transaction.
value: BigInt!
# GasPrice is the price offered to miners for gas, in wei per unit.
gasPrice: BigInt!
# MaxFeePerGas is the maximum fee per gas offered to include a transaction, in wei.
maxFeePerGas: BigInt
# MaxPriorityFeePerGas is the maximum miner tip per gas offered to include a transaction, in wei.
maxPriorityFeePerGas: BigInt
# EffectiveTip is the actual amount of reward going to miner after considering the max fee cap.
effectiveTip: BigInt
# Gas is the maximum amount of gas this transaction can consume.
gas: Long!
# InputData is the data supplied to the target of the transaction.
inputData: Bytes!
# Block is the block this transaction was mined in. This will be null if
# the transaction has not yet been mined.
block: Block
# Status is the return status of the transaction. This will be 1 if the
# transaction succeeded, or 0 if it failed (due to a revert, or due to
# running out of gas). If the transaction has not yet been mined, this
# field will be null.
status: Long
# GasUsed is the amount of gas that was used processing this transaction.
# If the transaction has not yet been mined, this field will be null.
gasUsed: Long
# CumulativeGasUsed is the total gas used in the block up to and including
# this transaction. If the transaction has not yet been mined, this field
# will be null.
cumulativeGasUsed: Long
# EffectiveGasPrice is actual value per gas deducted from the sender's
# account. Before EIP-1559, this is equal to the transaction's gas price.
# After EIP-1559, it is baseFeePerGas + min(maxFeePerGas - baseFeePerGas,
# maxPriorityFeePerGas). Legacy transactions and EIP-2930 transactions are
# coerced into the EIP-1559 format by setting both maxFeePerGas and
# maxPriorityFeePerGas as the transaction's gas price.
effectiveGasPrice: BigInt
# CreatedContract is the account that was created by a contract creation
# transaction. If the transaction was not a contract creation transaction,
# or it has not yet been mined, this field will be null.
createdContract(block: Long): Account
# Logs is a list of log entries emitted by this transaction. If the
# transaction has not yet been mined, this field will be null.
logs: [Log!]
r: BigInt!
s: BigInt!
v: BigInt!
# Envelope transaction support
type: Int
accessList: [AccessTuple!]
# Raw is the canonical encoding of the transaction.
# For legacy transactions, it returns the RLP encoding.
# For EIP-2718 typed transactions, it returns the type and payload.
raw: Bytes!
# RawReceipt is the canonical encoding of the receipt. For post EIP-2718 typed transactions
# this is equivalent to TxType || ReceiptEncoding.
rawReceipt: Bytes!
}
# BlockFilterCriteria encapsulates log filter criteria for a filter applied
# to a single block.
input BlockFilterCriteria {
# Addresses is list of addresses that are of interest. If this list is
# empty, results will not be filtered by address.
addresses: [Address!]
# Topics list restricts matches to particular event topics. Each event has a list
# of topics. Topics matches a prefix of that list. An empty element array matches any
# topic. Non-empty elements represent an alternative that matches any of the
# contained topics.
#
# Examples:
# - [] or nil matches any topic list
# - [[A]] matches topic A in first position
# - [[], [B]] matches any topic in first position, B in second position
# - [[A], [B]] matches topic A in first position, B in second position
# - [[A, B]], [C, D]] matches topic (A OR B) in first position, (C OR D) in second position
topics: [[Bytes32!]!]
}
# Block is an Ethereum block.
type Block {
# Number is the number of this block, starting at 0 for the genesis block.
number: Long!
# Hash is the block hash of this block.
hash: Bytes32!
# Parent is the parent block of this block.
parent: Block
# Nonce is the block nonce, an 8 byte sequence determined by the miner.
nonce: Bytes!
# TransactionsRoot is the keccak256 hash of the root of the trie of transactions in this block.
transactionsRoot: Bytes32!
# TransactionCount is the number of transactions in this block. if
# transactions are not available for this block, this field will be null.
transactionCount: Int
# StateRoot is the keccak256 hash of the state trie after this block was processed.
stateRoot: Bytes32!
# ReceiptsRoot is the keccak256 hash of the trie of transaction receipts in this block.
receiptsRoot: Bytes32!
# Miner is the account that mined this block.
miner(block: Long): Account!
# ExtraData is an arbitrary data field supplied by the miner.
extraData: Bytes!
# GasLimit is the maximum amount of gas that was available to transactions in this block.
gasLimit: Long!
# GasUsed is the amount of gas that was used executing transactions in this block.
gasUsed: Long!
# BaseFeePerGas is the fee per unit of gas burned by the protocol in this block.
baseFeePerGas: BigInt
# NextBaseFeePerGas is the fee per unit of gas which needs to be burned in the next block.
nextBaseFeePerGas: BigInt
# Timestamp is the unix timestamp at which this block was mined.
timestamp: Long!
# LogsBloom is a bloom filter that can be used to check if a block may
# contain log entries matching a filter.
logsBloom: Bytes!
# MixHash is the hash that was used as an input to the PoW process.
mixHash: Bytes32!
# Difficulty is a measure of the difficulty of mining this block.
difficulty: BigInt!
# TotalDifficulty is the sum of all difficulty values up to and including
# this block.
totalDifficulty: BigInt!
# OmmerCount is the number of ommers (AKA uncles) associated with this
# block. If ommers are unavailable, this field will be null.
ommerCount: Int
# Ommers is a list of ommer (AKA uncle) blocks associated with this block.
# If ommers are unavailable, this field will be null. Depending on your
# node, the transactions, transactionAt, transactionCount, ommers,
# ommerCount and ommerAt fields may not be available on any ommer blocks.
ommers: [Block]
# OmmerAt returns the ommer (AKA uncle) at the specified index. If ommers
# are unavailable, or the index is out of bounds, this field will be null.
ommerAt(index: Int!): Block
# OmmerHash is the keccak256 hash of all the ommers (AKA uncles)
# associated with this block.
ommerHash: Bytes32!
# Transactions is a list of transactions associated with this block. If
# transactions are unavailable for this block, this field will be null.
transactions: [Transaction!]
# TransactionAt returns the transaction at the specified index. If
# transactions are unavailable for this block, or if the index is out of
# bounds, this field will be null.
transactionAt(index: Int!): Transaction
# Logs returns a filtered set of logs from this block.
logs(filter: BlockFilterCriteria!): [Log!]!
# Account fetches an Ethereum account at the current block's state.
account(address: Address!): Account!
# Call executes a local call operation at the current block's state.
call(data: CallData!): CallResult
# EstimateGas estimates the amount of gas that will be required for
# successful execution of a transaction at the current block's state.
estimateGas(data: CallData!): Long!
# RawHeader is the RLP encoding of the block's header.
rawHeader: Bytes!
# Raw is the RLP encoding of the block.
raw: Bytes!
}
# CallData represents the data associated with a local contract call.
# All fields are optional.
input CallData {
# From is the address making the call.
from: Address
# To is the address the call is sent to.
to: Address
# Gas is the amount of gas sent with the call.
gas: Long
# GasPrice is the price, in wei, offered for each unit of gas.
gasPrice: BigInt
# MaxFeePerGas is the maximum fee per gas offered, in wei.
maxFeePerGas: BigInt
# MaxPriorityFeePerGas is the maximum miner tip per gas offered, in wei.
maxPriorityFeePerGas: BigInt
# Value is the value, in wei, sent along with the call.
value: BigInt
# Data is the data sent to the callee.
data: Bytes
}
# CallResult is the result of a local call operation.
type CallResult {
# Data is the return data of the called contract.
data: Bytes!
# GasUsed is the amount of gas used by the call, after any refunds.
gasUsed: Long!
# Status is the result of the call - 1 for success or 0 for failure.
status: Long!
}
# FilterCriteria encapsulates log filter criteria for searching log entries.
input FilterCriteria {
# FromBlock is the block at which to start searching, inclusive. Defaults
# to the latest block if not supplied.
fromBlock: Long
# ToBlock is the block at which to stop searching, inclusive. Defaults
# to the latest block if not supplied.
toBlock: Long
# Addresses is a list of addresses that are of interest. If this list is
# empty, results will not be filtered by address.
addresses: [Address!]
# Topics list restricts matches to particular event topics. Each event has a list
# of topics. Topics matches a prefix of that list. An empty element array matches any
# topic. Non-empty elements represent an alternative that matches any of the
# contained topics.
#
# Examples:
# - [] or nil matches any topic list
# - [[A]] matches topic A in first position
# - [[], [B]] matches any topic in first position, B in second position
# - [[A], [B]] matches topic A in first position, B in second position
# - [[A, B]], [C, D]] matches topic (A OR B) in first position, (C OR D) in second position
topics: [[Bytes32!]!]
}
# SyncState contains the current synchronisation state of the client.
type SyncState{
# StartingBlock is the block number at which synchronisation started.
startingBlock: Long!
# CurrentBlock is the point at which synchronisation has presently reached.
currentBlock: Long!
# HighestBlock is the latest known block number.
highestBlock: Long!
}
# Pending represents the current pending state.
type Pending {
# TransactionCount is the number of transactions in the pending state.
transactionCount: Int!
# Transactions is a list of transactions in the current pending state.
transactions: [Transaction!]
# Account fetches an Ethereum account for the pending state.
account(address: Address!): Account!
# Call executes a local call operation for the pending state.
call(data: CallData!): CallResult
# EstimateGas estimates the amount of gas that will be required for
# successful execution of a transaction for the pending state.
estimateGas(data: CallData!): Long!
}
type Query {
# Block fetches an Ethereum block by number or by hash. If neither is
# supplied, the most recent known block is returned.
block(number: Long, hash: Bytes32): Block
# Blocks returns all the blocks between two numbers, inclusive. If
# to is not supplied, it defaults to the most recent known block.
blocks(from: Long, to: Long): [Block!]!
# Pending returns the current pending state.
pending: Pending!
# Transaction returns a transaction specified by its hash.
transaction(hash: Bytes32!): Transaction
# Logs returns log entries matching the provided filter.
logs(filter: FilterCriteria!): [Log!]!
# GasPrice returns the node's estimate of a gas price sufficient to
# ensure a transaction is mined in a timely fashion.
gasPrice: BigInt!
# MaxPriorityFeePerGas returns the node's estimate of a gas tip sufficient
# to ensure a transaction is mined in a timely fashion.
maxPriorityFeePerGas: BigInt!
# Syncing returns information on the current synchronisation state.
syncing: SyncState
# ChainID returns the current chain ID for transaction replay protection.
chainID: BigInt!
}
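# For orientation, a query against this schema might look like the following
# (illustrative only; the block number is arbitrary):
#
#   { block(number: 14000000) { hash gasUsed transactions { hash } } }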
type Mutation {
# SendRawTransaction sends an RLP-encoded transaction to the network.
sendRawTransaction(data: Bytes!): Bytes32!
}
`
| graphql/schema.go | 0 | https://github.com/ethereum/go-ethereum/commit/37ecff0967bec978e0723f4861803943bd6d0e17 | [
0.0007492739823646843,
0.00020464560657273978,
0.0001643710711505264,
0.00017116221715696156,
0.00011183507740497589
] |
{
"id": 1,
"code_window": [
"\t\tsender = common.BytesToAddress([]byte(\"sender\"))\n",
"\t\treceiver = common.BytesToAddress([]byte(\"receiver\"))\n",
"\t\tgenesisConfig *core.Genesis\n",
"\t)\n",
"\tif ctx.Bool(MachineFlag.Name) {\n",
"\t\ttracer = logger.NewJSONLogger(logconfig, os.Stdout)\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tpreimages = ctx.Bool(DumpFlag.Name)\n"
],
"file_path": "cmd/evm/runner.go",
"type": "add",
"edit_start_line_idx": 127
} | // Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package event_test
import (
"fmt"
"github.com/ethereum/go-ethereum/event"
)
func ExampleFeed_acknowledgedEvents() {
// This example shows how the return value of Send can be used for request/reply
// interaction between event consumers and producers.
var feed event.Feed
type ackedEvent struct {
i int
ack chan<- struct{}
}
// Consumers wait for events on the feed and acknowledge processing.
done := make(chan struct{})
defer close(done)
for i := 0; i < 3; i++ {
ch := make(chan ackedEvent, 100)
sub := feed.Subscribe(ch)
go func() {
defer sub.Unsubscribe()
for {
select {
case ev := <-ch:
fmt.Println(ev.i) // "process" the event
ev.ack <- struct{}{}
case <-done:
return
}
}
}()
}
// The producer sends values of type ackedEvent with increasing values of i.
// It waits for all consumers to acknowledge before sending the next event.
for i := 0; i < 3; i++ {
acksignal := make(chan struct{})
n := feed.Send(ackedEvent{i, acksignal})
for ack := 0; ack < n; ack++ {
<-acksignal
}
}
// Output:
// 0
// 0
// 0
// 1
// 1
// 1
// 2
// 2
// 2
}
| event/example_feed_test.go | 0 | https://github.com/ethereum/go-ethereum/commit/37ecff0967bec978e0723f4861803943bd6d0e17 | [
0.00017789486446417868,
0.00017356465104967356,
0.00016397189756389707,
0.0001743403699947521,
0.000004073239779245341
] |
{
"id": 2,
"code_window": [
"\t\tgen := readGenesis(ctx.String(GenesisFlag.Name))\n",
"\t\tgenesisConfig = gen\n",
"\t\tdb := rawdb.NewMemoryDatabase()\n",
"\t\tgenesis := gen.MustCommit(db)\n",
"\t\tstatedb, _ = state.New(genesis.Root(), state.NewDatabase(db), nil)\n",
"\t\tchainConfig = gen.Config\n",
"\t} else {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\tsdb := state.NewDatabaseWithConfig(db, &trie.Config{Preimages: preimages})\n",
"\t\tstatedb, _ = state.New(genesis.Root(), sdb, nil)\n"
],
"file_path": "cmd/evm/runner.go",
"type": "replace",
"edit_start_line_idx": 141
} | // Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package tests
import (
"encoding/hex"
"encoding/json"
"fmt"
"math/big"
"strconv"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3"
)
// StateTest checks transaction processing without block context.
// See https://github.com/ethereum/EIPs/issues/176 for the test format specification.
type StateTest struct {
json stJSON
}
// StateSubtest selects a specific configuration of a General State Test.
type StateSubtest struct {
Fork string
Index int
}
func (t *StateTest) UnmarshalJSON(in []byte) error {
return json.Unmarshal(in, &t.json)
}
type stJSON struct {
Env stEnv `json:"env"`
Pre core.GenesisAlloc `json:"pre"`
Tx stTransaction `json:"transaction"`
Out hexutil.Bytes `json:"out"`
Post map[string][]stPostState `json:"post"`
}
type stPostState struct {
Root common.UnprefixedHash `json:"hash"`
Logs common.UnprefixedHash `json:"logs"`
TxBytes hexutil.Bytes `json:"txbytes"`
ExpectException string `json:"expectException"`
Indexes struct {
Data int `json:"data"`
Gas int `json:"gas"`
Value int `json:"value"`
}
}
//go:generate go run github.com/fjl/gencodec -type stEnv -field-override stEnvMarshaling -out gen_stenv.go
type stEnv struct {
Coinbase common.Address `json:"currentCoinbase" gencodec:"required"`
Difficulty *big.Int `json:"currentDifficulty" gencodec:"optional"`
Random *big.Int `json:"currentRandom" gencodec:"optional"`
GasLimit uint64 `json:"currentGasLimit" gencodec:"required"`
Number uint64 `json:"currentNumber" gencodec:"required"`
Timestamp uint64 `json:"currentTimestamp" gencodec:"required"`
BaseFee *big.Int `json:"currentBaseFee" gencodec:"optional"`
}
type stEnvMarshaling struct {
Coinbase common.UnprefixedAddress
Difficulty *math.HexOrDecimal256
Random *math.HexOrDecimal256
GasLimit math.HexOrDecimal64
Number math.HexOrDecimal64
Timestamp math.HexOrDecimal64
BaseFee *math.HexOrDecimal256
}
//go:generate go run github.com/fjl/gencodec -type stTransaction -field-override stTransactionMarshaling -out gen_sttransaction.go
type stTransaction struct {
GasPrice *big.Int `json:"gasPrice"`
MaxFeePerGas *big.Int `json:"maxFeePerGas"`
MaxPriorityFeePerGas *big.Int `json:"maxPriorityFeePerGas"`
Nonce uint64 `json:"nonce"`
To string `json:"to"`
Data []string `json:"data"`
AccessLists []*types.AccessList `json:"accessLists,omitempty"`
GasLimit []uint64 `json:"gasLimit"`
Value []string `json:"value"`
PrivateKey []byte `json:"secretKey"`
}
type stTransactionMarshaling struct {
GasPrice *math.HexOrDecimal256
MaxFeePerGas *math.HexOrDecimal256
MaxPriorityFeePerGas *math.HexOrDecimal256
Nonce math.HexOrDecimal64
GasLimit []math.HexOrDecimal64
PrivateKey hexutil.Bytes
}
// GetChainConfig takes a fork definition and returns a chain config.
// The fork definition can be
// - a plain forkname, e.g. `Byzantium`,
// - a fork basename, and a list of EIPs to enable; e.g. `Byzantium+1884+1283`.
func GetChainConfig(forkString string) (baseConfig *params.ChainConfig, eips []int, err error) {
var (
splitForks = strings.Split(forkString, "+")
ok bool
baseName, eipsStrings = splitForks[0], splitForks[1:]
)
if baseConfig, ok = Forks[baseName]; !ok {
return nil, nil, UnsupportedForkError{baseName}
}
for _, eip := range eipsStrings {
if eipNum, err := strconv.Atoi(eip); err != nil {
return nil, nil, fmt.Errorf("syntax error, invalid eip number %v", eipNum)
} else {
if !vm.ValidEip(eipNum) {
return nil, nil, fmt.Errorf("syntax error, invalid eip number %v", eipNum)
}
eips = append(eips, eipNum)
}
}
return baseConfig, eips, nil
}
// Subtests returns all valid subtests of the test.
func (t *StateTest) Subtests() []StateSubtest {
var sub []StateSubtest
for fork, pss := range t.json.Post {
for i := range pss {
sub = append(sub, StateSubtest{fork, i})
}
}
return sub
}
// checkError checks whether the error returned by the state transition matches
// the expectation recorded in the post state. A failed expectation yields a
// wrapped version of the original error, if any, or a new error describing the
// mismatch; a fulfilled expectation yields nil.
func (t *StateTest) checkError(subtest StateSubtest, err error) error {
expectedError := t.json.Post[subtest.Fork][subtest.Index].ExpectException
if err == nil && expectedError == "" {
return nil
}
if err == nil && expectedError != "" {
return fmt.Errorf("expected error %q, got no error", expectedError)
}
if err != nil && expectedError == "" {
return fmt.Errorf("unexpected error: %w", err)
}
if err != nil && expectedError != "" {
// Ignore expected errors (TODO MariusVanDerWijden check error string)
return nil
}
return nil
}
// Run executes a specific subtest and verifies the post-state and logs
func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bool) (*snapshot.Tree, *state.StateDB, error) {
snaps, statedb, root, err := t.RunNoVerify(subtest, vmconfig, snapshotter)
if checkedErr := t.checkError(subtest, err); checkedErr != nil {
return snaps, statedb, checkedErr
}
// The error has been checked; if it was unexpected, it's already returned.
if err != nil {
// Here, an error exists but it was expected.
// We do not check the post state or logs.
return snaps, statedb, nil
}
post := t.json.Post[subtest.Fork][subtest.Index]
// N.B: We need to do this in a two-step process, because the first Commit takes care
// of suicides, and we need to touch the coinbase _after_ it has potentially suicided.
if root != common.Hash(post.Root) {
return snaps, statedb, fmt.Errorf("post state root mismatch: got %x, want %x", root, post.Root)
}
if logs := rlpHash(statedb.Logs()); logs != common.Hash(post.Logs) {
return snaps, statedb, fmt.Errorf("post state logs hash mismatch: got %x, want %x", logs, post.Logs)
}
return snaps, statedb, nil
}
// RunNoVerify runs a specific subtest and returns the statedb and post-state root
func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapshotter bool) (*snapshot.Tree, *state.StateDB, common.Hash, error) {
config, eips, err := GetChainConfig(subtest.Fork)
if err != nil {
return nil, nil, common.Hash{}, UnsupportedForkError{subtest.Fork}
}
vmconfig.ExtraEips = eips
block := t.genesis(config).ToBlock()
snaps, statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter)
var baseFee *big.Int
if config.IsLondon(new(big.Int)) {
baseFee = t.json.Env.BaseFee
if baseFee == nil {
// Retesteth uses `0x10` for genesis baseFee. Therefore, it defaults to
// parent - 2 : 0xa as the basefee for 'this' context.
baseFee = big.NewInt(0x0a)
}
}
post := t.json.Post[subtest.Fork][subtest.Index]
msg, err := t.json.Tx.toMessage(post, baseFee)
if err != nil {
return nil, nil, common.Hash{}, err
}
// Try to recover tx with current signer
if len(post.TxBytes) != 0 {
var ttx types.Transaction
err := ttx.UnmarshalBinary(post.TxBytes)
if err != nil {
return nil, nil, common.Hash{}, err
}
if _, err := types.Sender(types.LatestSigner(config), &ttx); err != nil {
return nil, nil, common.Hash{}, err
}
}
// Prepare the EVM.
txContext := core.NewEVMTxContext(msg)
context := core.NewEVMBlockContext(block.Header(), nil, &t.json.Env.Coinbase)
context.GetHash = vmTestBlockHash
context.BaseFee = baseFee
context.Random = nil
if t.json.Env.Difficulty != nil {
context.Difficulty = new(big.Int).Set(t.json.Env.Difficulty)
}
if config.IsLondon(new(big.Int)) && t.json.Env.Random != nil {
rnd := common.BigToHash(t.json.Env.Random)
context.Random = &rnd
context.Difficulty = big.NewInt(0)
}
evm := vm.NewEVM(context, txContext, statedb, config, vmconfig)
// Execute the message.
snapshot := statedb.Snapshot()
gaspool := new(core.GasPool)
gaspool.AddGas(block.GasLimit())
_, err = core.ApplyMessage(evm, msg, gaspool)
if err != nil {
statedb.RevertToSnapshot(snapshot)
}
// Add 0-value mining reward. This only makes a difference in the cases
// where
// - the coinbase suicided, or
// - there are only 'bad' transactions, which aren't executed. In those cases,
// the coinbase gets no txfee, so isn't created, and thus needs to be touched
statedb.AddBalance(block.Coinbase(), new(big.Int))
// Commit block
statedb.Commit(config.IsEIP158(block.Number()))
// And _now_ get the state root
root := statedb.IntermediateRoot(config.IsEIP158(block.Number()))
return snaps, statedb, root, err
}
func (t *StateTest) gasLimit(subtest StateSubtest) uint64 {
return t.json.Tx.GasLimit[t.json.Post[subtest.Fork][subtest.Index].Indexes.Gas]
}
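// MakePreState populates a fresh state database with the given genesis
// accounts, commits it and, if requested, builds a snapshot tree on top of
// the committed root.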
func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool) (*snapshot.Tree, *state.StateDB) {
sdb := state.NewDatabase(db)
statedb, _ := state.New(common.Hash{}, sdb, nil)
for addr, a := range accounts {
statedb.SetCode(addr, a.Code)
statedb.SetNonce(addr, a.Nonce)
statedb.SetBalance(addr, a.Balance)
for k, v := range a.Storage {
statedb.SetState(addr, k, v)
}
}
// Commit and re-open to start with a clean state.
root, _ := statedb.Commit(false)
var snaps *snapshot.Tree
if snapshotter {
snapconfig := snapshot.Config{
CacheSize: 1,
Recovery: false,
NoBuild: false,
AsyncBuild: false,
}
snaps, _ = snapshot.New(snapconfig, db, sdb.TrieDB(), root)
}
statedb, _ = state.New(root, sdb, snaps)
return snaps, statedb
}
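// genesis assembles a core.Genesis from the test environment so that its
// header can serve as the block context for the state transition.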
func (t *StateTest) genesis(config *params.ChainConfig) *core.Genesis {
genesis := &core.Genesis{
Config: config,
Coinbase: t.json.Env.Coinbase,
Difficulty: t.json.Env.Difficulty,
GasLimit: t.json.Env.GasLimit,
Number: t.json.Env.Number,
Timestamp: t.json.Env.Timestamp,
Alloc: t.json.Pre,
}
if t.json.Env.Random != nil {
// Post-Merge
genesis.Mixhash = common.BigToHash(t.json.Env.Random)
genesis.Difficulty = big.NewInt(0)
}
return genesis
}
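// toMessage converts the test transaction into a core.Message, selecting the
// data, value and gas limit variants indicated by the post state indexes.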
func (tx *stTransaction) toMessage(ps stPostState, baseFee *big.Int) (*core.Message, error) {
// Derive sender from private key if present.
var from common.Address
if len(tx.PrivateKey) > 0 {
key, err := crypto.ToECDSA(tx.PrivateKey)
if err != nil {
return nil, fmt.Errorf("invalid private key: %v", err)
}
from = crypto.PubkeyToAddress(key.PublicKey)
}
// Parse recipient if present.
var to *common.Address
if tx.To != "" {
to = new(common.Address)
if err := to.UnmarshalText([]byte(tx.To)); err != nil {
return nil, fmt.Errorf("invalid to address: %v", err)
}
}
// Get values specific to this post state.
	if ps.Indexes.Data >= len(tx.Data) {
		return nil, fmt.Errorf("tx data index %d out of bounds", ps.Indexes.Data)
	}
	if ps.Indexes.Value >= len(tx.Value) {
		return nil, fmt.Errorf("tx value index %d out of bounds", ps.Indexes.Value)
	}
	if ps.Indexes.Gas >= len(tx.GasLimit) {
		return nil, fmt.Errorf("tx gas limit index %d out of bounds", ps.Indexes.Gas)
	}
dataHex := tx.Data[ps.Indexes.Data]
valueHex := tx.Value[ps.Indexes.Value]
gasLimit := tx.GasLimit[ps.Indexes.Gas]
// Value, Data hex encoding is messy: https://github.com/ethereum/tests/issues/203
value := new(big.Int)
if valueHex != "0x" {
v, ok := math.ParseBig256(valueHex)
if !ok {
return nil, fmt.Errorf("invalid tx value %q", valueHex)
}
value = v
}
data, err := hex.DecodeString(strings.TrimPrefix(dataHex, "0x"))
if err != nil {
return nil, fmt.Errorf("invalid tx data %q", dataHex)
}
var accessList types.AccessList
if tx.AccessLists != nil && tx.AccessLists[ps.Indexes.Data] != nil {
accessList = *tx.AccessLists[ps.Indexes.Data]
}
// If baseFee provided, set gasPrice to effectiveGasPrice.
gasPrice := tx.GasPrice
if baseFee != nil {
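		// Fall back to the legacy gasPrice if no fee cap was given; if that is
		// nil too, default to zero so the effective price computation below
		// never dereferences a nil big.Int.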
if tx.MaxFeePerGas == nil {
tx.MaxFeePerGas = gasPrice
}
if tx.MaxFeePerGas == nil {
tx.MaxFeePerGas = new(big.Int)
}
if tx.MaxPriorityFeePerGas == nil {
tx.MaxPriorityFeePerGas = tx.MaxFeePerGas
}
gasPrice = math.BigMin(new(big.Int).Add(tx.MaxPriorityFeePerGas, baseFee),
tx.MaxFeePerGas)
}
if gasPrice == nil {
return nil, fmt.Errorf("no gas price provided")
}
msg := &core.Message{
From: from,
To: to,
Nonce: tx.Nonce,
Value: value,
GasLimit: gasLimit,
GasPrice: gasPrice,
GasFeeCap: tx.MaxFeePerGas,
GasTipCap: tx.MaxPriorityFeePerGas,
Data: data,
AccessList: accessList,
}
return msg, nil
}
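// rlpHash RLP-encodes x and returns its Keccak-256 hash.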
func rlpHash(x interface{}) (h common.Hash) {
hw := sha3.NewLegacyKeccak256()
rlp.Encode(hw, x)
hw.Sum(h[:0])
return h
}
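// vmTestBlockHash derives a deterministic pseudo hash from the block number,
// standing in for real block hashes in the test environment.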
func vmTestBlockHash(n uint64) common.Hash {
return common.BytesToHash(crypto.Keccak256([]byte(big.NewInt(int64(n)).String())))
}
| tests/state_test_util.go | 1 | https://github.com/ethereum/go-ethereum/commit/37ecff0967bec978e0723f4861803943bd6d0e17 | [
0.9985411167144775,
0.1627836972475052,
0.00016359785513486713,
0.0001753453689161688,
0.3651157021522522
] |
{
"id": 2,
"code_window": [
"\t\tgen := readGenesis(ctx.String(GenesisFlag.Name))\n",
"\t\tgenesisConfig = gen\n",
"\t\tdb := rawdb.NewMemoryDatabase()\n",
"\t\tgenesis := gen.MustCommit(db)\n",
"\t\tstatedb, _ = state.New(genesis.Root(), state.NewDatabase(db), nil)\n",
"\t\tchainConfig = gen.Config\n",
"\t} else {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\tsdb := state.NewDatabaseWithConfig(db, &trie.Config{Preimages: preimages})\n",
"\t\tstatedb, _ = state.New(genesis.Root(), sdb, nil)\n"
],
"file_path": "cmd/evm/runner.go",
"type": "replace",
"edit_start_line_idx": 141
} | // Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package gasprice
import (
"context"
"errors"
"math/big"
"testing"
"github.com/ethereum/go-ethereum/rpc"
)
func TestFeeHistory(t *testing.T) {
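	// Each case requests a fee history window and checks the first returned
	// block, the reward/baseFee/gasUsedRatio array lengths and the error.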
var cases = []struct {
pending bool
maxHeader, maxBlock int
count int
last rpc.BlockNumber
percent []float64
expFirst uint64
expCount int
expErr error
}{
{false, 1000, 1000, 10, 30, nil, 21, 10, nil},
{false, 1000, 1000, 10, 30, []float64{0, 10}, 21, 10, nil},
{false, 1000, 1000, 10, 30, []float64{20, 10}, 0, 0, errInvalidPercentile},
{false, 1000, 1000, 1000000000, 30, nil, 0, 31, nil},
{false, 1000, 1000, 1000000000, rpc.LatestBlockNumber, nil, 0, 33, nil},
{false, 1000, 1000, 10, 40, nil, 0, 0, errRequestBeyondHead},
{true, 1000, 1000, 10, 40, nil, 0, 0, errRequestBeyondHead},
{false, 20, 2, 100, rpc.LatestBlockNumber, nil, 13, 20, nil},
{false, 20, 2, 100, rpc.LatestBlockNumber, []float64{0, 10}, 31, 2, nil},
{false, 20, 2, 100, 32, []float64{0, 10}, 31, 2, nil},
{false, 1000, 1000, 1, rpc.PendingBlockNumber, nil, 0, 0, nil},
{false, 1000, 1000, 2, rpc.PendingBlockNumber, nil, 32, 1, nil},
{true, 1000, 1000, 2, rpc.PendingBlockNumber, nil, 32, 2, nil},
{true, 1000, 1000, 2, rpc.PendingBlockNumber, []float64{0, 10}, 32, 2, nil},
{false, 1000, 1000, 2, rpc.FinalizedBlockNumber, []float64{0, 10}, 24, 2, nil},
{false, 1000, 1000, 2, rpc.SafeBlockNumber, []float64{0, 10}, 24, 2, nil},
}
for i, c := range cases {
config := Config{
MaxHeaderHistory: c.maxHeader,
MaxBlockHistory: c.maxBlock,
}
backend := newTestBackend(t, big.NewInt(16), c.pending)
oracle := NewOracle(backend, config)
first, reward, baseFee, ratio, err := oracle.FeeHistory(context.Background(), c.count, c.last, c.percent)
backend.teardown()
expReward := c.expCount
if len(c.percent) == 0 {
expReward = 0
}
expBaseFee := c.expCount
if expBaseFee != 0 {
expBaseFee++
}
if first.Uint64() != c.expFirst {
t.Fatalf("Test case %d: first block mismatch, want %d, got %d", i, c.expFirst, first)
}
if len(reward) != expReward {
t.Fatalf("Test case %d: reward array length mismatch, want %d, got %d", i, expReward, len(reward))
}
if len(baseFee) != expBaseFee {
t.Fatalf("Test case %d: baseFee array length mismatch, want %d, got %d", i, expBaseFee, len(baseFee))
}
if len(ratio) != c.expCount {
t.Fatalf("Test case %d: gasUsedRatio array length mismatch, want %d, got %d", i, c.expCount, len(ratio))
}
if err != c.expErr && !errors.Is(err, c.expErr) {
t.Fatalf("Test case %d: error mismatch, want %v, got %v", i, c.expErr, err)
}
}
}
| eth/gasprice/feehistory_test.go | 0 | https://github.com/ethereum/go-ethereum/commit/37ecff0967bec978e0723f4861803943bd6d0e17 | [
0.0002496520755812526,
0.00018253872985951602,
0.00016754982061684132,
0.00017695539281703532,
0.000022707030439050868
] |
{
"id": 2,
"code_window": [
"\t\tgen := readGenesis(ctx.String(GenesisFlag.Name))\n",
"\t\tgenesisConfig = gen\n",
"\t\tdb := rawdb.NewMemoryDatabase()\n",
"\t\tgenesis := gen.MustCommit(db)\n",
"\t\tstatedb, _ = state.New(genesis.Root(), state.NewDatabase(db), nil)\n",
"\t\tchainConfig = gen.Config\n",
"\t} else {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\tsdb := state.NewDatabaseWithConfig(db, &trie.Config{Preimages: preimages})\n",
"\t\tstatedb, _ = state.New(genesis.Root(), sdb, nil)\n"
],
"file_path": "cmd/evm/runner.go",
"type": "replace",
"edit_start_line_idx": 141
} | // Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package hexutil_test
import (
"encoding/json"
"fmt"
"github.com/ethereum/go-ethereum/common/hexutil"
)
type MyType [5]byte
func (v *MyType) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedText("MyType", input, v[:])
}
func (v MyType) String() string {
return hexutil.Bytes(v[:]).String()
}
func ExampleUnmarshalFixedText() {
var v1, v2 MyType
fmt.Println("v1 error:", json.Unmarshal([]byte(`"0x01"`), &v1))
fmt.Println("v2 error:", json.Unmarshal([]byte(`"0x0101010101"`), &v2))
fmt.Println("v2:", v2)
// Output:
// v1 error: hex string has length 2, want 10 for MyType
// v2 error: <nil>
// v2: 0x0101010101
}
| common/hexutil/json_example_test.go | 0 | https://github.com/ethereum/go-ethereum/commit/37ecff0967bec978e0723f4861803943bd6d0e17 | [
0.00028000507154501975,
0.00019689947657752782,
0.00017293675045948476,
0.00017689990636426955,
0.000041607545426813886
] |
{
"id": 2,
"code_window": [
"\t\tgen := readGenesis(ctx.String(GenesisFlag.Name))\n",
"\t\tgenesisConfig = gen\n",
"\t\tdb := rawdb.NewMemoryDatabase()\n",
"\t\tgenesis := gen.MustCommit(db)\n",
"\t\tstatedb, _ = state.New(genesis.Root(), state.NewDatabase(db), nil)\n",
"\t\tchainConfig = gen.Config\n",
"\t} else {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\tsdb := state.NewDatabaseWithConfig(db, &trie.Config{Preimages: preimages})\n",
"\t\tstatedb, _ = state.New(genesis.Root(), sdb, nil)\n"
],
"file_path": "cmd/evm/runner.go",
"type": "replace",
"edit_start_line_idx": 141
} | // Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package light
import (
"context"
"fmt"
"math/big"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
)
const (
// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
chainHeadChanSize = 10
)
// txPermanent is the number of mined blocks after a mined transaction is
// considered permanent and no rollback is expected
var txPermanent = uint64(500)
// TxPool implements the transaction pool for light clients, which keeps track
// of the status of locally created transactions, detecting if they are included
// in a block (mined) or rolled back. There are no queued transactions since we
// always receive all locally signed transactions in the same order as they are
// created.
type TxPool struct {
config *params.ChainConfig
signer types.Signer
quit chan bool
txFeed event.Feed
scope event.SubscriptionScope
chainHeadCh chan core.ChainHeadEvent
chainHeadSub event.Subscription
mu sync.RWMutex
chain *LightChain
odr OdrBackend
chainDb ethdb.Database
relay TxRelayBackend
head common.Hash
nonce map[common.Address]uint64 // "pending" nonce
pending map[common.Hash]*types.Transaction // pending transactions by tx hash
mined map[common.Hash][]*types.Transaction // mined transactions by block hash
clearIdx uint64 // earliest block nr that can contain mined tx info
istanbul bool // Fork indicator whether we are in the istanbul stage.
eip2718 bool // Fork indicator whether we are in the eip2718 stage.
shanghai bool // Fork indicator whether we are in the shanghai stage.
}
// TxRelayBackend provides an interface to the mechanism that forwards transactions to the
// ETH network. The implementations of the functions should be non-blocking.
//
// Send instructs the backend to forward new transactions. NewHead notifies the
// backend about a new head after it has been processed by the tx pool, including
// mined and rolled back transactions since the last event.
//
// Discard notifies backend about transactions that should be discarded either because
// they have been replaced by a re-send or because they have been mined long ago and no
// rollback is expected.
type TxRelayBackend interface {
Send(txs types.Transactions)
NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash)
Discard(hashes []common.Hash)
}
// NewTxPool creates a new light transaction pool
func NewTxPool(config *params.ChainConfig, chain *LightChain, relay TxRelayBackend) *TxPool {
pool := &TxPool{
config: config,
signer: types.LatestSigner(config),
nonce: make(map[common.Address]uint64),
pending: make(map[common.Hash]*types.Transaction),
mined: make(map[common.Hash][]*types.Transaction),
quit: make(chan bool),
chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
chain: chain,
relay: relay,
odr: chain.Odr(),
chainDb: chain.Odr().Database(),
head: chain.CurrentHeader().Hash(),
clearIdx: chain.CurrentHeader().Number.Uint64(),
}
// Subscribe events from blockchain
pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
go pool.eventLoop()
return pool
}
// currentState returns the light state of the current head header
func (pool *TxPool) currentState(ctx context.Context) *state.StateDB {
return NewState(ctx, pool.chain.CurrentHeader(), pool.odr)
}
// GetNonce returns the "pending" nonce of a given address. It always queries
// the nonce belonging to the latest header too in order to detect if another
// client using the same key sent a transaction.
func (pool *TxPool) GetNonce(ctx context.Context, addr common.Address) (uint64, error) {
state := pool.currentState(ctx)
nonce := state.GetNonce(addr)
if state.Error() != nil {
return 0, state.Error()
}
sn, ok := pool.nonce[addr]
if ok && sn > nonce {
nonce = sn
}
if !ok || sn < nonce {
pool.nonce[addr] = nonce
}
return nonce, nil
}
// txStateChanges stores the recent changes between pending/mined states of
// transactions. True means mined, false means rolled back, no entry means no change
type txStateChanges map[common.Hash]bool
// setState sets the status of a tx to either recently mined or recently rolled back
func (txc txStateChanges) setState(txHash common.Hash, mined bool) {
val, ent := txc[txHash]
if ent && (val != mined) {
delete(txc, txHash)
} else {
txc[txHash] = mined
}
}
// getLists creates lists of mined and rolled back tx hashes
func (txc txStateChanges) getLists() (mined []common.Hash, rollback []common.Hash) {
for hash, val := range txc {
if val {
mined = append(mined, hash)
} else {
rollback = append(rollback, hash)
}
}
return
}
// checkMinedTxs checks newly added blocks for the currently pending transactions
// and marks them as mined if necessary. It also stores block position in the db
// and adds them to the received txStateChanges map.
func (pool *TxPool) checkMinedTxs(ctx context.Context, hash common.Hash, number uint64, txc txStateChanges) error {
// If no transactions are pending, we don't care about anything
if len(pool.pending) == 0 {
return nil
}
block, err := GetBlock(ctx, pool.odr, hash, number)
if err != nil {
return err
}
// Gather all the local transaction mined in this block
list := pool.mined[hash]
for _, tx := range block.Transactions() {
if _, ok := pool.pending[tx.Hash()]; ok {
list = append(list, tx)
}
}
// If some transactions have been mined, write the needed data to disk and update
if list != nil {
// Retrieve all the receipts belonging to this block and write the lookup table
if _, err := GetBlockReceipts(ctx, pool.odr, hash, number); err != nil { // ODR caches, ignore results
return err
}
rawdb.WriteTxLookupEntriesByBlock(pool.chainDb, block)
// Update the transaction pool's state
for _, tx := range list {
delete(pool.pending, tx.Hash())
txc.setState(tx.Hash(), true)
}
pool.mined[hash] = list
}
return nil
}
// rollbackTxs marks the transactions contained in recently rolled back blocks
// as rolled back. It also removes any positional lookup entries.
func (pool *TxPool) rollbackTxs(hash common.Hash, txc txStateChanges) {
batch := pool.chainDb.NewBatch()
if list, ok := pool.mined[hash]; ok {
for _, tx := range list {
txHash := tx.Hash()
rawdb.DeleteTxLookupEntry(batch, txHash)
pool.pending[txHash] = tx
txc.setState(txHash, false)
}
delete(pool.mined, hash)
}
batch.Write()
}
// reorgOnNewHead sets a new head header, processing (and rolling back if necessary)
// the blocks since the last known head and returns a txStateChanges map containing
// the recently mined and rolled back transaction hashes. If an error (context
// timeout) occurs during checking new blocks, it leaves the locally known head
// at the latest checked block and still returns a valid txStateChanges, making it
// possible to continue checking the missing blocks at the next chain head event
func (pool *TxPool) reorgOnNewHead(ctx context.Context, newHeader *types.Header) (txStateChanges, error) {
txc := make(txStateChanges)
oldh := pool.chain.GetHeaderByHash(pool.head)
newh := newHeader
// find common ancestor, create list of rolled back and new block hashes
var oldHashes, newHashes []common.Hash
for oldh.Hash() != newh.Hash() {
if oldh.Number.Uint64() >= newh.Number.Uint64() {
oldHashes = append(oldHashes, oldh.Hash())
oldh = pool.chain.GetHeader(oldh.ParentHash, oldh.Number.Uint64()-1)
}
if oldh.Number.Uint64() < newh.Number.Uint64() {
newHashes = append(newHashes, newh.Hash())
newh = pool.chain.GetHeader(newh.ParentHash, newh.Number.Uint64()-1)
if newh == nil {
// happens when CHT syncing, nothing to do
newh = oldh
}
}
}
if oldh.Number.Uint64() < pool.clearIdx {
pool.clearIdx = oldh.Number.Uint64()
}
// roll back old blocks
for _, hash := range oldHashes {
pool.rollbackTxs(hash, txc)
}
pool.head = oldh.Hash()
// check mined txs of new blocks (array is in reversed order)
for i := len(newHashes) - 1; i >= 0; i-- {
hash := newHashes[i]
if err := pool.checkMinedTxs(ctx, hash, newHeader.Number.Uint64()-uint64(i), txc); err != nil {
return txc, err
}
pool.head = hash
}
// clear old mined tx entries of old blocks
if idx := newHeader.Number.Uint64(); idx > pool.clearIdx+txPermanent {
idx2 := idx - txPermanent
if len(pool.mined) > 0 {
for i := pool.clearIdx; i < idx2; i++ {
hash := rawdb.ReadCanonicalHash(pool.chainDb, i)
if list, ok := pool.mined[hash]; ok {
hashes := make([]common.Hash, len(list))
for i, tx := range list {
hashes[i] = tx.Hash()
}
pool.relay.Discard(hashes)
delete(pool.mined, hash)
}
}
}
pool.clearIdx = idx2
}
return txc, nil
}
// blockCheckTimeout is the time limit for checking new blocks for mined
// transactions. Checking resumes at the next chain head event if timed out.
const blockCheckTimeout = time.Second * 3
// eventLoop processes chain head events and also notifies the tx relay backend
// about the new head hash and tx state changes
func (pool *TxPool) eventLoop() {
for {
select {
case ev := <-pool.chainHeadCh:
pool.setNewHead(ev.Block.Header())
// hack in order to avoid hogging the lock; this part will
// be replaced by a subsequent PR.
time.Sleep(time.Millisecond)
// System stopped
case <-pool.chainHeadSub.Err():
return
}
}
}
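// setNewHead reorganises the pool contents around the new head header,
// notifies the relay backend about mined and rolled back transactions and
// refreshes the fork indicators for the next pending block.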
func (pool *TxPool) setNewHead(head *types.Header) {
pool.mu.Lock()
defer pool.mu.Unlock()
ctx, cancel := context.WithTimeout(context.Background(), blockCheckTimeout)
defer cancel()
txc, _ := pool.reorgOnNewHead(ctx, head)
m, r := txc.getLists()
pool.relay.NewHead(pool.head, m, r)
// Update fork indicator by next pending block number
next := new(big.Int).Add(head.Number, big.NewInt(1))
pool.istanbul = pool.config.IsIstanbul(next)
pool.eip2718 = pool.config.IsBerlin(next)
pool.shanghai = pool.config.IsShanghai(uint64(time.Now().Unix()))
}
// Stop stops the light transaction pool
func (pool *TxPool) Stop() {
// Unsubscribe all subscriptions registered from txpool
pool.scope.Close()
// Unsubscribe subscriptions registered from blockchain
pool.chainHeadSub.Unsubscribe()
close(pool.quit)
log.Info("Transaction pool stopped")
}
// SubscribeNewTxsEvent registers a subscription of core.NewTxsEvent and
// starts sending event to the given channel.
func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
return pool.scope.Track(pool.txFeed.Subscribe(ch))
}
// Stats returns the number of currently pending (locally created) transactions
func (pool *TxPool) Stats() (pending int) {
pool.mu.RLock()
defer pool.mu.RUnlock()
pending = len(pool.pending)
return
}
// validateTx checks whether a transaction is valid according to the consensus rules.
func (pool *TxPool) validateTx(ctx context.Context, tx *types.Transaction) error {
// Validate sender
var (
from common.Address
err error
)
	// Validate the transaction sender and its signature. Return
	// an error if the from field is invalid.
if from, err = types.Sender(pool.signer, tx); err != nil {
return txpool.ErrInvalidSender
}
// Last but not least check for nonce errors
currentState := pool.currentState(ctx)
if n := currentState.GetNonce(from); n > tx.Nonce() {
return core.ErrNonceTooLow
}
// Check the transaction doesn't exceed the current
// block limit gas.
header := pool.chain.GetHeaderByHash(pool.head)
if header.GasLimit < tx.Gas() {
return txpool.ErrGasLimit
}
	// Transactions can't be negative. This should never happen with
	// RLP-decoded transactions, but may occur if a transaction is
	// created through the RPC, for example.
if tx.Value().Sign() < 0 {
return txpool.ErrNegativeValue
}
// Transactor should have enough funds to cover the costs
// cost == V + GP * GL
if b := currentState.GetBalance(from); b.Cmp(tx.Cost()) < 0 {
return core.ErrInsufficientFunds
}
// Should supply enough intrinsic gas
gas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul, pool.shanghai)
if err != nil {
return err
}
if tx.Gas() < gas {
return core.ErrIntrinsicGas
}
return currentState.Error()
}
// add validates a new transaction and sets its state pending if processable.
// It also updates the locally stored nonce if necessary.
func (pool *TxPool) add(ctx context.Context, tx *types.Transaction) error {
hash := tx.Hash()
if pool.pending[hash] != nil {
return fmt.Errorf("known transaction (%x)", hash[:4])
}
err := pool.validateTx(ctx, tx)
if err != nil {
return err
}
if _, ok := pool.pending[hash]; !ok {
pool.pending[hash] = tx
nonce := tx.Nonce() + 1
addr, _ := types.Sender(pool.signer, tx)
if nonce > pool.nonce[addr] {
pool.nonce[addr] = nonce
}
// Notify the subscribers. This event is posted in a goroutine
// because it's possible that somewhere during the post "Remove transaction"
// gets called which will then wait for the global tx pool lock and deadlock.
go pool.txFeed.Send(core.NewTxsEvent{Txs: types.Transactions{tx}})
}
// Print a log message if low enough level is set
log.Debug("Pooled new transaction", "hash", hash, "from", log.Lazy{Fn: func() common.Address { from, _ := types.Sender(pool.signer, tx); return from }}, "to", tx.To())
return nil
}
// Add adds a transaction to the pool if valid and passes it to the tx relay
// backend
func (pool *TxPool) Add(ctx context.Context, tx *types.Transaction) error {
pool.mu.Lock()
defer pool.mu.Unlock()
data, err := tx.MarshalBinary()
if err != nil {
return err
}
if err := pool.add(ctx, tx); err != nil {
return err
}
//fmt.Println("Send", tx.Hash())
pool.relay.Send(types.Transactions{tx})
pool.chainDb.Put(tx.Hash().Bytes(), data)
return nil
}
// AddBatch adds all valid transactions to the pool and passes them to
// the tx relay backend
func (pool *TxPool) AddBatch(ctx context.Context, txs []*types.Transaction) {
pool.mu.Lock()
defer pool.mu.Unlock()
var sendTx types.Transactions
for _, tx := range txs {
if err := pool.add(ctx, tx); err == nil {
sendTx = append(sendTx, tx)
}
}
if len(sendTx) > 0 {
pool.relay.Send(sendTx)
}
}
// GetTransaction returns a transaction if it is contained in the pool
// and nil otherwise.
func (pool *TxPool) GetTransaction(hash common.Hash) *types.Transaction {
// check the txs first
if tx, ok := pool.pending[hash]; ok {
return tx
}
return nil
}
// GetTransactions returns all currently processable transactions.
// The returned slice may be modified by the caller.
func (pool *TxPool) GetTransactions() (txs types.Transactions, err error) {
pool.mu.RLock()
defer pool.mu.RUnlock()
txs = make(types.Transactions, len(pool.pending))
i := 0
for _, tx := range pool.pending {
txs[i] = tx
i++
}
return txs, nil
}
// Content retrieves the data content of the transaction pool, returning all the
// pending as well as queued transactions, grouped by account and nonce.
func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
pool.mu.RLock()
defer pool.mu.RUnlock()
// Retrieve all the pending transactions and sort by account and by nonce
pending := make(map[common.Address]types.Transactions)
for _, tx := range pool.pending {
account, _ := types.Sender(pool.signer, tx)
pending[account] = append(pending[account], tx)
}
// There are no queued transactions in a light pool, just return an empty map
queued := make(map[common.Address]types.Transactions)
return pending, queued
}
// ContentFrom retrieves the data content of the transaction pool, returning the
// pending as well as queued transactions of this address, grouped by nonce.
func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.Transactions) {
pool.mu.RLock()
defer pool.mu.RUnlock()
// Retrieve the pending transactions and sort by nonce
var pending types.Transactions
for _, tx := range pool.pending {
account, _ := types.Sender(pool.signer, tx)
if account != addr {
continue
}
pending = append(pending, tx)
}
// There are no queued transactions in a light pool, just return an empty map
return pending, types.Transactions{}
}
// RemoveTransactions removes all given transactions from the pool.
func (pool *TxPool) RemoveTransactions(txs types.Transactions) {
pool.mu.Lock()
defer pool.mu.Unlock()
var hashes []common.Hash
batch := pool.chainDb.NewBatch()
for _, tx := range txs {
hash := tx.Hash()
delete(pool.pending, hash)
batch.Delete(hash.Bytes())
hashes = append(hashes, hash)
}
batch.Write()
pool.relay.Discard(hashes)
}
// RemoveTx removes the transaction with the given hash from the pool.
func (pool *TxPool) RemoveTx(hash common.Hash) {
pool.mu.Lock()
defer pool.mu.Unlock()
// delete from pending pool
delete(pool.pending, hash)
pool.chainDb.Delete(hash[:])
pool.relay.Discard([]common.Hash{hash})
}
| light/txpool.go | 0 | https://github.com/ethereum/go-ethereum/commit/37ecff0967bec978e0723f4861803943bd6d0e17 | [
0.5544468760490417,
0.010246269404888153,
0.000159741539391689,
0.00017419624782633036,
0.07338372617959976
] |
{
"id": 3,
"code_window": [
"\t\tchainConfig = gen.Config\n",
"\t} else {\n",
"\t\tstatedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)\n",
"\t\tgenesisConfig = new(core.Genesis)\n",
"\t}\n",
"\tif ctx.String(SenderFlag.Name) != \"\" {\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tsdb := state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &trie.Config{Preimages: preimages})\n",
"\t\tstatedb, _ = state.New(common.Hash{}, sdb, nil)\n"
],
"file_path": "cmd/evm/runner.go",
"type": "replace",
"edit_start_line_idx": 144
} | // Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"math/big"
"os"
goruntime "runtime"
"runtime/pprof"
"testing"
"time"
"github.com/ethereum/go-ethereum/cmd/evm/internal/compiler"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/core/vm/runtime"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/urfave/cli/v2"
)
var runCommand = &cli.Command{
Action: runCmd,
Name: "run",
Usage: "run arbitrary evm binary",
ArgsUsage: "<code>",
Description: `The run command runs arbitrary EVM code.`,
}
// readGenesis will read the given JSON format genesis file and return
// the initialized Genesis structure
func readGenesis(genesisPath string) *core.Genesis {
// Make sure we have a valid genesis JSON
if len(genesisPath) == 0 {
utils.Fatalf("Must supply path to genesis JSON file")
}
file, err := os.Open(genesisPath)
if err != nil {
utils.Fatalf("Failed to read genesis file: %v", err)
}
defer file.Close()
genesis := new(core.Genesis)
if err := json.NewDecoder(file).Decode(genesis); err != nil {
utils.Fatalf("invalid genesis file: %v", err)
}
return genesis
}
type execStats struct {
time time.Duration // The execution time.
allocs int64 // The number of heap allocations during execution.
bytesAllocated int64 // The cumulative number of bytes allocated during execution.
}
func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) (output []byte, gasLeft uint64, stats execStats, err error) {
if bench {
result := testing.Benchmark(func(b *testing.B) {
for i := 0; i < b.N; i++ {
output, gasLeft, err = execFunc()
}
})
// Get the average execution time from the benchmarking result.
// There are other useful stats here that could be reported.
stats.time = time.Duration(result.NsPerOp())
stats.allocs = result.AllocsPerOp()
stats.bytesAllocated = result.AllocedBytesPerOp()
} else {
var memStatsBefore, memStatsAfter goruntime.MemStats
goruntime.ReadMemStats(&memStatsBefore)
startTime := time.Now()
output, gasLeft, err = execFunc()
stats.time = time.Since(startTime)
goruntime.ReadMemStats(&memStatsAfter)
stats.allocs = int64(memStatsAfter.Mallocs - memStatsBefore.Mallocs)
stats.bytesAllocated = int64(memStatsAfter.TotalAlloc - memStatsBefore.TotalAlloc)
}
return output, gasLeft, stats, err
}
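// runCmd configures tracing and state from the CLI flags, loads the code to
// execute from a flag, a file or the command line, runs it in the EVM runtime
// and reports the output, gas usage and optional profiles.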
func runCmd(ctx *cli.Context) error {
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
log.Root().SetHandler(glogger)
logconfig := &logger.Config{
EnableMemory: !ctx.Bool(DisableMemoryFlag.Name),
DisableStack: ctx.Bool(DisableStackFlag.Name),
DisableStorage: ctx.Bool(DisableStorageFlag.Name),
EnableReturnData: !ctx.Bool(DisableReturnDataFlag.Name),
Debug: ctx.Bool(DebugFlag.Name),
}
var (
tracer vm.EVMLogger
debugLogger *logger.StructLogger
statedb *state.StateDB
chainConfig *params.ChainConfig
sender = common.BytesToAddress([]byte("sender"))
receiver = common.BytesToAddress([]byte("receiver"))
genesisConfig *core.Genesis
)
if ctx.Bool(MachineFlag.Name) {
tracer = logger.NewJSONLogger(logconfig, os.Stdout)
} else if ctx.Bool(DebugFlag.Name) {
debugLogger = logger.NewStructLogger(logconfig)
tracer = debugLogger
} else {
debugLogger = logger.NewStructLogger(logconfig)
}
if ctx.String(GenesisFlag.Name) != "" {
gen := readGenesis(ctx.String(GenesisFlag.Name))
genesisConfig = gen
db := rawdb.NewMemoryDatabase()
genesis := gen.MustCommit(db)
statedb, _ = state.New(genesis.Root(), state.NewDatabase(db), nil)
chainConfig = gen.Config
} else {
statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
genesisConfig = new(core.Genesis)
}
if ctx.String(SenderFlag.Name) != "" {
sender = common.HexToAddress(ctx.String(SenderFlag.Name))
}
statedb.CreateAccount(sender)
if ctx.String(ReceiverFlag.Name) != "" {
receiver = common.HexToAddress(ctx.String(ReceiverFlag.Name))
}
var code []byte
codeFileFlag := ctx.String(CodeFileFlag.Name)
codeFlag := ctx.String(CodeFlag.Name)
// The '--code' or '--codefile' flag overrides code in state
if codeFileFlag != "" || codeFlag != "" {
var hexcode []byte
if codeFileFlag != "" {
var err error
// If - is specified, it means that code comes from stdin
if codeFileFlag == "-" {
//Try reading from stdin
if hexcode, err = io.ReadAll(os.Stdin); err != nil {
fmt.Printf("Could not load code from stdin: %v\n", err)
os.Exit(1)
}
} else {
// Codefile with hex assembly
if hexcode, err = os.ReadFile(codeFileFlag); err != nil {
fmt.Printf("Could not load code from file: %v\n", err)
os.Exit(1)
}
}
} else {
hexcode = []byte(codeFlag)
}
hexcode = bytes.TrimSpace(hexcode)
if len(hexcode)%2 != 0 {
fmt.Printf("Invalid input length for hex data (%d)\n", len(hexcode))
os.Exit(1)
}
code = common.FromHex(string(hexcode))
} else if fn := ctx.Args().First(); len(fn) > 0 {
// EASM-file to compile
src, err := os.ReadFile(fn)
if err != nil {
return err
}
bin, err := compiler.Compile(fn, src, false)
if err != nil {
return err
}
code = common.Hex2Bytes(bin)
}
initialGas := ctx.Uint64(GasFlag.Name)
if genesisConfig.GasLimit != 0 {
initialGas = genesisConfig.GasLimit
}
runtimeConfig := runtime.Config{
Origin: sender,
State: statedb,
GasLimit: initialGas,
GasPrice: flags.GlobalBig(ctx, PriceFlag.Name),
Value: flags.GlobalBig(ctx, ValueFlag.Name),
Difficulty: genesisConfig.Difficulty,
Time: genesisConfig.Timestamp,
Coinbase: genesisConfig.Coinbase,
BlockNumber: new(big.Int).SetUint64(genesisConfig.Number),
EVMConfig: vm.Config{
Tracer: tracer,
Debug: ctx.Bool(DebugFlag.Name) || ctx.Bool(MachineFlag.Name),
},
}
if cpuProfilePath := ctx.String(CPUProfileFlag.Name); cpuProfilePath != "" {
f, err := os.Create(cpuProfilePath)
if err != nil {
fmt.Println("could not create CPU profile: ", err)
os.Exit(1)
}
if err := pprof.StartCPUProfile(f); err != nil {
fmt.Println("could not start CPU profile: ", err)
os.Exit(1)
}
defer pprof.StopCPUProfile()
}
if chainConfig != nil {
runtimeConfig.ChainConfig = chainConfig
} else {
runtimeConfig.ChainConfig = params.AllEthashProtocolChanges
}
var hexInput []byte
if inputFileFlag := ctx.String(InputFileFlag.Name); inputFileFlag != "" {
var err error
if hexInput, err = os.ReadFile(inputFileFlag); err != nil {
fmt.Printf("could not load input from file: %v\n", err)
os.Exit(1)
}
} else {
hexInput = []byte(ctx.String(InputFlag.Name))
}
hexInput = bytes.TrimSpace(hexInput)
if len(hexInput)%2 != 0 {
fmt.Println("input length must be even")
os.Exit(1)
}
input := common.FromHex(string(hexInput))
var execFunc func() ([]byte, uint64, error)
if ctx.Bool(CreateFlag.Name) {
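		// In create mode the calldata is appended to the init code and the
		// constructor is executed via runtime.Create instead of a plain call.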
input = append(code, input...)
execFunc = func() ([]byte, uint64, error) {
output, _, gasLeft, err := runtime.Create(input, &runtimeConfig)
return output, gasLeft, err
}
} else {
if len(code) > 0 {
statedb.SetCode(receiver, code)
}
execFunc = func() ([]byte, uint64, error) {
return runtime.Call(receiver, input, &runtimeConfig)
}
}
bench := ctx.Bool(BenchFlag.Name)
output, leftOverGas, stats, err := timedExec(bench, execFunc)
if ctx.Bool(DumpFlag.Name) {
statedb.Commit(true)
statedb.IntermediateRoot(true)
fmt.Println(string(statedb.Dump(nil)))
}
if memProfilePath := ctx.String(MemProfileFlag.Name); memProfilePath != "" {
f, err := os.Create(memProfilePath)
if err != nil {
fmt.Println("could not create memory profile: ", err)
os.Exit(1)
}
if err := pprof.WriteHeapProfile(f); err != nil {
fmt.Println("could not write memory profile: ", err)
os.Exit(1)
}
f.Close()
}
if ctx.Bool(DebugFlag.Name) {
if debugLogger != nil {
fmt.Fprintln(os.Stderr, "#### TRACE ####")
logger.WriteTrace(os.Stderr, debugLogger.StructLogs())
}
fmt.Fprintln(os.Stderr, "#### LOGS ####")
logger.WriteLogs(os.Stderr, statedb.Logs())
}
if bench || ctx.Bool(StatDumpFlag.Name) {
fmt.Fprintf(os.Stderr, `EVM gas used: %d
execution time: %v
allocations: %d
allocated bytes: %d
`, initialGas-leftOverGas, stats.time, stats.allocs, stats.bytesAllocated)
}
if tracer == nil {
fmt.Printf("%#x\n", output)
if err != nil {
fmt.Printf(" error: %v\n", err)
}
}
return nil
}
| cmd/evm/runner.go | 1 | https://github.com/ethereum/go-ethereum/commit/37ecff0967bec978e0723f4861803943bd6d0e17 | [
0.9987311959266663,
0.2504441738128662,
0.00016408044029958546,
0.00021128955995664,
0.4315829873085022
] |
{
"id": 3,
"code_window": [
"\t\tchainConfig = gen.Config\n",
"\t} else {\n",
"\t\tstatedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)\n",
"\t\tgenesisConfig = new(core.Genesis)\n",
"\t}\n",
"\tif ctx.String(SenderFlag.Name) != \"\" {\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tsdb := state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &trie.Config{Preimages: preimages})\n",
"\t\tstatedb, _ = state.New(common.Hash{}, sdb, nil)\n"
],
"file_path": "cmd/evm/runner.go",
"type": "replace",
"edit_start_line_idx": 144
} | package metrics
import (
"math"
"math/rand"
"runtime"
"testing"
"time"
)
// Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively
// expensive computations like Variance, the cost of copying the Sample, as
// approximated by a make and copy, is much greater than the cost of the
// computation for small samples and only slightly less for large samples.
func BenchmarkCompute1000(b *testing.B) {
s := make([]int64, 1000)
for i := 0; i < len(s); i++ {
s[i] = int64(i)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
SampleVariance(s)
}
}
func BenchmarkCompute1000000(b *testing.B) {
s := make([]int64, 1000000)
for i := 0; i < len(s); i++ {
s[i] = int64(i)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
SampleVariance(s)
}
}
func BenchmarkCopy1000(b *testing.B) {
s := make([]int64, 1000)
for i := 0; i < len(s); i++ {
s[i] = int64(i)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
sCopy := make([]int64, len(s))
copy(sCopy, s)
}
}
func BenchmarkCopy1000000(b *testing.B) {
s := make([]int64, 1000000)
for i := 0; i < len(s); i++ {
s[i] = int64(i)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
sCopy := make([]int64, len(s))
copy(sCopy, s)
}
}
func BenchmarkExpDecaySample257(b *testing.B) {
benchmarkSample(b, NewExpDecaySample(257, 0.015))
}
func BenchmarkExpDecaySample514(b *testing.B) {
benchmarkSample(b, NewExpDecaySample(514, 0.015))
}
func BenchmarkExpDecaySample1028(b *testing.B) {
benchmarkSample(b, NewExpDecaySample(1028, 0.015))
}
func BenchmarkUniformSample257(b *testing.B) {
benchmarkSample(b, NewUniformSample(257))
}
func BenchmarkUniformSample514(b *testing.B) {
benchmarkSample(b, NewUniformSample(514))
}
func BenchmarkUniformSample1028(b *testing.B) {
benchmarkSample(b, NewUniformSample(1028))
}
func TestExpDecaySample10(t *testing.T) {
s := NewExpDecaySample(100, 0.99)
for i := 0; i < 10; i++ {
s.Update(int64(i))
}
if size := s.Count(); size != 10 {
t.Errorf("s.Count(): 10 != %v\n", size)
}
if size := s.Size(); size != 10 {
t.Errorf("s.Size(): 10 != %v\n", size)
}
if l := len(s.Values()); l != 10 {
t.Errorf("len(s.Values()): 10 != %v\n", l)
}
for _, v := range s.Values() {
if v > 10 || v < 0 {
t.Errorf("out of range [0, 10): %v\n", v)
}
}
}
func TestExpDecaySample100(t *testing.T) {
s := NewExpDecaySample(1000, 0.01)
for i := 0; i < 100; i++ {
s.Update(int64(i))
}
if size := s.Count(); size != 100 {
t.Errorf("s.Count(): 100 != %v\n", size)
}
if size := s.Size(); size != 100 {
t.Errorf("s.Size(): 100 != %v\n", size)
}
if l := len(s.Values()); l != 100 {
t.Errorf("len(s.Values()): 100 != %v\n", l)
}
for _, v := range s.Values() {
if v > 100 || v < 0 {
t.Errorf("out of range [0, 100): %v\n", v)
}
}
}
func TestExpDecaySample1000(t *testing.T) {
s := NewExpDecaySample(100, 0.99)
for i := 0; i < 1000; i++ {
s.Update(int64(i))
}
if size := s.Count(); size != 1000 {
t.Errorf("s.Count(): 1000 != %v\n", size)
}
if size := s.Size(); size != 100 {
t.Errorf("s.Size(): 100 != %v\n", size)
}
if l := len(s.Values()); l != 100 {
t.Errorf("len(s.Values()): 100 != %v\n", l)
}
for _, v := range s.Values() {
if v > 1000 || v < 0 {
t.Errorf("out of range [0, 1000): %v\n", v)
}
}
}
// This test makes sure that the sample's priority is not amplified by using
// nanosecond duration since start rather than second duration since start.
// The priority becomes +Inf quickly after starting if this is done,
// effectively freezing the set of samples until a rescale step happens.
func TestExpDecaySampleNanosecondRegression(t *testing.T) {
s := NewExpDecaySample(100, 0.99)
for i := 0; i < 100; i++ {
s.Update(10)
}
time.Sleep(1 * time.Millisecond)
for i := 0; i < 100; i++ {
s.Update(20)
}
v := s.Values()
avg := float64(0)
for i := 0; i < len(v); i++ {
avg += float64(v[i])
}
avg /= float64(len(v))
if avg > 16 || avg < 14 {
t.Errorf("out of range [14, 16]: %v\n", avg)
}
}
func TestExpDecaySampleRescale(t *testing.T) {
s := NewExpDecaySample(2, 0.001).(*ExpDecaySample)
s.update(time.Now(), 1)
s.update(time.Now().Add(time.Hour+time.Microsecond), 1)
for _, v := range s.values.Values() {
if v.k == 0.0 {
t.Fatal("v.k == 0.0")
}
}
}
func TestExpDecaySampleSnapshot(t *testing.T) {
now := time.Now()
s := NewExpDecaySample(100, 0.99).(*ExpDecaySample).SetRand(rand.New(rand.NewSource(1)))
for i := 1; i <= 10000; i++ {
s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
}
snapshot := s.Snapshot()
s.Update(1)
testExpDecaySampleStatistics(t, snapshot)
}
func TestExpDecaySampleStatistics(t *testing.T) {
now := time.Now()
s := NewExpDecaySample(100, 0.99).(*ExpDecaySample).SetRand(rand.New(rand.NewSource(1)))
for i := 1; i <= 10000; i++ {
s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
}
testExpDecaySampleStatistics(t, s)
}
func TestUniformSample(t *testing.T) {
s := NewUniformSample(100)
for i := 0; i < 1000; i++ {
s.Update(int64(i))
}
if size := s.Count(); size != 1000 {
t.Errorf("s.Count(): 1000 != %v\n", size)
}
if size := s.Size(); size != 100 {
t.Errorf("s.Size(): 100 != %v\n", size)
}
if l := len(s.Values()); l != 100 {
t.Errorf("len(s.Values()): 100 != %v\n", l)
}
for _, v := range s.Values() {
if v > 1000 || v < 0 {
t.Errorf("out of range [0, 100): %v\n", v)
}
}
}
func TestUniformSampleIncludesTail(t *testing.T) {
s := NewUniformSample(100)
max := 100
for i := 0; i < max; i++ {
s.Update(int64(i))
}
v := s.Values()
sum := 0
exp := (max - 1) * max / 2
for i := 0; i < len(v); i++ {
sum += int(v[i])
}
if exp != sum {
t.Errorf("sum: %v != %v\n", exp, sum)
}
}
func TestUniformSampleSnapshot(t *testing.T) {
s := NewUniformSample(100).(*UniformSample).SetRand(rand.New(rand.NewSource(1)))
for i := 1; i <= 10000; i++ {
s.Update(int64(i))
}
snapshot := s.Snapshot()
s.Update(1)
testUniformSampleStatistics(t, snapshot)
}
func TestUniformSampleStatistics(t *testing.T) {
s := NewUniformSample(100).(*UniformSample).SetRand(rand.New(rand.NewSource(1)))
for i := 1; i <= 10000; i++ {
s.Update(int64(i))
}
testUniformSampleStatistics(t, s)
}
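// benchmarkSample measures the update throughput of a sample implementation
// and logs the garbage collection pause cost incurred per operation.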
func benchmarkSample(b *testing.B, s Sample) {
var memStats runtime.MemStats
runtime.ReadMemStats(&memStats)
pauseTotalNs := memStats.PauseTotalNs
b.ResetTimer()
for i := 0; i < b.N; i++ {
s.Update(1)
}
b.StopTimer()
runtime.GC()
runtime.ReadMemStats(&memStats)
b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N)
}
func testExpDecaySampleStatistics(t *testing.T, s Sample) {
if count := s.Count(); count != 10000 {
t.Errorf("s.Count(): 10000 != %v\n", count)
}
if min := s.Min(); min != 107 {
t.Errorf("s.Min(): 107 != %v\n", min)
}
if max := s.Max(); max != 10000 {
t.Errorf("s.Max(): 10000 != %v\n", max)
}
if mean := s.Mean(); mean != 4965.98 {
t.Errorf("s.Mean(): 4965.98 != %v\n", mean)
}
if stdDev := s.StdDev(); stdDev != 2959.825156930727 {
t.Errorf("s.StdDev(): 2959.825156930727 != %v\n", stdDev)
}
ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
if ps[0] != 4615 {
t.Errorf("median: 4615 != %v\n", ps[0])
}
if ps[1] != 7672 {
t.Errorf("75th percentile: 7672 != %v\n", ps[1])
}
if ps[2] != 9998.99 {
t.Errorf("99th percentile: 9998.99 != %v\n", ps[2])
}
}
func testUniformSampleStatistics(t *testing.T, s Sample) {
if count := s.Count(); count != 10000 {
t.Errorf("s.Count(): 10000 != %v\n", count)
}
if min := s.Min(); min != 37 {
t.Errorf("s.Min(): 37 != %v\n", min)
}
if max := s.Max(); max != 9989 {
t.Errorf("s.Max(): 9989 != %v\n", max)
}
if mean := s.Mean(); mean != 4748.14 {
t.Errorf("s.Mean(): 4748.14 != %v\n", mean)
}
if stdDev := s.StdDev(); stdDev != 2826.684117548333 {
t.Errorf("s.StdDev(): 2826.684117548333 != %v\n", stdDev)
}
ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
if ps[0] != 4599 {
t.Errorf("median: 4599 != %v\n", ps[0])
}
if ps[1] != 7380.5 {
t.Errorf("75th percentile: 7380.5 != %v\n", ps[1])
}
if math.Abs(9986.429999999998-ps[2]) > epsilonPercentile {
t.Errorf("99th percentile: 9986.429999999998 != %v\n", ps[2])
}
}
// TestUniformSampleConcurrentUpdateCount would expose data race problems with
// concurrent Update and Count calls on Sample when test is called with -race
// argument
func TestUniformSampleConcurrentUpdateCount(t *testing.T) {
if testing.Short() {
t.Skip("skipping in short mode")
}
s := NewUniformSample(100)
for i := 0; i < 100; i++ {
s.Update(int64(i))
}
quit := make(chan struct{})
go func() {
		ticker := time.NewTicker(10 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				s.Update(rand.Int63())
			case <-quit:
				return
			}
		}
}()
for i := 0; i < 1000; i++ {
s.Count()
time.Sleep(5 * time.Millisecond)
}
quit <- struct{}{}
}
| metrics/sample_test.go | 0 | https://github.com/ethereum/go-ethereum/commit/37ecff0967bec978e0723f4861803943bd6d0e17 | [
0.0003014319809153676,
0.00018207007087767124,
0.00016697759565431625,
0.00017594988457858562,
0.000024583883714512922
] |
{
"id": 3,
"code_window": [
"\t\tchainConfig = gen.Config\n",
"\t} else {\n",
"\t\tstatedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)\n",
"\t\tgenesisConfig = new(core.Genesis)\n",
"\t}\n",
"\tif ctx.String(SenderFlag.Name) != \"\" {\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tsdb := state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &trie.Config{Preimages: preimages})\n",
"\t\tstatedb, _ = state.New(common.Hash{}, sdb, nil)\n"
],
"file_path": "cmd/evm/runner.go",
"type": "replace",
"edit_start_line_idx": 144
} | // Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package gasprice
import (
"context"
"math/big"
"sort"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/lru"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
)
const sampleNumber = 3 // Number of transactions sampled in a block
var (
DefaultMaxPrice = big.NewInt(500 * params.GWei)
DefaultIgnorePrice = big.NewInt(2 * params.Wei)
)
type Config struct {
Blocks int
Percentile int
MaxHeaderHistory int
MaxBlockHistory int
Default *big.Int `toml:",omitempty"`
MaxPrice *big.Int `toml:",omitempty"`
IgnorePrice *big.Int `toml:",omitempty"`
}
// OracleBackend includes all necessary background APIs for oracle.
type OracleBackend interface {
HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error)
BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error)
GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error)
PendingBlockAndReceipts() (*types.Block, types.Receipts)
ChainConfig() *params.ChainConfig
SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription
}
// Oracle recommends gas prices based on the content of recent
// blocks. Suitable for both light and full clients.
type Oracle struct {
backend OracleBackend
lastHead common.Hash
lastPrice *big.Int
maxPrice *big.Int
ignorePrice *big.Int
cacheLock sync.RWMutex
fetchLock sync.Mutex
checkBlocks, percentile int
maxHeaderHistory, maxBlockHistory int
historyCache *lru.Cache[cacheKey, processedFees]
}
// NewOracle returns a new gasprice oracle which can recommend suitable
// gas prices for newly created transactions.
func NewOracle(backend OracleBackend, params Config) *Oracle {
blocks := params.Blocks
if blocks < 1 {
blocks = 1
log.Warn("Sanitizing invalid gasprice oracle sample blocks", "provided", params.Blocks, "updated", blocks)
}
percent := params.Percentile
if percent < 0 {
percent = 0
log.Warn("Sanitizing invalid gasprice oracle sample percentile", "provided", params.Percentile, "updated", percent)
} else if percent > 100 {
percent = 100
log.Warn("Sanitizing invalid gasprice oracle sample percentile", "provided", params.Percentile, "updated", percent)
}
maxPrice := params.MaxPrice
if maxPrice == nil || maxPrice.Int64() <= 0 {
maxPrice = DefaultMaxPrice
log.Warn("Sanitizing invalid gasprice oracle price cap", "provided", params.MaxPrice, "updated", maxPrice)
}
ignorePrice := params.IgnorePrice
if ignorePrice == nil || ignorePrice.Int64() <= 0 {
ignorePrice = DefaultIgnorePrice
log.Warn("Sanitizing invalid gasprice oracle ignore price", "provided", params.IgnorePrice, "updated", ignorePrice)
} else if ignorePrice.Int64() > 0 {
log.Info("Gasprice oracle is ignoring threshold set", "threshold", ignorePrice)
}
maxHeaderHistory := params.MaxHeaderHistory
if maxHeaderHistory < 1 {
maxHeaderHistory = 1
log.Warn("Sanitizing invalid gasprice oracle max header history", "provided", params.MaxHeaderHistory, "updated", maxHeaderHistory)
}
maxBlockHistory := params.MaxBlockHistory
if maxBlockHistory < 1 {
maxBlockHistory = 1
log.Warn("Sanitizing invalid gasprice oracle max block history", "provided", params.MaxBlockHistory, "updated", maxBlockHistory)
}
cache := lru.NewCache[cacheKey, processedFees](2048)
headEvent := make(chan core.ChainHeadEvent, 1)
backend.SubscribeChainHeadEvent(headEvent)
go func() {
var lastHead common.Hash
for ev := range headEvent {
if ev.Block.ParentHash() != lastHead {
cache.Purge()
}
lastHead = ev.Block.Hash()
}
}()
return &Oracle{
backend: backend,
lastPrice: params.Default,
maxPrice: maxPrice,
ignorePrice: ignorePrice,
checkBlocks: blocks,
percentile: percent,
maxHeaderHistory: maxHeaderHistory,
maxBlockHistory: maxBlockHistory,
historyCache: cache,
}
}
// SuggestTipCap returns a tip cap so that newly created transaction can have a
// very high chance to be included in the following blocks.
//
// Note, for legacy transactions and the legacy eth_gasPrice RPC call, it will be
// necessary to add the basefee to the returned number to fall back to the legacy
// behavior.
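//
// A rough sketch of that legacy fallback, assuming head is the current block
// header and its BaseFee is non-nil:
//
//	tip, _ := oracle.SuggestTipCap(ctx)
//	gasPrice := new(big.Int).Add(tip, head.BaseFee)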
func (oracle *Oracle) SuggestTipCap(ctx context.Context) (*big.Int, error) {
head, _ := oracle.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber)
headHash := head.Hash()
// If the latest gasprice is still available, return it.
oracle.cacheLock.RLock()
lastHead, lastPrice := oracle.lastHead, oracle.lastPrice
oracle.cacheLock.RUnlock()
if headHash == lastHead {
return new(big.Int).Set(lastPrice), nil
}
oracle.fetchLock.Lock()
defer oracle.fetchLock.Unlock()
// Try checking the cache again, maybe the last fetch fetched what we need
oracle.cacheLock.RLock()
lastHead, lastPrice = oracle.lastHead, oracle.lastPrice
oracle.cacheLock.RUnlock()
if headHash == lastHead {
return new(big.Int).Set(lastPrice), nil
}
var (
sent, exp int
number = head.Number.Uint64()
result = make(chan results, oracle.checkBlocks)
quit = make(chan struct{})
results []*big.Int
)
for sent < oracle.checkBlocks && number > 0 {
go oracle.getBlockValues(ctx, types.MakeSigner(oracle.backend.ChainConfig(), big.NewInt(int64(number))), number, sampleNumber, oracle.ignorePrice, result, quit)
sent++
exp++
number--
}
for exp > 0 {
res := <-result
if res.err != nil {
close(quit)
return new(big.Int).Set(lastPrice), res.err
}
exp--
// Nothing returned. There are two special cases here:
// - The block is empty
// - All the transactions included are sent by the miner itself.
// In these cases, use the latest calculated price for sampling.
if len(res.values) == 0 {
res.values = []*big.Int{lastPrice}
}
		// Besides, in order to collect enough data for sampling, if nothing
		// meaningful was returned, try to query more blocks, up to a maximum
		// of 2*checkBlocks.
if len(res.values) == 1 && len(results)+1+exp < oracle.checkBlocks*2 && number > 0 {
go oracle.getBlockValues(ctx, types.MakeSigner(oracle.backend.ChainConfig(), big.NewInt(int64(number))), number, sampleNumber, oracle.ignorePrice, result, quit)
sent++
exp++
number--
}
results = append(results, res.values...)
}
price := lastPrice
if len(results) > 0 {
sort.Sort(bigIntArray(results))
price = results[(len(results)-1)*oracle.percentile/100]
}
if price.Cmp(oracle.maxPrice) > 0 {
price = new(big.Int).Set(oracle.maxPrice)
}
oracle.cacheLock.Lock()
oracle.lastHead = headHash
oracle.lastPrice = price
oracle.cacheLock.Unlock()
return new(big.Int).Set(price), nil
}
type results struct {
values []*big.Int
err error
}
type txSorter struct {
txs []*types.Transaction
baseFee *big.Int
}
func newSorter(txs []*types.Transaction, baseFee *big.Int) *txSorter {
return &txSorter{
txs: txs,
baseFee: baseFee,
}
}
func (s *txSorter) Len() int { return len(s.txs) }
func (s *txSorter) Swap(i, j int) {
s.txs[i], s.txs[j] = s.txs[j], s.txs[i]
}
func (s *txSorter) Less(i, j int) bool {
// It's okay to discard the error because a tx would never be
// accepted into a block with an invalid effective tip.
tip1, _ := s.txs[i].EffectiveGasTip(s.baseFee)
tip2, _ := s.txs[j].EffectiveGasTip(s.baseFee)
return tip1.Cmp(tip2) < 0
}
// getBlockValues calculates the lowest transaction gas prices in a given block
// and sends them to the result channel. If the block is empty or all transactions
// are sent by the miner itself (it makes no sense to include such transaction
// prices in the sample), a nil price list is returned.
func (oracle *Oracle) getBlockValues(ctx context.Context, signer types.Signer, blockNum uint64, limit int, ignoreUnder *big.Int, result chan results, quit chan struct{}) {
block, err := oracle.backend.BlockByNumber(ctx, rpc.BlockNumber(blockNum))
if block == nil {
select {
case result <- results{nil, err}:
case <-quit:
}
return
}
	// Sort the transactions by effective tip in ascending order.
txs := make([]*types.Transaction, len(block.Transactions()))
copy(txs, block.Transactions())
sorter := newSorter(txs, block.BaseFee())
sort.Sort(sorter)
var prices []*big.Int
for _, tx := range sorter.txs {
tip, _ := tx.EffectiveGasTip(block.BaseFee())
if ignoreUnder != nil && tip.Cmp(ignoreUnder) == -1 {
continue
}
sender, err := types.Sender(signer, tx)
if err == nil && sender != block.Coinbase() {
prices = append(prices, tip)
if len(prices) >= limit {
break
}
}
}
select {
case result <- results{prices, nil}:
case <-quit:
}
}
type bigIntArray []*big.Int
func (s bigIntArray) Len() int { return len(s) }
func (s bigIntArray) Less(i, j int) bool { return s[i].Cmp(s[j]) < 0 }
func (s bigIntArray) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
| eth/gasprice/gasprice.go | 0 | https://github.com/ethereum/go-ethereum/commit/37ecff0967bec978e0723f4861803943bd6d0e17 | [
0.004848418291658163,
0.000534564140252769,
0.0001629906182643026,
0.00017376113100908697,
0.0009785254951566458
] |
{
"id": 3,
"code_window": [
"\t\tchainConfig = gen.Config\n",
"\t} else {\n",
"\t\tstatedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)\n",
"\t\tgenesisConfig = new(core.Genesis)\n",
"\t}\n",
"\tif ctx.String(SenderFlag.Name) != \"\" {\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tsdb := state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &trie.Config{Preimages: preimages})\n",
"\t\tstatedb, _ = state.New(common.Hash{}, sdb, nil)\n"
],
"file_path": "cmd/evm/runner.go",
"type": "replace",
"edit_start_line_idx": 144
} | {
"genesis": {
"difficulty": "8430028481555",
"extraData": "0xd783010302844765746887676f312e352e31856c696e7578",
"gasLimit": "3141592",
"hash": "0xde66937783697293f2e529d2034887c531535d78afa8c9051511ae12ba48fbea",
"miner": "0x2a65aca4d5fc5b5c859090a6c34d164135398226",
"mixHash": "0xba28a43bfbca4a2effbb76bb70d03482a8a0c92e2883ff36cbac3d7c6dbb7df5",
"nonce": "0xa3827ec0a82fe823",
"number": "765824",
"stateRoot": "0x8d96cb027a29f8ca0ccd6d31f9ea0656136ec8030ecda70bb9231849ed6f41a2",
"timestamp": "1451389443",
"totalDifficulty": "4838314986494741271",
"alloc": {
"0xd1220a0cf47c7b9be7a2e6ba89f429762e7b9adb": {
"balance": "0x14203bee2ea6fbe8c",
"nonce": "34"
},
"0xe2fe6b13287f28e193333fdfe7fedf2f6df6124a": {
"balance": "0x2717a9c870a286f4350"
},
"0xf4eced2f682ce333f96f2d8966c613ded8fc95dd": {
"balance": "0x0",
"code": "0x606060405260e060020a600035046306fdde038114610047578063313ce567146100a457806370a08231146100b057806395d89b41146100c8578063a9059cbb14610123575b005b61015260008054602060026001831615610100026000190190921691909104601f810182900490910260809081016040526060828152929190828280156101f55780601f106101ca576101008083540402835291602001916101f5565b6101c060025460ff1681565b6101c060043560036020526000908152604090205481565b610152600180546020601f6002600019610100858716150201909316929092049182018190040260809081016040526060828152929190828280156101f55780601f106101ca576101008083540402835291602001916101f5565b610045600435602435600160a060020a033316600090815260036020526040902054819010156101fd57610002565b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156101b25780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b6060908152602090f35b820191906000526020600020905b8154815290600101906020018083116101d857829003601f168201915b505050505081565b600160a060020a03821660009081526040902054808201101561021f57610002565b806003600050600033600160a060020a03168152602001908152602001600020600082828250540392505081905550806003600050600084600160a060020a0316815260200190815260200160002060008282825054019250508190555081600160a060020a031633600160a060020a03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a3505056",
"storage": {
"0x1dae8253445d3a5edbe8200da9fc39bc4f11db9362181dc1b640d08c3c2fb4d6": "0x0000000000000000000000000000000000000000000000000000000000000000",
"0x8ba52aac7f255d80a49abcf003d6af4752aba5a9531cae94fde7ac8d72191d67": "0x000000000000000000000000000000000000000000000000000000000178e460"
}
}
},
"config": {
"chainId": 1,
"homesteadBlock": 1150000,
"daoForkBlock": 1920000,
"daoForkSupport": true,
"eip150Block": 2463000,
"eip150Hash": "0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0",
"eip155Block": 2675000,
"eip158Block": 2675000,
"byzantiumBlock": 4370000,
"constantinopleBlock": 7280000,
"petersburgBlock": 7280000,
"istanbulBlock": 9069000,
"muirGlacierBlock": 9200000,
"berlinBlock": 12244000,
"londonBlock": 12965000,
"arrowGlacierBlock": 13773000,
"grayGlacierBlock": 15050000,
"terminalTotalDifficultyPassed": true,
"ethash": {}
}
},
"context": {
"number": "765825",
"difficulty": "8425912256743",
"timestamp": "1451389488",
"gasLimit": "3141592",
"miner": "0xe2fe6b13287f28e193333fdfe7fedf2f6df6124a"
},
"input": "0xf8aa22850ba43b740083024d4594f4eced2f682ce333f96f2d8966c613ded8fc95dd80b844a9059cbb000000000000000000000000dbf03b407c01e7cd3cbea99509d93f8dddc8c6fb00000000000000000000000000000000000000000000000000000000009896801ca067da548a2e0f381a957b9b51f086073375d6bfc7312cbc9540b3647ccab7db11a042c6e5b34bc7ba821e9c25b166fa13d82ad4b0d044d16174d5587d4f04ecfcd1",
"tracerConfig": {
"withLog": true
},
"result": {
"from": "0xd1220a0cf47c7b9be7a2e6ba89f429762e7b9adb",
"gas": "0x1f36d",
"gasUsed": "0xc6a5",
"to": "0xf4eced2f682ce333f96f2d8966c613ded8fc95dd",
"input": "0xa9059cbb000000000000000000000000dbf03b407c01e7cd3cbea99509d93f8dddc8c6fb0000000000000000000000000000000000000000000000000000000000989680",
"logs": [
{
"address": "0xf4eced2f682ce333f96f2d8966c613ded8fc95dd",
"topics": [
"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
"0x000000000000000000000000d1220a0cf47c7b9be7a2e6ba89f429762e7b9adb",
"0x000000000000000000000000dbf03b407c01e7cd3cbea99509d93f8dddc8c6fb"
],
"data": "0x0000000000000000000000000000000000000000000000000000000000989680"
}
],
"value": "0x0",
"type": "CALL"
}
}
| eth/tracers/internal/tracetest/testdata/call_tracer_withLog/simple.json | 0 | https://github.com/ethereum/go-ethereum/commit/37ecff0967bec978e0723f4861803943bd6d0e17 | [
0.019061662256717682,
0.0022747390903532505,
0.00016337931447196752,
0.00016781587328296155,
0.005935113877058029
] |
{
"id": 4,
"code_window": [
"\t\"github.com/ethereum/go-ethereum/core/vm\"\n",
"\t\"github.com/ethereum/go-ethereum/crypto\"\n",
"\t\"github.com/ethereum/go-ethereum/ethdb\"\n",
"\t\"github.com/ethereum/go-ethereum/params\"\n",
"\t\"github.com/ethereum/go-ethereum/rlp\"\n",
"\t\"golang.org/x/crypto/sha3\"\n",
")\n",
"\n",
"// StateTest checks transaction processing without block context.\n",
"// See https://github.com/ethereum/EIPs/issues/176 for the test format specification.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/ethereum/go-ethereum/trie\"\n"
],
"file_path": "tests/state_test_util.go",
"type": "add",
"edit_start_line_idx": 39
} | // Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"math/big"
"os"
goruntime "runtime"
"runtime/pprof"
"testing"
"time"
"github.com/ethereum/go-ethereum/cmd/evm/internal/compiler"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/core/vm/runtime"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/urfave/cli/v2"
)
var runCommand = &cli.Command{
Action: runCmd,
Name: "run",
Usage: "run arbitrary evm binary",
ArgsUsage: "<code>",
Description: `The run command runs arbitrary EVM code.`,
}
// readGenesis will read the given JSON format genesis file and return
// the initialized Genesis structure
func readGenesis(genesisPath string) *core.Genesis {
// Make sure we have a valid genesis JSON
//genesisPath := ctx.Args().First()
if len(genesisPath) == 0 {
utils.Fatalf("Must supply path to genesis JSON file")
}
file, err := os.Open(genesisPath)
if err != nil {
utils.Fatalf("Failed to read genesis file: %v", err)
}
defer file.Close()
genesis := new(core.Genesis)
if err := json.NewDecoder(file).Decode(genesis); err != nil {
utils.Fatalf("invalid genesis file: %v", err)
}
return genesis
}
type execStats struct {
time time.Duration // The execution time.
allocs int64 // The number of heap allocations during execution.
bytesAllocated int64 // The cumulative number of bytes allocated during execution.
}
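// timedExec runs execFunc, under testing.Benchmark when bench is set, and
// returns the execution output, the gas left over and basic execution stats.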
func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) (output []byte, gasLeft uint64, stats execStats, err error) {
if bench {
result := testing.Benchmark(func(b *testing.B) {
for i := 0; i < b.N; i++ {
output, gasLeft, err = execFunc()
}
})
// Get the average execution time from the benchmarking result.
// There are other useful stats here that could be reported.
stats.time = time.Duration(result.NsPerOp())
stats.allocs = result.AllocsPerOp()
stats.bytesAllocated = result.AllocedBytesPerOp()
} else {
var memStatsBefore, memStatsAfter goruntime.MemStats
goruntime.ReadMemStats(&memStatsBefore)
startTime := time.Now()
output, gasLeft, err = execFunc()
stats.time = time.Since(startTime)
goruntime.ReadMemStats(&memStatsAfter)
stats.allocs = int64(memStatsAfter.Mallocs - memStatsBefore.Mallocs)
stats.bytesAllocated = int64(memStatsAfter.TotalAlloc - memStatsBefore.TotalAlloc)
}
return output, gasLeft, stats, err
}
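// runCmd assembles a runtime.Config from the CLI flags, loads the code to run
// (from state, --code/--codefile or an EASM source file) and executes it,
// optionally tracing, benchmarking and profiling the run.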
func runCmd(ctx *cli.Context) error {
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
log.Root().SetHandler(glogger)
logconfig := &logger.Config{
EnableMemory: !ctx.Bool(DisableMemoryFlag.Name),
DisableStack: ctx.Bool(DisableStackFlag.Name),
DisableStorage: ctx.Bool(DisableStorageFlag.Name),
EnableReturnData: !ctx.Bool(DisableReturnDataFlag.Name),
Debug: ctx.Bool(DebugFlag.Name),
}
var (
tracer vm.EVMLogger
debugLogger *logger.StructLogger
statedb *state.StateDB
chainConfig *params.ChainConfig
sender = common.BytesToAddress([]byte("sender"))
receiver = common.BytesToAddress([]byte("receiver"))
genesisConfig *core.Genesis
)
if ctx.Bool(MachineFlag.Name) {
tracer = logger.NewJSONLogger(logconfig, os.Stdout)
} else if ctx.Bool(DebugFlag.Name) {
debugLogger = logger.NewStructLogger(logconfig)
tracer = debugLogger
} else {
debugLogger = logger.NewStructLogger(logconfig)
}
if ctx.String(GenesisFlag.Name) != "" {
gen := readGenesis(ctx.String(GenesisFlag.Name))
genesisConfig = gen
db := rawdb.NewMemoryDatabase()
genesis := gen.MustCommit(db)
statedb, _ = state.New(genesis.Root(), state.NewDatabase(db), nil)
chainConfig = gen.Config
} else {
statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
genesisConfig = new(core.Genesis)
}
if ctx.String(SenderFlag.Name) != "" {
sender = common.HexToAddress(ctx.String(SenderFlag.Name))
}
statedb.CreateAccount(sender)
if ctx.String(ReceiverFlag.Name) != "" {
receiver = common.HexToAddress(ctx.String(ReceiverFlag.Name))
}
var code []byte
codeFileFlag := ctx.String(CodeFileFlag.Name)
codeFlag := ctx.String(CodeFlag.Name)
// The '--code' or '--codefile' flag overrides code in state
if codeFileFlag != "" || codeFlag != "" {
var hexcode []byte
if codeFileFlag != "" {
var err error
// If - is specified, it means that code comes from stdin
if codeFileFlag == "-" {
				// Try reading from stdin
if hexcode, err = io.ReadAll(os.Stdin); err != nil {
fmt.Printf("Could not load code from stdin: %v\n", err)
os.Exit(1)
}
} else {
// Codefile with hex assembly
if hexcode, err = os.ReadFile(codeFileFlag); err != nil {
fmt.Printf("Could not load code from file: %v\n", err)
os.Exit(1)
}
}
} else {
hexcode = []byte(codeFlag)
}
hexcode = bytes.TrimSpace(hexcode)
if len(hexcode)%2 != 0 {
fmt.Printf("Invalid input length for hex data (%d)\n", len(hexcode))
os.Exit(1)
}
code = common.FromHex(string(hexcode))
} else if fn := ctx.Args().First(); len(fn) > 0 {
// EASM-file to compile
src, err := os.ReadFile(fn)
if err != nil {
return err
}
bin, err := compiler.Compile(fn, src, false)
if err != nil {
return err
}
code = common.Hex2Bytes(bin)
}
initialGas := ctx.Uint64(GasFlag.Name)
if genesisConfig.GasLimit != 0 {
initialGas = genesisConfig.GasLimit
}
runtimeConfig := runtime.Config{
Origin: sender,
State: statedb,
GasLimit: initialGas,
GasPrice: flags.GlobalBig(ctx, PriceFlag.Name),
Value: flags.GlobalBig(ctx, ValueFlag.Name),
Difficulty: genesisConfig.Difficulty,
Time: genesisConfig.Timestamp,
Coinbase: genesisConfig.Coinbase,
BlockNumber: new(big.Int).SetUint64(genesisConfig.Number),
EVMConfig: vm.Config{
Tracer: tracer,
Debug: ctx.Bool(DebugFlag.Name) || ctx.Bool(MachineFlag.Name),
},
}
if cpuProfilePath := ctx.String(CPUProfileFlag.Name); cpuProfilePath != "" {
f, err := os.Create(cpuProfilePath)
if err != nil {
fmt.Println("could not create CPU profile: ", err)
os.Exit(1)
}
if err := pprof.StartCPUProfile(f); err != nil {
fmt.Println("could not start CPU profile: ", err)
os.Exit(1)
}
defer pprof.StopCPUProfile()
}
if chainConfig != nil {
runtimeConfig.ChainConfig = chainConfig
} else {
runtimeConfig.ChainConfig = params.AllEthashProtocolChanges
}
var hexInput []byte
if inputFileFlag := ctx.String(InputFileFlag.Name); inputFileFlag != "" {
var err error
if hexInput, err = os.ReadFile(inputFileFlag); err != nil {
fmt.Printf("could not load input from file: %v\n", err)
os.Exit(1)
}
} else {
hexInput = []byte(ctx.String(InputFlag.Name))
}
hexInput = bytes.TrimSpace(hexInput)
if len(hexInput)%2 != 0 {
fmt.Println("input length must be even")
os.Exit(1)
}
input := common.FromHex(string(hexInput))
var execFunc func() ([]byte, uint64, error)
if ctx.Bool(CreateFlag.Name) {
input = append(code, input...)
execFunc = func() ([]byte, uint64, error) {
output, _, gasLeft, err := runtime.Create(input, &runtimeConfig)
return output, gasLeft, err
}
} else {
if len(code) > 0 {
statedb.SetCode(receiver, code)
}
execFunc = func() ([]byte, uint64, error) {
return runtime.Call(receiver, input, &runtimeConfig)
}
}
bench := ctx.Bool(BenchFlag.Name)
output, leftOverGas, stats, err := timedExec(bench, execFunc)
if ctx.Bool(DumpFlag.Name) {
statedb.Commit(true)
statedb.IntermediateRoot(true)
fmt.Println(string(statedb.Dump(nil)))
}
if memProfilePath := ctx.String(MemProfileFlag.Name); memProfilePath != "" {
f, err := os.Create(memProfilePath)
if err != nil {
fmt.Println("could not create memory profile: ", err)
os.Exit(1)
}
if err := pprof.WriteHeapProfile(f); err != nil {
fmt.Println("could not write memory profile: ", err)
os.Exit(1)
}
f.Close()
}
if ctx.Bool(DebugFlag.Name) {
if debugLogger != nil {
fmt.Fprintln(os.Stderr, "#### TRACE ####")
logger.WriteTrace(os.Stderr, debugLogger.StructLogs())
}
fmt.Fprintln(os.Stderr, "#### LOGS ####")
logger.WriteLogs(os.Stderr, statedb.Logs())
}
if bench || ctx.Bool(StatDumpFlag.Name) {
fmt.Fprintf(os.Stderr, `EVM gas used: %d
execution time: %v
allocations: %d
allocated bytes: %d
`, initialGas-leftOverGas, stats.time, stats.allocs, stats.bytesAllocated)
}
if tracer == nil {
fmt.Printf("%#x\n", output)
if err != nil {
fmt.Printf(" error: %v\n", err)
}
}
return nil
}
| cmd/evm/runner.go | 1 | https://github.com/ethereum/go-ethereum/commit/37ecff0967bec978e0723f4861803943bd6d0e17 | [
0.011777170933783054,
0.0006731012836098671,
0.00016199189121834934,
0.0001709037460386753,
0.0021407802123576403
] |
{
"id": 4,
"code_window": [
"\t\"github.com/ethereum/go-ethereum/core/vm\"\n",
"\t\"github.com/ethereum/go-ethereum/crypto\"\n",
"\t\"github.com/ethereum/go-ethereum/ethdb\"\n",
"\t\"github.com/ethereum/go-ethereum/params\"\n",
"\t\"github.com/ethereum/go-ethereum/rlp\"\n",
"\t\"golang.org/x/crypto/sha3\"\n",
")\n",
"\n",
"// StateTest checks transaction processing without block context.\n",
"// See https://github.com/ethereum/EIPs/issues/176 for the test format specification.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/ethereum/go-ethereum/trie\"\n"
],
"file_path": "tests/state_test_util.go",
"type": "add",
"edit_start_line_idx": 39
} | // Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package common
import (
"fmt"
)
// StorageSize is a wrapper around a float value that supports user friendly
// formatting.
type StorageSize float64
// String implements the stringer interface.
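// For example, StorageSize(2048).String() renders as "2.00 KiB".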
func (s StorageSize) String() string {
if s > 1099511627776 {
return fmt.Sprintf("%.2f TiB", s/1099511627776)
} else if s > 1073741824 {
return fmt.Sprintf("%.2f GiB", s/1073741824)
} else if s > 1048576 {
return fmt.Sprintf("%.2f MiB", s/1048576)
} else if s > 1024 {
return fmt.Sprintf("%.2f KiB", s/1024)
} else {
return fmt.Sprintf("%.2f B", s)
}
}
// TerminalString implements log.TerminalStringer, formatting a string for console
// output during logging.
func (s StorageSize) TerminalString() string {
if s > 1099511627776 {
return fmt.Sprintf("%.2fTiB", s/1099511627776)
} else if s > 1073741824 {
return fmt.Sprintf("%.2fGiB", s/1073741824)
} else if s > 1048576 {
return fmt.Sprintf("%.2fMiB", s/1048576)
} else if s > 1024 {
return fmt.Sprintf("%.2fKiB", s/1024)
} else {
return fmt.Sprintf("%.2fB", s)
}
}
| common/size.go | 0 | https://github.com/ethereum/go-ethereum/commit/37ecff0967bec978e0723f4861803943bd6d0e17 | [
0.0001741041778586805,
0.00016626459546387196,
0.00016115716425701976,
0.00016361444431822747,
0.000005171134034753777
] |
{
"id": 4,
"code_window": [
"\t\"github.com/ethereum/go-ethereum/core/vm\"\n",
"\t\"github.com/ethereum/go-ethereum/crypto\"\n",
"\t\"github.com/ethereum/go-ethereum/ethdb\"\n",
"\t\"github.com/ethereum/go-ethereum/params\"\n",
"\t\"github.com/ethereum/go-ethereum/rlp\"\n",
"\t\"golang.org/x/crypto/sha3\"\n",
")\n",
"\n",
"// StateTest checks transaction processing without block context.\n",
"// See https://github.com/ethereum/EIPs/issues/176 for the test format specification.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/ethereum/go-ethereum/trie\"\n"
],
"file_path": "tests/state_test_util.go",
"type": "add",
"edit_start_line_idx": 39
} | // Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"fmt"
"os"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/urfave/cli/v2"
)
const (
defaultKeyfileName = "keyfile.json"
)
var app *cli.App
func init() {
app = flags.NewApp("Ethereum key manager")
app.Commands = []*cli.Command{
commandGenerate,
commandInspect,
commandChangePassphrase,
commandSignMessage,
commandVerifyMessage,
}
}
// Commonly used command line flags.
var (
passphraseFlag = &cli.StringFlag{
Name: "passwordfile",
Usage: "the file that contains the password for the keyfile",
}
jsonFlag = &cli.BoolFlag{
Name: "json",
Usage: "output JSON instead of human-readable format",
}
)
func main() {
if err := app.Run(os.Args); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
| cmd/ethkey/main.go | 0 | https://github.com/ethereum/go-ethereum/commit/37ecff0967bec978e0723f4861803943bd6d0e17 | [
0.001416990882717073,
0.0003479650185909122,
0.00016310893988702446,
0.00016785203479230404,
0.0004364771011751145
] |
{
"id": 4,
"code_window": [
"\t\"github.com/ethereum/go-ethereum/core/vm\"\n",
"\t\"github.com/ethereum/go-ethereum/crypto\"\n",
"\t\"github.com/ethereum/go-ethereum/ethdb\"\n",
"\t\"github.com/ethereum/go-ethereum/params\"\n",
"\t\"github.com/ethereum/go-ethereum/rlp\"\n",
"\t\"golang.org/x/crypto/sha3\"\n",
")\n",
"\n",
"// StateTest checks transaction processing without block context.\n",
"// See https://github.com/ethereum/EIPs/issues/176 for the test format specification.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/ethereum/go-ethereum/trie\"\n"
],
"file_path": "tests/state_test_util.go",
"type": "add",
"edit_start_line_idx": 39
} | // Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package forkid
import (
"bytes"
"math"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
)
// TestCreation tests that different genesis and fork rule combinations result in
// the correct fork ID.
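// Per EIP-2124, each ID hash is the CRC32 checksum of the genesis hash and
// the passed fork block numbers/timestamps.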
func TestCreation(t *testing.T) {
type testcase struct {
head uint64
time uint64
want ID
}
tests := []struct {
config *params.ChainConfig
genesis common.Hash
cases []testcase
}{
// Mainnet test cases
{
params.MainnetChainConfig,
params.MainnetGenesisHash,
[]testcase{
{0, 0, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}}, // Unsynced
{1149999, 0, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}}, // Last Frontier block
{1150000, 0, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}}, // First Homestead block
{1919999, 0, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}}, // Last Homestead block
{1920000, 0, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}}, // First DAO block
{2462999, 0, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}}, // Last DAO block
{2463000, 0, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}}, // First Tangerine block
{2674999, 0, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}}, // Last Tangerine block
{2675000, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}}, // First Spurious block
{4369999, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}}, // Last Spurious block
{4370000, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // First Byzantium block
{7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // Last Byzantium block
{7280000, 0, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // First and last Constantinople, first Petersburg block
{9068999, 0, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // Last Petersburg block
{9069000, 0, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // First Istanbul and first Muir Glacier block
{9199999, 0, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // Last Istanbul and first Muir Glacier block
{9200000, 0, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // First Muir Glacier block
{12243999, 0, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // Last Muir Glacier block
{12244000, 0, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // First Berlin block
{12964999, 0, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // Last Berlin block
{12965000, 0, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // First London block
{13772999, 0, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // Last London block
{13773000, 0, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}}, // First Arrow Glacier block
{15049999, 0, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}}, // Last Arrow Glacier block
{15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1681338455}}, // First Gray Glacier block
{20000000, 1681338454, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1681338455}}, // Last Gray Glacier block
{20000000, 1681338455, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}}, // First Shanghai block
{30000000, 2000000000, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}}, // Future Shanghai block
},
},
// Rinkeby test cases
{
params.RinkebyChainConfig,
params.RinkebyGenesisHash,
[]testcase{
{0, 0, ID{Hash: checksumToBytes(0x3b8e0691), Next: 1}}, // Unsynced, last Frontier block
{1, 0, ID{Hash: checksumToBytes(0x60949295), Next: 2}}, // First and last Homestead block
{2, 0, ID{Hash: checksumToBytes(0x8bde40dd), Next: 3}}, // First and last Tangerine block
{3, 0, ID{Hash: checksumToBytes(0xcb3a64bb), Next: 1035301}}, // First Spurious block
{1035300, 0, ID{Hash: checksumToBytes(0xcb3a64bb), Next: 1035301}}, // Last Spurious block
{1035301, 0, ID{Hash: checksumToBytes(0x8d748b57), Next: 3660663}}, // First Byzantium block
{3660662, 0, ID{Hash: checksumToBytes(0x8d748b57), Next: 3660663}}, // Last Byzantium block
{3660663, 0, ID{Hash: checksumToBytes(0xe49cab14), Next: 4321234}}, // First Constantinople block
{4321233, 0, ID{Hash: checksumToBytes(0xe49cab14), Next: 4321234}}, // Last Constantinople block
{4321234, 0, ID{Hash: checksumToBytes(0xafec6b27), Next: 5435345}}, // First Petersburg block
{5435344, 0, ID{Hash: checksumToBytes(0xafec6b27), Next: 5435345}}, // Last Petersburg block
{5435345, 0, ID{Hash: checksumToBytes(0xcbdb8838), Next: 8290928}}, // First Istanbul block
{8290927, 0, ID{Hash: checksumToBytes(0xcbdb8838), Next: 8290928}}, // Last Istanbul block
{8290928, 0, ID{Hash: checksumToBytes(0x6910c8bd), Next: 8897988}}, // First Berlin block
{8897987, 0, ID{Hash: checksumToBytes(0x6910c8bd), Next: 8897988}}, // Last Berlin block
{8897988, 0, ID{Hash: checksumToBytes(0x8E29F2F3), Next: 0}}, // First London block
{10000000, 0, ID{Hash: checksumToBytes(0x8E29F2F3), Next: 0}}, // Future London block
},
},
// Goerli test cases
{
params.GoerliChainConfig,
params.GoerliGenesisHash,
[]testcase{
{0, 0, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 1561651}}, // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople and first Petersburg block
{1561650, 0, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 1561651}}, // Last Petersburg block
{1561651, 0, ID{Hash: checksumToBytes(0xc25efa5c), Next: 4460644}}, // First Istanbul block
{4460643, 0, ID{Hash: checksumToBytes(0xc25efa5c), Next: 4460644}}, // Last Istanbul block
{4460644, 0, ID{Hash: checksumToBytes(0x757a1c47), Next: 5062605}}, // First Berlin block
{5000000, 0, ID{Hash: checksumToBytes(0x757a1c47), Next: 5062605}}, // Last Berlin block
{5062605, 0, ID{Hash: checksumToBytes(0xB8C6299D), Next: 1678832736}}, // First London block
{6000000, 1678832735, ID{Hash: checksumToBytes(0xB8C6299D), Next: 1678832736}}, // Last London block
{6000001, 1678832736, ID{Hash: checksumToBytes(0xf9843abf), Next: 0}}, // First Shanghai block
{6500000, 2678832736, ID{Hash: checksumToBytes(0xf9843abf), Next: 0}}, // Future Shanghai block
},
},
// Sepolia test cases
{
params.SepoliaChainConfig,
params.SepoliaGenesisHash,
[]testcase{
{0, 0, ID{Hash: checksumToBytes(0xfe3366e7), Next: 1735371}}, // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople, Petersburg, Istanbul, Berlin and first London block
{1735370, 0, ID{Hash: checksumToBytes(0xfe3366e7), Next: 1735371}}, // Last London block
{1735371, 0, ID{Hash: checksumToBytes(0xb96cbd13), Next: 1677557088}}, // First MergeNetsplit block
{1735372, 1677557087, ID{Hash: checksumToBytes(0xb96cbd13), Next: 1677557088}}, // Last MergeNetsplit block
{1735372, 1677557088, ID{Hash: checksumToBytes(0xf7f9bc08), Next: 0}}, // First Shanghai block
},
},
}
for i, tt := range tests {
for j, ttt := range tt.cases {
if have := NewID(tt.config, tt.genesis, ttt.head, ttt.time); have != ttt.want {
t.Errorf("test %d, case %d: fork ID mismatch: have %x, want %x", i, j, have, ttt.want)
}
}
}
}
// TestValidation tests that a local peer correctly validates and accepts a remote
// fork ID.
func TestValidation(t *testing.T) {
	// Config that does not have timestamp-based forks enabled
legacyConfig := *params.MainnetChainConfig
legacyConfig.ShanghaiTime = nil
tests := []struct {
config *params.ChainConfig
head uint64
time uint64
id ID
err error
}{
//------------------
// Block based tests
//------------------
// Local is mainnet Gray Glacier, remote announces the same. No future fork is announced.
{&legacyConfig, 15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}, nil},
// Local is mainnet Gray Glacier, remote announces the same. Remote also announces a next fork
// at block 0xffffffff, but that is uncertain.
{&legacyConfig, 15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: math.MaxUint64}, nil},
// Local is mainnet currently in Byzantium only (so it's aware of Petersburg), remote announces
// also Byzantium, but it's not yet aware of Petersburg (e.g. non updated node before the fork).
// In this case we don't know if Petersburg passed yet or not.
{&legacyConfig, 7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 0}, nil},
// Local is mainnet currently in Byzantium only (so it's aware of Petersburg), remote announces
// also Byzantium, and it's also aware of Petersburg (e.g. updated node before the fork). We
// don't know if Petersburg passed yet (will pass) or not.
{&legacyConfig, 7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil},
// Local is mainnet currently in Byzantium only (so it's aware of Petersburg), remote announces
// also Byzantium, and it's also aware of some random fork (e.g. misconfigured Petersburg). As
// neither forks passed at neither nodes, they may mismatch, but we still connect for now.
{&legacyConfig, 7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: math.MaxUint64}, nil},
// Local is mainnet exactly on Petersburg, remote announces Byzantium + knowledge about Petersburg. Remote
// is simply out of sync, accept.
{&legacyConfig, 7280000, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil},
// Local is mainnet Petersburg, remote announces Byzantium + knowledge about Petersburg. Remote
// is simply out of sync, accept.
{&legacyConfig, 7987396, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil},
// Local is mainnet Petersburg, remote announces Spurious + knowledge about Byzantium. Remote
// is definitely out of sync. It may or may not need the Petersburg update, we don't know yet.
{&legacyConfig, 7987396, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}, nil},
// Local is mainnet Byzantium, remote announces Petersburg. Local is out of sync, accept.
{&legacyConfig, 7279999, 0, ID{Hash: checksumToBytes(0x668db0af), Next: 0}, nil},
// Local is mainnet Spurious, remote announces Byzantium, but is not aware of Petersburg. Local
// out of sync. Local also knows about a future fork, but that is uncertain yet.
{&legacyConfig, 4369999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 0}, nil},
// Local is mainnet Petersburg. remote announces Byzantium but is not aware of further forks.
// Remote needs software update.
{&legacyConfig, 7987396, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 0}, ErrRemoteStale},
// Local is mainnet Petersburg, and isn't aware of more forks. Remote announces Petersburg +
// 0xffffffff. Local needs software update, reject.
{&legacyConfig, 7987396, 0, ID{Hash: checksumToBytes(0x5cddc0e1), Next: 0}, ErrLocalIncompatibleOrStale},
// Local is mainnet Byzantium, and is aware of Petersburg. Remote announces Petersburg +
// 0xffffffff. Local needs software update, reject.
{&legacyConfig, 7279999, 0, ID{Hash: checksumToBytes(0x5cddc0e1), Next: 0}, ErrLocalIncompatibleOrStale},
// Local is mainnet Petersburg, remote is Rinkeby Petersburg.
{&legacyConfig, 7987396, 0, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}, ErrLocalIncompatibleOrStale},
// Local is mainnet Gray Glacier, far in the future. Remote announces Gopherium (non existing fork)
// at some future block 88888888, for itself, but past block for local. Local is incompatible.
//
// This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
//
// TODO(karalabe): This testcase will fail once mainnet gets timestamped forks, make legacy chain config
{&legacyConfig, 88888888, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 88888888}, ErrLocalIncompatibleOrStale},
// Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non existing
// fork) at block 7279999, before Petersburg. Local is incompatible.
//
// TODO(karalabe): This testcase will fail once mainnet gets timestamped forks, make legacy chain config
{&legacyConfig, 7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7279999}, ErrLocalIncompatibleOrStale},
//------------------------------------
// Block to timestamp transition tests
//------------------------------------
// Local is mainnet currently in Gray Glacier only (so it's aware of Shanghai), remote announces
// also Gray Glacier, but it's not yet aware of Shanghai (e.g. non updated node before the fork).
// In this case we don't know if Shanghai passed yet or not.
{params.MainnetChainConfig, 15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}, nil},
// Local is mainnet currently in Gray Glacier only (so it's aware of Shanghai), remote announces
// also Gray Glacier, and it's also aware of Shanghai (e.g. updated node before the fork). We
// don't know if Shanghai passed yet (will pass) or not.
{params.MainnetChainConfig, 15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1681338455}, nil},
// Local is mainnet currently in Gray Glacier only (so it's aware of Shanghai), remote announces
// also Gray Glacier, and it's also aware of some random fork (e.g. misconfigured Shanghai). As
// neither forks passed at neither nodes, they may mismatch, but we still connect for now.
{params.MainnetChainConfig, 15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: math.MaxUint64}, nil},
// Local is mainnet exactly on Shanghai, remote announces Gray Glacier + knowledge about Shanghai. Remote
// is simply out of sync, accept.
{params.MainnetChainConfig, 20000000, 1681338455, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1681338455}, nil},
// Local is mainnet Shanghai, remote announces Gray Glacier + knowledge about Shanghai. Remote
// is simply out of sync, accept.
{params.MainnetChainConfig, 20123456, 1681338456, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1681338455}, nil},
// Local is mainnet Shanghai, remote announces Arrow Glacier + knowledge about Gray Glacier. Remote
// is definitely out of sync. It may or may not need the Shanghai update, we don't know yet.
{params.MainnetChainConfig, 20000000, 1681338455, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}, nil},
// Local is mainnet Gray Glacier, remote announces Shanghai. Local is out of sync, accept.
{params.MainnetChainConfig, 15050000, 0, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}, nil},
// Local is mainnet Arrow Glacier, remote announces Gray Glacier, but is not aware of Shanghai. Local
// out of sync. Local also knows about a future fork, but that is uncertain yet.
{params.MainnetChainConfig, 13773000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}, nil},
// Local is mainnet Shanghai. remote announces Gray Glacier but is not aware of further forks.
// Remote needs software update.
{params.MainnetChainConfig, 20000000, 1681338455, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}, ErrRemoteStale},
// Local is mainnet Gray Glacier, and isn't aware of more forks. Remote announces Gray Glacier +
// 0xffffffff. Local needs software update, reject.
{params.MainnetChainConfig, 15050000, 0, ID{Hash: checksumToBytes(checksumUpdate(0xf0afd0e3, math.MaxUint64)), Next: 0}, ErrLocalIncompatibleOrStale},
// Local is mainnet Gray Glacier, and is aware of Shanghai. Remote announces Shanghai +
// 0xffffffff. Local needs software update, reject.
{params.MainnetChainConfig, 15050000, 0, ID{Hash: checksumToBytes(checksumUpdate(0xdce96c2d, math.MaxUint64)), Next: 0}, ErrLocalIncompatibleOrStale},
// Local is mainnet Gray Glacier, far in the future. Remote announces Gopherium (non existing fork)
// at some future timestamp 8888888888, for itself, but past block for local. Local is incompatible.
//
// This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
{params.MainnetChainConfig, 888888888, 1660000000, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1660000000}, ErrLocalIncompatibleOrStale},
// Local is mainnet Gray Glacier. Remote is also in Gray Glacier, but announces Gopherium (non existing
// fork) at block 7279999, before Shanghai. Local is incompatible.
{params.MainnetChainConfig, 19999999, 1667999999, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1667999999}, ErrLocalIncompatibleOrStale},
//----------------------
// Timestamp based tests
//----------------------
// Local is mainnet Shanghai, remote announces the same. No future fork is announced.
{params.MainnetChainConfig, 20000000, 1681338455, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}, nil},
// Local is mainnet Shanghai, remote announces the same. Remote also announces a next fork
// at time 0xffffffff, but that is uncertain.
{params.MainnetChainConfig, 20000000, 1681338455, ID{Hash: checksumToBytes(0xdce96c2d), Next: math.MaxUint64}, nil},
// Local is mainnet currently in Shanghai only (so it's aware of Cancun), remote announces
// also Shanghai, but it's not yet aware of Cancun (e.g. non updated node before the fork).
// In this case we don't know if Cancun passed yet or not.
//
// TODO(karalabe): Enable this when Cancun is specced
//{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: 0}, nil},
// Local is mainnet currently in Shanghai only (so it's aware of Cancun), remote announces
// also Shanghai, and it's also aware of Cancun (e.g. updated node before the fork). We
// don't know if Cancun passed yet (will pass) or not.
//
// TODO(karalabe): Enable this when Cancun is specced and update next timestamp
//{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, nil},
// Local is mainnet currently in Shanghai only (so it's aware of Cancun), remote announces
// also Shanghai, and it's also aware of some random fork (e.g. misconfigured Cancun). As
// neither forks passed at neither nodes, they may mismatch, but we still connect for now.
//
// TODO(karalabe): Enable this when Cancun is specced
//{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: math.MaxUint64}, nil},
// Local is mainnet exactly on Cancun, remote announces Shanghai + knowledge about Cancun. Remote
// is simply out of sync, accept.
//
// TODO(karalabe): Enable this when Cancun is specced, update local head and time, next timestamp
// {params.MainnetChainConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, nil},
// Local is mainnet Cancun, remote announces Shanghai + knowledge about Cancun. Remote
// is simply out of sync, accept.
// TODO(karalabe): Enable this when Cancun is specced, update local head and time, next timestamp
//{params.MainnetChainConfig, 21123456, 1678123456, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, nil},
// Local is mainnet Prague, remote announces Shanghai + knowledge about Cancun. Remote
// is definitely out of sync. It may or may not need the Prague update, we don't know yet.
//
// TODO(karalabe): Enable this when Cancun **and** Prague is specced, update all the numbers
//{params.MainnetChainConfig, 0, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}, nil},
// Local is mainnet Shanghai, remote announces Cancun. Local is out of sync, accept.
//
// TODO(karalabe): Enable this when Cancun is specced, update remote checksum
//{params.MainnetChainConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x00000000), Next: 0}, nil},
// Local is mainnet Shanghai, remote announces Cancun, but is not aware of Prague. Local
// out of sync. Local also knows about a future fork, but that is uncertain yet.
//
// TODO(karalabe): Enable this when Cancun **and** Prague is specced, update remote checksum
//{params.MainnetChainConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x00000000), Next: 0}, nil},
// Local is mainnet Cancun. remote announces Shanghai but is not aware of further forks.
// Remote needs software update.
//
// TODO(karalabe): Enable this when Cancun is specced, update local head and time
//{params.MainnetChainConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x71147644), Next: 0}, ErrRemoteStale},
// Local is mainnet Shanghai, and isn't aware of more forks. Remote announces Shanghai +
// 0xffffffff. Local needs software update, reject.
{params.MainnetChainConfig, 20000000, 1681338455, ID{Hash: checksumToBytes(checksumUpdate(0xdce96c2d, math.MaxUint64)), Next: 0}, ErrLocalIncompatibleOrStale},
// Local is mainnet Shanghai, and is aware of Cancun. Remote announces Cancun +
// 0xffffffff. Local needs software update, reject.
//
// TODO(karalabe): Enable this when Cancun is specced, update remote checksum
//{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(checksumUpdate(0x00000000, math.MaxUint64)), Next: 0}, ErrLocalIncompatibleOrStale},
// Local is mainnet Shanghai, remote is random Shanghai.
{params.MainnetChainConfig, 20000000, 1681338455, ID{Hash: checksumToBytes(0x12345678), Next: 0}, ErrLocalIncompatibleOrStale},
// Local is mainnet Shanghai, far in the future. Remote announces Gopherium (non existing fork)
// at some future timestamp 8888888888, for itself, but past block for local. Local is incompatible.
//
// This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
{params.MainnetChainConfig, 88888888, 8888888888, ID{Hash: checksumToBytes(0xdce96c2d), Next: 8888888888}, ErrLocalIncompatibleOrStale},
// Local is mainnet Shanghai. Remote is also in Shanghai, but announces Gopherium (non existing
// fork) at timestamp 1668000000, before Cancun. Local is incompatible.
//
// TODO(karalabe): Enable this when Cancun is specced
//{params.MainnetChainConfig, 20999999, 1677999999, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, ErrLocalIncompatibleOrStale},
}
for i, tt := range tests {
filter := newFilter(tt.config, params.MainnetGenesisHash, func() (uint64, uint64) { return tt.head, tt.time })
if err := filter(tt.id); err != tt.err {
t.Errorf("test %d: validation error mismatch: have %v, want %v", i, err, tt.err)
}
}
}
// Tests that IDs are properly RLP encoded (specifically important because we
// use uint32 to store the hash, but we need to encode it as [4]byte).
func TestEncoding(t *testing.T) {
tests := []struct {
id ID
want []byte
}{
{ID{Hash: checksumToBytes(0), Next: 0}, common.Hex2Bytes("c6840000000080")},
		{ID{Hash: checksumToBytes(0xdeadbeef), Next: 0xBADDCAFE}, common.Hex2Bytes("ca84deadbeef84baddcafe")},
{ID{Hash: checksumToBytes(math.MaxUint32), Next: math.MaxUint64}, common.Hex2Bytes("ce84ffffffff88ffffffffffffffff")},
}
for i, tt := range tests {
have, err := rlp.EncodeToBytes(tt.id)
if err != nil {
t.Errorf("test %d: failed to encode forkid: %v", i, err)
continue
}
if !bytes.Equal(have, tt.want) {
t.Errorf("test %d: RLP mismatch: have %x, want %x", i, have, tt.want)
}
}
}
| core/forkid/forkid_test.go | 0 | https://github.com/ethereum/go-ethereum/commit/37ecff0967bec978e0723f4861803943bd6d0e17 | [
0.0015848783077672124,
0.00020266705541871488,
0.0001621516712475568,
0.00016731720825191587,
0.00021857774117961526
] |
{
"id": 5,
"code_window": [
"func (t *StateTest) gasLimit(subtest StateSubtest) uint64 {\n",
"\treturn t.json.Tx.GasLimit[t.json.Post[subtest.Fork][subtest.Index].Indexes.Gas]\n",
"}\n",
"\n",
"func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool) (*snapshot.Tree, *state.StateDB) {\n",
"\tsdb := state.NewDatabase(db)\n",
"\tstatedb, _ := state.New(common.Hash{}, sdb, nil)\n",
"\tfor addr, a := range accounts {\n",
"\t\tstatedb.SetCode(addr, a.Code)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tsdb := state.NewDatabaseWithConfig(db, &trie.Config{Preimages: true})\n"
],
"file_path": "tests/state_test_util.go",
"type": "replace",
"edit_start_line_idx": 286
} | // Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"math/big"
"os"
goruntime "runtime"
"runtime/pprof"
"testing"
"time"
"github.com/ethereum/go-ethereum/cmd/evm/internal/compiler"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/core/vm/runtime"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/urfave/cli/v2"
)
var runCommand = &cli.Command{
Action: runCmd,
Name: "run",
Usage: "run arbitrary evm binary",
ArgsUsage: "<code>",
Description: `The run command runs arbitrary EVM code.`,
}
// readGenesis will read the given JSON format genesis file and return
// the initialized Genesis structure
func readGenesis(genesisPath string) *core.Genesis {
// Make sure we have a valid genesis JSON
//genesisPath := ctx.Args().First()
if len(genesisPath) == 0 {
utils.Fatalf("Must supply path to genesis JSON file")
}
file, err := os.Open(genesisPath)
if err != nil {
utils.Fatalf("Failed to read genesis file: %v", err)
}
defer file.Close()
genesis := new(core.Genesis)
if err := json.NewDecoder(file).Decode(genesis); err != nil {
utils.Fatalf("invalid genesis file: %v", err)
}
return genesis
}
type execStats struct {
time time.Duration // The execution time.
allocs int64 // The number of heap allocations during execution.
bytesAllocated int64 // The cumulative number of bytes allocated during execution.
}
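// timedExec runs execFunc, under testing.Benchmark when bench is set, and
// returns the execution output, the gas left over and basic execution stats.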
func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) (output []byte, gasLeft uint64, stats execStats, err error) {
if bench {
result := testing.Benchmark(func(b *testing.B) {
for i := 0; i < b.N; i++ {
output, gasLeft, err = execFunc()
}
})
// Get the average execution time from the benchmarking result.
// There are other useful stats here that could be reported.
stats.time = time.Duration(result.NsPerOp())
stats.allocs = result.AllocsPerOp()
stats.bytesAllocated = result.AllocedBytesPerOp()
} else {
var memStatsBefore, memStatsAfter goruntime.MemStats
goruntime.ReadMemStats(&memStatsBefore)
startTime := time.Now()
output, gasLeft, err = execFunc()
stats.time = time.Since(startTime)
goruntime.ReadMemStats(&memStatsAfter)
stats.allocs = int64(memStatsAfter.Mallocs - memStatsBefore.Mallocs)
stats.bytesAllocated = int64(memStatsAfter.TotalAlloc - memStatsBefore.TotalAlloc)
}
return output, gasLeft, stats, err
}
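// runCmd assembles a runtime.Config from the CLI flags, loads the code to run
// (from state, --code/--codefile or an EASM source file) and executes it,
// optionally tracing, benchmarking and profiling the run.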
func runCmd(ctx *cli.Context) error {
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
log.Root().SetHandler(glogger)
logconfig := &logger.Config{
EnableMemory: !ctx.Bool(DisableMemoryFlag.Name),
DisableStack: ctx.Bool(DisableStackFlag.Name),
DisableStorage: ctx.Bool(DisableStorageFlag.Name),
EnableReturnData: !ctx.Bool(DisableReturnDataFlag.Name),
Debug: ctx.Bool(DebugFlag.Name),
}
var (
tracer vm.EVMLogger
debugLogger *logger.StructLogger
statedb *state.StateDB
chainConfig *params.ChainConfig
sender = common.BytesToAddress([]byte("sender"))
receiver = common.BytesToAddress([]byte("receiver"))
genesisConfig *core.Genesis
)
if ctx.Bool(MachineFlag.Name) {
tracer = logger.NewJSONLogger(logconfig, os.Stdout)
} else if ctx.Bool(DebugFlag.Name) {
debugLogger = logger.NewStructLogger(logconfig)
tracer = debugLogger
} else {
debugLogger = logger.NewStructLogger(logconfig)
}
if ctx.String(GenesisFlag.Name) != "" {
gen := readGenesis(ctx.String(GenesisFlag.Name))
genesisConfig = gen
db := rawdb.NewMemoryDatabase()
genesis := gen.MustCommit(db)
statedb, _ = state.New(genesis.Root(), state.NewDatabase(db), nil)
chainConfig = gen.Config
} else {
statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
genesisConfig = new(core.Genesis)
}
if ctx.String(SenderFlag.Name) != "" {
sender = common.HexToAddress(ctx.String(SenderFlag.Name))
}
statedb.CreateAccount(sender)
if ctx.String(ReceiverFlag.Name) != "" {
receiver = common.HexToAddress(ctx.String(ReceiverFlag.Name))
}
var code []byte
codeFileFlag := ctx.String(CodeFileFlag.Name)
codeFlag := ctx.String(CodeFlag.Name)
// The '--code' or '--codefile' flag overrides code in state
if codeFileFlag != "" || codeFlag != "" {
var hexcode []byte
if codeFileFlag != "" {
var err error
// If - is specified, it means that code comes from stdin
if codeFileFlag == "-" {
//Try reading from stdin
if hexcode, err = io.ReadAll(os.Stdin); err != nil {
fmt.Printf("Could not load code from stdin: %v\n", err)
os.Exit(1)
}
} else {
// Codefile with hex assembly
if hexcode, err = os.ReadFile(codeFileFlag); err != nil {
fmt.Printf("Could not load code from file: %v\n", err)
os.Exit(1)
}
}
} else {
hexcode = []byte(codeFlag)
}
hexcode = bytes.TrimSpace(hexcode)
if len(hexcode)%2 != 0 {
fmt.Printf("Invalid input length for hex data (%d)\n", len(hexcode))
os.Exit(1)
}
code = common.FromHex(string(hexcode))
} else if fn := ctx.Args().First(); len(fn) > 0 {
// EASM-file to compile
src, err := os.ReadFile(fn)
if err != nil {
return err
}
bin, err := compiler.Compile(fn, src, false)
if err != nil {
return err
}
code = common.Hex2Bytes(bin)
}
initialGas := ctx.Uint64(GasFlag.Name)
if genesisConfig.GasLimit != 0 {
initialGas = genesisConfig.GasLimit
}
runtimeConfig := runtime.Config{
Origin: sender,
State: statedb,
GasLimit: initialGas,
GasPrice: flags.GlobalBig(ctx, PriceFlag.Name),
Value: flags.GlobalBig(ctx, ValueFlag.Name),
Difficulty: genesisConfig.Difficulty,
Time: genesisConfig.Timestamp,
Coinbase: genesisConfig.Coinbase,
BlockNumber: new(big.Int).SetUint64(genesisConfig.Number),
EVMConfig: vm.Config{
Tracer: tracer,
Debug: ctx.Bool(DebugFlag.Name) || ctx.Bool(MachineFlag.Name),
},
}
if cpuProfilePath := ctx.String(CPUProfileFlag.Name); cpuProfilePath != "" {
f, err := os.Create(cpuProfilePath)
if err != nil {
fmt.Println("could not create CPU profile: ", err)
os.Exit(1)
}
if err := pprof.StartCPUProfile(f); err != nil {
fmt.Println("could not start CPU profile: ", err)
os.Exit(1)
}
defer pprof.StopCPUProfile()
}
if chainConfig != nil {
runtimeConfig.ChainConfig = chainConfig
} else {
runtimeConfig.ChainConfig = params.AllEthashProtocolChanges
}
var hexInput []byte
if inputFileFlag := ctx.String(InputFileFlag.Name); inputFileFlag != "" {
var err error
if hexInput, err = os.ReadFile(inputFileFlag); err != nil {
fmt.Printf("could not load input from file: %v\n", err)
os.Exit(1)
}
} else {
hexInput = []byte(ctx.String(InputFlag.Name))
}
hexInput = bytes.TrimSpace(hexInput)
if len(hexInput)%2 != 0 {
fmt.Println("input length must be even")
os.Exit(1)
}
input := common.FromHex(string(hexInput))
var execFunc func() ([]byte, uint64, error)
if ctx.Bool(CreateFlag.Name) {
input = append(code, input...)
execFunc = func() ([]byte, uint64, error) {
output, _, gasLeft, err := runtime.Create(input, &runtimeConfig)
return output, gasLeft, err
}
} else {
if len(code) > 0 {
statedb.SetCode(receiver, code)
}
execFunc = func() ([]byte, uint64, error) {
return runtime.Call(receiver, input, &runtimeConfig)
}
}
bench := ctx.Bool(BenchFlag.Name)
output, leftOverGas, stats, err := timedExec(bench, execFunc)
if ctx.Bool(DumpFlag.Name) {
statedb.Commit(true)
statedb.IntermediateRoot(true)
fmt.Println(string(statedb.Dump(nil)))
}
if memProfilePath := ctx.String(MemProfileFlag.Name); memProfilePath != "" {
f, err := os.Create(memProfilePath)
if err != nil {
fmt.Println("could not create memory profile: ", err)
os.Exit(1)
}
if err := pprof.WriteHeapProfile(f); err != nil {
fmt.Println("could not write memory profile: ", err)
os.Exit(1)
}
f.Close()
}
if ctx.Bool(DebugFlag.Name) {
if debugLogger != nil {
fmt.Fprintln(os.Stderr, "#### TRACE ####")
logger.WriteTrace(os.Stderr, debugLogger.StructLogs())
}
fmt.Fprintln(os.Stderr, "#### LOGS ####")
logger.WriteLogs(os.Stderr, statedb.Logs())
}
if bench || ctx.Bool(StatDumpFlag.Name) {
fmt.Fprintf(os.Stderr, `EVM gas used: %d
execution time: %v
allocations: %d
allocated bytes: %d
`, initialGas-leftOverGas, stats.time, stats.allocs, stats.bytesAllocated)
}
if tracer == nil {
fmt.Printf("%#x\n", output)
if err != nil {
fmt.Printf(" error: %v\n", err)
}
}
return nil
}
| cmd/evm/runner.go | 1 | https://github.com/ethereum/go-ethereum/commit/37ecff0967bec978e0723f4861803943bd6d0e17 | [
0.9985033273696899,
0.15391282737255096,
0.00016481144120916724,
0.00017259587184526026,
0.3572606146335602
] |
{
"id": 5,
"code_window": [
"func (t *StateTest) gasLimit(subtest StateSubtest) uint64 {\n",
"\treturn t.json.Tx.GasLimit[t.json.Post[subtest.Fork][subtest.Index].Indexes.Gas]\n",
"}\n",
"\n",
"func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool) (*snapshot.Tree, *state.StateDB) {\n",
"\tsdb := state.NewDatabase(db)\n",
"\tstatedb, _ := state.New(common.Hash{}, sdb, nil)\n",
"\tfor addr, a := range accounts {\n",
"\t\tstatedb.SetCode(addr, a.Code)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tsdb := state.NewDatabaseWithConfig(db, &trie.Config{Preimages: true})\n"
],
"file_path": "tests/state_test_util.go",
"type": "replace",
"edit_start_line_idx": 286
} | // Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build !linux
// +build !linux
package metrics
import "errors"
// ReadDiskStats retrieves the disk IO stats belonging to the current process.
func ReadDiskStats(stats *DiskStats) error {
return errors.New("Not implemented")
}
| metrics/disk_nop.go | 0 | https://github.com/ethereum/go-ethereum/commit/37ecff0967bec978e0723f4861803943bd6d0e17 | [
0.00017773955187294632,
0.0001741748274071142,
0.00016757266712374985,
0.000177212234120816,
0.000004673386683862191
] |
{
"id": 5,
"code_window": [
"func (t *StateTest) gasLimit(subtest StateSubtest) uint64 {\n",
"\treturn t.json.Tx.GasLimit[t.json.Post[subtest.Fork][subtest.Index].Indexes.Gas]\n",
"}\n",
"\n",
"func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool) (*snapshot.Tree, *state.StateDB) {\n",
"\tsdb := state.NewDatabase(db)\n",
"\tstatedb, _ := state.New(common.Hash{}, sdb, nil)\n",
"\tfor addr, a := range accounts {\n",
"\t\tstatedb.SetCode(addr, a.Code)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tsdb := state.NewDatabaseWithConfig(db, &trie.Config{Preimages: true})\n"
],
"file_path": "tests/state_test_util.go",
"type": "replace",
"edit_start_line_idx": 286
} | // Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package tests
import (
"testing"
)
func TestBlockchain(t *testing.T) {
t.Parallel()
bt := new(testMatcher)
// General state tests are 'exported' as blockchain tests, but we can run them natively.
// For speedier CI-runs, the line below can be uncommented, so those are skipped.
// For now, in hardfork-times (Berlin), we run the tests both as StateTests and
// as blockchain tests, since the latter also covers things like receipt root
bt.skipLoad(`^GeneralStateTests/`)
// Skip random failures due to selfish mining test
bt.skipLoad(`.*bcForgedTest/bcForkUncle\.json`)
// Slow tests
bt.slow(`.*bcExploitTest/DelegateCallSpam.json`)
bt.slow(`.*bcExploitTest/ShanghaiLove.json`)
bt.slow(`.*bcExploitTest/SuicideIssue.json`)
bt.slow(`.*/bcForkStressTest/`)
bt.slow(`.*/bcGasPricerTest/RPC_API_Test.json`)
bt.slow(`.*/bcWalletTest/`)
// Very slow test
bt.skipLoad(`.*/stTimeConsuming/.*`)
	// test takes a lot of time and easily goes OOM because of sha3 calculation on a huge range,
	// using 4.6 TGas
bt.skipLoad(`.*randomStatetest94.json.*`)
bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) {
if err := bt.checkFailure(t, test.Run(false)); err != nil {
t.Errorf("test without snapshotter failed: %v", err)
}
if err := bt.checkFailure(t, test.Run(true)); err != nil {
t.Errorf("test with snapshotter failed: %v", err)
}
})
// There is also a LegacyTests folder, containing blockchain tests generated
// prior to Istanbul. However, they are all derived from GeneralStateTests,
// which run natively, so there's no reason to run them here.
}
| tests/block_test.go | 0 | https://github.com/ethereum/go-ethereum/commit/37ecff0967bec978e0723f4861803943bd6d0e17 | [
0.001309444778598845,
0.00036260628257878125,
0.00016715392121113837,
0.00017773955187294632,
0.0003906639467459172
] |
{
"id": 5,
"code_window": [
"func (t *StateTest) gasLimit(subtest StateSubtest) uint64 {\n",
"\treturn t.json.Tx.GasLimit[t.json.Post[subtest.Fork][subtest.Index].Indexes.Gas]\n",
"}\n",
"\n",
"func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool) (*snapshot.Tree, *state.StateDB) {\n",
"\tsdb := state.NewDatabase(db)\n",
"\tstatedb, _ := state.New(common.Hash{}, sdb, nil)\n",
"\tfor addr, a := range accounts {\n",
"\t\tstatedb.SetCode(addr, a.Code)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tsdb := state.NewDatabaseWithConfig(db, &trie.Config{Preimages: true})\n"
],
"file_path": "tests/state_test_util.go",
"type": "replace",
"edit_start_line_idx": 286
} | // Copyright 2016 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"fmt"
"os"
"runtime"
"strconv"
"strings"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/internal/version"
"github.com/ethereum/go-ethereum/params"
"github.com/urfave/cli/v2"
)
var (
VersionCheckUrlFlag = &cli.StringFlag{
Name: "check.url",
Usage: "URL to use when checking vulnerabilities",
Value: "https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json",
}
VersionCheckVersionFlag = &cli.StringFlag{
Name: "check.version",
Usage: "Version to check",
Value: version.ClientName(clientIdentifier),
}
makecacheCommand = &cli.Command{
Action: makecache,
Name: "makecache",
Usage: "Generate ethash verification cache (for testing)",
ArgsUsage: "<blockNum> <outputDir>",
Description: `
The makecache command generates an ethash cache in <outputDir>.
This command exists to support the system testing project.
Regular users do not need to execute it.
`,
}
makedagCommand = &cli.Command{
Action: makedag,
Name: "makedag",
Usage: "Generate ethash mining DAG (for testing)",
ArgsUsage: "<blockNum> <outputDir>",
Description: `
The makedag command generates an ethash DAG in <outputDir>.
This command exists to support the system testing project.
Regular users do not need to execute it.
`,
}
versionCommand = &cli.Command{
Action: printVersion,
Name: "version",
Usage: "Print version numbers",
ArgsUsage: " ",
Description: `
The output of this command is supposed to be machine-readable.
`,
}
versionCheckCommand = &cli.Command{
Action: versionCheck,
Flags: []cli.Flag{
VersionCheckUrlFlag,
VersionCheckVersionFlag,
},
Name: "version-check",
Usage: "Checks (online) for known Geth security vulnerabilities",
ArgsUsage: "<versionstring (optional)>",
Description: `
The version-check command fetches vulnerability-information from https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json,
and displays information about any security vulnerabilities that affect the currently executing version.
`,
}
licenseCommand = &cli.Command{
Action: license,
Name: "license",
Usage: "Display license information",
ArgsUsage: " ",
}
)
// makecache generates an ethash verification cache into the provided folder.
func makecache(ctx *cli.Context) error {
args := ctx.Args().Slice()
if len(args) != 2 {
utils.Fatalf(`Usage: geth makecache <block number> <outputdir>`)
}
block, err := strconv.ParseUint(args[0], 0, 64)
if err != nil {
utils.Fatalf("Invalid block number: %v", err)
}
ethash.MakeCache(block, args[1])
return nil
}
// makedag generates an ethash mining DAG into the provided folder.
func makedag(ctx *cli.Context) error {
args := ctx.Args().Slice()
if len(args) != 2 {
utils.Fatalf(`Usage: geth makedag <block number> <outputdir>`)
}
block, err := strconv.ParseUint(args[0], 0, 64)
if err != nil {
utils.Fatalf("Invalid block number: %v", err)
}
ethash.MakeDataset(block, args[1])
return nil
}
func printVersion(ctx *cli.Context) error {
git, _ := version.VCS()
fmt.Println(strings.Title(clientIdentifier))
fmt.Println("Version:", params.VersionWithMeta)
if git.Commit != "" {
fmt.Println("Git Commit:", git.Commit)
}
if git.Date != "" {
fmt.Println("Git Commit Date:", git.Date)
}
fmt.Println("Architecture:", runtime.GOARCH)
fmt.Println("Go Version:", runtime.Version())
fmt.Println("Operating System:", runtime.GOOS)
fmt.Printf("GOPATH=%s\n", os.Getenv("GOPATH"))
fmt.Printf("GOROOT=%s\n", runtime.GOROOT())
return nil
}
func license(_ *cli.Context) error {
fmt.Println(`Geth is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Geth is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with geth. If not, see <http://www.gnu.org/licenses/>.`)
return nil
}
| cmd/geth/misccmd.go | 0 | https://github.com/ethereum/go-ethereum/commit/37ecff0967bec978e0723f4861803943bd6d0e17 | [
0.00018936047854367644,
0.0001726663758745417,
0.0001665352174313739,
0.00017128106264863163,
0.000005835013325850014
] |
{
"id": 0,
"code_window": [
"\t\t\tsort.Strings(children)\n",
"\n",
"\t\t\t// remove List-triggering key and add children in reverse order\n",
"\t\t\tdfs = dfs[:len(dfs)-1]\n",
"\t\t\tfor i := len(children) - 1; i >= 0; i-- {\n",
"\t\t\t\tdfs = append(dfs, key+children[i])\n",
"\t\t\t}\n",
"\t\t} else {\n",
"\t\t\terr := cb(ctx, key)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\tif children[i] != \"\" {\n",
"\t\t\t\t\tdfs = append(dfs, key+children[i])\n",
"\t\t\t\t}\n"
],
"file_path": "command/operator_migrate.go",
"type": "replace",
"edit_start_line_idx": 313
} | package command
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"reflect"
"sort"
"strings"
"testing"
"time"
"github.com/go-test/deep"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/command/server"
"github.com/hashicorp/vault/helper/base62"
"github.com/hashicorp/vault/helper/testhelpers"
"github.com/hashicorp/vault/physical"
"github.com/hashicorp/vault/vault"
)
func init() {
rand.Seed(time.Now().UnixNano())
}
func TestMigration(t *testing.T) {
t.Run("Default", func(t *testing.T) {
data := generateData()
fromFactory := physicalBackends["file"]
folder := filepath.Join(os.TempDir(), testhelpers.RandomWithPrefix("migrator"))
defer os.RemoveAll(folder)
confFrom := map[string]string{
"path": folder,
}
from, err := fromFactory(confFrom, nil)
if err != nil {
t.Fatal(err)
}
if err := storeData(from, data); err != nil {
t.Fatal(err)
}
toFactory := physicalBackends["inmem"]
confTo := map[string]string{}
to, err := toFactory(confTo, nil)
if err != nil {
t.Fatal(err)
}
cmd := OperatorMigrateCommand{
logger: log.NewNullLogger(),
}
if err := cmd.migrateAll(context.Background(), from, to); err != nil {
t.Fatal(err)
}
if err := compareStoredData(to, data, ""); err != nil {
t.Fatal(err)
}
})
t.Run("Start option", func(t *testing.T) {
data := generateData()
fromFactory := physicalBackends["inmem"]
confFrom := map[string]string{}
from, err := fromFactory(confFrom, nil)
if err != nil {
t.Fatal(err)
}
if err := storeData(from, data); err != nil {
t.Fatal(err)
}
toFactory := physicalBackends["file"]
folder := filepath.Join(os.TempDir(), testhelpers.RandomWithPrefix("migrator"))
defer os.RemoveAll(folder)
confTo := map[string]string{
"path": folder,
}
to, err := toFactory(confTo, nil)
if err != nil {
t.Fatal(err)
}
const start = "m"
cmd := OperatorMigrateCommand{
logger: log.NewNullLogger(),
flagStart: start,
}
if err := cmd.migrateAll(context.Background(), from, to); err != nil {
t.Fatal(err)
}
if err := compareStoredData(to, data, start); err != nil {
t.Fatal(err)
}
})
t.Run("Config parsing", func(t *testing.T) {
cmd := new(OperatorMigrateCommand)
cfgName := filepath.Join(os.TempDir(), testhelpers.RandomWithPrefix("migrator"))
ioutil.WriteFile(cfgName, []byte(`
storage_source "src_type" {
path = "src_path"
}
storage_destination "dest_type" {
path = "dest_path"
}`), 0644)
defer os.Remove(cfgName)
expCfg := &migratorConfig{
StorageSource: &server.Storage{
Type: "src_type",
Config: map[string]string{
"path": "src_path",
},
},
StorageDestination: &server.Storage{
Type: "dest_type",
Config: map[string]string{
"path": "dest_path",
},
},
}
cfg, err := cmd.loadMigratorConfig(cfgName)
if err != nil {
t.Fatal(cfg)
}
if diff := deep.Equal(cfg, expCfg); diff != nil {
t.Fatal(diff)
}
verifyBad := func(cfg string) {
ioutil.WriteFile(cfgName, []byte(cfg), 0644)
_, err := cmd.loadMigratorConfig(cfgName)
if err == nil {
t.Fatalf("expected error but none received from: %v", cfg)
}
}
// missing source
verifyBad(`
storage_destination "dest_type" {
path = "dest_path"
}`)
// missing destination
verifyBad(`
storage_source "src_type" {
path = "src_path"
}`)
// duplicate source
verifyBad(`
storage_source "src_type" {
path = "src_path"
}
storage_source "src_type2" {
path = "src_path"
}
storage_destination "dest_type" {
path = "dest_path"
}`)
// duplicate destination
verifyBad(`
storage_source "src_type" {
path = "src_path"
}
storage_destination "dest_type" {
path = "dest_path"
}
storage_destination "dest_type2" {
path = "dest_path"
}`)
})
t.Run("DFS Scan", func(t *testing.T) {
s, _ := physicalBackends["inmem"](map[string]string{}, nil)
data := generateData()
data["cc"] = []byte{}
data["c/d/e/f"] = []byte{}
data["c/d/e/g"] = []byte{}
data["c"] = []byte{}
storeData(s, data)
l := randomLister{s}
var out []string
dfsScan(context.Background(), l, func(ctx context.Context, path string) error {
out = append(out, path)
return nil
})
var keys []string
for key := range data {
keys = append(keys, key)
}
sort.Strings(keys)
if !reflect.DeepEqual(keys, out) {
t.Fatalf("expected equal: %v, %v", keys, out)
}
})
}
// randomLister wraps a physical backend, providing a List method
// that returns results in a random order.
type randomLister struct {
b physical.Backend
}
func (l randomLister) List(ctx context.Context, path string) ([]string, error) {
result, err := l.b.List(ctx, path)
if err != nil {
return nil, err
}
rand.Shuffle(len(result), func(i, j int) {
result[i], result[j] = result[j], result[i]
})
return result, err
}
func (l randomLister) Get(ctx context.Context, path string) (*physical.Entry, error) {
return l.b.Get(ctx, path)
}
func (l randomLister) Put(ctx context.Context, entry *physical.Entry) error {
return l.b.Put(ctx, entry)
}
func (l randomLister) Delete(ctx context.Context, path string) error {
return l.b.Delete(ctx, path)
}
// generateData creates a map of 500 random keys and values
func generateData() map[string][]byte {
result := make(map[string][]byte)
for i := 0; i < 500; i++ {
segments := make([]string, rand.Intn(8)+1)
for j := 0; j < len(segments); j++ {
s, _ := base62.Random(6)
segments[j] = s
}
data := make([]byte, 100)
rand.Read(data)
result[strings.Join(segments, "/")] = data
}
// Add special keys that should be excluded from migration
result[storageMigrationLock] = []byte{}
result[vault.CoreLockPath] = []byte{}
return result
}
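// storeData writes every entry of ref into the given backend.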
func storeData(s physical.Backend, ref map[string][]byte) error {
for k, v := range ref {
entry := physical.Entry{
Key: k,
Value: v,
}
err := s.Put(context.Background(), &entry)
if err != nil {
return err
}
}
return nil
}
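// compareStoredData verifies that s contains the ref entries at or after
// start, that keys before start were skipped, and that the special lock
// keys were excluded from the migration.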
func compareStoredData(s physical.Backend, ref map[string][]byte, start string) error {
for k, v := range ref {
entry, err := s.Get(context.Background(), k)
if err != nil {
return err
}
if k == storageMigrationLock || k == vault.CoreLockPath {
if entry == nil {
continue
}
return fmt.Errorf("key found that should have been excluded: %s", k)
}
if k >= start {
if entry == nil {
return fmt.Errorf("key not found: %s", k)
}
if !bytes.Equal(v, entry.Value) {
return fmt.Errorf("values differ for key: %s", k)
}
} else {
if entry != nil {
return fmt.Errorf("found key the should have been skipped by start option: %s", k)
}
}
}
return nil
}
| command/operator_migrate_test.go | 1 | https://github.com/hashicorp/vault/commit/a7531526abdb2cbad7a348bf618487eca6b05eb7 | [
0.0043105497024953365,
0.0003722924739122391,
0.00016320368740707636,
0.0001733063254505396,
0.0007494695018976927
] |
{
"id": 0,
"code_window": [
"\t\t\tsort.Strings(children)\n",
"\n",
"\t\t\t// remove List-triggering key and add children in reverse order\n",
"\t\t\tdfs = dfs[:len(dfs)-1]\n",
"\t\t\tfor i := len(children) - 1; i >= 0; i-- {\n",
"\t\t\t\tdfs = append(dfs, key+children[i])\n",
"\t\t\t}\n",
"\t\t} else {\n",
"\t\t\terr := cb(ctx, key)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\tif children[i] != \"\" {\n",
"\t\t\t\t\tdfs = append(dfs, key+children[i])\n",
"\t\t\t\t}\n"
],
"file_path": "command/operator_migrate.go",
"type": "replace",
"edit_start_line_idx": 313
} | package transit
import (
"context"
"encoding/base64"
"encoding/hex"
"fmt"
"strconv"
uuid "github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
func (b *backend) pathRandom() *framework.Path {
return &framework.Path{
Pattern: "random" + framework.OptionalParamRegex("urlbytes"),
Fields: map[string]*framework.FieldSchema{
"urlbytes": &framework.FieldSchema{
Type: framework.TypeString,
Description: "The number of bytes to generate (POST URL parameter)",
},
"bytes": &framework.FieldSchema{
Type: framework.TypeInt,
Default: 32,
Description: "The number of bytes to generate (POST body parameter). Defaults to 32 (256 bits).",
},
"format": &framework.FieldSchema{
Type: framework.TypeString,
Default: "base64",
Description: `Encoding format to use. Can be "hex" or "base64". Defaults to "base64".`,
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.UpdateOperation: b.pathRandomWrite,
},
HelpSynopsis: pathRandomHelpSyn,
HelpDescription: pathRandomHelpDesc,
}
}
func (b *backend) pathRandomWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
bytes := 0
var err error
strBytes := d.Get("urlbytes").(string)
if strBytes != "" {
bytes, err = strconv.Atoi(strBytes)
if err != nil {
return logical.ErrorResponse(fmt.Sprintf("error parsing url-set byte count: %s", err)), nil
}
} else {
bytes = d.Get("bytes").(int)
}
format := d.Get("format").(string)
if bytes < 1 {
return logical.ErrorResponse(`"bytes" cannot be less than 1`), nil
}
switch format {
case "hex":
case "base64":
default:
return logical.ErrorResponse(fmt.Sprintf("unsupported encoding format %s; must be \"hex\" or \"base64\"", format)), nil
}
randBytes, err := uuid.GenerateRandomBytes(bytes)
if err != nil {
return nil, err
}
var retStr string
switch format {
case "hex":
retStr = hex.EncodeToString(randBytes)
case "base64":
retStr = base64.StdEncoding.EncodeToString(randBytes)
}
// Generate the response
resp := &logical.Response{
Data: map[string]interface{}{
"random_bytes": retStr,
},
}
return resp, nil
}
const pathRandomHelpSyn = `Generate random bytes`
const pathRandomHelpDesc = `
This function can be used to generate high-entropy random bytes.
`
| builtin/logical/transit/path_random.go | 0 | https://github.com/hashicorp/vault/commit/a7531526abdb2cbad7a348bf618487eca6b05eb7 | [
0.00021023137378506362,
0.00017457276408094913,
0.00016640141257084906,
0.0001707350747892633,
0.000012128665730415378
] |
{
"id": 0,
"code_window": [
"\t\t\tsort.Strings(children)\n",
"\n",
"\t\t\t// remove List-triggering key and add children in reverse order\n",
"\t\t\tdfs = dfs[:len(dfs)-1]\n",
"\t\t\tfor i := len(children) - 1; i >= 0; i-- {\n",
"\t\t\t\tdfs = append(dfs, key+children[i])\n",
"\t\t\t}\n",
"\t\t} else {\n",
"\t\t\terr := cb(ctx, key)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\tif children[i] != \"\" {\n",
"\t\t\t\t\tdfs = append(dfs, key+children[i])\n",
"\t\t\t\t}\n"
],
"file_path": "command/operator_migrate.go",
"type": "replace",
"edit_start_line_idx": 313
} | Mozilla Public License, version 2.0
1. Definitions
1.1. “Contributor”
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. “Contributor Version”
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor’s Contribution.
1.3. “Contribution”
means Covered Software of a particular Contributor.
1.4. “Covered Software”
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. “Incompatible With Secondary Licenses”
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of version
1.1 or earlier of the License, but not also under the terms of a
Secondary License.
1.6. “Executable Form”
means any form of the work other than Source Code Form.
1.7. “Larger Work”
means a work that combines Covered Software with other material, in a separate
file or files, that is not Covered Software.
1.8. “License”
means this document.
1.9. “Licensable”
means having the right to grant, to the maximum extent possible, whether at the
time of the initial grant or subsequently, any and all of the rights conveyed by
this License.
1.10. “Modifications”
means any of the following:
a. any file in Source Code Form that results from an addition to, deletion
from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. “Patent Claims” of a Contributor
means any patent claim(s), including without limitation, method, process,
and apparatus claims, in any patent Licensable by such Contributor that
would be infringed, but for the grant of the License, by the making,
using, selling, offering for sale, having made, import, or transfer of
either its Contributions or its Contributor Version.
1.12. “Secondary License”
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. “Source Code Form”
means the form of the work preferred for making modifications.
1.14. “You” (or “Your”)
means an individual or a legal entity exercising rights under this
License. For legal entities, “You” includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, “control” means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or as
part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its Contributions
or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution become
effective for each Contribution on the date the Contributor first distributes
such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under this
License. No additional rights or licenses will be implied from the distribution
or licensing of Covered Software under this License. Notwithstanding Section
2.1(b) above, no patent license is granted by a Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party’s
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of its
Contributions.
This License does not grant any rights in the trademarks, service marks, or
logos of any Contributor (except as may be necessary to comply with the
notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this License
(see Section 10.2) or under the terms of a Secondary License (if permitted
under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its Contributions
are its original creation(s) or it has sufficient rights to grant the
rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under applicable
copyright doctrines of fair use, fair dealing, or other equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under the
terms of this License. You must inform recipients that the Source Code Form
of the Covered Software is governed by the terms of this License, and how
they can obtain a copy of this License. You may not attempt to alter or
restrict the recipients’ rights in the Source Code Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this License,
or sublicense it under different terms, provided that the license for
the Executable Form does not attempt to limit or alter the recipients’
rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for the
Covered Software. If the Larger Work is a combination of Covered Software
with a work governed by one or more Secondary Licenses, and the Covered
Software is not Incompatible With Secondary Licenses, this License permits
You to additionally distribute such Covered Software under the terms of
such Secondary License(s), so that the recipient of the Larger Work may, at
their option, further distribute the Covered Software under the terms of
either this License or such Secondary License(s).
3.4. Notices
You may not remove or alter the substance of any license notices (including
copyright notices, patent notices, disclaimers of warranty, or limitations
of liability) contained within the Source Code Form of the Covered
Software, except that You may alter any license notices to the extent
required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on behalf
of any Contributor. You must make it absolutely clear that any such
warranty, support, indemnity, or liability obligation is offered by You
alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute, judicial
order, or regulation then You must: (a) comply with the terms of this License
to the maximum extent possible; and (b) describe the limitations and the code
they affect. Such description must be placed in a text file included with all
distributions of the Covered Software under this License. Except to the
extent prohibited by statute or regulation, such description must be
sufficiently detailed for a recipient of ordinary skill to be able to
understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
if such Contributor fails to notify You of the non-compliance by some
reasonable means prior to 60 days after You have come back into compliance.
Moreover, Your grants from a particular Contributor are reinstated on an
ongoing basis if such Contributor notifies You of the non-compliance by
some reasonable means, this is the first time You have received notice of
non-compliance with this License from such Contributor, and You become
compliant prior to 30 days after Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions, counter-claims,
and cross-claims) alleging that a Contributor Version directly or
indirectly infringes any patent, then the rights granted to You by any and
all Contributors for the Covered Software under Section 2.1 of this License
shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an “as is” basis, without
warranty of any kind, either expressed, implied, or statutory, including,
without limitation, warranties that the Covered Software is free of defects,
merchantable, fit for a particular purpose or non-infringing. The entire
risk as to the quality and performance of the Covered Software is with You.
Should any Covered Software prove defective in any respect, You (not any
Contributor) assume the cost of any necessary servicing, repair, or
correction. This disclaimer of warranty constitutes an essential part of this
License. No use of any Covered Software is authorized under this License
except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from such
party’s negligence to the extent applicable law prohibits such limitation.
Some jurisdictions do not allow the exclusion or limitation of incidental or
consequential damages, so this exclusion and limitation may not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts of
a jurisdiction where the defendant maintains its principal place of business
and such litigation shall be governed by laws of that jurisdiction, without
reference to its conflict-of-law provisions. Nothing in this Section shall
prevent a party’s ability to bring cross-claims or counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject matter
hereof. If any provision of this License is held to be unenforceable, such
provision shall be reformed only to the extent necessary to make it
enforceable. Any law or regulation which provides that the language of a
contract shall be construed against the drafter shall not be used to construe
this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version of
the License under which You originally received the Covered Software, or
under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a modified
version of this License if you rename the license and remove any
references to the name of the license steward (except to note that such
modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - “Incompatible With Secondary Licenses” Notice
This Source Code Form is “Incompatible
With Secondary Licenses”, as defined by
the Mozilla Public License, v. 2.0.
| vendor/github.com/hashicorp/memberlist/LICENSE | 0 | https://github.com/hashicorp/vault/commit/a7531526abdb2cbad7a348bf618487eca6b05eb7 | [
0.00018190135597251356,
0.00017760902119334787,
0.00017308669339399785,
0.00017742498312145472,
0.0000016130977655848255
] |
{
"id": 0,
"code_window": [
"\t\t\tsort.Strings(children)\n",
"\n",
"\t\t\t// remove List-triggering key and add children in reverse order\n",
"\t\t\tdfs = dfs[:len(dfs)-1]\n",
"\t\t\tfor i := len(children) - 1; i >= 0; i-- {\n",
"\t\t\t\tdfs = append(dfs, key+children[i])\n",
"\t\t\t}\n",
"\t\t} else {\n",
"\t\t\terr := cb(ctx, key)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\tif children[i] != \"\" {\n",
"\t\t\t\t\tdfs = append(dfs, key+children[i])\n",
"\t\t\t\t}\n"
],
"file_path": "command/operator_migrate.go",
"type": "replace",
"edit_start_line_idx": 313
} | // cgo -godefs types_freebsd.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build amd64,freebsd
package unix
const (
SizeofPtr = 0x8
SizeofShort = 0x2
SizeofInt = 0x4
SizeofLong = 0x8
SizeofLongLong = 0x8
)
type (
_C_short int16
_C_int int32
_C_long int64
_C_long_long int64
)
type Timespec struct {
Sec int64
Nsec int64
}
type Timeval struct {
Sec int64
Usec int64
}
type Rusage struct {
Utime Timeval
Stime Timeval
Maxrss int64
Ixrss int64
Idrss int64
Isrss int64
Minflt int64
Majflt int64
Nswap int64
Inblock int64
Oublock int64
Msgsnd int64
Msgrcv int64
Nsignals int64
Nvcsw int64
Nivcsw int64
}
type Rlimit struct {
Cur int64
Max int64
}
type _Gid_t uint32
type Stat_t struct {
Dev uint32
Ino uint32
Mode uint16
Nlink uint16
Uid uint32
Gid uint32
Rdev uint32
Atimespec Timespec
Mtimespec Timespec
Ctimespec Timespec
Size int64
Blocks int64
Blksize int32
Flags uint32
Gen uint32
Lspare int32
Birthtimespec Timespec
}
type Statfs_t struct {
Version uint32
Type uint32
Flags uint64
Bsize uint64
Iosize uint64
Blocks uint64
Bfree uint64
Bavail int64
Files uint64
Ffree int64
Syncwrites uint64
Asyncwrites uint64
Syncreads uint64
Asyncreads uint64
Spare [10]uint64
Namemax uint32
Owner uint32
Fsid Fsid
Charspare [80]int8
Fstypename [16]int8
Mntfromname [88]int8
Mntonname [88]int8
}
type Flock_t struct {
Start int64
Len int64
Pid int32
Type int16
Whence int16
Sysid int32
Pad_cgo_0 [4]byte
}
type Dirent struct {
Fileno uint32
Reclen uint16
Type uint8
Namlen uint8
Name [256]int8
}
type Fsid struct {
Val [2]int32
}
const (
PathMax = 0x400
)
const (
FADV_NORMAL = 0x0
FADV_RANDOM = 0x1
FADV_SEQUENTIAL = 0x2
FADV_WILLNEED = 0x3
FADV_DONTNEED = 0x4
FADV_NOREUSE = 0x5
)
type RawSockaddrInet4 struct {
Len uint8
Family uint8
Port uint16
Addr [4]byte /* in_addr */
Zero [8]int8
}
type RawSockaddrInet6 struct {
Len uint8
Family uint8
Port uint16
Flowinfo uint32
Addr [16]byte /* in6_addr */
Scope_id uint32
}
type RawSockaddrUnix struct {
Len uint8
Family uint8
Path [104]int8
}
type RawSockaddrDatalink struct {
Len uint8
Family uint8
Index uint16
Type uint8
Nlen uint8
Alen uint8
Slen uint8
Data [46]int8
}
type RawSockaddr struct {
Len uint8
Family uint8
Data [14]int8
}
type RawSockaddrAny struct {
Addr RawSockaddr
Pad [92]int8
}
type _Socklen uint32
type Linger struct {
Onoff int32
Linger int32
}
type Iovec struct {
Base *byte
Len uint64
}
type IPMreq struct {
Multiaddr [4]byte /* in_addr */
Interface [4]byte /* in_addr */
}
type IPMreqn struct {
Multiaddr [4]byte /* in_addr */
Address [4]byte /* in_addr */
Ifindex int32
}
type IPv6Mreq struct {
Multiaddr [16]byte /* in6_addr */
Interface uint32
}
type Msghdr struct {
Name *byte
Namelen uint32
Pad_cgo_0 [4]byte
Iov *Iovec
Iovlen int32
Pad_cgo_1 [4]byte
Control *byte
Controllen uint32
Flags int32
}
type Cmsghdr struct {
Len uint32
Level int32
Type int32
}
type Inet6Pktinfo struct {
Addr [16]byte /* in6_addr */
Ifindex uint32
}
type IPv6MTUInfo struct {
Addr RawSockaddrInet6
Mtu uint32
}
type ICMPv6Filter struct {
Filt [8]uint32
}
const (
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
SizeofSockaddrAny = 0x6c
SizeofSockaddrUnix = 0x6a
SizeofSockaddrDatalink = 0x36
SizeofLinger = 0x8
SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc
SizeofIPv6Mreq = 0x14
SizeofMsghdr = 0x30
SizeofCmsghdr = 0xc
SizeofInet6Pktinfo = 0x14
SizeofIPv6MTUInfo = 0x20
SizeofICMPv6Filter = 0x20
)
const (
PTRACE_TRACEME = 0x0
PTRACE_CONT = 0x7
PTRACE_KILL = 0x8
)
type Kevent_t struct {
Ident uint64
Filter int16
Flags uint16
Fflags uint32
Data int64
Udata *byte
}
type FdSet struct {
X__fds_bits [16]uint64
}
const (
sizeofIfMsghdr = 0xa8
SizeofIfMsghdr = 0xa8
sizeofIfData = 0x98
SizeofIfData = 0x98
SizeofIfaMsghdr = 0x14
SizeofIfmaMsghdr = 0x10
SizeofIfAnnounceMsghdr = 0x18
SizeofRtMsghdr = 0x98
SizeofRtMetrics = 0x70
)
type ifMsghdr struct {
Msglen uint16
Version uint8
Type uint8
Addrs int32
Flags int32
Index uint16
Pad_cgo_0 [2]byte
Data ifData
}
type IfMsghdr struct {
Msglen uint16
Version uint8
Type uint8
Addrs int32
Flags int32
Index uint16
Pad_cgo_0 [2]byte
Data IfData
}
type ifData struct {
Type uint8
Physical uint8
Addrlen uint8
Hdrlen uint8
Link_state uint8
Vhid uint8
Datalen uint16
Mtu uint32
Metric uint32
Baudrate uint64
Ipackets uint64
Ierrors uint64
Opackets uint64
Oerrors uint64
Collisions uint64
Ibytes uint64
Obytes uint64
Imcasts uint64
Omcasts uint64
Iqdrops uint64
Oqdrops uint64
Noproto uint64
Hwassist uint64
X__ifi_epoch [8]byte
X__ifi_lastchange [16]byte
}
type IfData struct {
Type uint8
Physical uint8
Addrlen uint8
Hdrlen uint8
Link_state uint8
Spare_char1 uint8
Spare_char2 uint8
Datalen uint8
Mtu uint64
Metric uint64
Baudrate uint64
Ipackets uint64
Ierrors uint64
Opackets uint64
Oerrors uint64
Collisions uint64
Ibytes uint64
Obytes uint64
Imcasts uint64
Omcasts uint64
Iqdrops uint64
Noproto uint64
Hwassist uint64
Epoch int64
Lastchange Timeval
}
type IfaMsghdr struct {
Msglen uint16
Version uint8
Type uint8
Addrs int32
Flags int32
Index uint16
Pad_cgo_0 [2]byte
Metric int32
}
type IfmaMsghdr struct {
Msglen uint16
Version uint8
Type uint8
Addrs int32
Flags int32
Index uint16
Pad_cgo_0 [2]byte
}
type IfAnnounceMsghdr struct {
Msglen uint16
Version uint8
Type uint8
Index uint16
Name [16]int8
What uint16
}
type RtMsghdr struct {
Msglen uint16
Version uint8
Type uint8
Index uint16
Pad_cgo_0 [2]byte
Flags int32
Addrs int32
Pid int32
Seq int32
Errno int32
Fmask int32
Inits uint64
Rmx RtMetrics
}
type RtMetrics struct {
Locks uint64
Mtu uint64
Hopcount uint64
Expire uint64
Recvpipe uint64
Sendpipe uint64
Ssthresh uint64
Rtt uint64
Rttvar uint64
Pksent uint64
Weight uint64
Filler [3]uint64
}
const (
SizeofBpfVersion = 0x4
SizeofBpfStat = 0x8
SizeofBpfZbuf = 0x18
SizeofBpfProgram = 0x10
SizeofBpfInsn = 0x8
SizeofBpfHdr = 0x20
SizeofBpfZbufHeader = 0x20
)
type BpfVersion struct {
Major uint16
Minor uint16
}
type BpfStat struct {
Recv uint32
Drop uint32
}
type BpfZbuf struct {
Bufa *byte
Bufb *byte
Buflen uint64
}
type BpfProgram struct {
Len uint32
Pad_cgo_0 [4]byte
Insns *BpfInsn
}
type BpfInsn struct {
Code uint16
Jt uint8
Jf uint8
K uint32
}
type BpfHdr struct {
Tstamp Timeval
Caplen uint32
Datalen uint32
Hdrlen uint16
Pad_cgo_0 [6]byte
}
type BpfZbufHeader struct {
Kernel_gen uint32
Kernel_len uint32
User_gen uint32
X_bzh_pad [5]uint32
}
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Cc [20]uint8
Ispeed uint32
Ospeed uint32
}
type Winsize struct {
Row uint16
Col uint16
Xpixel uint16
Ypixel uint16
}
const (
AT_FDCWD = -0x64
AT_REMOVEDIR = 0x800
AT_SYMLINK_FOLLOW = 0x400
AT_SYMLINK_NOFOLLOW = 0x200
)
type PollFd struct {
Fd int32
Events int16
Revents int16
}
const (
POLLERR = 0x8
POLLHUP = 0x10
POLLIN = 0x1
POLLINIGNEOF = 0x2000
POLLNVAL = 0x20
POLLOUT = 0x4
POLLPRI = 0x2
POLLRDBAND = 0x80
POLLRDNORM = 0x40
POLLWRBAND = 0x100
POLLWRNORM = 0x4
)
type CapRights struct {
Rights [2]uint64
}
type Utsname struct {
Sysname [256]byte
Nodename [256]byte
Release [256]byte
Version [256]byte
Machine [256]byte
}
| vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go | 0 | https://github.com/hashicorp/vault/commit/a7531526abdb2cbad7a348bf618487eca6b05eb7 | [
0.00040443160105496645,
0.00018037075642496347,
0.00016170498565770686,
0.00017061998369172215,
0.000044341490138322115
] |
{
"id": 1,
"code_window": [
"\t\"github.com/hashicorp/vault/helper/testhelpers\"\n",
"\t\"github.com/hashicorp/vault/physical\"\n",
"\t\"github.com/hashicorp/vault/vault\"\n",
")\n",
"\n",
"func init() {\n",
"\trand.Seed(time.Now().UnixNano())\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"const trailing_slash_key = \"trailing_slash/\"\n",
"\n"
],
"file_path": "command/operator_migrate_test.go",
"type": "add",
"edit_start_line_idx": 25
} | package command
import (
"context"
"fmt"
"io/ioutil"
"os"
"sort"
"strings"
"time"
"github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/vault/command/server"
"github.com/hashicorp/vault/helper/logging"
"github.com/hashicorp/vault/physical"
"github.com/hashicorp/vault/vault"
"github.com/mitchellh/cli"
"github.com/pkg/errors"
"github.com/posener/complete"
)
var _ cli.Command = (*OperatorMigrateCommand)(nil)
var _ cli.CommandAutocomplete = (*OperatorMigrateCommand)(nil)
var errAbort = errors.New("Migration aborted")
type OperatorMigrateCommand struct {
*BaseCommand
PhysicalBackends map[string]physical.Factory
flagConfig string
flagStart string
flagReset bool
logger log.Logger
ShutdownCh chan struct{}
}
type migratorConfig struct {
StorageSource *server.Storage `hcl:"-"`
StorageDestination *server.Storage `hcl:"-"`
}
func (c *OperatorMigrateCommand) Synopsis() string {
return "Migrates Vault data between storage backends"
}
func (c *OperatorMigrateCommand) Help() string {
helpText := `
Usage: vault operator migrate [options]
This command starts a storage backend migration process to copy all data
from one backend to another. This operates directly on encrypted data and
does not require a Vault server, nor any unsealing.
Start a migration with a configuration file:
$ vault operator migrate -config=migrate.hcl
For more information, please see the documentation.
` + c.Flags().Help()
return strings.TrimSpace(helpText)
}
func (c *OperatorMigrateCommand) Flags() *FlagSets {
set := NewFlagSets(c.UI)
f := set.NewFlagSet("Command Options")
f.StringVar(&StringVar{
Name: "config",
Target: &c.flagConfig,
Completion: complete.PredictOr(
complete.PredictFiles("*.hcl"),
),
Usage: "Path to a configuration file. This configuration file should " +
"contain only migrator directives.",
})
f.StringVar(&StringVar{
Name: "start",
Target: &c.flagStart,
Usage: "Only copy keys lexicographically at or after this value.",
})
f.BoolVar(&BoolVar{
Name: "reset",
Target: &c.flagReset,
Usage: "Reset the migration lock. No migration will occur.",
})
return set
}
func (c *OperatorMigrateCommand) AutocompleteArgs() complete.Predictor {
return nil
}
func (c *OperatorMigrateCommand) AutocompleteFlags() complete.Flags {
return c.Flags().Completions()
}
func (c *OperatorMigrateCommand) Run(args []string) int {
c.logger = logging.NewVaultLogger(log.Info)
f := c.Flags()
if err := f.Parse(args); err != nil {
c.UI.Error(err.Error())
return 1
}
if c.flagConfig == "" {
c.UI.Error("Must specify exactly one config path using -config")
return 1
}
config, err := c.loadMigratorConfig(c.flagConfig)
if err != nil {
c.UI.Error(fmt.Sprintf("Error loading configuration from %s: %s", c.flagConfig, err))
return 1
}
if err := c.migrate(config); err != nil {
if err == errAbort {
return 0
}
c.UI.Error(fmt.Sprintf("Error migrating: %s", err))
return 2
}
if c.flagReset {
c.UI.Output("Success! Migration lock reset (if it was set).")
} else {
c.UI.Output("Success! All of the keys have been migrated.")
}
return 0
}
// migrate attempts to instantiate the source and destination backends,
// and then invokes the migration at the root of the keyspace.
func (c *OperatorMigrateCommand) migrate(config *migratorConfig) error {
from, err := c.newBackend(config.StorageSource.Type, config.StorageSource.Config)
if err != nil {
return errwrap.Wrapf("error mounting 'storage_source': {{err}}", err)
}
if c.flagReset {
if err := SetStorageMigration(from, false); err != nil {
return errwrap.Wrapf("error reseting migration lock: {{err}}", err)
}
return nil
}
to, err := c.newBackend(config.StorageDestination.Type, config.StorageDestination.Config)
if err != nil {
return errwrap.Wrapf("error mounting 'storage_destination': {{err}}", err)
}
migrationStatus, err := CheckStorageMigration(from)
if err != nil {
return errwrap.Wrapf("error checking migration status: {{err}}", err)
}
if migrationStatus != nil {
return fmt.Errorf("Storage migration in progress (started: %s).", migrationStatus.Start.Format(time.RFC3339))
}
if err := SetStorageMigration(from, true); err != nil {
return errwrap.Wrapf("error setting migration lock: {{err}}", err)
}
defer SetStorageMigration(from, false)
ctx, cancelFunc := context.WithCancel(context.Background())
doneCh := make(chan error)
go func() {
doneCh <- c.migrateAll(ctx, from, to)
}()
select {
case err := <-doneCh:
cancelFunc()
return err
case <-c.ShutdownCh:
c.UI.Output("==> Migration shutdown triggered\n")
cancelFunc()
<-doneCh
return errAbort
}
}
// migrateAll copies all keys in lexicographic order.
func (c *OperatorMigrateCommand) migrateAll(ctx context.Context, from physical.Backend, to physical.Backend) error {
return dfsScan(ctx, from, func(ctx context.Context, path string) error {
if path < c.flagStart || path == storageMigrationLock || path == vault.CoreLockPath {
return nil
}
entry, err := from.Get(ctx, path)
if err != nil {
return errwrap.Wrapf("error reading entry: {{err}}", err)
}
if entry == nil {
return nil
}
if err := to.Put(ctx, entry); err != nil {
return errwrap.Wrapf("error writing entry: {{err}}", err)
}
c.logger.Info("copied key", "path", path)
return nil
})
}
func (c *OperatorMigrateCommand) newBackend(kind string, conf map[string]string) (physical.Backend, error) {
factory, ok := c.PhysicalBackends[kind]
if !ok {
return nil, fmt.Errorf("no Vault storage backend named: %+q", kind)
}
return factory(conf, c.logger)
}
// loadMigratorConfig loads the configuration at the given path
func (c *OperatorMigrateCommand) loadMigratorConfig(path string) (*migratorConfig, error) {
fi, err := os.Stat(path)
if err != nil {
return nil, err
}
if fi.IsDir() {
return nil, fmt.Errorf("location is a directory, not a file")
}
d, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
obj, err := hcl.ParseBytes(d)
if err != nil {
return nil, err
}
var result migratorConfig
if err := hcl.DecodeObject(&result, obj); err != nil {
return nil, err
}
list, ok := obj.Node.(*ast.ObjectList)
if !ok {
return nil, fmt.Errorf("error parsing: file doesn't contain a root object")
}
// Look for storage_* stanzas
for _, stanza := range []string{"storage_source", "storage_destination"} {
o := list.Filter(stanza)
if len(o.Items) != 1 {
return nil, fmt.Errorf("exactly one '%s' block is required", stanza)
}
if err := parseStorage(&result, o, stanza); err != nil {
return nil, errwrap.Wrapf("error parsing '%s': {{err}}", err)
}
}
return &result, nil
}
// parseStorage reuses the existing storage parsing that's part of the main Vault
// config processing, but only keeps the storage result.
func parseStorage(result *migratorConfig, list *ast.ObjectList, name string) error {
tmpConfig := new(server.Config)
if err := server.ParseStorage(tmpConfig, list, name); err != nil {
return err
}
switch name {
case "storage_source":
result.StorageSource = tmpConfig.Storage
case "storage_destination":
result.StorageDestination = tmpConfig.Storage
default:
return fmt.Errorf("unknown storage name: %s", name)
}
return nil
}
// dfsScan will invoke cb with every key from source.
// Keys will be traversed in lexicographic, depth-first order.
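// cb is invoked only for leaf keys: a key ending in "/" is treated as a
// prefix to expand via List, never passed to cb. For example, given the
// keys a, b/c, and d, cb sees a, then b/c, then d.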
func dfsScan(ctx context.Context, source physical.Backend, cb func(ctx context.Context, path string) error) error {
dfs := []string{""}
for l := len(dfs); l > 0; l = len(dfs) {
key := dfs[len(dfs)-1]
if key == "" || strings.HasSuffix(key, "/") {
children, err := source.List(ctx, key)
if err != nil {
return errwrap.Wrapf("failed to scan for children: {{err}}", err)
}
sort.Strings(children)
// remove List-triggering key and add children in reverse order
dfs = dfs[:len(dfs)-1]
for i := len(children) - 1; i >= 0; i-- {
dfs = append(dfs, key+children[i])
}
} else {
err := cb(ctx, key)
if err != nil {
return err
}
dfs = dfs[:len(dfs)-1]
}
select {
case <-ctx.Done():
return nil
default:
}
}
return nil
}
| command/operator_migrate.go | 1 | https://github.com/hashicorp/vault/commit/a7531526abdb2cbad7a348bf618487eca6b05eb7 | [
0.0067274803295731544,
0.0005184370093047619,
0.00016185246931854635,
0.00017348203982692212,
0.0011565781896933913
] |
{
"id": 1,
"code_window": [
"\t\"github.com/hashicorp/vault/helper/testhelpers\"\n",
"\t\"github.com/hashicorp/vault/physical\"\n",
"\t\"github.com/hashicorp/vault/vault\"\n",
")\n",
"\n",
"func init() {\n",
"\trand.Seed(time.Now().UnixNano())\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"const trailing_slash_key = \"trailing_slash/\"\n",
"\n"
],
"file_path": "command/operator_migrate_test.go",
"type": "add",
"edit_start_line_idx": 25
} | ---
layout: "docs"
page_title: "Token Helpers"
sidebar_title: "Token Helpers"
sidebar_current: "docs-commands-token-helper"
description: |-
The Vault CLI supports external token helpers that make retrieving, setting and erasing tokens simpler to use.
---
# Token Helpers
The Vault CLI provides a built-in tool for authenticating to any of the enabled auth backends. By default, after a successful authentication the Vault CLI takes the generated token and stores it on disk in the `~/.vault-token` file. This behavior can be changed via a token helper: an external program that Vault calls to save, retrieve, or erase a saved token. The token helper could be a very simple script or a more complex program, depending on your needs. The interface to the external token helper is extremely simple.
## Configuration
To configure a token helper, edit (or create) the file `~/.vault` and add a line similar to:
```
token_helper = "/path/to/token/helper.sh"
```
You will need to use the fully qualified path to the token helper script. The script should be executable.
## Developing a Token Helper
The interface to a token helper is extremely simple: the script is passed one argument, which will be `get`, `store`, or `erase`. If the argument is `get`, the script should do whatever work it needs to do to retrieve the stored token and then print the token to `STDOUT`. If the argument is `store`, Vault is asking you to store the token; the token is written to the script's `STDIN`, as in the Ruby example below. Finally, if the argument is `erase`, your program should erase the stored token.
If your program succeeds, it should exit with status code 0. If it encounters an issue that prevents it from working, it should exit with some other status code. You should write a user-friendly error message to `STDERR`. You should never write anything other than the token to `STDOUT`, as Vault assumes whatever it gets on `STDOUT` is the token.
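To make the contract concrete, here is a minimal sketch of the same interface in Go. It is illustrative only: the `~/.vault-helper-token` path and the error handling are assumptions of this sketch, not part of the interface.
```
package main
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
)
func main() {
if len(os.Args) < 2 {
fmt.Fprintln(os.Stderr, "expected one argument: get, store or erase")
os.Exit(1)
}
// Hypothetical storage location; any persistent path works.
path := filepath.Join(os.Getenv("HOME"), ".vault-helper-token")
switch os.Args[1] {
case "get":
// Print the stored token (and nothing else) to STDOUT.
b, err := ioutil.ReadFile(path)
if err != nil && !os.IsNotExist(err) {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
os.Stdout.Write(b)
case "store":
// Vault writes the token to the helper's STDIN.
b, err := ioutil.ReadAll(os.Stdin)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
if err := ioutil.WriteFile(path, b, 0600); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
case "erase":
if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
}
```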
### Example Token Helper
This is an example token helper written in Ruby that stores and retrieves tokens in a JSON file called `~/.vault_tokens`. The lookup key is the environment variable `$VAULT_ADDR`, which allows the Vault user to easily store and retrieve tokens for a number of different Vault servers.
```
#!/usr/bin/env ruby
require 'json'
unless ENV['VAULT_ADDR']
STDERR.puts "No VAULT_ADDR environment variable set. Set it and run me again!"
exit 100
end
begin
tokens = JSON.parse(File.read("#{ENV['HOME']}/.vault_tokens"))
rescue Errno::ENOENT
# file doesn't exist so create a blank hash for it
tokens = {}
end
case ARGV.first
when 'get'
print tokens[ENV['VAULT_ADDR']] if tokens[ENV['VAULT_ADDR']]
exit 0
when 'store'
tokens[ENV['VAULT_ADDR']] = STDIN.read
when 'erase'
tokens.delete(ENV['VAULT_ADDR'])
end
File.open("#{ENV['HOME']}/.vault_tokens", 'w') { |file| file.write(tokens.to_json) }
```
| website/source/docs/commands/token-helper.html.md | 0 | https://github.com/hashicorp/vault/commit/a7531526abdb2cbad7a348bf618487eca6b05eb7 | [
0.0005234574782662094,
0.00022057259047869593,
0.0001671587087912485,
0.00017099517572205514,
0.0001236808457178995
] |
{
"id": 1,
"code_window": [
"\t\"github.com/hashicorp/vault/helper/testhelpers\"\n",
"\t\"github.com/hashicorp/vault/physical\"\n",
"\t\"github.com/hashicorp/vault/vault\"\n",
")\n",
"\n",
"func init() {\n",
"\trand.Seed(time.Now().UnixNano())\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"const trailing_slash_key = \"trailing_slash/\"\n",
"\n"
],
"file_path": "command/operator_migrate_test.go",
"type": "add",
"edit_start_line_idx": 25
} | package crypto
import (
"crypto"
"crypto/hmac"
"encoding/json"
"errors"
)
// SigningMethodHMAC implements the HMAC-SHA family of SigningMethods.
type SigningMethodHMAC struct {
Name string
Hash crypto.Hash
_ struct{}
}
// Specific instances of HMAC-SHA SigningMethods.
var (
// SigningMethodHS256 implements HS256.
SigningMethodHS256 = &SigningMethodHMAC{
Name: "HS256",
Hash: crypto.SHA256,
}
// SigningMethodHS384 implements HS384.
SigningMethodHS384 = &SigningMethodHMAC{
Name: "HS384",
Hash: crypto.SHA384,
}
// SigningMethodHS512 implements HS512.
SigningMethodHS512 = &SigningMethodHMAC{
Name: "HS512",
Hash: crypto.SHA512,
}
// ErrSignatureInvalid is returned when the provided signature is found
// to be invalid.
ErrSignatureInvalid = errors.New("signature is invalid")
)
// Alg implements the SigningMethod interface.
func (m *SigningMethodHMAC) Alg() string { return m.Name }
// Verify implements the Verify method from SigningMethod.
// For this signing method, key must be a []byte.
func (m *SigningMethodHMAC) Verify(raw []byte, signature Signature, key interface{}) error {
keyBytes, ok := key.([]byte)
if !ok {
return ErrInvalidKey
}
hasher := hmac.New(m.Hash.New, keyBytes)
hasher.Write(raw)
if hmac.Equal(signature, hasher.Sum(nil)) {
return nil
}
return ErrSignatureInvalid
}
// Sign implements the Sign method from SigningMethod for this signing method.
// Key must be a []byte.
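//
// Example (sketch; the key and payload are illustrative):
// sig, err := SigningMethodHS256.Sign([]byte("payload"), []byte("secret"))
// if err == nil {
// err = SigningMethodHS256.Verify([]byte("payload"), sig, []byte("secret"))
// }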
func (m *SigningMethodHMAC) Sign(data []byte, key interface{}) (Signature, error) {
keyBytes, ok := key.([]byte)
if !ok {
return nil, ErrInvalidKey
}
hasher := hmac.New(m.Hash.New, keyBytes)
hasher.Write(data)
return Signature(hasher.Sum(nil)), nil
}
// Hasher implements the SigningMethod interface.
func (m *SigningMethodHMAC) Hasher() crypto.Hash { return m.Hash }
// MarshalJSON implements json.Marshaler.
// See SigningMethodECDSA.MarshalJSON() for information.
func (m *SigningMethodHMAC) MarshalJSON() ([]byte, error) {
return []byte(`"` + m.Alg() + `"`), nil
}
var _ json.Marshaler = (*SigningMethodHMAC)(nil)
| vendor/github.com/briankassouf/jose/crypto/hmac.go | 0 | https://github.com/hashicorp/vault/commit/a7531526abdb2cbad7a348bf618487eca6b05eb7 | [
0.0002805804251693189,
0.0001856490271165967,
0.00016359100118279457,
0.00017223473696503788,
0.00003456246849964373
] |
{
"id": 1,
"code_window": [
"\t\"github.com/hashicorp/vault/helper/testhelpers\"\n",
"\t\"github.com/hashicorp/vault/physical\"\n",
"\t\"github.com/hashicorp/vault/vault\"\n",
")\n",
"\n",
"func init() {\n",
"\trand.Seed(time.Now().UnixNano())\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"const trailing_slash_key = \"trailing_slash/\"\n",
"\n"
],
"file_path": "command/operator_migrate_test.go",
"type": "add",
"edit_start_line_idx": 25
} | MIT License
Copyright (c) 2016 json-iterator
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
| vendor/github.com/json-iterator/go/LICENSE | 0 | https://github.com/hashicorp/vault/commit/a7531526abdb2cbad7a348bf618487eca6b05eb7 | [
0.00017682772886473686,
0.00017225298506673425,
0.00016823707846924663,
0.00017169411876238883,
0.0000035293114706291817
] |
{
"id": 2,
"code_window": [
"\t\tdfsScan(context.Background(), l, func(ctx context.Context, path string) error {\n",
"\t\t\tout = append(out, path)\n",
"\t\t\treturn nil\n",
"\t\t})\n",
"\n",
"\t\tvar keys []string\n",
"\t\tfor key := range data {\n",
"\t\t\tkeys = append(keys, key)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tdelete(data, trailing_slash_key)\n",
"\t\tdelete(data, \"\")\n",
"\n"
],
"file_path": "command/operator_migrate_test.go",
"type": "add",
"edit_start_line_idx": 211
} | package command
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"reflect"
"sort"
"strings"
"testing"
"time"
"github.com/go-test/deep"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/command/server"
"github.com/hashicorp/vault/helper/base62"
"github.com/hashicorp/vault/helper/testhelpers"
"github.com/hashicorp/vault/physical"
"github.com/hashicorp/vault/vault"
)
func init() {
rand.Seed(time.Now().UnixNano())
}
func TestMigration(t *testing.T) {
t.Run("Default", func(t *testing.T) {
data := generateData()
fromFactory := physicalBackends["file"]
folder := filepath.Join(os.TempDir(), testhelpers.RandomWithPrefix("migrator"))
defer os.RemoveAll(folder)
confFrom := map[string]string{
"path": folder,
}
from, err := fromFactory(confFrom, nil)
if err != nil {
t.Fatal(err)
}
if err := storeData(from, data); err != nil {
t.Fatal(err)
}
toFactory := physicalBackends["inmem"]
confTo := map[string]string{}
to, err := toFactory(confTo, nil)
if err != nil {
t.Fatal(err)
}
cmd := OperatorMigrateCommand{
logger: log.NewNullLogger(),
}
if err := cmd.migrateAll(context.Background(), from, to); err != nil {
t.Fatal(err)
}
if err := compareStoredData(to, data, ""); err != nil {
t.Fatal(err)
}
})
t.Run("Start option", func(t *testing.T) {
data := generateData()
fromFactory := physicalBackends["inmem"]
confFrom := map[string]string{}
from, err := fromFactory(confFrom, nil)
if err != nil {
t.Fatal(err)
}
if err := storeData(from, data); err != nil {
t.Fatal(err)
}
toFactory := physicalBackends["file"]
folder := filepath.Join(os.TempDir(), testhelpers.RandomWithPrefix("migrator"))
defer os.RemoveAll(folder)
confTo := map[string]string{
"path": folder,
}
to, err := toFactory(confTo, nil)
if err != nil {
t.Fatal(err)
}
const start = "m"
cmd := OperatorMigrateCommand{
logger: log.NewNullLogger(),
flagStart: start,
}
if err := cmd.migrateAll(context.Background(), from, to); err != nil {
t.Fatal(err)
}
if err := compareStoredData(to, data, start); err != nil {
t.Fatal(err)
}
})
t.Run("Config parsing", func(t *testing.T) {
cmd := new(OperatorMigrateCommand)
cfgName := filepath.Join(os.TempDir(), testhelpers.RandomWithPrefix("migrator"))
ioutil.WriteFile(cfgName, []byte(`
storage_source "src_type" {
path = "src_path"
}
storage_destination "dest_type" {
path = "dest_path"
}`), 0644)
defer os.Remove(cfgName)
expCfg := &migratorConfig{
StorageSource: &server.Storage{
Type: "src_type",
Config: map[string]string{
"path": "src_path",
},
},
StorageDestination: &server.Storage{
Type: "dest_type",
Config: map[string]string{
"path": "dest_path",
},
},
}
cfg, err := cmd.loadMigratorConfig(cfgName)
if err != nil {
t.Fatal(err)
}
if diff := deep.Equal(cfg, expCfg); diff != nil {
t.Fatal(diff)
}
verifyBad := func(cfg string) {
ioutil.WriteFile(cfgName, []byte(cfg), 0644)
_, err := cmd.loadMigratorConfig(cfgName)
if err == nil {
t.Fatalf("expected error but none received from: %v", cfg)
}
}
// missing source
verifyBad(`
storage_destination "dest_type" {
path = "dest_path"
}`)
// missing destination
verifyBad(`
storage_source "src_type" {
path = "src_path"
}`)
// duplicate source
verifyBad(`
storage_source "src_type" {
path = "src_path"
}
storage_source "src_type2" {
path = "src_path"
}
storage_destination "dest_type" {
path = "dest_path"
}`)
// duplicate destination
verifyBad(`
storage_source "src_type" {
path = "src_path"
}
storage_destination "dest_type" {
path = "dest_path"
}
storage_destination "dest_type2" {
path = "dest_path"
}`)
})
t.Run("DFS Scan", func(t *testing.T) {
s, _ := physicalBackends["inmem"](map[string]string{}, nil)
data := generateData()
data["cc"] = []byte{}
data["c/d/e/f"] = []byte{}
data["c/d/e/g"] = []byte{}
data["c"] = []byte{}
storeData(s, data)
l := randomLister{s}
var out []string
dfsScan(context.Background(), l, func(ctx context.Context, path string) error {
out = append(out, path)
return nil
})
var keys []string
for key := range data {
keys = append(keys, key)
}
sort.Strings(keys)
if !reflect.DeepEqual(keys, out) {
t.Fatalf("expected equal: %v, %v", keys, out)
}
})
}
// randomLister wraps a physical backend, providing a List method
// that returns results in a random order.
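// It exists to exercise the sort.Strings call inside dfsScan: traversal
// order must stay lexicographic no matter what order the backend's List
// returns.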
type randomLister struct {
b physical.Backend
}
func (l randomLister) List(ctx context.Context, path string) ([]string, error) {
result, err := l.b.List(ctx, path)
if err != nil {
return nil, err
}
rand.Shuffle(len(result), func(i, j int) {
result[i], result[j] = result[j], result[i]
})
return result, err
}
func (l randomLister) Get(ctx context.Context, path string) (*physical.Entry, error) {
return l.b.Get(ctx, path)
}
func (l randomLister) Put(ctx context.Context, entry *physical.Entry) error {
return l.b.Put(ctx, entry)
}
func (l randomLister) Delete(ctx context.Context, path string) error {
return l.b.Delete(ctx, path)
}
// generateData creates a map of 500 random keys and values
func generateData() map[string][]byte {
result := make(map[string][]byte)
for i := 0; i < 500; i++ {
segments := make([]string, rand.Intn(8)+1)
for j := 0; j < len(segments); j++ {
s, _ := base62.Random(6)
segments[j] = s
}
data := make([]byte, 100)
rand.Read(data)
result[strings.Join(segments, "/")] = data
}
// Add special keys that should be excluded from migration
result[storageMigrationLock] = []byte{}
result[vault.CoreLockPath] = []byte{}
return result
}
func storeData(s physical.Backend, ref map[string][]byte) error {
for k, v := range ref {
entry := physical.Entry{
Key: k,
Value: v,
}
err := s.Put(context.Background(), &entry)
if err != nil {
return err
}
}
return nil
}
func compareStoredData(s physical.Backend, ref map[string][]byte, start string) error {
for k, v := range ref {
entry, err := s.Get(context.Background(), k)
if err != nil {
return err
}
if k == storageMigrationLock || k == vault.CoreLockPath {
if entry == nil {
continue
}
return fmt.Errorf("key found that should have been excluded: %s", k)
}
if k >= start {
if entry == nil {
return fmt.Errorf("key not found: %s", k)
}
if !bytes.Equal(v, entry.Value) {
return fmt.Errorf("values differ for key: %s", k)
}
} else {
if entry != nil {
return fmt.Errorf("found key the should have been skipped by start option: %s", k)
}
}
}
return nil
}
| command/operator_migrate_test.go | 1 | https://github.com/hashicorp/vault/commit/a7531526abdb2cbad7a348bf618487eca6b05eb7 | [
0.9976248145103455,
0.06261100620031357,
0.00016406213399022818,
0.00017292069969698787,
0.24024759232997894
] |
{
"id": 2,
"code_window": [
"\t\tdfsScan(context.Background(), l, func(ctx context.Context, path string) error {\n",
"\t\t\tout = append(out, path)\n",
"\t\t\treturn nil\n",
"\t\t})\n",
"\n",
"\t\tvar keys []string\n",
"\t\tfor key := range data {\n",
"\t\t\tkeys = append(keys, key)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tdelete(data, trailing_slash_key)\n",
"\t\tdelete(data, \"\")\n",
"\n"
],
"file_path": "command/operator_migrate_test.go",
"type": "add",
"edit_start_line_idx": 211
} | // Copyright 2013 go-dockerclient authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package docker
import (
"encoding/json"
"net"
"strings"
)
// Version returns version information about the docker server.
//
// See https://goo.gl/mU7yje for more details.
func (c *Client) Version() (*Env, error) {
resp, err := c.do("GET", "/version", doOptions{})
if err != nil {
return nil, err
}
defer resp.Body.Close()
var env Env
if err := env.Decode(resp.Body); err != nil {
return nil, err
}
return &env, nil
}
// DockerInfo contains information about the Docker server
//
// See https://goo.gl/bHUoz9 for more details.
type DockerInfo struct {
ID string
Containers int
ContainersRunning int
ContainersPaused int
ContainersStopped int
Images int
Driver string
DriverStatus [][2]string
SystemStatus [][2]string
Plugins PluginsInfo
MemoryLimit bool
SwapLimit bool
KernelMemory bool
CPUCfsPeriod bool `json:"CpuCfsPeriod"`
CPUCfsQuota bool `json:"CpuCfsQuota"`
CPUShares bool
CPUSet bool
IPv4Forwarding bool
BridgeNfIptables bool
BridgeNfIP6tables bool `json:"BridgeNfIp6tables"`
Debug bool
OomKillDisable bool
ExperimentalBuild bool
NFd int
NGoroutines int
SystemTime string
ExecutionDriver string
LoggingDriver string
CgroupDriver string
NEventsListener int
KernelVersion string
OperatingSystem string
OSType string
Architecture string
IndexServerAddress string
RegistryConfig *ServiceConfig
SecurityOptions []string
NCPU int
MemTotal int64
DockerRootDir string
HTTPProxy string `json:"HttpProxy"`
HTTPSProxy string `json:"HttpsProxy"`
NoProxy string
Name string
Labels []string
ServerVersion string
ClusterStore string
ClusterAdvertise string
Isolation string
InitBinary string
DefaultRuntime string
LiveRestoreEnabled bool
//Swarm swarm.Info
}
// PluginsInfo is a struct with the plugins registered with the docker daemon
//
// for more information, see: https://goo.gl/bHUoz9
type PluginsInfo struct {
// List of Volume plugins registered
Volume []string
// List of Network plugins registered
Network []string
// List of Authorization plugins registered
Authorization []string
}
// ServiceConfig stores daemon registry services configuration.
//
// for more information, see: https://goo.gl/7iFFDz
type ServiceConfig struct {
InsecureRegistryCIDRs []*NetIPNet
IndexConfigs map[string]*IndexInfo
Mirrors []string
}
// NetIPNet is the net.IPNet type, which can be marshalled and
// unmarshalled to JSON.
//
// for more information, see: https://goo.gl/7iFFDz
type NetIPNet net.IPNet
// MarshalJSON returns the JSON representation of the IPNet.
//
func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) {
return json.Marshal((*net.IPNet)(ipnet).String())
}
// UnmarshalJSON sets the IPNet from a byte array of JSON.
//
func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) {
var ipnetStr string
if err = json.Unmarshal(b, &ipnetStr); err == nil {
var cidr *net.IPNet
if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil {
*ipnet = NetIPNet(*cidr)
}
}
return
}
// IndexInfo contains information about a registry.
//
// for more information, see: https://goo.gl/7iFFDz
type IndexInfo struct {
Name string
Mirrors []string
Secure bool
Official bool
}
// Info returns system-wide information about the Docker server.
//
// See https://goo.gl/ElTHi2 for more details.
func (c *Client) Info() (*DockerInfo, error) {
resp, err := c.do("GET", "/info", doOptions{})
if err != nil {
return nil, err
}
defer resp.Body.Close()
var info DockerInfo
if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
return nil, err
}
return &info, nil
}
// ParseRepositoryTag gets the name of the repository and returns it split
// in two parts: the repository and the tag. It ignores the digest when it is
// present.
//
// Some examples:
//
// localhost.localdomain:5000/samalba/hipache:latest -> localhost.localdomain:5000/samalba/hipache, latest
// localhost.localdomain:5000/samalba/hipache -> localhost.localdomain:5000/samalba/hipache, ""
// busybox:latest@sha256:4a731fb46adc5cefe3ae374a8b6020fc1b6ad667a279647766e9a3cd89f6fa92 -> busybox, latest
func ParseRepositoryTag(repoTag string) (repository string, tag string) {
parts := strings.SplitN(repoTag, "@", 2)
repoTag = parts[0]
n := strings.LastIndex(repoTag, ":")
if n < 0 {
return repoTag, ""
}
if tag := repoTag[n+1:]; !strings.Contains(tag, "/") {
return repoTag[:n], tag
}
return repoTag, ""
}
| vendor/github.com/ory/dockertest/docker/misc.go | 0 | https://github.com/hashicorp/vault/commit/a7531526abdb2cbad7a348bf618487eca6b05eb7 | [
0.00023323211644310504,
0.00017440188094042242,
0.0001643049472477287,
0.00016831779794301838,
0.00001515519034001045
] |
{
"id": 2,
"code_window": [
"\t\tdfsScan(context.Background(), l, func(ctx context.Context, path string) error {\n",
"\t\t\tout = append(out, path)\n",
"\t\t\treturn nil\n",
"\t\t})\n",
"\n",
"\t\tvar keys []string\n",
"\t\tfor key := range data {\n",
"\t\t\tkeys = append(keys, key)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tdelete(data, trailing_slash_key)\n",
"\t\tdelete(data, \"\")\n",
"\n"
],
"file_path": "command/operator_migrate_test.go",
"type": "add",
"edit_start_line_idx": 211
} | <PageHeader as |p|>
<p.top>
{{key-value-header
baseKey=model
path="vault.cluster.secrets.backend.list"
mode=mode
root=root
showCurrent=true
}}
</p.top>
<p.levelLeft>
<h1 class="title is-3" data-test-secret-header="true">
{{#if (eq mode "create") }}
Create a PKI Role
{{else if (eq mode 'edit')}}
Edit PKI Role
{{else}}
PKI Role <code>{{model.id}}</code>
{{/if}}
</h1>
</p.levelLeft>
<p.levelRight>
<div class="field is-grouped">
{{#if (eq mode "show") }}
{{#if (or model.canUpdate model.canDelete)}}
<div class="control">
{{#secret-link
secret=model.id
mode="edit"
replace=true
class="button has-icon-right is-ghost is-compact"
data-test-edit-link=true
}}
Edit role
{{i-con glyph="chevron-right" size=11}}
{{/secret-link}}
</div>
{{/if}}
{{#if model.canGenerate}}
<div class="control">
{{#secret-link
mode="credentials"
secret=model.id
queryParams=(query-params action="issue")
class="button has-icon-right is-ghost is-compact"
data-test-credentials-link=true
}}
Generate Certificate
{{i-con glyph="chevron-right" size=11}}
{{/secret-link}}
</div>
{{/if}}
{{#if model.canSign}}
<div class="control">
{{#secret-link
mode="credentials"
secret=model.id
queryParams=(query-params action="sign")
class="button has-icon-right is-ghost is-compact"
data-test-sign-link=true
}}
Sign Certificate
{{i-con glyph="chevron-right" size=11}}
{{/secret-link}}
</div>
{{/if}}
{{/if}}
</div>
</p.levelRight>
</PageHeader>
{{#if (or (eq mode 'edit') (eq mode 'create'))}}
{{partial 'partials/role-pki/form'}}
{{else}}
{{partial 'partials/role-pki/show'}}
{{/if}}
| ui/app/templates/components/role-pki-edit.hbs | 0 | https://github.com/hashicorp/vault/commit/a7531526abdb2cbad7a348bf618487eca6b05eb7 | [
0.00017548285541124642,
0.00017289046081714332,
0.00016929820412769914,
0.00017357859178446233,
0.0000019281069398857653
] |
{
"id": 2,
"code_window": [
"\t\tdfsScan(context.Background(), l, func(ctx context.Context, path string) error {\n",
"\t\t\tout = append(out, path)\n",
"\t\t\treturn nil\n",
"\t\t})\n",
"\n",
"\t\tvar keys []string\n",
"\t\tfor key := range data {\n",
"\t\t\tkeys = append(keys, key)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tdelete(data, trailing_slash_key)\n",
"\t\tdelete(data, \"\")\n",
"\n"
],
"file_path": "command/operator_migrate_test.go",
"type": "add",
"edit_start_line_idx": 211
} | package reflect2
import (
"reflect"
"unsafe"
)
type UnsafeArrayType struct {
unsafeType
elemRType unsafe.Pointer
pElemRType unsafe.Pointer
elemSize uintptr
likePtr bool
}
func newUnsafeArrayType(cfg *frozenConfig, type1 reflect.Type) *UnsafeArrayType {
return &UnsafeArrayType{
unsafeType: *newUnsafeType(cfg, type1),
elemRType: unpackEFace(type1.Elem()).data,
pElemRType: unpackEFace(reflect.PtrTo(type1.Elem())).data,
elemSize: type1.Elem().Size(),
likePtr: likePtrType(type1),
}
}
func (type2 *UnsafeArrayType) LikePtr() bool {
return type2.likePtr
}
func (type2 *UnsafeArrayType) Indirect(obj interface{}) interface{} {
objEFace := unpackEFace(obj)
assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
return type2.UnsafeIndirect(objEFace.data)
}
func (type2 *UnsafeArrayType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
if type2.likePtr {
return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr))
}
return packEFace(type2.rtype, ptr)
}
func (type2 *UnsafeArrayType) SetIndex(obj interface{}, index int, elem interface{}) {
objEFace := unpackEFace(obj)
assertType("ArrayType.SetIndex argument 1", type2.ptrRType, objEFace.rtype)
elemEFace := unpackEFace(elem)
assertType("ArrayType.SetIndex argument 3", type2.pElemRType, elemEFace.rtype)
type2.UnsafeSetIndex(objEFace.data, index, elemEFace.data)
}
func (type2 *UnsafeArrayType) UnsafeSetIndex(obj unsafe.Pointer, index int, elem unsafe.Pointer) {
elemPtr := arrayAt(obj, index, type2.elemSize, "i < s.Len")
typedmemmove(type2.elemRType, elemPtr, elem)
}
func (type2 *UnsafeArrayType) GetIndex(obj interface{}, index int) interface{} {
objEFace := unpackEFace(obj)
assertType("ArrayType.GetIndex argument 1", type2.ptrRType, objEFace.rtype)
elemPtr := type2.UnsafeGetIndex(objEFace.data, index)
return packEFace(type2.pElemRType, elemPtr)
}
func (type2 *UnsafeArrayType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer {
return arrayAt(obj, index, type2.elemSize, "i < s.Len")
}
| vendor/github.com/modern-go/reflect2/unsafe_array.go | 0 | https://github.com/hashicorp/vault/commit/a7531526abdb2cbad7a348bf618487eca6b05eb7 | [
0.00017281361215282232,
0.0001672635698923841,
0.00016408028022851795,
0.00016665642033331096,
0.0000024825540094752796
] |
{
"id": 3,
"code_window": [
"\t// Add special keys that should be excluded from migration\n",
"\tresult[storageMigrationLock] = []byte{}\n",
"\tresult[vault.CoreLockPath] = []byte{}\n",
"\n",
"\treturn result\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// Empty keys are now prevented in Vault, but older data sets\n",
"\t// might contain them.\n",
"\tresult[\"\"] = []byte{}\n",
"\tresult[trailing_slash_key] = []byte{}\n",
"\n"
],
"file_path": "command/operator_migrate_test.go",
"type": "add",
"edit_start_line_idx": 269
} | package command
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"reflect"
"sort"
"strings"
"testing"
"time"
"github.com/go-test/deep"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/command/server"
"github.com/hashicorp/vault/helper/base62"
"github.com/hashicorp/vault/helper/testhelpers"
"github.com/hashicorp/vault/physical"
"github.com/hashicorp/vault/vault"
)
func init() {
rand.Seed(time.Now().UnixNano())
}
func TestMigration(t *testing.T) {
t.Run("Default", func(t *testing.T) {
data := generateData()
fromFactory := physicalBackends["file"]
folder := filepath.Join(os.TempDir(), testhelpers.RandomWithPrefix("migrator"))
defer os.RemoveAll(folder)
confFrom := map[string]string{
"path": folder,
}
from, err := fromFactory(confFrom, nil)
if err != nil {
t.Fatal(err)
}
if err := storeData(from, data); err != nil {
t.Fatal(err)
}
toFactory := physicalBackends["inmem"]
confTo := map[string]string{}
to, err := toFactory(confTo, nil)
if err != nil {
t.Fatal(err)
}
cmd := OperatorMigrateCommand{
logger: log.NewNullLogger(),
}
if err := cmd.migrateAll(context.Background(), from, to); err != nil {
t.Fatal(err)
}
if err := compareStoredData(to, data, ""); err != nil {
t.Fatal(err)
}
})
t.Run("Start option", func(t *testing.T) {
data := generateData()
fromFactory := physicalBackends["inmem"]
confFrom := map[string]string{}
from, err := fromFactory(confFrom, nil)
if err != nil {
t.Fatal(err)
}
if err := storeData(from, data); err != nil {
t.Fatal(err)
}
toFactory := physicalBackends["file"]
folder := filepath.Join(os.TempDir(), testhelpers.RandomWithPrefix("migrator"))
defer os.RemoveAll(folder)
confTo := map[string]string{
"path": folder,
}
to, err := toFactory(confTo, nil)
if err != nil {
t.Fatal(err)
}
const start = "m"
cmd := OperatorMigrateCommand{
logger: log.NewNullLogger(),
flagStart: start,
}
if err := cmd.migrateAll(context.Background(), from, to); err != nil {
t.Fatal(err)
}
if err := compareStoredData(to, data, start); err != nil {
t.Fatal(err)
}
})
t.Run("Config parsing", func(t *testing.T) {
cmd := new(OperatorMigrateCommand)
cfgName := filepath.Join(os.TempDir(), testhelpers.RandomWithPrefix("migrator"))
ioutil.WriteFile(cfgName, []byte(`
storage_source "src_type" {
path = "src_path"
}
storage_destination "dest_type" {
path = "dest_path"
}`), 0644)
defer os.Remove(cfgName)
expCfg := &migratorConfig{
StorageSource: &server.Storage{
Type: "src_type",
Config: map[string]string{
"path": "src_path",
},
},
StorageDestination: &server.Storage{
Type: "dest_type",
Config: map[string]string{
"path": "dest_path",
},
},
}
cfg, err := cmd.loadMigratorConfig(cfgName)
if err != nil {
t.Fatal(err)
}
if diff := deep.Equal(cfg, expCfg); diff != nil {
t.Fatal(diff)
}
verifyBad := func(cfg string) {
ioutil.WriteFile(cfgName, []byte(cfg), 0644)
_, err := cmd.loadMigratorConfig(cfgName)
if err == nil {
t.Fatalf("expected error but none received from: %v", cfg)
}
}
// missing source
verifyBad(`
storage_destination "dest_type" {
path = "dest_path"
}`)
// missing destination
verifyBad(`
storage_source "src_type" {
path = "src_path"
}`)
// duplicate source
verifyBad(`
storage_source "src_type" {
path = "src_path"
}
storage_source "src_type2" {
path = "src_path"
}
storage_destination "dest_type" {
path = "dest_path"
}`)
// duplicate destination
verifyBad(`
storage_source "src_type" {
path = "src_path"
}
storage_destination "dest_type" {
path = "dest_path"
}
storage_destination "dest_type2" {
path = "dest_path"
}`)
})
t.Run("DFS Scan", func(t *testing.T) {
s, _ := physicalBackends["inmem"](map[string]string{}, nil)
data := generateData()
data["cc"] = []byte{}
data["c/d/e/f"] = []byte{}
data["c/d/e/g"] = []byte{}
data["c"] = []byte{}
storeData(s, data)
l := randomLister{s}
var out []string
dfsScan(context.Background(), l, func(ctx context.Context, path string) error {
out = append(out, path)
return nil
})
var keys []string
for key := range data {
keys = append(keys, key)
}
sort.Strings(keys)
if !reflect.DeepEqual(keys, out) {
t.Fatalf("expected equal: %v, %v", keys, out)
}
})
}
// randomLister wraps a physical backend, providing a List method
// that returns results in a random order.
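// It exists to exercise the sort.Strings call inside dfsScan: traversal
// order must stay lexicographic no matter what order the backend's List
// returns.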
type randomLister struct {
b physical.Backend
}
func (l randomLister) List(ctx context.Context, path string) ([]string, error) {
result, err := l.b.List(ctx, path)
if err != nil {
return nil, err
}
rand.Shuffle(len(result), func(i, j int) {
result[i], result[j] = result[j], result[i]
})
return result, err
}
func (l randomLister) Get(ctx context.Context, path string) (*physical.Entry, error) {
return l.b.Get(ctx, path)
}
func (l randomLister) Put(ctx context.Context, entry *physical.Entry) error {
return l.b.Put(ctx, entry)
}
func (l randomLister) Delete(ctx context.Context, path string) error {
return l.b.Delete(ctx, path)
}
// generateData creates a map of 500 random keys and values
func generateData() map[string][]byte {
result := make(map[string][]byte)
for i := 0; i < 500; i++ {
segments := make([]string, rand.Intn(8)+1)
for j := 0; j < len(segments); j++ {
s, _ := base62.Random(6)
segments[j] = s
}
data := make([]byte, 100)
rand.Read(data)
result[strings.Join(segments, "/")] = data
}
// Add special keys that should be excluded from migration
result[storageMigrationLock] = []byte{}
result[vault.CoreLockPath] = []byte{}
return result
}
func storeData(s physical.Backend, ref map[string][]byte) error {
for k, v := range ref {
entry := physical.Entry{
Key: k,
Value: v,
}
err := s.Put(context.Background(), &entry)
if err != nil {
return err
}
}
return nil
}
func compareStoredData(s physical.Backend, ref map[string][]byte, start string) error {
for k, v := range ref {
entry, err := s.Get(context.Background(), k)
if err != nil {
return err
}
if k == storageMigrationLock || k == vault.CoreLockPath {
if entry == nil {
continue
}
return fmt.Errorf("key found that should have been excluded: %s", k)
}
if k >= start {
if entry == nil {
return fmt.Errorf("key not found: %s", k)
}
if !bytes.Equal(v, entry.Value) {
return fmt.Errorf("values differ for key: %s", k)
}
} else {
if entry != nil {
return fmt.Errorf("found key the should have been skipped by start option: %s", k)
}
}
}
return nil
}
| command/operator_migrate_test.go | 1 | https://github.com/hashicorp/vault/commit/a7531526abdb2cbad7a348bf618487eca6b05eb7 | [
0.996992826461792,
0.03444498032331467,
0.00016601663082838058,
0.00017174810636788607,
0.1735222041606903
] |
{
"id": 3,
"code_window": [
"\t// Add special keys that should be excluded from migration\n",
"\tresult[storageMigrationLock] = []byte{}\n",
"\tresult[vault.CoreLockPath] = []byte{}\n",
"\n",
"\treturn result\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// Empty keys are now prevented in Vault, but older data sets\n",
"\t// might contain them.\n",
"\tresult[\"\"] = []byte{}\n",
"\tresult[trailing_slash_key] = []byte{}\n",
"\n"
],
"file_path": "command/operator_migrate_test.go",
"type": "add",
"edit_start_line_idx": 269
} | package protocol
import (
"strconv"
"time"
)
// Names of time formats supported by the SDK
const (
RFC822TimeFormatName = "rfc822"
ISO8601TimeFormatName = "iso8601"
UnixTimeFormatName = "unixTimestamp"
)
// Time formats supported by the SDK
const (
// RFC 7231#section-7.1.1.1 timestamp format. e.g. Tue, 29 Apr 2014 18:30:38 GMT
RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
// RFC3339, a subset of the ISO8601 timestamp format. e.g. 2014-04-29T18:30:38Z
ISO8601TimeFormat = "2006-01-02T15:04:05Z"
)
// IsKnownTimestampFormat returns if the timestamp format name
// is known to the SDK's protocols.
func IsKnownTimestampFormat(name string) bool {
switch name {
case RFC822TimeFormatName:
fallthrough
case ISO8601TimeFormatName:
fallthrough
case UnixTimeFormatName:
return true
default:
return false
}
}
// FormatTime returns a string value of the time.
func FormatTime(name string, t time.Time) string {
t = t.UTC()
switch name {
case RFC822TimeFormatName:
return t.Format(RFC822TimeFormat)
case ISO8601TimeFormatName:
return t.Format(ISO8601TimeFormat)
case UnixTimeFormatName:
return strconv.FormatInt(t.Unix(), 10)
default:
panic("unknown timestamp format name, " + name)
}
}
// ParseTime attempts to parse the time given the format. Returns
// the time if it was able to be parsed, and fails otherwise.
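//
// Example (sketch), round-tripping the timestamps shown in the constants above:
// t, _ := ParseTime(ISO8601TimeFormatName, "2014-04-29T18:30:38Z")
// s := FormatTime(RFC822TimeFormatName, t) // "Tue, 29 Apr 2014 18:30:38 GMT"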
func ParseTime(formatName, value string) (time.Time, error) {
switch formatName {
case RFC822TimeFormatName:
return time.Parse(RFC822TimeFormat, value)
case ISO8601TimeFormatName:
return time.Parse(ISO8601TimeFormat, value)
case UnixTimeFormatName:
v, err := strconv.ParseFloat(value, 64)
if err != nil {
return time.Time{}, err
}
return time.Unix(int64(v), 0), nil
default:
panic("unknown timestamp format name, " + formatName)
}
}
| vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go | 0 | https://github.com/hashicorp/vault/commit/a7531526abdb2cbad7a348bf618487eca6b05eb7 | [
0.00019284122390672565,
0.00017349640256725252,
0.00016458875325042754,
0.00017177574045490474,
0.000008140501449815929
] |
{
"id": 3,
"code_window": [
"\t// Add special keys that should be excluded from migration\n",
"\tresult[storageMigrationLock] = []byte{}\n",
"\tresult[vault.CoreLockPath] = []byte{}\n",
"\n",
"\treturn result\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// Empty keys are now prevented in Vault, but older data sets\n",
"\t// might contain them.\n",
"\tresult[\"\"] = []byte{}\n",
"\tresult[trailing_slash_key] = []byte{}\n",
"\n"
],
"file_path": "command/operator_migrate_test.go",
"type": "add",
"edit_start_line_idx": 269
} | ---
layout: "api"
page_title: "AppRole - Auth Methods - HTTP API"
sidebar_title: "AppRole"
sidebar_current: "api-http-auth-approle"
description: |-
This is the API documentation for the Vault AppRole auth method.
---
# AppRole Auth Method (API)
This is the API documentation for the Vault AppRole auth method. For
general information about the usage and operation of the AppRole method, please
see the [Vault AppRole method documentation](/docs/auth/approle.html).
This documentation assumes the AppRole method is mounted at the `/auth/approle`
path in Vault. Since it is possible to enable auth methods at any location,
please update your API calls accordingly.
## List Roles
This endpoint returns a list of the existing AppRoles in the method.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `LIST` | `/auth/approle/role` | `200 application/json` |
### Sample Request
```
$ curl \
--header "X-Vault-Token: ..." \
--request LIST \
http://127.0.0.1:8200/v1/auth/approle/role
```
### Sample Response
```json
{
"auth": null,
"warnings": null,
"wrap_info": null,
"data": {
"keys": [
"dev",
"prod",
"test"
]
},
"lease_duration": 0,
"renewable": false,
"lease_id": ""
}
```
## Create/Update AppRole
Creates a new AppRole or updates an existing AppRole. This endpoint
supports both `create` and `update` capabilities. There can be one or more
constraints enabled on the role. It is required to have at least one of them
enabled while creating or updating a role.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `POST` | `/auth/approle/role/:role_name` | `204 (empty body)` |
### Parameters
- `role_name` `(string: <required>)` - Name of the AppRole.
- `bind_secret_id` `(bool: true)` - Require `secret_id` to be presented when
logging in using this AppRole.
- `secret_id_bound_cidrs` `(array: [])` - Comma-separated string or list of CIDR
blocks; if set, specifies blocks of IP addresses which can perform the login
operation.
- `token_bound_cidrs` `(array: [])` - Comma-separated string or list of CIDR
blocks; if set, specifies blocks of IP addresses which can use the auth tokens
generated by this role.
- `policies` `(array: [])` - Comma-separated list of policies set on tokens
issued via this AppRole.
- `secret_id_num_uses` `(integer: 0)` - Number of times any particular SecretID
can be used to fetch a token from this AppRole, after which the SecretID will
expire. A value of zero will allow unlimited uses.
- `secret_id_ttl` `(string: "")` - Duration in either an integer number of
seconds (`3600`) or an integer time unit (`60m`) after which any SecretID
expires.
- `token_num_uses` `(integer: 0)` - Number of times issued tokens can be used.
A value of 0 means unlimited uses.
- `token_ttl` `(string: "")` - Duration in either an integer number of seconds
(`3600`) or an integer time unit (`60m`) to set as the TTL for issued tokens
and at renewal time.
- `token_max_ttl` `(string: "")` - Duration in either an integer number of
seconds (`3600`) or an integer time unit (`60m`) after which the issued token
can no longer be renewed.
- `period` `(string: "")` - Duration in either an integer number of seconds
(`3600`) or an integer time unit (`60m`). If set, the token generated using
this AppRole is a _periodic_ token; so long as it is renewed it never expires,
but the TTL set on the token at each renewal is fixed to the value specified
here. If this value is modified, the token will pick up the new value at its
next renewal.
- `enable_local_secret_ids` `(bool: false)` - If set, the secret IDs generated
using this role will be cluster local. This can only be set during role
creation and once set, it can't be reset later.
- `token_type` `(string: "")` - The type of token that should be generated via
this role. Can be `service`, `batch`, or `default` to use the mount's default
(which unless changed will be `service` tokens).
### Sample Payload
```json
{
"token_ttl": "10m",
"token_max_ttl": "15m",
"policies": [
"default"
],
"period": 0,
"bind_secret_id": true
}
```
### Sample Request
```
$ curl \
--header "X-Vault-Token: ..." \
--request POST \
--data @payload.json \
http://127.0.0.1:8200/v1/auth/approle/role/application1
```
## Read AppRole
Reads the properties of an existing AppRole.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `GET` | `/auth/approle/role/:role_name` | `200 application/json` |
### Parameters
- `role_name` `(string: <required>)` - Name of the AppRole.
### Sample Request
```
$ curl \
--header "X-Vault-Token: ..." \
http://127.0.0.1:8200/v1/auth/approle/role/application1
```
### Sample Response
```json
{
"auth": null,
"warnings": null,
"wrap_info": null,
"data": {
"token_ttl": 1200,
"token_max_ttl": 1800,
"secret_id_ttl": 600,
"secret_id_num_uses": 40,
"policies": [
"default"
],
"period": 0,
"bind_secret_id": true,
"bound_cidr_list": []
},
"lease_duration": 0,
"renewable": false,
"lease_id": ""
}
```
## Delete AppRole
Deletes an existing AppRole from the method.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `DELETE` | `/auth/approle/role/:role_name` | `204 (empty body)` |
### Parameters
- `role_name` `(string: <required>)` - Name of the AppRole.
### Sample Request
```
$ curl \
--header "X-Vault-Token: ..." \
--request DELETE \
http://127.0.0.1:8200/v1/auth/approle/role/application1
```
## Read AppRole Role ID
Reads the RoleID of an existing AppRole.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `GET` | `/auth/approle/role/:role_name/role-id` | `200 application/json` |
### Parameters
- `role_name` `(string: <required>)` - Name of the AppRole.
### Sample Request
```
$ curl \
--header "X-Vault-Token: ..." \
http://127.0.0.1:8200/v1/auth/approle/role/application1/role-id
```
### Sample Response
```json
{
"auth": null,
"warnings": null,
"wrap_info": null,
"data": {
"role_id": "e5a7b66e-5d08-da9c-7075-71984634b882"
},
"lease_duration": 0,
"renewable": false,
"lease_id": ""
}
```
## Update AppRole Role ID
Updates the RoleID of an existing AppRole to a custom value.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `POST` | `/auth/approle/role/:role_name/role-id` | `204 (empty body)` |
### Parameters
- `role_name` `(string: <required>)` - Name of the AppRole.
- `role_id` `(string: <required>)` - Value to be set as RoleID.
### Sample Payload
```json
{
"role_id": "custom-role-id"
}
```
### Sample Request
```
$ curl \
--header "X-Vault-Token: ..." \
--request POST \
--data @payload.json \
http://127.0.0.1:8200/v1/auth/approle/role/application1/role-id
```
### Sample Response
```json
{
"auth": null,
"warnings": null,
"wrap_info": null,
"data": {
"role_id": "e5a7b66e-5d08-da9c-7075-71984634b882"
},
"lease_duration": 0,
"renewable": false,
"lease_id": ""
}
```
## Generate New Secret ID
Generates and issues a new SecretID on an existing AppRole. Similar to
tokens, the response will also contain a `secret_id_accessor` value which can
be used to read the properties of the SecretID without divulging the SecretID
itself, and also to delete the SecretID from the AppRole.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `POST` | `/auth/approle/role/:role_name/secret-id` | `200 application/json` |
### Parameters
- `role_name` `(string: <required>)` - Name of the AppRole.
- `metadata` `(string: "")` - Metadata to be tied to the SecretID. This should be
a JSON-formatted string containing the metadata in key-value pairs. This
metadata will be set on tokens issued with this SecretID, and is logged in
audit logs _in plaintext_.
- `cidr_list` `(array: [])` - Comma separated string or list of CIDR blocks
enforcing secret IDs to be used from specific set of IP addresses. If
`bound_cidr_list` is set on the role, then the list of CIDR blocks listed
here should be a subset of the CIDR blocks listed on the role.
- `token_bound_cidrs` `(array: [])` - Comma-separated string or list of CIDR
blocks; if set, specifies blocks of IP addresses which can use the auth tokens
generated by this SecretID. Overrides any role-set value but must be a subset.
### Sample Payload
```json
{
"metadata": "{ \"tag1\": \"production\" }"
}
```
### Sample Request
```
$ curl \
--header "X-Vault-Token: ..." \
--request POST \
--data @payload.json \
http://127.0.0.1:8200/v1/auth/approle/role/application1/secret-id
```
### Sample Response
```json
{
"auth": null,
"warnings": null,
"wrap_info": null,
"data": {
"secret_id_accessor": "84896a0c-1347-aa90-a4f6-aca8b7558780",
"secret_id": "841771dc-11c9-bbc7-bcac-6a3945a69cd9"
},
"lease_duration": 0,
"renewable": false,
"lease_id": ""
}
```
## List Secret ID Accessors
Lists the accessors of all the SecretIDs issued against the AppRole.
This includes the accessors for "custom" SecretIDs as well.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `LIST` | `/auth/approle/role/:role_name/secret-id` | `200 application/json` |
### Parameters
- `role_name` `(string: <required>)` - Name of the AppRole.
### Sample Request
```
$ curl \
--header "X-Vault-Token: ..." \
--request LIST \
http://127.0.0.1:8200/v1/auth/approle/role/application1/secret-id
```
### Sample Response
```json
{
"auth": null,
"warnings": null,
"wrap_info": null,
"data": {
"keys": [
"ce102d2a-8253-c437-bf9a-aceed4241491",
"a1c8dee4-b869-e68d-3520-2040c1a0849a",
"be83b7e2-044c-7244-07e1-47560ca1c787",
"84896a0c-1347-aa90-a4f6-aca8b7558780",
"239b1328-6523-15e7-403a-a48038cdc45a"
]
},
"lease_duration": 0,
"renewable": false,
"lease_id": ""
}
```
## Read AppRole Secret ID
Reads out the properties of a SecretID.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `POST` | `/auth/approle/role/:role_name/secret-id/lookup` | `200 application/json` |
### Parameters
- `role_name` `(string: <required>)` - Name of the AppRole.
- `secret_id` `(string: <required>)` - Secret ID attached to the role.
### Sample Payload
```json
{
"secret_id": "84896a0c-1347-aa90-a4f6-aca8b7558780"
}
```
### Sample Request
```
$ curl \
--header "X-Vault-Token: ..." \
--request POST \
--data @payload.json \
http://127.0.0.1:8200/v1/auth/approle/role/application1/secret-id/lookup
```
## Destroy AppRole Secret ID
Destroy an AppRole secret ID.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `POST` | `/auth/approle/role/:role_name/secret-id/destroy` | `204 (empty body)` |
### Parameters
- `role_name` `(string: <required>)` - Name of the AppRole.
- `secret_id` `(string: <required>)` - Secret ID attached to the role.
### Sample Payload
```json
{
"secret_id": "84896a0c-1347-aa90-a4f6-aca8b7558780"
}
```
### Sample Request
```
$ curl \
--header "X-Vault-Token: ..." \
--request POST \
--data @payload.json \
http://127.0.0.1:8200/v1/auth/approle/role/application1/secret-id/destroy
```
## Read AppRole Secret ID Accessor
Reads out the properties of a SecretID.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `POST` | `/auth/approle/role/:role_name/secret-id-accessor/lookup` | `200 application/json` |
### Parameters
- `role_name` `(string: <required>)` - Name of the AppRole.
- `secret_id_accessor` `(string: <required>)` - Secret ID accessor attached to the role.
### Sample Payload
```json
{
"secret_id_accessor": "84896a0c-1347-aa90-a4f6-aca8b7558780"
}
```
### Sample Request
```
$ curl \
--header "X-Vault-Token: ..." \
--request POST \
--data @payload.json \
http://127.0.0.1:8200/v1/auth/approle/role/application1/secret-id-accessor/lookup
```
## Destroy AppRole Secret ID Accessor
Destroy an AppRole secret ID by its accessor.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `POST` | `/auth/approle/role/:role_name/secret-id-accessor/destroy` | `204 (empty body)` |
### Parameters
- `role_name` `(string: <required>)` - Name of the AppRole.
- `secret_id_accessor` `(string: <required>)` - Secret ID accessor attached to the role.
### Sample Payload
```json
{
"secret_id_accessor": "84896a0c-1347-aa90-a4f6-aca8b7558780"
}
```
### Sample Request
```
$ curl \
--header "X-Vault-Token: ..." \
--request POST \
--data @payload.json \
http://127.0.0.1:8200/v1/auth/approle/role/application1/secret-id-accessor/destroy
```
## Create Custom AppRole Secret ID
Assigns a "custom" SecretID against an existing AppRole. This is used in the
"Push" model of operation.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `POST` | `/auth/approle/role/:role_name/custom-secret-id` | `200 application/json` |
### Parameters
- `role_name` `(string: <required>)` - Name of the AppRole.
- `secret_id` `(string: <required>)` - SecretID to be attached to the Role.
- `metadata` `(string: "")` - Metadata to be tied to the SecretID. This should be
a JSON-formatted string containing the metadata in key-value pairs. This
metadata will be set on tokens issued with this SecretID, and is logged in
audit logs _in plaintext_.
- `cidr_list` `(array: [])` - Comma separated string or list of CIDR blocks
enforcing secret IDs to be used from specific set of IP addresses. If
`bound_cidr_list` is set on the role, then the list of CIDR blocks listed
here should be a subset of the CIDR blocks listed on the role.
- `token_bound_cidrs` `(array: [])` - Comma-separated string or list of CIDR
blocks; if set, specifies blocks of IP addresses which can use the auth tokens
generated by this SecretID. Overrides any role-set value but must be a subset.
### Sample Payload
```json
{
"secret_id": "testsecretid"
}
```
### Sample Request
```
$ curl \
--header "X-Vault-Token: ..." \
--request POST \
--data @payload.json \
http://127.0.0.1:8200/v1/auth/approle/role/application1/custom-secret-id
```
### Sample Response
```json
{
"auth": null,
"warnings": null,
"wrap_info": null,
"data": {
"secret_id_accessor": "84896a0c-1347-aa90-a4f6-aca8b7558780",
"secret_id": "testsecretid"
},
"lease_duration": 0,
"renewable": false,
"lease_id": ""
}
```
## Login With AppRole
Issues a Vault token based on the presented credentials. `role_id` is always
required; if `bind_secret_id` is enabled (the default) on the AppRole,
`secret_id` is required too. Any other bound authentication values on the
AppRole (such as client IP CIDR) are also evaluated.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `POST` | `/auth/approle/login` | `200 application/json` |
### Parameters
- `role_id` `(string: <required>)` - RoleID of the AppRole.
- `secret_id` `(string: <required>)` - SecretID belonging to AppRole.
### Sample Payload
```json
{
"role_id": "59d6d1ca-47bb-4e7e-a40b-8be3bc5a0ba8",
"secret_id": "84896a0c-1347-aa90-a4f6-aca8b7558780"
}
```
### Sample Request
```
$ curl \
--request POST \
--data @payload.json \
http://127.0.0.1:8200/v1/auth/approle/login
```
### Sample Response
```json
{
"auth": {
"renewable": true,
"lease_duration": 1200,
"metadata": null,
"policies": [
"default"
],
"accessor": "fd6c9a00-d2dc-3b11-0be5-af7ae0e1d374",
"client_token": "5b1a0318-679c-9c45-e5c6-d1b9a9035d49"
},
"warnings": null,
"wrap_info": null,
"data": null,
"lease_duration": 0,
"renewable": false,
"lease_id": ""
}
```
## Read, Update, or Delete AppRole Properties
Updates the respective property in the existing AppRole. All of these
parameters of the AppRole can be updated using the `/auth/approle/role/:role_name`
endpoint directly. The endpoint for each field is provided separately
to be able to delegate specific endpoints using Vault's ACL system.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `GET/POST/DELETE` | `/auth/approle/role/:role_name/policies` | `200/204` |
| `GET/POST/DELETE` | `/auth/approle/role/:role_name/secret-id-num-uses` | `200/204` |
| `GET/POST/DELETE` | `/auth/approle/role/:role_name/secret-id-ttl` | `200/204` |
| `GET/POST/DELETE` | `/auth/approle/role/:role_name/token-ttl` | `200/204` |
| `GET/POST/DELETE` | `/auth/approle/role/:role_name/token-max-ttl` | `200/204` |
| `GET/POST/DELETE` | `/auth/approle/role/:role_name/bind-secret-id` | `200/204` |
| `GET/POST/DELETE` | `/auth/approle/role/:role_name/secret-id-bound-cidrs` | `200/204` |
| `GET/POST/DELETE` | `/auth/approle/role/:role_name/token-bound-cidrs` | `200/204` |
| `GET/POST/DELETE` | `/auth/approle/role/:role_name/period` | `200/204` |
Refer to `/auth/approle/role/:role_name` endpoint.
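For example, reading just the `policies` field of a role (a sketch; the response carries the same field returned by the full role read):
```
$ curl \
    --header "X-Vault-Token: ..." \
    http://127.0.0.1:8200/v1/auth/approle/role/application1/policies
```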
## Tidy Tokens
Performs some maintenance tasks to clean up invalid entries that may remain
in the token store. Generally, running this is not needed unless upgrade
notes or support personnel suggest it. This may perform a lot of I/O to the
storage backend, so it should be used sparingly.
| Method | Path | Produces |
| :------- | :------------------------------ | :--------------------- |
| `POST` | `/auth/approle/tidy/secret-id` | `204 (empty body)` |
### Sample Request
```
$ curl \
--header "X-Vault-Token: ..." \
--request POST \
http://127.0.0.1:8200/v1/auth/approle/tidy/secret-id
```
### Sample Response
```json
{
"request_id": "b20b56e3-4699-5b19-cc6b-e74f7b787bbf",
"lease_id": "",
"renewable": false,
"lease_duration": 0,
"data": null,
"wrap_info": null,
"warnings": [
"Tidy operation successfully started. Any information from the operation will be printed to Vault's server logs."
],
"auth": null
}
```
| website/source/api/auth/approle/index.html.md | 0 | https://github.com/hashicorp/vault/commit/a7531526abdb2cbad7a348bf618487eca6b05eb7 | [
0.00021285506954882294,
0.0001710837968857959,
0.000163391770911403,
0.00017085908621083945,
0.000006393404419213766
] |
{
"id": 3,
"code_window": [
"\t// Add special keys that should be excluded from migration\n",
"\tresult[storageMigrationLock] = []byte{}\n",
"\tresult[vault.CoreLockPath] = []byte{}\n",
"\n",
"\treturn result\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// Empty keys are now prevented in Vault, but older data sets\n",
"\t// might contain them.\n",
"\tresult[\"\"] = []byte{}\n",
"\tresult[trailing_slash_key] = []byte{}\n",
"\n"
],
"file_path": "command/operator_migrate_test.go",
"type": "add",
"edit_start_line_idx": 269
} | module github.com/mitchellh/mapstructure
| vendor/github.com/mitchellh/mapstructure/go.mod | 0 | https://github.com/hashicorp/vault/commit/a7531526abdb2cbad7a348bf618487eca6b05eb7 | [
0.00017457662033848464,
0.00017457662033848464,
0.00017457662033848464,
0.00017457662033848464,
0
] |
{
"id": 4,
"code_window": [
"\t\tif err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n",
"\n",
"\t\tif k == storageMigrationLock || k == vault.CoreLockPath {\n",
"\t\t\tif entry == nil {\n",
"\t\t\t\tcontinue\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif k == storageMigrationLock || k == vault.CoreLockPath || k == \"\" || strings.HasSuffix(k, \"/\") {\n"
],
"file_path": "command/operator_migrate_test.go",
"type": "replace",
"edit_start_line_idx": 294
} | package command
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"reflect"
"sort"
"strings"
"testing"
"time"
"github.com/go-test/deep"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/command/server"
"github.com/hashicorp/vault/helper/base62"
"github.com/hashicorp/vault/helper/testhelpers"
"github.com/hashicorp/vault/physical"
"github.com/hashicorp/vault/vault"
)
func init() {
rand.Seed(time.Now().UnixNano())
}
func TestMigration(t *testing.T) {
t.Run("Default", func(t *testing.T) {
data := generateData()
fromFactory := physicalBackends["file"]
folder := filepath.Join(os.TempDir(), testhelpers.RandomWithPrefix("migrator"))
defer os.RemoveAll(folder)
confFrom := map[string]string{
"path": folder,
}
from, err := fromFactory(confFrom, nil)
if err != nil {
t.Fatal(err)
}
if err := storeData(from, data); err != nil {
t.Fatal(err)
}
toFactory := physicalBackends["inmem"]
confTo := map[string]string{}
to, err := toFactory(confTo, nil)
if err != nil {
t.Fatal(err)
}
cmd := OperatorMigrateCommand{
logger: log.NewNullLogger(),
}
if err := cmd.migrateAll(context.Background(), from, to); err != nil {
t.Fatal(err)
}
if err := compareStoredData(to, data, ""); err != nil {
t.Fatal(err)
}
})
t.Run("Start option", func(t *testing.T) {
data := generateData()
fromFactory := physicalBackends["inmem"]
confFrom := map[string]string{}
from, err := fromFactory(confFrom, nil)
if err != nil {
t.Fatal(err)
}
if err := storeData(from, data); err != nil {
t.Fatal(err)
}
toFactory := physicalBackends["file"]
folder := filepath.Join(os.TempDir(), testhelpers.RandomWithPrefix("migrator"))
defer os.RemoveAll(folder)
confTo := map[string]string{
"path": folder,
}
to, err := toFactory(confTo, nil)
if err != nil {
t.Fatal(err)
}
const start = "m"
cmd := OperatorMigrateCommand{
logger: log.NewNullLogger(),
flagStart: start,
}
if err := cmd.migrateAll(context.Background(), from, to); err != nil {
t.Fatal(err)
}
if err := compareStoredData(to, data, start); err != nil {
t.Fatal(err)
}
})
t.Run("Config parsing", func(t *testing.T) {
cmd := new(OperatorMigrateCommand)
cfgName := filepath.Join(os.TempDir(), testhelpers.RandomWithPrefix("migrator"))
ioutil.WriteFile(cfgName, []byte(`
storage_source "src_type" {
path = "src_path"
}
storage_destination "dest_type" {
path = "dest_path"
}`), 0644)
defer os.Remove(cfgName)
expCfg := &migratorConfig{
StorageSource: &server.Storage{
Type: "src_type",
Config: map[string]string{
"path": "src_path",
},
},
StorageDestination: &server.Storage{
Type: "dest_type",
Config: map[string]string{
"path": "dest_path",
},
},
}
cfg, err := cmd.loadMigratorConfig(cfgName)
if err != nil {
t.Fatal(err)
}
if diff := deep.Equal(cfg, expCfg); diff != nil {
t.Fatal(diff)
}
verifyBad := func(cfg string) {
ioutil.WriteFile(cfgName, []byte(cfg), 0644)
_, err := cmd.loadMigratorConfig(cfgName)
if err == nil {
t.Fatalf("expected error but none received from: %v", cfg)
}
}
// missing source
verifyBad(`
storage_destination "dest_type" {
path = "dest_path"
}`)
// missing destination
verifyBad(`
storage_source "src_type" {
path = "src_path"
}`)
// duplicate source
verifyBad(`
storage_source "src_type" {
path = "src_path"
}
storage_source "src_type2" {
path = "src_path"
}
storage_destination "dest_type" {
path = "dest_path"
}`)
// duplicate destination
verifyBad(`
storage_source "src_type" {
path = "src_path"
}
storage_destination "dest_type" {
path = "dest_path"
}
storage_destination "dest_type2" {
path = "dest_path"
}`)
})
t.Run("DFS Scan", func(t *testing.T) {
s, _ := physicalBackends["inmem"](map[string]string{}, nil)
data := generateData()
data["cc"] = []byte{}
data["c/d/e/f"] = []byte{}
data["c/d/e/g"] = []byte{}
data["c"] = []byte{}
storeData(s, data)
l := randomLister{s}
var out []string
dfsScan(context.Background(), l, func(ctx context.Context, path string) error {
out = append(out, path)
return nil
})
var keys []string
for key := range data {
keys = append(keys, key)
}
sort.Strings(keys)
if !reflect.DeepEqual(keys, out) {
t.Fatalf("expected equal: %v, %v", keys, out)
}
})
}
// randomLister wraps a physical backend, providing a List method
// that returns results in a random order.
type randomLister struct {
b physical.Backend
}
func (l randomLister) List(ctx context.Context, path string) ([]string, error) {
result, err := l.b.List(ctx, path)
if err != nil {
return nil, err
}
rand.Shuffle(len(result), func(i, j int) {
result[i], result[j] = result[j], result[i]
})
return result, err
}
func (l randomLister) Get(ctx context.Context, path string) (*physical.Entry, error) {
return l.b.Get(ctx, path)
}
func (l randomLister) Put(ctx context.Context, entry *physical.Entry) error {
return l.b.Put(ctx, entry)
}
func (l randomLister) Delete(ctx context.Context, path string) error {
return l.b.Delete(ctx, path)
}
// generateData creates a map of 500 random keys and values
func generateData() map[string][]byte {
result := make(map[string][]byte)
for i := 0; i < 500; i++ {
segments := make([]string, rand.Intn(8)+1)
for j := 0; j < len(segments); j++ {
s, _ := base62.Random(6)
segments[j] = s
}
data := make([]byte, 100)
rand.Read(data)
result[strings.Join(segments, "/")] = data
}
// Add special keys that should be excluded from migration
result[storageMigrationLock] = []byte{}
result[vault.CoreLockPath] = []byte{}
return result
}
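// storeData writes every key/value pair from ref into the given physical backend.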
func storeData(s physical.Backend, ref map[string][]byte) error {
for k, v := range ref {
entry := physical.Entry{
Key: k,
Value: v,
}
err := s.Put(context.Background(), &entry)
if err != nil {
return err
}
}
return nil
}
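// compareStoredData verifies the backend contents against the reference map:
// excluded keys must be absent, and only keys at or after start may be present.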
func compareStoredData(s physical.Backend, ref map[string][]byte, start string) error {
for k, v := range ref {
entry, err := s.Get(context.Background(), k)
if err != nil {
return err
}
if k == storageMigrationLock || k == vault.CoreLockPath {
if entry == nil {
continue
}
return fmt.Errorf("key found that should have been excluded: %s", k)
}
if k >= start {
if entry == nil {
return fmt.Errorf("key not found: %s", k)
}
if !bytes.Equal(v, entry.Value) {
return fmt.Errorf("values differ for key: %s", k)
}
} else {
if entry != nil {
return fmt.Errorf("found key the should have been skipped by start option: %s", k)
}
}
}
return nil
}
| command/operator_migrate_test.go | 1 | https://github.com/hashicorp/vault/commit/a7531526abdb2cbad7a348bf618487eca6b05eb7 | [
0.9971168041229248,
0.09217949956655502,
0.00016314485401380807,
0.0002448303275741637,
0.2819086015224457
] |
{
"id": 4,
"code_window": [
"\t\tif err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n",
"\n",
"\t\tif k == storageMigrationLock || k == vault.CoreLockPath {\n",
"\t\t\tif entry == nil {\n",
"\t\t\t\tcontinue\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif k == storageMigrationLock || k == vault.CoreLockPath || k == \"\" || strings.HasSuffix(k, \"/\") {\n"
],
"file_path": "command/operator_migrate_test.go",
"type": "replace",
"edit_start_line_idx": 294
} | The MIT License (MIT)
Copyright (c) 2014 Florian Sundermann
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
| vendor/github.com/boombuler/barcode/LICENSE | 0 | https://github.com/hashicorp/vault/commit/a7531526abdb2cbad7a348bf618487eca6b05eb7 | [
0.00017525978910271078,
0.00017389592539984733,
0.00017240791930817068,
0.00017402003868483007,
0.0000011675745099637425
] |
{
"id": 4,
"code_window": [
"\t\tif err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n",
"\n",
"\t\tif k == storageMigrationLock || k == vault.CoreLockPath {\n",
"\t\t\tif entry == nil {\n",
"\t\t\t\tcontinue\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif k == storageMigrationLock || k == vault.CoreLockPath || k == \"\" || strings.HasSuffix(k, \"/\") {\n"
],
"file_path": "command/operator_migrate_test.go",
"type": "replace",
"edit_start_line_idx": 294
} | vendor/github.com/modern-go/reflect2/relfect2_arm64.s | 0 | https://github.com/hashicorp/vault/commit/a7531526abdb2cbad7a348bf618487eca6b05eb7 | [
0.00017829884018283337,
0.00017829884018283337,
0.00017829884018283337,
0.00017829884018283337,
0
] |
|
{
"id": 4,
"code_window": [
"\t\tif err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n",
"\n",
"\t\tif k == storageMigrationLock || k == vault.CoreLockPath {\n",
"\t\t\tif entry == nil {\n",
"\t\t\t\tcontinue\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tif k == storageMigrationLock || k == vault.CoreLockPath || k == \"\" || strings.HasSuffix(k, \"/\") {\n"
],
"file_path": "command/operator_migrate_test.go",
"type": "replace",
"edit_start_line_idx": 294
} | // Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package uuid generates and inspects UUIDs.
//
// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
// Services.
//
// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
// maps or compared directly.
package uuid
| vendor/github.com/google/uuid/doc.go | 0 | https://github.com/hashicorp/vault/commit/a7531526abdb2cbad7a348bf618487eca6b05eb7 | [
0.00017511560872662812,
0.00017423543613404036,
0.0001733552635414526,
0.00017423543613404036,
8.80172592587769e-7
] |
{
"id": 0,
"code_window": [
" return filterResult;\n",
"}\n",
"\n",
"function filterRepoFiles(filter) {\n",
" const treeLink = $repoFindFileInput.attr('data-url-tree-link');\n",
" $repoFindFileTableBody.empty();\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"export function escapePath(s) {\n",
" return s.split('/').map(encodeURIComponent).join('/');\n",
"}\n",
"\n"
],
"file_path": "web_src/js/features/repo-findfile.js",
"type": "add",
"edit_start_line_idx": 74
} | import $ from 'jquery';
import {svg} from '../svg.js';
const {csrf} = window.config;
const threshold = 50;
let files = [];
let $repoFindFileInput, $repoFindFileTableBody, $repoFindFileNoResult;
// return the case-insensitive sub-match result as an array: [unmatched, matched, unmatched, matched, ...]
// res[even] is unmatched, res[odd] is matched, see unit tests for examples
// argument subLower must be a lower-cased string.
export function strSubMatch(full, subLower) {
const res = [''];
let i = 0, j = 0;
const fullLower = full.toLowerCase();
while (i < subLower.length && j < fullLower.length) {
if (subLower[i] === fullLower[j]) {
if (res.length % 2 !== 0) res.push('');
res[res.length - 1] += full[j];
j++;
i++;
} else {
if (res.length % 2 === 0) res.push('');
res[res.length - 1] += full[j];
j++;
}
}
if (i !== subLower.length) {
// if the sub string doesn't match the full, only return the full as unmatched.
return [full];
}
if (j < full.length) {
// append remaining chars from full to result as unmatched
if (res.length % 2 === 0) res.push('');
res[res.length - 1] += full.substring(j);
}
return res;
}
export function calcMatchedWeight(matchResult) {
let weight = 0;
for (let i = 0; i < matchResult.length; i++) {
if (i % 2 === 1) { // matches are on odd indices, see strSubMatch
// use a function where f(x+x) > f(x) + f(x), so that a longer matched substring gets a higher weight.
weight += matchResult[i].length * matchResult[i].length;
}
}
return weight;
}
export function filterRepoFilesWeighted(files, filter) {
let filterResult = [];
if (filter) {
const filterLower = filter.toLowerCase();
// TODO: for large repos, this loop could be slow; maybe there could be one more limit:
// ... && filterResult.length < threshold * 20, wait for more feedback
for (let i = 0; i < files.length; i++) {
const res = strSubMatch(files[i], filterLower);
if (res.length > 1) { // length==1 means unmatched, >1 means having matched sub strings
filterResult.push({matchResult: res, matchWeight: calcMatchedWeight(res)});
}
}
filterResult.sort((a, b) => b.matchWeight - a.matchWeight);
filterResult = filterResult.slice(0, threshold);
} else {
for (let i = 0; i < files.length && i < threshold; i++) {
filterResult.push({matchResult: [files[i]], matchWeight: 0});
}
}
return filterResult;
}
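// filterRepoFiles renders the weighted filter results for the current filter into the find-file table.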
function filterRepoFiles(filter) {
const treeLink = $repoFindFileInput.attr('data-url-tree-link');
$repoFindFileTableBody.empty();
const filterResult = filterRepoFilesWeighted(files, filter);
const tmplRow = `<tr><td><a></a></td></tr>`;
$repoFindFileNoResult.toggle(filterResult.length === 0);
for (const r of filterResult) {
const $row = $(tmplRow);
const $a = $row.find('a');
$a.attr('href', `${treeLink}/${r.matchResult.join('')}`);
const $octiconFile = $(svg('octicon-file')).addClass('mr-3');
$a.append($octiconFile);
// if the target file path is "abc/xyz", to search "bx", then the matchResult is ['a', 'b', 'c/', 'x', 'yz']
// the matchResult[odd] is matched and highlighted to red.
for (let j = 0; j < r.matchResult.length; j++) {
if (!r.matchResult[j]) continue;
const $span = $('<span>').text(r.matchResult[j]);
if (j % 2 === 1) $span.addClass('ui text red');
$a.append($span);
}
$repoFindFileTableBody.append($row);
}
}
async function loadRepoFiles() {
files = await $.ajax({
url: $repoFindFileInput.attr('data-url-data-link'),
headers: {'X-Csrf-Token': csrf}
});
filterRepoFiles($repoFindFileInput.val());
}
export function initFindFileInRepo() {
$repoFindFileInput = $('#repo-file-find-input');
if (!$repoFindFileInput.length) return;
$repoFindFileTableBody = $('#repo-find-file-table tbody');
$repoFindFileNoResult = $('#repo-find-file-no-result');
$repoFindFileInput.on('input', () => filterRepoFiles($repoFindFileInput.val()));
loadRepoFiles();
}
| web_src/js/features/repo-findfile.js | 1 | https://github.com/go-gitea/gitea/commit/ea13b23349ef98249deeb9469f6b1444de42abf5 | [
0.9990429282188416,
0.3931995928287506,
0.00016703283472452313,
0.07844717800617218,
0.45349612832069397
] |
{
"id": 0,
"code_window": [
" return filterResult;\n",
"}\n",
"\n",
"function filterRepoFiles(filter) {\n",
" const treeLink = $repoFindFileInput.attr('data-url-tree-link');\n",
" $repoFindFileTableBody.empty();\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"export function escapePath(s) {\n",
" return s.split('/').map(encodeURIComponent).join('/');\n",
"}\n",
"\n"
],
"file_path": "web_src/js/features/repo-findfile.js",
"type": "add",
"edit_start_line_idx": 74
} | <div class="runner-container">
{{template "base/alert" .}}
<h4 class="ui top attached header">
{{.locale.Tr "actions.runners.runner_manage_panel"}} ({{.locale.Tr "admin.total" .Total}})
<div class="ui right">
<div class="ui top right pointing dropdown">
<button class="ui button primary">
{{.locale.Tr "actions.runners.new"}}
{{svg "octicon-triangle-down" 14 "dropdown icon"}}
</button>
<div class="menu">
<div class="item">
{{/* TODO: replace the document link when there's a better one than the README of act_runner */}}
<a href="https://gitea.com/gitea/act_runner/src/branch/main/README.md">{{.locale.Tr "actions.runners.new_notice"}}</a>
</div>
<div class="divider"></div>
<div class="header">
Registration Token
</div>
<div class="ui input">
<input type="text" value="{{.RegistrationToken}}">
<div class="ui basic label button" data-clipboard-text="{{.RegistrationToken}}">
{{svg "octicon-copy" 14}}
</div>
</div>
<div class="divider"></div>
<div class="item">
<a href="{{$.Link}}/reset_registration_token">Reset registration token</a>
</div>
</div>
</div>
</div>
</h4>
<div class="ui attached segment">
<form class="ui form ignore-dirty" id="user-list-search-form" action="{{$.Link}}">
<!-- Search Text -->
<div class="ui fluid action input" style="max-width: 70%;">
<input name="q" value="{{.Keyword}}" placeholder="{{.locale.Tr "explore.search"}}..." autofocus>
<button class="ui primary button">{{.locale.Tr "explore.search"}}</button>
</div>
</form>
</div>
<div class="ui attached table segment">
<table class="ui very basic striped table unstackable">
<thead>
<tr>
<th data-sortt-asc="online" data-sortt-desc="offline">{{.locale.Tr "actions.runners.status"}}</th>
<th data-sortt-asc="alphabetically">{{.locale.Tr "actions.runners.id"}}</th>
<th>{{.locale.Tr "actions.runners.name"}}</th>
<th>{{.locale.Tr "actions.runners.owner_type"}}</th>
<th>{{.locale.Tr "actions.runners.labels"}}</th>
<th>{{.locale.Tr "actions.runners.last_online"}}</th>
<th></th>
</tr>
</thead>
<tbody>
{{if .Runners}}
{{range .Runners}}
<tr>
<td>
<span class="runner-status-{{if .IsOnline}}online{{else}}offline{{end}}">{{.StatusLocaleName $.locale}}</span>
</td>
<td>{{.ID}}</td>
<td><p class="tooltip" data-content="{{.Description}}">{{.Name}}</p></td>
<td>{{.OwnType}}</td>
<td class="runner-tags">
{{range .AllLabels}}<span class="ui label">{{.}}</span>{{end}}
</td>
<td>{{if .LastOnline}}{{TimeSinceUnix .LastOnline $.locale}}{{else}}{{$.locale.Tr "never"}}{{end}}</td>
<td class="runner-ops">
{{if .Editable $.RunnerOnwerID $.RunnerRepoID}}
<a href="{{$.Link}}/{{.ID}}">{{svg "octicon-pencil"}}</a>
{{end}}
</td>
</tr>
{{end}}
{{else}}
<tr>
<td class="center aligned" colspan="6">{{.locale.Tr "actions.runners.none"}}</td>
</tr>
{{end}}
</tbody>
</table>
</div>
{{template "base/paginate" .}}
</div>
| templates/shared/actions/runner_list.tmpl | 0 | https://github.com/go-gitea/gitea/commit/ea13b23349ef98249deeb9469f6b1444de42abf5 | [
0.000182172647328116,
0.00016922559007070959,
0.0001654209045227617,
0.00016740054707042873,
0.000004950877155351918
] |
{
"id": 0,
"code_window": [
" return filterResult;\n",
"}\n",
"\n",
"function filterRepoFiles(filter) {\n",
" const treeLink = $repoFindFileInput.attr('data-url-tree-link');\n",
" $repoFindFileTableBody.empty();\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"export function escapePath(s) {\n",
" return s.split('/').map(encodeURIComponent).join('/');\n",
"}\n",
"\n"
],
"file_path": "web_src/js/features/repo-findfile.js",
"type": "add",
"edit_start_line_idx": 74
} | // Copyright 2021 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package util
import (
"bytes"
"unicode"
"github.com/yuin/goldmark/util"
)
type sanitizedError struct {
err error
}
func (err sanitizedError) Error() string {
return SanitizeCredentialURLs(err.err.Error())
}
func (err sanitizedError) Unwrap() error {
return err.err
}
// SanitizeErrorCredentialURLs wraps the error and makes sure the returned error message doesn't contain sensitive credentials in URLs
func SanitizeErrorCredentialURLs(err error) error {
return sanitizedError{err: err}
}
const userPlaceholder = "sanitized-credential"
var schemeSep = []byte("://")
// SanitizeCredentialURLs removes all credentials in URLs (starting with "scheme://") from the input string: "https://user:[email protected]" => "https://[email protected]"
func SanitizeCredentialURLs(s string) string {
bs := util.StringToReadOnlyBytes(s)
schemeSepPos := bytes.Index(bs, schemeSep)
if schemeSepPos == -1 || bytes.IndexByte(bs[schemeSepPos:], '@') == -1 {
return s // fast return if there is no URL scheme or no userinfo
}
out := make([]byte, 0, len(bs)+len(userPlaceholder))
for schemeSepPos != -1 {
schemeSepPos += 3 // skip the "://"
sepAtPos := -1 // the possible '@' position: "https://foo@[^here]host"
sepEndPos := schemeSepPos // the possible end position: "The https://host[^here] in log for test"
sepLoop:
for ; sepEndPos < len(bs); sepEndPos++ {
c := bs[sepEndPos]
if ('A' <= c && c <= 'Z') || ('a' <= c && c <= 'z') || ('0' <= c && c <= '9') {
continue
}
switch c {
case '@':
sepAtPos = sepEndPos
case '-', '.', '_', '~', '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=', ':', '%':
continue // due to RFC 3986, userinfo can contain - . _ ~ ! $ & ' ( ) * + , ; = : and any percent-encoded chars
default:
break sepLoop // if it is an invalid char for URL (eg: space, '/', and others), stop the loop
}
}
// if there is '@', and the string is like "s://u@h", then hide the "u" part
if sepAtPos != -1 && (schemeSepPos >= 4 && unicode.IsLetter(rune(bs[schemeSepPos-4]))) && sepAtPos-schemeSepPos > 0 && sepEndPos-sepAtPos > 0 {
out = append(out, bs[:schemeSepPos]...)
out = append(out, userPlaceholder...)
out = append(out, bs[sepAtPos:sepEndPos]...)
} else {
out = append(out, bs[:sepEndPos]...)
}
bs = bs[sepEndPos:]
schemeSepPos = bytes.Index(bs, schemeSep)
}
out = append(out, bs...)
return util.BytesToReadOnlyString(out)
}
| modules/util/sanitize.go | 0 | https://github.com/go-gitea/gitea/commit/ea13b23349ef98249deeb9469f6b1444de42abf5 | [
0.00017902915715239942,
0.0001723460154607892,
0.00016716764366719872,
0.00017216173000633717,
0.00000416971715822001
] |
{
"id": 0,
"code_window": [
" return filterResult;\n",
"}\n",
"\n",
"function filterRepoFiles(filter) {\n",
" const treeLink = $repoFindFileInput.attr('data-url-tree-link');\n",
" $repoFindFileTableBody.empty();\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"export function escapePath(s) {\n",
" return s.split('/').map(encodeURIComponent).join('/');\n",
"}\n",
"\n"
],
"file_path": "web_src/js/features/repo-findfile.js",
"type": "add",
"edit_start_line_idx": 74
} | // Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package git
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
)
func TestRunWithContextStd(t *testing.T) {
cmd := NewCommand(context.Background(), "--version")
stdout, stderr, err := cmd.RunStdString(&RunOpts{})
assert.NoError(t, err)
assert.Empty(t, stderr)
assert.Contains(t, stdout, "git version")
cmd = NewCommand(context.Background(), "--no-such-arg")
stdout, stderr, err = cmd.RunStdString(&RunOpts{})
if assert.Error(t, err) {
assert.Equal(t, stderr, err.Stderr())
assert.Contains(t, err.Stderr(), "unknown option:")
assert.Contains(t, err.Error(), "exit status 129 - unknown option:")
assert.Empty(t, stdout)
}
cmd = NewCommand(context.Background())
cmd.AddDynamicArguments("-test")
assert.ErrorIs(t, cmd.Run(&RunOpts{}), ErrBrokenCommand)
cmd = NewCommand(context.Background())
cmd.AddDynamicArguments("--test")
assert.ErrorIs(t, cmd.Run(&RunOpts{}), ErrBrokenCommand)
subCmd := "version"
cmd = NewCommand(context.Background()).AddDynamicArguments(subCmd) // for test purposes only; a sub-command should never be dynamic in production
stdout, stderr, err = cmd.RunStdString(&RunOpts{})
assert.NoError(t, err)
assert.Empty(t, stderr)
assert.Contains(t, stdout, "git version")
}
func TestGitArgument(t *testing.T) {
assert.True(t, isValidArgumentOption("-x"))
assert.True(t, isValidArgumentOption("--xx"))
assert.False(t, isValidArgumentOption(""))
assert.False(t, isValidArgumentOption("x"))
assert.True(t, isSafeArgumentValue(""))
assert.True(t, isSafeArgumentValue("x"))
assert.False(t, isSafeArgumentValue("-x"))
}
| modules/git/command_test.go | 0 | https://github.com/go-gitea/gitea/commit/ea13b23349ef98249deeb9469f6b1444de42abf5 | [
0.0001739637809805572,
0.00016994074394460768,
0.00016589998267591,
0.00016962160589173436,
0.0000030022254122741288
] |
{
"id": 1,
"code_window": [
"\n",
" $repoFindFileNoResult.toggle(filterResult.length === 0);\n",
" for (const r of filterResult) {\n",
" const $row = $(tmplRow);\n",
" const $a = $row.find('a');\n",
" $a.attr('href', `${treeLink}/${r.matchResult.join('')}`);\n",
" const $octiconFile = $(svg('octicon-file')).addClass('mr-3');\n",
" $a.append($octiconFile);\n",
" // if the target file path is \"abc/xyz\", to search \"bx\", then the matchResult is ['a', 'b', 'c/', 'x', 'yz']\n",
" // the matchResult[odd] is matched and highlighted to red.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" $a.attr('href', `${treeLink}/${escapePath(r.matchResult.join(''))}`);\n"
],
"file_path": "web_src/js/features/repo-findfile.js",
"type": "replace",
"edit_start_line_idx": 85
} | import {describe, expect, test} from 'vitest';
import {strSubMatch, calcMatchedWeight, filterRepoFilesWeighted} from './repo-findfile.js';
describe('Repo Find Files', () => {
test('strSubMatch', () => {
expect(strSubMatch('abc', '')).toEqual(['abc']);
expect(strSubMatch('abc', 'a')).toEqual(['', 'a', 'bc']);
expect(strSubMatch('abc', 'b')).toEqual(['a', 'b', 'c']);
expect(strSubMatch('abc', 'c')).toEqual(['ab', 'c']);
expect(strSubMatch('abc', 'ac')).toEqual(['', 'a', 'b', 'c']);
expect(strSubMatch('abc', 'z')).toEqual(['abc']);
expect(strSubMatch('abc', 'az')).toEqual(['abc']);
expect(strSubMatch('ABc', 'ac')).toEqual(['', 'A', 'B', 'c']);
expect(strSubMatch('abC', 'ac')).toEqual(['', 'a', 'b', 'C']);
expect(strSubMatch('aabbcc', 'abc')).toEqual(['', 'a', 'a', 'b', 'b', 'c', 'c']);
expect(strSubMatch('the/directory', 'hedir')).toEqual(['t', 'he', '/', 'dir', 'ectory']);
});
test('calcMatchedWeight', () => {
expect(calcMatchedWeight(['a', 'b', 'c', 'd']) < calcMatchedWeight(['a', 'bc', 'c'])).toBeTruthy();
});
test('filterRepoFilesWeighted', () => {
// the first matched result should always be the "word.txt"
let res = filterRepoFilesWeighted(['word.txt', 'we-got-result.dat'], 'word');
expect(res).toHaveLength(2);
expect(res[0].matchResult).toEqual(['', 'word', '.txt']);
res = filterRepoFilesWeighted(['we-got-result.dat', 'word.txt'], 'word');
expect(res).toHaveLength(2);
expect(res[0].matchResult).toEqual(['', 'word', '.txt']);
});
});
| web_src/js/features/repo-findfile.test.js | 1 | https://github.com/go-gitea/gitea/commit/ea13b23349ef98249deeb9469f6b1444de42abf5 | [
0.00017427625425625592,
0.00016707900795154274,
0.00016305125609505922,
0.00016549427527934313,
0.000004284208444005344
] |
{
"id": 1,
"code_window": [
"\n",
" $repoFindFileNoResult.toggle(filterResult.length === 0);\n",
" for (const r of filterResult) {\n",
" const $row = $(tmplRow);\n",
" const $a = $row.find('a');\n",
" $a.attr('href', `${treeLink}/${r.matchResult.join('')}`);\n",
" const $octiconFile = $(svg('octicon-file')).addClass('mr-3');\n",
" $a.append($octiconFile);\n",
" // if the target file path is \"abc/xyz\", to search \"bx\", then the matchResult is ['a', 'b', 'c/', 'x', 'yz']\n",
" // the matchResult[odd] is matched and highlighted to red.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" $a.attr('href', `${treeLink}/${escapePath(r.matchResult.join(''))}`);\n"
],
"file_path": "web_src/js/features/repo-findfile.js",
"type": "replace",
"edit_start_line_idx": 85
} | // Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package issue
import (
"context"
"fmt"
"code.gitea.io/gitea/models/db"
issues_model "code.gitea.io/gitea/models/issues"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/notification"
)
func changeMilestoneAssign(ctx context.Context, doer *user_model.User, issue *issues_model.Issue, oldMilestoneID int64) error {
// Only check if milestone exists if we don't remove it.
if issue.MilestoneID > 0 {
has, err := issues_model.HasMilestoneByRepoID(ctx, issue.RepoID, issue.MilestoneID)
if err != nil {
return fmt.Errorf("HasMilestoneByRepoID: %w", err)
}
if !has {
return fmt.Errorf("HasMilestoneByRepoID: issue doesn't exist")
}
}
if err := issues_model.UpdateIssueCols(ctx, issue, "milestone_id"); err != nil {
return err
}
if oldMilestoneID > 0 {
if err := issues_model.UpdateMilestoneCounters(ctx, oldMilestoneID); err != nil {
return err
}
}
if issue.MilestoneID > 0 {
if err := issues_model.UpdateMilestoneCounters(ctx, issue.MilestoneID); err != nil {
return err
}
}
if oldMilestoneID > 0 || issue.MilestoneID > 0 {
if err := issue.LoadRepo(ctx); err != nil {
return err
}
opts := &issues_model.CreateCommentOptions{
Type: issues_model.CommentTypeMilestone,
Doer: doer,
Repo: issue.Repo,
Issue: issue,
OldMilestoneID: oldMilestoneID,
MilestoneID: issue.MilestoneID,
}
if _, err := issues_model.CreateComment(ctx, opts); err != nil {
return err
}
}
return nil
}
// ChangeMilestoneAssign changes assignment of milestone for issue.
func ChangeMilestoneAssign(issue *issues_model.Issue, doer *user_model.User, oldMilestoneID int64) (err error) {
ctx, committer, err := db.TxContext(db.DefaultContext)
if err != nil {
return err
}
defer committer.Close()
if err = changeMilestoneAssign(ctx, doer, issue, oldMilestoneID); err != nil {
return err
}
if err = committer.Commit(); err != nil {
return fmt.Errorf("Commit: %w", err)
}
notification.NotifyIssueChangeMilestone(db.DefaultContext, doer, issue, oldMilestoneID)
return nil
}
| services/issue/milestone.go | 0 | https://github.com/go-gitea/gitea/commit/ea13b23349ef98249deeb9469f6b1444de42abf5 | [
0.00017592613585293293,
0.00017394270980730653,
0.00017043310799635947,
0.00017432378081139177,
0.0000017993128267335123
] |
{
"id": 1,
"code_window": [
"\n",
" $repoFindFileNoResult.toggle(filterResult.length === 0);\n",
" for (const r of filterResult) {\n",
" const $row = $(tmplRow);\n",
" const $a = $row.find('a');\n",
" $a.attr('href', `${treeLink}/${r.matchResult.join('')}`);\n",
" const $octiconFile = $(svg('octicon-file')).addClass('mr-3');\n",
" $a.append($octiconFile);\n",
" // if the target file path is \"abc/xyz\", to search \"bx\", then the matchResult is ['a', 'b', 'c/', 'x', 'yz']\n",
" // the matchResult[odd] is matched and highlighted to red.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" $a.attr('href', `${treeLink}/${escapePath(r.matchResult.join(''))}`);\n"
],
"file_path": "web_src/js/features/repo-findfile.js",
"type": "replace",
"edit_start_line_idx": 85
} | // Copyright 2020 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package v1_13 //nolint
import (
"code.gitea.io/gitea/modules/timeutil"
"xorm.io/xorm"
)
func AddProjectsInfo(x *xorm.Engine) error {
// Create new tables
type (
ProjectType uint8
ProjectBoardType uint8
)
type Project struct {
ID int64 `xorm:"pk autoincr"`
Title string `xorm:"INDEX NOT NULL"`
Description string `xorm:"TEXT"`
RepoID int64 `xorm:"INDEX"`
CreatorID int64 `xorm:"NOT NULL"`
IsClosed bool `xorm:"INDEX"`
BoardType ProjectBoardType
Type ProjectType
ClosedDateUnix timeutil.TimeStamp
CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
}
if err := x.Sync2(new(Project)); err != nil {
return err
}
type Comment struct {
OldProjectID int64
ProjectID int64
}
if err := x.Sync2(new(Comment)); err != nil {
return err
}
type Repository struct {
ID int64
NumProjects int `xorm:"NOT NULL DEFAULT 0"`
NumClosedProjects int `xorm:"NOT NULL DEFAULT 0"`
}
if err := x.Sync2(new(Repository)); err != nil {
return err
}
// ProjectIssue saves relation from issue to a project
type ProjectIssue struct {
ID int64 `xorm:"pk autoincr"`
IssueID int64 `xorm:"INDEX"`
ProjectID int64 `xorm:"INDEX"`
ProjectBoardID int64 `xorm:"INDEX"`
}
if err := x.Sync2(new(ProjectIssue)); err != nil {
return err
}
type ProjectBoard struct {
ID int64 `xorm:"pk autoincr"`
Title string
Default bool `xorm:"NOT NULL DEFAULT false"`
ProjectID int64 `xorm:"INDEX NOT NULL"`
CreatorID int64 `xorm:"NOT NULL"`
CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
}
return x.Sync2(new(ProjectBoard))
}
| models/migrations/v1_13/v146.go | 0 | https://github.com/go-gitea/gitea/commit/ea13b23349ef98249deeb9469f6b1444de42abf5 | [
0.0001761724124662578,
0.0001719764550216496,
0.00016901592607609928,
0.00017156907415483147,
0.0000019970184439443983
] |
{
"id": 1,
"code_window": [
"\n",
" $repoFindFileNoResult.toggle(filterResult.length === 0);\n",
" for (const r of filterResult) {\n",
" const $row = $(tmplRow);\n",
" const $a = $row.find('a');\n",
" $a.attr('href', `${treeLink}/${r.matchResult.join('')}`);\n",
" const $octiconFile = $(svg('octicon-file')).addClass('mr-3');\n",
" $a.append($octiconFile);\n",
" // if the target file path is \"abc/xyz\", to search \"bx\", then the matchResult is ['a', 'b', 'c/', 'x', 'yz']\n",
" // the matchResult[odd] is matched and highlighted to red.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" $a.attr('href', `${treeLink}/${escapePath(r.matchResult.join(''))}`);\n"
],
"file_path": "web_src/js/features/repo-findfile.js",
"type": "replace",
"edit_start_line_idx": 85
} | ######################
## EPiServer Files
######################
*License.config
| options/gitignore/EPiServer | 0 | https://github.com/go-gitea/gitea/commit/ea13b23349ef98249deeb9469f6b1444de42abf5 | [
0.0001682050060480833,
0.0001682050060480833,
0.0001682050060480833,
0.0001682050060480833,
0
] |
{
"id": 2,
"code_window": [
"import {describe, expect, test} from 'vitest';\n",
"import {strSubMatch, calcMatchedWeight, filterRepoFilesWeighted} from './repo-findfile.js';\n",
"\n",
"describe('Repo Find Files', () => {\n",
" test('strSubMatch', () => {\n",
" expect(strSubMatch('abc', '')).toEqual(['abc']);\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"import {strSubMatch, calcMatchedWeight, filterRepoFilesWeighted, escapePath} from './repo-findfile.js';\n"
],
"file_path": "web_src/js/features/repo-findfile.test.js",
"type": "replace",
"edit_start_line_idx": 1
} | import $ from 'jquery';
import {svg} from '../svg.js';
const {csrf} = window.config;
const threshold = 50;
let files = [];
let $repoFindFileInput, $repoFindFileTableBody, $repoFindFileNoResult;
// return the case-insensitive sub-match result as an array: [unmatched, matched, unmatched, matched, ...]
// res[even] is unmatched, res[odd] is matched, see unit tests for examples
// argument subLower must be a lower-cased string.
export function strSubMatch(full, subLower) {
const res = [''];
let i = 0, j = 0;
const fullLower = full.toLowerCase();
while (i < subLower.length && j < fullLower.length) {
if (subLower[i] === fullLower[j]) {
if (res.length % 2 !== 0) res.push('');
res[res.length - 1] += full[j];
j++;
i++;
} else {
if (res.length % 2 === 0) res.push('');
res[res.length - 1] += full[j];
j++;
}
}
if (i !== subLower.length) {
// if the sub string doesn't match the full, only return the full as unmatched.
return [full];
}
if (j < full.length) {
// append remaining chars from full to result as unmatched
if (res.length % 2 === 0) res.push('');
res[res.length - 1] += full.substring(j);
}
return res;
}
export function calcMatchedWeight(matchResult) {
let weight = 0;
for (let i = 0; i < matchResult.length; i++) {
if (i % 2 === 1) { // matches are on odd indices, see strSubMatch
// use a function where f(x+x) > f(x) + f(x), so that a longer matched substring gets a higher weight.
weight += matchResult[i].length * matchResult[i].length;
}
}
return weight;
}
export function filterRepoFilesWeighted(files, filter) {
let filterResult = [];
if (filter) {
const filterLower = filter.toLowerCase();
// TODO: for large repos, this loop could be slow; maybe there could be one more limit:
// ... && filterResult.length < threshold * 20, wait for more feedback
for (let i = 0; i < files.length; i++) {
const res = strSubMatch(files[i], filterLower);
if (res.length > 1) { // length==1 means unmatched, >1 means having matched sub strings
filterResult.push({matchResult: res, matchWeight: calcMatchedWeight(res)});
}
}
filterResult.sort((a, b) => b.matchWeight - a.matchWeight);
filterResult = filterResult.slice(0, threshold);
} else {
for (let i = 0; i < files.length && i < threshold; i++) {
filterResult.push({matchResult: [files[i]], matchWeight: 0});
}
}
return filterResult;
}
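// filterRepoFiles renders the weighted filter results for the current filter into the find-file table.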
function filterRepoFiles(filter) {
const treeLink = $repoFindFileInput.attr('data-url-tree-link');
$repoFindFileTableBody.empty();
const filterResult = filterRepoFilesWeighted(files, filter);
const tmplRow = `<tr><td><a></a></td></tr>`;
$repoFindFileNoResult.toggle(filterResult.length === 0);
for (const r of filterResult) {
const $row = $(tmplRow);
const $a = $row.find('a');
$a.attr('href', `${treeLink}/${r.matchResult.join('')}`);
const $octiconFile = $(svg('octicon-file')).addClass('mr-3');
$a.append($octiconFile);
// if the target file path is "abc/xyz", to search "bx", then the matchResult is ['a', 'b', 'c/', 'x', 'yz']
// the matchResult[odd] is matched and highlighted to red.
for (let j = 0; j < r.matchResult.length; j++) {
if (!r.matchResult[j]) continue;
const $span = $('<span>').text(r.matchResult[j]);
if (j % 2 === 1) $span.addClass('ui text red');
$a.append($span);
}
$repoFindFileTableBody.append($row);
}
}
async function loadRepoFiles() {
files = await $.ajax({
url: $repoFindFileInput.attr('data-url-data-link'),
headers: {'X-Csrf-Token': csrf}
});
filterRepoFiles($repoFindFileInput.val());
}
export function initFindFileInRepo() {
$repoFindFileInput = $('#repo-file-find-input');
if (!$repoFindFileInput.length) return;
$repoFindFileTableBody = $('#repo-find-file-table tbody');
$repoFindFileNoResult = $('#repo-find-file-no-result');
$repoFindFileInput.on('input', () => filterRepoFiles($repoFindFileInput.val()));
loadRepoFiles();
}
| web_src/js/features/repo-findfile.js | 1 | https://github.com/go-gitea/gitea/commit/ea13b23349ef98249deeb9469f6b1444de42abf5 | [
0.008833382278680801,
0.0025565586984157562,
0.0001780251768650487,
0.0019844321068376303,
0.002566552022472024
] |
{
"id": 2,
"code_window": [
"import {describe, expect, test} from 'vitest';\n",
"import {strSubMatch, calcMatchedWeight, filterRepoFilesWeighted} from './repo-findfile.js';\n",
"\n",
"describe('Repo Find Files', () => {\n",
" test('strSubMatch', () => {\n",
" expect(strSubMatch('abc', '')).toEqual(['abc']);\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"import {strSubMatch, calcMatchedWeight, filterRepoFilesWeighted, escapePath} from './repo-findfile.js';\n"
],
"file_path": "web_src/js/features/repo-findfile.test.js",
"type": "replace",
"edit_start_line_idx": 1
} | 65f1bf27bc3bf70f64657658635e66094edbcb4d
| tests/gitea-repositories-meta/user2/repo1.git/refs/heads/feature/1 | 0 | https://github.com/go-gitea/gitea/commit/ea13b23349ef98249deeb9469f6b1444de42abf5 | [
0.00016596248315181583,
0.00016596248315181583,
0.00016596248315181583,
0.00016596248315181583,
0
] |
{
"id": 2,
"code_window": [
"import {describe, expect, test} from 'vitest';\n",
"import {strSubMatch, calcMatchedWeight, filterRepoFilesWeighted} from './repo-findfile.js';\n",
"\n",
"describe('Repo Find Files', () => {\n",
" test('strSubMatch', () => {\n",
" expect(strSubMatch('abc', '')).toEqual(['abc']);\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"import {strSubMatch, calcMatchedWeight, filterRepoFilesWeighted, escapePath} from './repo-findfile.js';\n"
],
"file_path": "web_src/js/features/repo-findfile.test.js",
"type": "replace",
"edit_start_line_idx": 1
} | // Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package math
import (
"bytes"
"github.com/yuin/goldmark/ast"
"github.com/yuin/goldmark/parser"
"github.com/yuin/goldmark/text"
"github.com/yuin/goldmark/util"
)
type blockParser struct {
parseDollars bool
}
// NewBlockParser creates a new math BlockParser
func NewBlockParser(parseDollarBlocks bool) parser.BlockParser {
return &blockParser{
parseDollars: parseDollarBlocks,
}
}
// Open parses the current line and returns a result of parsing.
func (b *blockParser) Open(parent ast.Node, reader text.Reader, pc parser.Context) (ast.Node, parser.State) {
line, segment := reader.PeekLine()
pos := pc.BlockOffset()
if pos == -1 || len(line[pos:]) < 2 {
return nil, parser.NoChildren
}
dollars := false
if b.parseDollars && line[pos] == '$' && line[pos+1] == '$' {
dollars = true
} else if line[pos] != '\\' || line[pos+1] != '[' {
return nil, parser.NoChildren
}
node := NewBlock(dollars, pos)
// Now we need to check if the ending block is on the segment...
endBytes := []byte{'\\', ']'}
if dollars {
endBytes = []byte{'$', '$'}
}
idx := bytes.Index(line[pos+2:], endBytes)
if idx >= 0 {
segment.Stop = segment.Start + idx + 2
reader.Advance(segment.Len() - 1)
segment.Start += 2
node.Lines().Append(segment)
node.Closed = true
return node, parser.Close | parser.NoChildren
}
reader.Advance(segment.Len() - 1)
segment.Start += 2
node.Lines().Append(segment)
return node, parser.NoChildren
}
// Continue parses the current line and returns a result of parsing.
func (b *blockParser) Continue(node ast.Node, reader text.Reader, pc parser.Context) parser.State {
block := node.(*Block)
if block.Closed {
return parser.Close
}
line, segment := reader.PeekLine()
w, pos := util.IndentWidth(line, 0)
if w < 4 {
if block.Dollars {
i := pos
for ; i < len(line) && line[i] == '$'; i++ {
}
length := i - pos
if length >= 2 && util.IsBlank(line[i:]) {
reader.Advance(segment.Stop - segment.Start - segment.Padding)
block.Closed = true
return parser.Close
}
} else if len(line[pos:]) > 1 && line[pos] == '\\' && line[pos+1] == ']' && util.IsBlank(line[pos+2:]) {
reader.Advance(segment.Stop - segment.Start - segment.Padding)
block.Closed = true
return parser.Close
}
}
pos, padding := util.IndentPosition(line, 0, block.Indent)
seg := text.NewSegmentPadding(segment.Start+pos, segment.Stop, padding)
node.Lines().Append(seg)
reader.AdvanceAndSetPadding(segment.Stop-segment.Start-pos-1, padding)
return parser.Continue | parser.NoChildren
}
// Close will be called when the parser returns Close.
func (b *blockParser) Close(node ast.Node, reader text.Reader, pc parser.Context) {
// noop
}
// CanInterruptParagraph returns true if the parser can interrupt paragraphs,
// otherwise false.
func (b *blockParser) CanInterruptParagraph() bool {
return true
}
// CanAcceptIndentedLine returns true if the parser can open new node when
// the given line is being indented more than 3 spaces.
func (b *blockParser) CanAcceptIndentedLine() bool {
return false
}
// Trigger returns a list of characters that triggers Parse method of
// this parser.
// If Trigger returns a nil, Open will be called with any lines.
//
// We leave this as nil as our parse method is quick enough
func (b *blockParser) Trigger() []byte {
return nil
}
| modules/markup/markdown/math/block_parser.go | 0 | https://github.com/go-gitea/gitea/commit/ea13b23349ef98249deeb9469f6b1444de42abf5 | [
0.00017933343769982457,
0.0001739656290737912,
0.0001658425753703341,
0.00017508772725705057,
0.000003930553702957695
] |
{
"id": 2,
"code_window": [
"import {describe, expect, test} from 'vitest';\n",
"import {strSubMatch, calcMatchedWeight, filterRepoFilesWeighted} from './repo-findfile.js';\n",
"\n",
"describe('Repo Find Files', () => {\n",
" test('strSubMatch', () => {\n",
" expect(strSubMatch('abc', '')).toEqual(['abc']);\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"import {strSubMatch, calcMatchedWeight, filterRepoFilesWeighted, escapePath} from './repo-findfile.js';\n"
],
"file_path": "web_src/js/features/repo-findfile.test.js",
"type": "replace",
"edit_start_line_idx": 1
} | // Copyright 2021 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package bleve
import (
"github.com/blevesearch/bleve/v2"
)
// FlushingBatch is a batch of operations that automatically flushes to the
// underlying index once it reaches a certain size.
type FlushingBatch struct {
maxBatchSize int
batch *bleve.Batch
index bleve.Index
}
// NewFlushingBatch creates a new flushing batch for the specified index. Once
// the number of operations in the batch reaches the specified limit, the batch
// automatically flushes its operations to the index.
func NewFlushingBatch(index bleve.Index, maxBatchSize int) *FlushingBatch {
return &FlushingBatch{
maxBatchSize: maxBatchSize,
batch: index.NewBatch(),
index: index,
}
}
// Index adds a new document to the batch
func (b *FlushingBatch) Index(id string, data interface{}) error {
if err := b.batch.Index(id, data); err != nil {
return err
}
return b.flushIfFull()
}
// Delete adds a delete operation to the batch
func (b *FlushingBatch) Delete(id string) error {
b.batch.Delete(id)
return b.flushIfFull()
}
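// flushIfFull flushes the batch once it reaches the configured maximum size.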
func (b *FlushingBatch) flushIfFull() error {
if b.batch.Size() < b.maxBatchSize {
return nil
}
return b.Flush()
}
// Flush submits the current batch to the index and starts a new one
func (b *FlushingBatch) Flush() error {
err := b.index.Batch(b.batch)
if err != nil {
return err
}
b.batch = b.index.NewBatch()
return nil
}
| modules/indexer/bleve/batch.go | 0 | https://github.com/go-gitea/gitea/commit/ea13b23349ef98249deeb9469f6b1444de42abf5 | [
0.00017783547809813172,
0.00017277726146858186,
0.00016813476395327598,
0.0001726210757624358,
0.000003380857378942892
] |
{
"id": 3,
"code_window": [
"\n",
" res = filterRepoFilesWeighted(['we-got-result.dat', 'word.txt'], 'word');\n",
" expect(res).toHaveLength(2);\n",
" expect(res[0].matchResult).toEqual(['', 'word', '.txt']);\n",
" });\n",
"});"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\n",
" test('escapePath', () => {\n",
" expect(escapePath('a/b/c')).toEqual('a/b/c');\n",
" expect(escapePath('a/b/ c')).toEqual('a/b/%20c');\n",
" });\n"
],
"file_path": "web_src/js/features/repo-findfile.test.js",
"type": "add",
"edit_start_line_idx": 34
} | import {describe, expect, test} from 'vitest';
import {strSubMatch, calcMatchedWeight, filterRepoFilesWeighted} from './repo-findfile.js';
describe('Repo Find Files', () => {
test('strSubMatch', () => {
expect(strSubMatch('abc', '')).toEqual(['abc']);
expect(strSubMatch('abc', 'a')).toEqual(['', 'a', 'bc']);
expect(strSubMatch('abc', 'b')).toEqual(['a', 'b', 'c']);
expect(strSubMatch('abc', 'c')).toEqual(['ab', 'c']);
expect(strSubMatch('abc', 'ac')).toEqual(['', 'a', 'b', 'c']);
expect(strSubMatch('abc', 'z')).toEqual(['abc']);
expect(strSubMatch('abc', 'az')).toEqual(['abc']);
expect(strSubMatch('ABc', 'ac')).toEqual(['', 'A', 'B', 'c']);
expect(strSubMatch('abC', 'ac')).toEqual(['', 'a', 'b', 'C']);
expect(strSubMatch('aabbcc', 'abc')).toEqual(['', 'a', 'a', 'b', 'b', 'c', 'c']);
expect(strSubMatch('the/directory', 'hedir')).toEqual(['t', 'he', '/', 'dir', 'ectory']);
});
test('calcMatchedWeight', () => {
expect(calcMatchedWeight(['a', 'b', 'c', 'd']) < calcMatchedWeight(['a', 'bc', 'c'])).toBeTruthy();
});
test('filterRepoFilesWeighted', () => {
// the first matched result should always be the "word.txt"
let res = filterRepoFilesWeighted(['word.txt', 'we-got-result.dat'], 'word');
expect(res).toHaveLength(2);
expect(res[0].matchResult).toEqual(['', 'word', '.txt']);
res = filterRepoFilesWeighted(['we-got-result.dat', 'word.txt'], 'word');
expect(res).toHaveLength(2);
expect(res[0].matchResult).toEqual(['', 'word', '.txt']);
});
});
| web_src/js/features/repo-findfile.test.js | 1 | https://github.com/go-gitea/gitea/commit/ea13b23349ef98249deeb9469f6b1444de42abf5 | [
0.9982104301452637,
0.4992715120315552,
0.000177852445631288,
0.49934887886047363,
0.49875280261039734
] |
{
"id": 3,
"code_window": [
"\n",
" res = filterRepoFilesWeighted(['we-got-result.dat', 'word.txt'], 'word');\n",
" expect(res).toHaveLength(2);\n",
" expect(res[0].matchResult).toEqual(['', 'word', '.txt']);\n",
" });\n",
"});"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\n",
" test('escapePath', () => {\n",
" expect(escapePath('a/b/c')).toEqual('a/b/c');\n",
" expect(escapePath('a/b/ c')).toEqual('a/b/%20c');\n",
" });\n"
],
"file_path": "web_src/js/features/repo-findfile.test.js",
"type": "add",
"edit_start_line_idx": 34
} | The Universal FOSS Exception, Version 1.0
In addition to the rights set forth in the other license(s) included in the distribution for this software, data, and/or documentation (collectively the "Software," and such licenses collectively with this additional permission the "Software License"), the copyright holders wish to facilitate interoperability with other software, data, and/or documentation distributed with complete corresponding source under a license that is OSI-approved and/or categorized by the FSF as free (collectively "Other FOSS"). We therefore hereby grant the following additional permission with respect to the use and distribution of the Software with Other FOSS, and the constants, function signatures, data structures and other invocation methods used to run or interact with each of them (as to each, such software's "Interfaces"):
(i) The Software's Interfaces may, to the extent permitted by the license of the Other FOSS, be copied into, used and distributed in the Other FOSS in order to enable interoperability, without requiring a change to the license of the Other FOSS other than as to any Interfaces of the Software embedded therein. The Software's Interfaces remain at all times under the Software License, including without limitation as used in the Other FOSS (which upon any such use also then contains a portion of the Software under the Software License).
(ii) The Other FOSS's Interfaces may, to the extent permitted by the license of the Other FOSS, be copied into, used and distributed in the Software in order to enable interoperability, without requiring that such Interfaces be licensed under the terms of the Software License or otherwise altering their original terms, if this does not require any portion of the Software other than such Interfaces to be licensed under the terms other than the Software License.
(iii) If only Interfaces and no other code is copied between the Software and the Other FOSS in either direction, the use and/or distribution of the Software with the Other FOSS shall not be deemed to require that the Other FOSS be licensed under the license of the Software, other than as to any Interfaces of the Software copied into the Other FOSS. This includes, by way of example and without limitation, statically or dynamically linking the Software together with Other FOSS after enabling interoperability using the Interfaces of one or both, and distributing the resulting combination under different licenses for the respective portions thereof.
For avoidance of doubt, a license which is OSI-approved or categorized by the FSF as free, includes, for the purpose of this permission, such licenses with additional permissions, and any license that has previously been so-approved or categorized as free, even if now deprecated or otherwise no longer recognized as approved or free. Nothing in this additional permission grants any right to distribute any portion of the Software on terms other than those of the Software License or grants any additional permission of any kind for use or distribution of the Software in conjunction with software other than Other FOSS.
| options/license/Universal-FOSS-exception-1.0 | 0 | https://github.com/go-gitea/gitea/commit/ea13b23349ef98249deeb9469f6b1444de42abf5 | [
0.00017375341849401593,
0.0001732794044073671,
0.00017280537576880306,
0.0001732794044073671,
4.74021362606436e-7
] |
{
"id": 3,
"code_window": [
"\n",
" res = filterRepoFilesWeighted(['we-got-result.dat', 'word.txt'], 'word');\n",
" expect(res).toHaveLength(2);\n",
" expect(res[0].matchResult).toEqual(['', 'word', '.txt']);\n",
" });\n",
"});"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\n",
" test('escapePath', () => {\n",
" expect(escapePath('a/b/c')).toEqual('a/b/c');\n",
" expect(escapePath('a/b/ c')).toEqual('a/b/%20c');\n",
" });\n"
],
"file_path": "web_src/js/features/repo-findfile.test.js",
"type": "add",
"edit_start_line_idx": 34
} | ---
date: "2022-08-23T00:00:00+00:00"
title: "Vagrant Packages Repository"
slug: "packages/vagrant"
draft: false
toc: false
menu:
sidebar:
parent: "packages"
name: "Vagrant"
weight: 120
identifier: "vagrant"
---
# Vagrant Packages Repository
Publish [Vagrant](https://www.vagrantup.com/) packages for your user or organization.
**Table of Contents**
{{< toc >}}
## Requirements
To work with the Vagrant package registry, you need [Vagrant](https://www.vagrantup.com/downloads) and a tool for making HTTP requests, such as `curl`.
## Publish a package
Publish a Vagrant box by performing an HTTP PUT request to the registry:
```
PUT https://gitea.example.com/api/packages/{owner}/vagrant/{package_name}/{package_version}/{provider}.box
```
| Parameter | Description |
| ----------------- | ----------- |
| `owner` | The owner of the package. |
| `package_name` | The package name. |
| `package_version` | The package version, semver compatible. |
| `provider` | One of the [supported provider names](https://www.vagrantup.com/docs/providers). |
Example for uploading a Hyper-V box:
```shell
curl --user your_username:your_password_or_token \
--upload-file path/to/your/vagrant.box \
https://gitea.example.com/api/packages/testuser/vagrant/test_system/1.0.0/hyperv.box
```
You cannot publish a box if a box of the same name, version and provider already exists. You must delete the existing package first.
## Install a package
To install a box from the package registry, execute the following command:
```shell
vagrant box add "https://gitea.example.com/api/packages/{owner}/vagrant/{package_name}"
```
| Parameter | Description |
| -------------- | ----------- |
| `owner` | The owner of the package. |
| `package_name` | The package name. |
For example:
```shell
vagrant box add "https://gitea.example.com/api/packages/testuser/vagrant/test_system"
```
This will install the latest version of the package. To add a specific version, use the `--box-version` parameter.
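For example, to pin the installation to the `1.0.0` box published above (the version string is illustrative):
```shell
vagrant box add --box-version 1.0.0 "https://gitea.example.com/api/packages/testuser/vagrant/test_system"
```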
If the registry is private, you can pass your [personal access token]({{< relref "doc/developers/api-usage.en-us.md#authentication" >}}) in the `VAGRANT_CLOUD_TOKEN` environment variable.
## Supported commands
```
vagrant box add
```
| docs/content/doc/packages/vagrant.en-us.md | 0 | https://github.com/go-gitea/gitea/commit/ea13b23349ef98249deeb9469f6b1444de42abf5 | [
0.00017963480786420405,
0.00017317631863988936,
0.00016333507664967328,
0.00017336485325358808,
0.000004883234851149609
] |
{
"id": 3,
"code_window": [
"\n",
" res = filterRepoFilesWeighted(['we-got-result.dat', 'word.txt'], 'word');\n",
" expect(res).toHaveLength(2);\n",
" expect(res[0].matchResult).toEqual(['', 'word', '.txt']);\n",
" });\n",
"});"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\n",
" test('escapePath', () => {\n",
" expect(escapePath('a/b/c')).toEqual('a/b/c');\n",
" expect(escapePath('a/b/ c')).toEqual('a/b/%20c');\n",
" });\n"
],
"file_path": "web_src/js/features/repo-findfile.test.js",
"type": "add",
"edit_start_line_idx": 34
} | EU DataGrid Software License
Copyright (c) 2001 EU DataGrid. All rights reserved.
This software includes voluntary contributions made to the EU DataGrid. For more information on the EU DataGrid, please see http://www.eu-datagrid.org/.
Installation, use, reproduction, display, modification and redistribution of this software, with or without modification, in source and binary forms, are permitted. Any exercise of rights under this license by you or your sub-licensees is subject to the following conditions:
1. Redistributions of this software, with or without modification, must reproduce the above copyright notice and the above license statement as well as this list of conditions, in the software, the user documentation and any other materials provided with the software.
2. The user documentation, if any, included with a redistribution, must include the following notice:
"This product includes software developed by the EU DataGrid (http://www.eu-datagrid.org/)."
Alternatively, if that is where third-party acknowledgments normally appear, this acknowledgment must be reproduced in the software itself.
3. The names "EDG", "EDG Toolkit", “EU DataGrid” and "EU DataGrid Project" may not be used to endorse or promote software, or products derived therefrom, except with prior written permission by [email protected].
4. You are under no obligation to provide anyone with any bug fixes, patches, upgrades or other modifications, enhancements or derivatives of the features, functionality or performance of this software that you may develop. However, if you publish or distribute your modifications, enhancements or derivative works without contemporaneously requiring users to enter into a separate written license agreement, then you are deemed to have granted participants in the EU DataGrid a worldwide, non-exclusive, royalty-free, perpetual license to install, use, reproduce, display, modify, redistribute and sub-license your modifications, enhancements or derivative works, whether in binary or source code form, under the license conditions stated in this list of conditions.
5. DISCLAIMER
THIS SOFTWARE IS PROVIDED BY THE EU DATAGRID AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, OF SATISFACTORY QUALITY, AND FITNESS FOR A PARTICULAR PURPOSE OR USE ARE DISCLAIMED. THE EU DATAGRID AND CONTRIBUTORS MAKE NO REPRESENTATION THAT THE SOFTWARE, MODIFICATIONS, ENHANCEMENTS OR DERIVATIVE WORKS THEREOF, WILL NOT INFRINGE ANY PATENT, COPYRIGHT, TRADE SECRET OR OTHER PROPRIETARY RIGHT.
6. LIMITATION OF LIABILITY
THE EU DATAGRID AND CONTRIBUTORS SHALL HAVE NO LIABILITY TO LICENSEE OR OTHER PERSONS FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL, EXEMPLARY, OR PUNITIVE DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, LOSS OF USE, DATA OR PROFITS, OR BUSINESS INTERRUPTION, HOWEVER CAUSED AND ON ANY THEORY OF CONTRACT, WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR OTHERWISE, ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
| options/license/EUDatagrid | 0 | https://github.com/go-gitea/gitea/commit/ea13b23349ef98249deeb9469f6b1444de42abf5 | [
0.00017591942742001265,
0.0001714292011456564,
0.00016314347158186138,
0.00017522468988317996,
0.000005865753337275237
] |
{
"id": 0,
"code_window": [
"\n",
"package main\n",
"\n",
"import (\n",
"\t\"bytes\"\n",
"\t\"errors\"\n",
"\t\"fmt\"\n",
"\t\"os\"\n",
"\t\"os/signal\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/geth/dbcmd.go",
"type": "replace",
"edit_start_line_idx": 20
} | // Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package common contains various helper functions.
package common
import (
"encoding/hex"
)
// FromHex returns the bytes represented by the hexadecimal string s.
// s may be prefixed with "0x".
func FromHex(s string) []byte {
if has0xPrefix(s) {
s = s[2:]
}
if len(s)%2 == 1 {
s = "0" + s
}
return Hex2Bytes(s)
}
// CopyBytes returns an exact copy of the provided bytes.
func CopyBytes(b []byte) (copiedBytes []byte) {
if b == nil {
return nil
}
copiedBytes = make([]byte, len(b))
copy(copiedBytes, b)
return
}
// has0xPrefix reports whether str begins with '0x' or '0X'.
func has0xPrefix(str string) bool {
return len(str) >= 2 && str[0] == '0' && (str[1] == 'x' || str[1] == 'X')
}
// isHexCharacter returns bool of c being a valid hexadecimal.
func isHexCharacter(c byte) bool {
return ('0' <= c && c <= '9') || ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F')
}
// isHex validates whether str is a valid, even-length hexadecimal string.
func isHex(str string) bool {
if len(str)%2 != 0 {
return false
}
for _, c := range []byte(str) {
if !isHexCharacter(c) {
return false
}
}
return true
}
// Bytes2Hex returns the hexadecimal encoding of d.
func Bytes2Hex(d []byte) string {
return hex.EncodeToString(d)
}
// Hex2Bytes returns the bytes represented by the hexadecimal string str.
func Hex2Bytes(str string) []byte {
h, _ := hex.DecodeString(str)
return h
}
// Hex2BytesFixed returns bytes of a specified fixed length flen.
func Hex2BytesFixed(str string, flen int) []byte {
h, _ := hex.DecodeString(str)
if len(h) == flen {
return h
}
if len(h) > flen {
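// Input is longer than flen: keep only the flen least-significant (trailing) bytes.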
return h[len(h)-flen:]
}
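// Input is shorter than flen: left-pad the result with zero bytes.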
hh := make([]byte, flen)
copy(hh[flen-len(h):flen], h)
return hh
}
// RightPadBytes zero-pads slice to the right up to length l.
func RightPadBytes(slice []byte, l int) []byte {
if l <= len(slice) {
return slice
}
padded := make([]byte, l)
copy(padded, slice)
return padded
}
// LeftPadBytes zero-pads slice to the left up to length l.
func LeftPadBytes(slice []byte, l int) []byte {
if l <= len(slice) {
return slice
}
padded := make([]byte, l)
copy(padded[l-len(slice):], slice)
return padded
}
// TrimLeftZeroes returns a subslice of s without leading zeroes
func TrimLeftZeroes(s []byte) []byte {
idx := 0
for ; idx < len(s); idx++ {
if s[idx] != 0 {
break
}
}
return s[idx:]
}
// TrimRightZeroes returns a subslice of s without trailing zeroes
func TrimRightZeroes(s []byte) []byte {
idx := len(s)
for ; idx > 0; idx-- {
if s[idx-1] != 0 {
break
}
}
return s[:idx]
}
| common/bytes.go | 1 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.0009397414978593588,
0.0002595501719042659,
0.0001681730936979875,
0.00017068488523364067,
0.00020631658844649792
] |
{
"id": 0,
"code_window": [
"\n",
"package main\n",
"\n",
"import (\n",
"\t\"bytes\"\n",
"\t\"errors\"\n",
"\t\"fmt\"\n",
"\t\"os\"\n",
"\t\"os/signal\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/geth/dbcmd.go",
"type": "replace",
"edit_start_line_idx": 20
} | // Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package geth
import (
"fmt"
"math/big"
"reflect"
"testing"
"github.com/ethereum/go-ethereum/common"
)
func TestInterfaceGetSet(t *testing.T) {
var tests = []struct {
method string
input interface{}
expect interface{}
}{
{"Bool", true, true},
{"Bool", false, false},
{"Bools", &Bools{[]bool{false, true}}, &Bools{[]bool{false, true}}},
{"String", "go-ethereum", "go-ethereum"},
{"Strings", &Strings{strs: []string{"hello", "world"}}, &Strings{strs: []string{"hello", "world"}}},
{"Binary", []byte{0x01, 0x02}, []byte{0x01, 0x02}},
{"Binaries", &Binaries{[][]byte{{0x01, 0x02}, {0x03, 0x04}}}, &Binaries{[][]byte{{0x01, 0x02}, {0x03, 0x04}}}},
{"Address", &Address{common.HexToAddress("deadbeef")}, &Address{common.HexToAddress("deadbeef")}},
{"Addresses", &Addresses{[]common.Address{common.HexToAddress("deadbeef"), common.HexToAddress("cafebabe")}}, &Addresses{[]common.Address{common.HexToAddress("deadbeef"), common.HexToAddress("cafebabe")}}},
{"Hash", &Hash{common.HexToHash("deadbeef")}, &Hash{common.HexToHash("deadbeef")}},
{"Hashes", &Hashes{[]common.Hash{common.HexToHash("deadbeef"), common.HexToHash("cafebabe")}}, &Hashes{[]common.Hash{common.HexToHash("deadbeef"), common.HexToHash("cafebabe")}}},
{"Int8", int8(1), int8(1)},
{"Int16", int16(1), int16(1)},
{"Int32", int32(1), int32(1)},
{"Int64", int64(1), int64(1)},
{"Int8s", &BigInts{[]*big.Int{big.NewInt(1), big.NewInt(2)}}, &BigInts{[]*big.Int{big.NewInt(1), big.NewInt(2)}}},
{"Int16s", &BigInts{[]*big.Int{big.NewInt(1), big.NewInt(2)}}, &BigInts{[]*big.Int{big.NewInt(1), big.NewInt(2)}}},
{"Int32s", &BigInts{[]*big.Int{big.NewInt(1), big.NewInt(2)}}, &BigInts{[]*big.Int{big.NewInt(1), big.NewInt(2)}}},
{"Int64s", &BigInts{[]*big.Int{big.NewInt(1), big.NewInt(2)}}, &BigInts{[]*big.Int{big.NewInt(1), big.NewInt(2)}}},
{"Uint8", NewBigInt(1), NewBigInt(1)},
{"Uint16", NewBigInt(1), NewBigInt(1)},
{"Uint32", NewBigInt(1), NewBigInt(1)},
{"Uint64", NewBigInt(1), NewBigInt(1)},
{"Uint8s", &BigInts{[]*big.Int{big.NewInt(1), big.NewInt(2)}}, &BigInts{[]*big.Int{big.NewInt(1), big.NewInt(2)}}},
{"Uint16s", &BigInts{[]*big.Int{big.NewInt(1), big.NewInt(2)}}, &BigInts{[]*big.Int{big.NewInt(1), big.NewInt(2)}}},
{"Uint32s", &BigInts{[]*big.Int{big.NewInt(1), big.NewInt(2)}}, &BigInts{[]*big.Int{big.NewInt(1), big.NewInt(2)}}},
{"Uint64s", &BigInts{[]*big.Int{big.NewInt(1), big.NewInt(2)}}, &BigInts{[]*big.Int{big.NewInt(1), big.NewInt(2)}}},
{"BigInt", NewBigInt(1), NewBigInt(1)},
{"BigInts", &BigInts{[]*big.Int{big.NewInt(1), big.NewInt(2)}}, &BigInts{[]*big.Int{big.NewInt(1), big.NewInt(2)}}},
}
args := NewInterfaces(len(tests))
callFn := func(receiver interface{}, method string, arg interface{}) interface{} {
rval := reflect.ValueOf(receiver)
rval.MethodByName(fmt.Sprintf("Set%s", method)).Call([]reflect.Value{reflect.ValueOf(arg)})
res := rval.MethodByName(fmt.Sprintf("Get%s", method)).Call(nil)
if len(res) > 0 {
return res[0].Interface()
}
return nil
}
for index, c := range tests {
// In theory the change of iface shouldn't affect the args value
iface, _ := args.Get(index)
result := callFn(iface, c.method, c.input)
if !reflect.DeepEqual(result, c.expect) {
t.Errorf("Interface get/set mismatch, want %v, got %v", c.expect, result)
}
// Check whether the underlying value in args is still zero
iface, _ = args.Get(index)
if iface.object != nil {
t.Error("Get operation is not write safe")
}
}
}
| mobile/interface_test.go | 0 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.00019830405653920025,
0.00017289596144109964,
0.00016316260735038668,
0.00017015860066749156,
0.00000921204627957195
] |
{
"id": 0,
"code_window": [
"\n",
"package main\n",
"\n",
"import (\n",
"\t\"bytes\"\n",
"\t\"errors\"\n",
"\t\"fmt\"\n",
"\t\"os\"\n",
"\t\"os/signal\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/geth/dbcmd.go",
"type": "replace",
"edit_start_line_idx": 20
} | // Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package backends
import (
"context"
"errors"
"fmt"
"math/big"
"sync"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
)
// This nil assignment ensures at compile time that SimulatedBackend implements bind.ContractBackend.
var _ bind.ContractBackend = (*SimulatedBackend)(nil)
var (
errBlockNumberUnsupported = errors.New("simulatedBackend cannot access blocks other than the latest block")
errBlockDoesNotExist = errors.New("block does not exist in blockchain")
errTransactionDoesNotExist = errors.New("transaction does not exist")
)
// SimulatedBackend implements bind.ContractBackend, simulating a blockchain in
// the background. Its main purpose is to allow for easy testing of contract bindings.
// Simulated backend implements the following interfaces:
// ChainReader, ChainStateReader, ContractBackend, ContractCaller, ContractFilterer, ContractTransactor,
// DeployBackend, GasEstimator, GasPricer, LogFilterer, PendingContractCaller, TransactionReader, and TransactionSender
type SimulatedBackend struct {
database ethdb.Database // In memory database to store our testing data
blockchain *core.BlockChain // Ethereum blockchain to handle the consensus
mu sync.Mutex
pendingBlock *types.Block // Currently pending block that will be imported on request
pendingState *state.StateDB // Currently pending state that will be the active on request
events *filters.EventSystem // Event system for filtering log events live
config *params.ChainConfig
}
// NewSimulatedBackendWithDatabase creates a new binding backend based on the given database
// and uses a simulated blockchain for testing purposes.
// A simulated backend always uses chainID 1337.
func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
genesis := core.Genesis{Config: params.AllEthashProtocolChanges, GasLimit: gasLimit, Alloc: alloc}
genesis.MustCommit(database)
blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
backend := &SimulatedBackend{
database: database,
blockchain: blockchain,
config: genesis.Config,
events: filters.NewEventSystem(&filterBackend{database, blockchain}, false),
}
backend.rollback(blockchain.CurrentBlock())
return backend
}
// NewSimulatedBackend creates a new binding backend using a simulated blockchain
// for testing purposes.
// A simulated backend always uses chainID 1337.
func NewSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
return NewSimulatedBackendWithDatabase(rawdb.NewMemoryDatabase(), alloc, gasLimit)
}
// Close terminates the underlying blockchain's update loop.
func (b *SimulatedBackend) Close() error {
b.blockchain.Stop()
return nil
}
// Commit imports all the pending transactions as a single block and starts a
// fresh new state.
func (b *SimulatedBackend) Commit() {
b.mu.Lock()
defer b.mu.Unlock()
if _, err := b.blockchain.InsertChain([]*types.Block{b.pendingBlock}); err != nil {
panic(err) // This cannot happen unless the simulator is wrong, fail in that case
}
// Using the last inserted block here makes it possible to build on a side
// chain after a fork.
b.rollback(b.pendingBlock)
}
// Rollback aborts all pending transactions, reverting to the last committed state.
func (b *SimulatedBackend) Rollback() {
b.mu.Lock()
defer b.mu.Unlock()
b.rollback(b.blockchain.CurrentBlock())
}
func (b *SimulatedBackend) rollback(parent *types.Block) {
blocks, _ := core.GenerateChain(b.config, parent, ethash.NewFaker(), b.database, 1, func(int, *core.BlockGen) {})
b.pendingBlock = blocks[0]
b.pendingState, _ = state.New(b.pendingBlock.Root(), b.blockchain.StateCache(), nil)
}
// Fork creates a side-chain that can be used to simulate reorgs.
//
// This function should be called with the ancestor block where the new side
// chain should be started. Transactions (old and new) can then be applied on
// top and Commit-ed.
//
// Note, the side-chain will only become canonical (and trigger the events) when
// it becomes longer. Until then CallContract will still operate on the current
// canonical chain.
//
// There is a % chance that the side chain becomes canonical at the same length
// to simulate live network behavior.
func (b *SimulatedBackend) Fork(ctx context.Context, parent common.Hash) error {
b.mu.Lock()
defer b.mu.Unlock()
if len(b.pendingBlock.Transactions()) != 0 {
return errors.New("pending block dirty")
}
block, err := b.blockByHash(ctx, parent)
if err != nil {
return err
}
b.rollback(block)
return nil
}
// stateByBlockNumber retrieves a state by a given blocknumber.
func (b *SimulatedBackend) stateByBlockNumber(ctx context.Context, blockNumber *big.Int) (*state.StateDB, error) {
if blockNumber == nil || blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) == 0 {
return b.blockchain.State()
}
block, err := b.blockByNumber(ctx, blockNumber)
if err != nil {
return nil, err
}
return b.blockchain.StateAt(block.Root())
}
// CodeAt returns the code associated with a certain account in the blockchain.
func (b *SimulatedBackend) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) {
b.mu.Lock()
defer b.mu.Unlock()
stateDB, err := b.stateByBlockNumber(ctx, blockNumber)
if err != nil {
return nil, err
}
return stateDB.GetCode(contract), nil
}
// BalanceAt returns the wei balance of a certain account in the blockchain.
func (b *SimulatedBackend) BalanceAt(ctx context.Context, contract common.Address, blockNumber *big.Int) (*big.Int, error) {
b.mu.Lock()
defer b.mu.Unlock()
stateDB, err := b.stateByBlockNumber(ctx, blockNumber)
if err != nil {
return nil, err
}
return stateDB.GetBalance(contract), nil
}
// NonceAt returns the nonce of a certain account in the blockchain.
func (b *SimulatedBackend) NonceAt(ctx context.Context, contract common.Address, blockNumber *big.Int) (uint64, error) {
b.mu.Lock()
defer b.mu.Unlock()
stateDB, err := b.stateByBlockNumber(ctx, blockNumber)
if err != nil {
return 0, err
}
return stateDB.GetNonce(contract), nil
}
// StorageAt returns the value of key in the storage of an account in the blockchain.
func (b *SimulatedBackend) StorageAt(ctx context.Context, contract common.Address, key common.Hash, blockNumber *big.Int) ([]byte, error) {
b.mu.Lock()
defer b.mu.Unlock()
stateDB, err := b.stateByBlockNumber(ctx, blockNumber)
if err != nil {
return nil, err
}
val := stateDB.GetState(contract, key)
return val[:], nil
}
// TransactionReceipt returns the receipt of a transaction.
func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
b.mu.Lock()
defer b.mu.Unlock()
receipt, _, _, _ := rawdb.ReadReceipt(b.database, txHash, b.config)
if receipt == nil {
return nil, ethereum.NotFound
}
return receipt, nil
}
// TransactionByHash checks the pool of pending transactions in addition to the
// blockchain. The isPending return value indicates whether the transaction has been
// mined yet. Note that the transaction may not be part of the canonical chain even if
// it's not pending.
func (b *SimulatedBackend) TransactionByHash(ctx context.Context, txHash common.Hash) (*types.Transaction, bool, error) {
b.mu.Lock()
defer b.mu.Unlock()
tx := b.pendingBlock.Transaction(txHash)
if tx != nil {
return tx, true, nil
}
tx, _, _, _ = rawdb.ReadTransaction(b.database, txHash)
if tx != nil {
return tx, false, nil
}
return nil, false, ethereum.NotFound
}
// BlockByHash retrieves a block based on the block hash.
func (b *SimulatedBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {
b.mu.Lock()
defer b.mu.Unlock()
return b.blockByHash(ctx, hash)
}
// blockByHash retrieves a block based on the block hash without locking.
func (b *SimulatedBackend) blockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {
if hash == b.pendingBlock.Hash() {
return b.pendingBlock, nil
}
block := b.blockchain.GetBlockByHash(hash)
if block != nil {
return block, nil
}
return nil, errBlockDoesNotExist
}
// BlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
func (b *SimulatedBackend) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) {
b.mu.Lock()
defer b.mu.Unlock()
return b.blockByNumber(ctx, number)
}
// blockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found, without taking the lock.
func (b *SimulatedBackend) blockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) {
if number == nil || number.Cmp(b.pendingBlock.Number()) == 0 {
return b.blockchain.CurrentBlock(), nil
}
block := b.blockchain.GetBlockByNumber(uint64(number.Int64()))
if block == nil {
return nil, errBlockDoesNotExist
}
return block, nil
}
// HeaderByHash returns a block header from the current canonical chain.
func (b *SimulatedBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
b.mu.Lock()
defer b.mu.Unlock()
if hash == b.pendingBlock.Hash() {
return b.pendingBlock.Header(), nil
}
header := b.blockchain.GetHeaderByHash(hash)
if header == nil {
return nil, errBlockDoesNotExist
}
return header, nil
}
// HeaderByNumber returns a block header from the current canonical chain. If number is
// nil, the latest known header is returned.
func (b *SimulatedBackend) HeaderByNumber(ctx context.Context, block *big.Int) (*types.Header, error) {
b.mu.Lock()
defer b.mu.Unlock()
if block == nil || block.Cmp(b.pendingBlock.Number()) == 0 {
return b.blockchain.CurrentHeader(), nil
}
return b.blockchain.GetHeaderByNumber(uint64(block.Int64())), nil
}
// TransactionCount returns the number of transactions in a given block.
func (b *SimulatedBackend) TransactionCount(ctx context.Context, blockHash common.Hash) (uint, error) {
b.mu.Lock()
defer b.mu.Unlock()
if blockHash == b.pendingBlock.Hash() {
return uint(b.pendingBlock.Transactions().Len()), nil
}
block := b.blockchain.GetBlockByHash(blockHash)
if block == nil {
return uint(0), errBlockDoesNotExist
}
return uint(block.Transactions().Len()), nil
}
// TransactionInBlock returns the transaction for a specific block at a specific index.
func (b *SimulatedBackend) TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*types.Transaction, error) {
b.mu.Lock()
defer b.mu.Unlock()
if blockHash == b.pendingBlock.Hash() {
transactions := b.pendingBlock.Transactions()
if uint(len(transactions)) < index+1 {
return nil, errTransactionDoesNotExist
}
return transactions[index], nil
}
block := b.blockchain.GetBlockByHash(blockHash)
if block == nil {
return nil, errBlockDoesNotExist
}
transactions := block.Transactions()
if uint(len(transactions)) < index+1 {
return nil, errTransactionDoesNotExist
}
return transactions[index], nil
}
// PendingCodeAt returns the code associated with an account in the pending state.
func (b *SimulatedBackend) PendingCodeAt(ctx context.Context, contract common.Address) ([]byte, error) {
b.mu.Lock()
defer b.mu.Unlock()
return b.pendingState.GetCode(contract), nil
}
func newRevertError(result *core.ExecutionResult) *revertError {
reason, errUnpack := abi.UnpackRevert(result.Revert())
err := errors.New("execution reverted")
if errUnpack == nil {
err = fmt.Errorf("execution reverted: %v", reason)
}
return &revertError{
error: err,
reason: hexutil.Encode(result.Revert()),
}
}
// revertError is an API error that encompasses an EVM revert with JSON error
// code and a binary data blob.
type revertError struct {
error
reason string // revert reason hex encoded
}
// ErrorCode returns the JSON error code for a revert.
// See: https://github.com/ethereum/wiki/wiki/JSON-RPC-Error-Codes-Improvement-Proposal
func (e *revertError) ErrorCode() int {
return 3
}
// ErrorData returns the hex encoded revert reason.
func (e *revertError) ErrorData() interface{} {
return e.reason
}
// CallContract executes a contract call.
func (b *SimulatedBackend) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
b.mu.Lock()
defer b.mu.Unlock()
if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 {
return nil, errBlockNumberUnsupported
}
stateDB, err := b.blockchain.State()
if err != nil {
return nil, err
}
res, err := b.callContract(ctx, call, b.blockchain.CurrentBlock(), stateDB)
if err != nil {
return nil, err
}
// If the result contains a revert reason, try to unpack and return it.
if len(res.Revert()) > 0 {
return nil, newRevertError(res)
}
return res.Return(), res.Err
}
// PendingCallContract executes a contract call on the pending state.
func (b *SimulatedBackend) PendingCallContract(ctx context.Context, call ethereum.CallMsg) ([]byte, error) {
b.mu.Lock()
defer b.mu.Unlock()
defer b.pendingState.RevertToSnapshot(b.pendingState.Snapshot())
res, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
if err != nil {
return nil, err
}
// If the result contains a revert reason, try to unpack and return it.
if len(res.Revert()) > 0 {
return nil, newRevertError(res)
}
return res.Return(), res.Err
}
// PendingNonceAt implements PendingStateReader.PendingNonceAt, retrieving
// the nonce currently pending for the account.
func (b *SimulatedBackend) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) {
b.mu.Lock()
defer b.mu.Unlock()
return b.pendingState.GetOrNewStateObject(account).Nonce(), nil
}
// SuggestGasPrice implements ContractTransactor.SuggestGasPrice. Since the simulated
// chain doesn't have miners, we just return a gas price of 1 for any call.
func (b *SimulatedBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error) {
b.mu.Lock()
defer b.mu.Unlock()
if b.pendingBlock.Header().BaseFee != nil {
return b.pendingBlock.Header().BaseFee, nil
}
return big.NewInt(1), nil
}
// SuggestGasTipCap implements ContractTransactor.SuggestGasTipCap. Since the simulated
// chain doesn't have miners, we just return a gas tip of 1 for any call.
func (b *SimulatedBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, error) {
return big.NewInt(1), nil
}
// EstimateGas executes the requested code against the currently pending block/state and
// returns the used amount of gas.
func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) {
b.mu.Lock()
defer b.mu.Unlock()
// Determine the lowest and highest possible gas limits to binary search in between
var (
lo uint64 = params.TxGas - 1
hi uint64
cap uint64
)
if call.Gas >= params.TxGas {
hi = call.Gas
} else {
hi = b.pendingBlock.GasLimit()
}
// Normalize the max fee per gas the call is willing to spend.
var feeCap *big.Int
if call.GasPrice != nil && (call.GasFeeCap != nil || call.GasTipCap != nil) {
return 0, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
} else if call.GasPrice != nil {
feeCap = call.GasPrice
} else if call.GasFeeCap != nil {
feeCap = call.GasFeeCap
} else {
feeCap = common.Big0
}
// Recap the highest gas allowance with account's balance.
if feeCap.BitLen() != 0 {
balance := b.pendingState.GetBalance(call.From) // from can't be nil
available := new(big.Int).Set(balance)
if call.Value != nil {
if call.Value.Cmp(available) >= 0 {
return 0, errors.New("insufficient funds for transfer")
}
available.Sub(available, call.Value)
}
allowance := new(big.Int).Div(available, feeCap)
if allowance.IsUint64() && hi > allowance.Uint64() {
transfer := call.Value
if transfer == nil {
transfer = new(big.Int)
}
log.Warn("Gas estimation capped by limited funds", "original", hi, "balance", balance,
"sent", transfer, "feecap", feeCap, "fundable", allowance)
hi = allowance.Uint64()
}
}
cap = hi
// Create a helper to check if a gas allowance results in an executable transaction
executable := func(gas uint64) (bool, *core.ExecutionResult, error) {
call.Gas = gas
snapshot := b.pendingState.Snapshot()
res, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
b.pendingState.RevertToSnapshot(snapshot)
if err != nil {
if errors.Is(err, core.ErrIntrinsicGas) {
return true, nil, nil // Special case, raise gas limit
}
return true, nil, err // Bail out
}
return res.Failed(), res, nil
}
// Execute the binary search and hone in on an executable gas limit
for lo+1 < hi {
mid := (hi + lo) / 2
failed, _, err := executable(mid)
// If the error is not nil (a consensus error), it means the provided message
// call or transaction will never be accepted no matter how much gas it is
// assigned. Return the error directly, don't struggle any more
if err != nil {
return 0, err
}
if failed {
lo = mid
} else {
hi = mid
}
}
// Reject the transaction as invalid if it still fails at the highest allowance
if hi == cap {
failed, result, err := executable(hi)
if err != nil {
return 0, err
}
if failed {
if result != nil && result.Err != vm.ErrOutOfGas {
if len(result.Revert()) > 0 {
return 0, newRevertError(result)
}
return 0, result.Err
}
// Otherwise, the specified gas cap is too low
return 0, fmt.Errorf("gas required exceeds allowance (%d)", cap)
}
}
return hi, nil
}
// callContract implements common code between normal and pending contract calls.
// state is modified during execution, make sure to copy it if necessary.
func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, stateDB *state.StateDB) (*core.ExecutionResult, error) {
// Gas prices post 1559 need to be initialized
if call.GasPrice != nil && (call.GasFeeCap != nil || call.GasTipCap != nil) {
return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
}
head := b.blockchain.CurrentHeader()
if !b.blockchain.Config().IsLondon(head.Number) {
// If there's no basefee, then it must be a non-1559 execution
if call.GasPrice == nil {
call.GasPrice = new(big.Int)
}
call.GasFeeCap, call.GasTipCap = call.GasPrice, call.GasPrice
} else {
// A basefee is provided, necessitating 1559-type execution
if call.GasPrice != nil {
// User specified the legacy gas field, convert to 1559 gas typing
call.GasFeeCap, call.GasTipCap = call.GasPrice, call.GasPrice
} else {
// User specified 1559 gas fields (or none), use those
if call.GasFeeCap == nil {
call.GasFeeCap = new(big.Int)
}
if call.GasTipCap == nil {
call.GasTipCap = new(big.Int)
}
// Backfill the legacy gasPrice for EVM execution, unless we're all zeroes
call.GasPrice = new(big.Int)
if call.GasFeeCap.BitLen() > 0 || call.GasTipCap.BitLen() > 0 {
call.GasPrice = math.BigMin(new(big.Int).Add(call.GasTipCap, head.BaseFee), call.GasFeeCap)
}
}
}
// Ensure message is initialized properly.
if call.Gas == 0 {
call.Gas = 50000000
}
if call.Value == nil {
call.Value = new(big.Int)
}
// Set infinite balance to the fake caller account.
from := stateDB.GetOrNewStateObject(call.From)
from.SetBalance(math.MaxBig256)
// Execute the call.
msg := callMsg{call}
txContext := core.NewEVMTxContext(msg)
evmContext := core.NewEVMBlockContext(block.Header(), b.blockchain, nil)
// Create a new environment which holds all relevant information
// about the transaction and calling mechanisms.
vmEnv := vm.NewEVM(evmContext, txContext, stateDB, b.config, vm.Config{NoBaseFee: true})
gasPool := new(core.GasPool).AddGas(math.MaxUint64)
return core.NewStateTransition(vmEnv, msg, gasPool).TransitionDb()
}
// SendTransaction updates the pending block to include the given transaction.
func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transaction) error {
b.mu.Lock()
defer b.mu.Unlock()
// Get the last block
block, err := b.blockByHash(ctx, b.pendingBlock.ParentHash())
if err != nil {
return fmt.Errorf("could not fetch parent")
}
// Check transaction validity
signer := types.MakeSigner(b.blockchain.Config(), block.Number())
sender, err := types.Sender(signer, tx)
if err != nil {
return fmt.Errorf("invalid transaction: %v", err)
}
nonce := b.pendingState.GetNonce(sender)
if tx.Nonce() != nonce {
return fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce)
}
// Include tx in chain
blocks, _ := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
for _, tx := range b.pendingBlock.Transactions() {
block.AddTxWithChain(b.blockchain, tx)
}
block.AddTxWithChain(b.blockchain, tx)
})
stateDB, _ := b.blockchain.State()
b.pendingBlock = blocks[0]
b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil)
return nil
}
// FilterLogs executes a log filter operation, blocking during execution and
// returning all the results in one batch.
//
// TODO(karalabe): Deprecate when the subscription one can return past data too.
func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) {
var filter *filters.Filter
if query.BlockHash != nil {
// Block filter requested, construct a single-shot filter
filter = filters.NewBlockFilter(&filterBackend{b.database, b.blockchain}, *query.BlockHash, query.Addresses, query.Topics)
} else {
// Initialize unset filter boundaries to run from genesis to chain head
from := int64(0)
if query.FromBlock != nil {
from = query.FromBlock.Int64()
}
to := int64(-1)
if query.ToBlock != nil {
to = query.ToBlock.Int64()
}
// Construct the range filter
filter = filters.NewRangeFilter(&filterBackend{b.database, b.blockchain}, from, to, query.Addresses, query.Topics)
}
// Run the filter and return all the logs
logs, err := filter.Logs(ctx)
if err != nil {
return nil, err
}
res := make([]types.Log, len(logs))
for i, nLog := range logs {
res[i] = *nLog
}
return res, nil
}
// SubscribeFilterLogs creates a background log filtering operation, returning a
// subscription immediately, which can be used to stream the found events.
func (b *SimulatedBackend) SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) {
// Subscribe to contract events
sink := make(chan []*types.Log)
sub, err := b.events.SubscribeLogs(query, sink)
if err != nil {
return nil, err
}
// Since we're getting logs in batches, we need to flatten them into a plain stream
return event.NewSubscription(func(quit <-chan struct{}) error {
defer sub.Unsubscribe()
for {
select {
case logs := <-sink:
for _, nlog := range logs {
select {
case ch <- *nlog:
case err := <-sub.Err():
return err
case <-quit:
return nil
}
}
case err := <-sub.Err():
return err
case <-quit:
return nil
}
}
}), nil
}
// SubscribeNewHead returns an event subscription for a new header.
func (b *SimulatedBackend) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) {
// subscribe to a new head
sink := make(chan *types.Header)
sub := b.events.SubscribeNewHeads(sink)
return event.NewSubscription(func(quit <-chan struct{}) error {
defer sub.Unsubscribe()
for {
select {
case head := <-sink:
select {
case ch <- head:
case err := <-sub.Err():
return err
case <-quit:
return nil
}
case err := <-sub.Err():
return err
case <-quit:
return nil
}
}
}), nil
}
// AdjustTime adds a time shift to the simulated clock.
// It can only be called on empty blocks.
func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
b.mu.Lock()
defer b.mu.Unlock()
if len(b.pendingBlock.Transactions()) != 0 {
return errors.New("Could not adjust time on non-empty block")
}
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
block.OffsetTime(int64(adjustment.Seconds()))
})
stateDB, _ := b.blockchain.State()
b.pendingBlock = blocks[0]
b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil)
return nil
}
// Blockchain returns the underlying blockchain.
func (b *SimulatedBackend) Blockchain() *core.BlockChain {
return b.blockchain
}
// callMsg implements core.Message to allow passing it as a transaction simulator.
type callMsg struct {
ethereum.CallMsg
}
func (m callMsg) From() common.Address { return m.CallMsg.From }
func (m callMsg) Nonce() uint64 { return 0 }
func (m callMsg) IsFake() bool { return true }
func (m callMsg) To() *common.Address { return m.CallMsg.To }
func (m callMsg) GasPrice() *big.Int { return m.CallMsg.GasPrice }
func (m callMsg) GasFeeCap() *big.Int { return m.CallMsg.GasFeeCap }
func (m callMsg) GasTipCap() *big.Int { return m.CallMsg.GasTipCap }
func (m callMsg) Gas() uint64 { return m.CallMsg.Gas }
func (m callMsg) Value() *big.Int { return m.CallMsg.Value }
func (m callMsg) Data() []byte { return m.CallMsg.Data }
func (m callMsg) AccessList() types.AccessList { return m.CallMsg.AccessList }
// filterBackend implements filters.Backend to support filtering for logs without
// taking bloom-bits acceleration structures into account.
type filterBackend struct {
db ethdb.Database
bc *core.BlockChain
}
func (fb *filterBackend) ChainDb() ethdb.Database { return fb.db }
func (fb *filterBackend) EventMux() *event.TypeMux { panic("not supported") }
func (fb *filterBackend) HeaderByNumber(ctx context.Context, block rpc.BlockNumber) (*types.Header, error) {
if block == rpc.LatestBlockNumber {
return fb.bc.CurrentHeader(), nil
}
return fb.bc.GetHeaderByNumber(uint64(block.Int64())), nil
}
func (fb *filterBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
return fb.bc.GetHeaderByHash(hash), nil
}
func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
number := rawdb.ReadHeaderNumber(fb.db, hash)
if number == nil {
return nil, nil
}
return rawdb.ReadReceipts(fb.db, hash, *number, fb.bc.Config()), nil
}
func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {
number := rawdb.ReadHeaderNumber(fb.db, hash)
if number == nil {
return nil, nil
}
receipts := rawdb.ReadReceipts(fb.db, hash, *number, fb.bc.Config())
if receipts == nil {
return nil, nil
}
logs := make([][]*types.Log, len(receipts))
for i, receipt := range receipts {
logs[i] = receipt.Logs
}
return logs, nil
}
func (fb *filterBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
return nullSubscription()
}
func (fb *filterBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
return fb.bc.SubscribeChainEvent(ch)
}
func (fb *filterBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
return fb.bc.SubscribeRemovedLogsEvent(ch)
}
func (fb *filterBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
return fb.bc.SubscribeLogsEvent(ch)
}
func (fb *filterBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {
return nullSubscription()
}
func (fb *filterBackend) BloomStatus() (uint64, uint64) { return 4096, 0 }
func (fb *filterBackend) ServiceFilter(ctx context.Context, ms *bloombits.MatcherSession) {
panic("not supported")
}
func nullSubscription() event.Subscription {
return event.NewSubscription(func(quit <-chan struct{}) error {
<-quit
return nil
})
}
| accounts/abi/bind/backends/simulated.go | 0 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.000286449067061767,
0.00017339765327051282,
0.00016272942593786865,
0.00016963259258773178,
0.00001741941196087282
] |
{
"id": 0,
"code_window": [
"\n",
"package main\n",
"\n",
"import (\n",
"\t\"bytes\"\n",
"\t\"errors\"\n",
"\t\"fmt\"\n",
"\t\"os\"\n",
"\t\"os/signal\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "cmd/geth/dbcmd.go",
"type": "replace",
"edit_start_line_idx": 20
} | // Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package trie
import (
"sync"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3"
)
// hasher is a type used for the trie Hash operation. A hasher has some
// internal preallocated temp space
type hasher struct {
sha crypto.KeccakState
tmp []byte
encbuf rlp.EncoderBuffer
parallel bool // Whether to use parallel threads when hashing
}
// hasherPool holds reusable hashers
var hasherPool = sync.Pool{
New: func() interface{} {
return &hasher{
tmp: make([]byte, 0, 550), // cap is as large as a full fullNode.
sha: sha3.NewLegacyKeccak256().(crypto.KeccakState),
encbuf: rlp.NewEncoderBuffer(nil),
}
},
}
func newHasher(parallel bool) *hasher {
h := hasherPool.Get().(*hasher)
h.parallel = parallel
return h
}
func returnHasherToPool(h *hasher) {
hasherPool.Put(h)
}
// hash collapses a node down into a hash node, also returning a copy of the
// original node initialized with the computed hash to replace the original one.
func (h *hasher) hash(n node, force bool) (hashed node, cached node) {
// Return the cached hash if it's available
if hash, _ := n.cache(); hash != nil {
return hash, n
}
// Trie not processed yet, walk the children
switch n := n.(type) {
case *shortNode:
collapsed, cached := h.hashShortNodeChildren(n)
hashed := h.shortnodeToHash(collapsed, force)
// We need to retain the possibly _not_ hashed node, in case it was too
// small to be hashed
if hn, ok := hashed.(hashNode); ok {
cached.flags.hash = hn
} else {
cached.flags.hash = nil
}
return hashed, cached
case *fullNode:
collapsed, cached := h.hashFullNodeChildren(n)
hashed = h.fullnodeToHash(collapsed, force)
if hn, ok := hashed.(hashNode); ok {
cached.flags.hash = hn
} else {
cached.flags.hash = nil
}
return hashed, cached
default:
// Value and hash nodes don't have children so they're left as they are
return n, n
}
}
// hashShortNodeChildren collapses the short node. The returned collapsed node
// holds a live reference to the Key, and must not be modified.
// The cached node keeps the original key untouched (still in hex form).
func (h *hasher) hashShortNodeChildren(n *shortNode) (collapsed, cached *shortNode) {
// Hash the short node's child, caching the newly hashed subtree
collapsed, cached = n.copy(), n.copy()
// Previously, we did copy this one. We don't seem to need to actually
// do that, since we don't overwrite/reuse keys
//cached.Key = common.CopyBytes(n.Key)
collapsed.Key = hexToCompact(n.Key)
// Unless the child is a valuenode or hashnode, hash it
switch n.Val.(type) {
case *fullNode, *shortNode:
collapsed.Val, cached.Val = h.hash(n.Val, false)
}
return collapsed, cached
}
func (h *hasher) hashFullNodeChildren(n *fullNode) (collapsed *fullNode, cached *fullNode) {
// Hash the full node's children, caching the newly hashed subtrees
cached = n.copy()
collapsed = n.copy()
if h.parallel {
var wg sync.WaitGroup
wg.Add(16)
for i := 0; i < 16; i++ {
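// Hash each of the 16 children in its own goroutine. Every goroutine
// draws a fresh hasher from the pool because a hasher's internal sha
// state and buffers are not safe for concurrent use.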
go func(i int) {
hasher := newHasher(false)
if child := n.Children[i]; child != nil {
collapsed.Children[i], cached.Children[i] = hasher.hash(child, false)
} else {
collapsed.Children[i] = nilValueNode
}
returnHasherToPool(hasher)
wg.Done()
}(i)
}
wg.Wait()
} else {
for i := 0; i < 16; i++ {
if child := n.Children[i]; child != nil {
collapsed.Children[i], cached.Children[i] = h.hash(child, false)
} else {
collapsed.Children[i] = nilValueNode
}
}
}
return collapsed, cached
}
// shortnodeToHash creates a hashNode from a shortNode. The supplied shortnode
// should have hex-type Key, which will be converted (without modification)
// into compact form for RLP encoding.
// If the rlp data is smaller than 32 bytes, `nil` is returned.
func (h *hasher) shortnodeToHash(n *shortNode, force bool) node {
n.encode(h.encbuf)
enc := h.encodedBytes()
if len(enc) < 32 && !force {
return n // Nodes smaller than 32 bytes are stored inside their parent
}
return h.hashData(enc)
}
// fullnodeToHash creates a hashNode from a fullNode, whose children (which
// may contain nil values) have already been hashed.
func (h *hasher) fullnodeToHash(n *fullNode, force bool) node {
n.encode(h.encbuf)
enc := h.encodedBytes()
if len(enc) < 32 && !force {
return n // Nodes smaller than 32 bytes are stored inside their parent
}
return h.hashData(enc)
}
// encodedBytes returns the result of the last encoding operation on h.encbuf.
// This also resets the encoder buffer.
//
// All node encoding must be done like this:
//
// node.encode(h.encbuf)
// enc := h.encodedBytes()
//
// This convention exists because node.encode can only be inlined/escape-analyzed when
// called on a concrete receiver type.
func (h *hasher) encodedBytes() []byte {
h.tmp = h.encbuf.AppendToBytes(h.tmp[:0])
h.encbuf.Reset(nil)
return h.tmp
}
// hashData hashes the provided data
func (h *hasher) hashData(data []byte) hashNode {
n := make(hashNode, 32)
h.sha.Reset()
h.sha.Write(data)
h.sha.Read(n)
return n
}
// proofHash is used to construct trie proofs, and returns the 'collapsed'
// node (for later RLP encoding) as well as the hashed node -- unless the
// node is smaller than 32 bytes, in which case it will be returned as is.
// This method does not do anything on value- or hash-nodes.
func (h *hasher) proofHash(original node) (collapsed, hashed node) {
switch n := original.(type) {
case *shortNode:
sn, _ := h.hashShortNodeChildren(n)
return sn, h.shortnodeToHash(sn, false)
case *fullNode:
fn, _ := h.hashFullNodeChildren(n)
return fn, h.fullnodeToHash(fn, false)
default:
// Value and hash nodes don't have children so they're left as they are
return n, n
}
}
| trie/hasher.go | 0 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.0001750016090227291,
0.00017078084056265652,
0.0001651211641728878,
0.00017010256124194711,
0.0000026761717890622094
] |
{
"id": 1,
"code_window": [
"\tdefer stack.Close()\n",
"\n",
"\tdb := utils.MakeChainDatabase(ctx, stack, true)\n",
"\tdefer db.Close()\n",
"\n",
"\tkey, err := parseHexOrString(ctx.Args().Get(0))\n",
"\tif err != nil {\n",
"\t\tlog.Info(\"Could not decode the key\", \"error\", err)\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tkey, err := common.ParseHexOrString(ctx.Args().Get(0))\n"
],
"file_path": "cmd/geth/dbcmd.go",
"type": "replace",
"edit_start_line_idx": 420
} | // Copyright 2020 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bytes"
"errors"
"fmt"
"os"
"os/signal"
"path/filepath"
"sort"
"strconv"
"strings"
"syscall"
"time"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/console/prompt"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie"
"github.com/olekukonko/tablewriter"
"gopkg.in/urfave/cli.v1"
)
var (
removedbCommand = cli.Command{
Action: utils.MigrateFlags(removeDB),
Name: "removedb",
Usage: "Remove blockchain and state databases",
ArgsUsage: "",
Flags: []cli.Flag{
utils.DataDirFlag,
},
Category: "DATABASE COMMANDS",
Description: `
Remove blockchain and state databases`,
}
dbCommand = cli.Command{
Name: "db",
Usage: "Low level database operations",
ArgsUsage: "",
Category: "DATABASE COMMANDS",
Subcommands: []cli.Command{
dbInspectCmd,
dbStatCmd,
dbCompactCmd,
dbGetCmd,
dbDeleteCmd,
dbPutCmd,
dbGetSlotsCmd,
dbDumpFreezerIndex,
dbImportCmd,
dbExportCmd,
dbMetadataCmd,
dbMigrateFreezerCmd,
},
}
dbInspectCmd = cli.Command{
Action: utils.MigrateFlags(inspect),
Name: "inspect",
ArgsUsage: "<prefix> <start>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.AncientFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Usage: "Inspect the storage size for each type of data in the database",
Description: `This command iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`,
}
dbStatCmd = cli.Command{
Action: utils.MigrateFlags(dbStats),
Name: "stats",
Usage: "Print leveldb statistics",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
}
dbCompactCmd = cli.Command{
Action: utils.MigrateFlags(dbCompact),
Name: "compact",
Usage: "Compact leveldb database. WARNING: May take a very long time",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
utils.CacheFlag,
utils.CacheDatabaseFlag,
},
Description: `This command performs a database compaction.
WARNING: This operation may take a very long time to finish, and may cause database
corruption if it is aborted during execution!`,
}
dbGetCmd = cli.Command{
Action: utils.MigrateFlags(dbGet),
Name: "get",
Usage: "Show the value of a database key",
ArgsUsage: "<hex-encoded key>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "This command looks up the specified database key from the database.",
}
dbDeleteCmd = cli.Command{
Action: utils.MigrateFlags(dbDelete),
Name: "delete",
Usage: "Delete a database key (WARNING: may corrupt your database)",
ArgsUsage: "<hex-encoded key>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: `This command deletes the specified database key from the database.
WARNING: This is a low-level operation which may cause database corruption!`,
}
dbPutCmd = cli.Command{
Action: utils.MigrateFlags(dbPut),
Name: "put",
Usage: "Set the value of a database key (WARNING: may corrupt your database)",
ArgsUsage: "<hex-encoded key> <hex-encoded value>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: `This command sets a given database key to the given value.
WARNING: This is a low-level operation which may cause database corruption!`,
}
dbGetSlotsCmd = cli.Command{
Action: utils.MigrateFlags(dbDumpTrie),
Name: "dumptrie",
Usage: "Show the storage key/values of a given storage trie",
ArgsUsage: "<hex-encoded storage trie root> <hex-encoded start (optional)> <int max elements (optional)>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "This command looks up the specified database key from the database.",
}
dbDumpFreezerIndex = cli.Command{
Action: utils.MigrateFlags(freezerInspect),
Name: "freezer-index",
Usage: "Dump out the index of a given freezer type",
ArgsUsage: "<type> <start (int)> <end (int)>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "This command displays information about the freezer index.",
}
dbImportCmd = cli.Command{
Action: utils.MigrateFlags(importLDBdata),
Name: "import",
Usage: "Imports leveldb-data from an exported RLP dump.",
ArgsUsage: "<dumpfile> <start (optional)",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "The import command imports the specific chain data from an RLP encoded stream.",
}
dbExportCmd = cli.Command{
Action: utils.MigrateFlags(exportChaindata),
Name: "export",
Usage: "Exports the chain data into an RLP dump. If the <dumpfile> has .gz suffix, gzip compression will be used.",
ArgsUsage: "<type> <dumpfile>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
}
dbMetadataCmd = cli.Command{
Action: utils.MigrateFlags(showMetaData),
Name: "metadata",
Usage: "Shows metadata about the chain status.",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "Shows metadata about the chain status.",
}
dbMigrateFreezerCmd = cli.Command{
Action: utils.MigrateFlags(freezerMigrate),
Name: "freezer-migrate",
Usage: "Migrate legacy parts of the freezer. (WARNING: may take a long time)",
ArgsUsage: "",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: `The freezer-migrate command checks your database for receipts in a legacy format and updates those.
WARNING: please back-up the receipt files in your ancients before running this command.`,
}
)
func removeDB(ctx *cli.Context) error {
stack, config := makeConfigNode(ctx)
// Remove the full node state database
path := stack.ResolvePath("chaindata")
if common.FileExist(path) {
confirmAndRemoveDB(path, "full node state database")
} else {
log.Info("Full node state database missing", "path", path)
}
// Remove the full node ancient database
path = config.Eth.DatabaseFreezer
switch {
case path == "":
path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
case !filepath.IsAbs(path):
path = config.Node.ResolvePath(path)
}
if common.FileExist(path) {
confirmAndRemoveDB(path, "full node ancient database")
} else {
log.Info("Full node ancient database missing", "path", path)
}
// Remove the light node database
path = stack.ResolvePath("lightchaindata")
if common.FileExist(path) {
confirmAndRemoveDB(path, "light node database")
} else {
log.Info("Light node database missing", "path", path)
}
return nil
}
// confirmAndRemoveDB prompts the user for a last confirmation and removes the
// folder if accepted.
func confirmAndRemoveDB(database string, kind string) {
confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
switch {
case err != nil:
utils.Fatalf("%v", err)
case !confirm:
log.Info("Database deletion skipped", "path", database)
default:
start := time.Now()
filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
// If we're at the top level folder, recurse into
if path == database {
return nil
}
// Delete all the files, but not subfolders
if !info.IsDir() {
os.Remove(path)
return nil
}
return filepath.SkipDir
})
log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
}
}
func inspect(ctx *cli.Context) error {
var (
prefix []byte
start []byte
)
if ctx.NArg() > 2 {
return fmt.Errorf("Max 2 arguments: %v", ctx.Command.ArgsUsage)
}
if ctx.NArg() >= 1 {
if d, err := hexutil.Decode(ctx.Args().Get(0)); err != nil {
return fmt.Errorf("failed to hex-decode 'prefix': %v", err)
} else {
prefix = d
}
}
if ctx.NArg() >= 2 {
if d, err := hexutil.Decode(ctx.Args().Get(1)); err != nil {
return fmt.Errorf("failed to hex-decode 'start': %v", err)
} else {
start = d
}
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
return rawdb.InspectDatabase(db, prefix, start)
}
func showLeveldbStats(db ethdb.Stater) {
if stats, err := db.Stat("leveldb.stats"); err != nil {
log.Warn("Failed to read database stats", "error", err)
} else {
fmt.Println(stats)
}
if ioStats, err := db.Stat("leveldb.iostats"); err != nil {
log.Warn("Failed to read database iostats", "error", err)
} else {
fmt.Println(ioStats)
}
}
func dbStats(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
showLeveldbStats(db)
return nil
}
func dbCompact(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
log.Info("Stats before compaction")
showLeveldbStats(db)
log.Info("Triggering compaction")
if err := db.Compact(nil, nil); err != nil {
log.Info("Compact err", "error", err)
return err
}
log.Info("Stats after compaction")
showLeveldbStats(db)
return nil
}
// dbGet shows the value of a given database key
func dbGet(ctx *cli.Context) error {
if ctx.NArg() != 1 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
key, err := parseHexOrString(ctx.Args().Get(0))
if err != nil {
log.Info("Could not decode the key", "error", err)
return err
}
data, err := db.Get(key)
if err != nil {
log.Info("Get operation failed", "key", fmt.Sprintf("0x%#x", key), "error", err)
return err
}
fmt.Printf("key %#x: %#x\n", key, data)
return nil
}
// dbDelete deletes a key from the database
func dbDelete(ctx *cli.Context) error {
if ctx.NArg() != 1 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
key, err := parseHexOrString(ctx.Args().Get(0))
if err != nil {
log.Info("Could not decode the key", "error", err)
return err
}
data, err := db.Get(key)
if err == nil {
fmt.Printf("Previous value: %#x\n", data)
}
if err = db.Delete(key); err != nil {
log.Info("Delete operation returned an error", "key", fmt.Sprintf("0x%#x", key), "error", err)
return err
}
return nil
}
// dbPut overwrites a value in the database
func dbPut(ctx *cli.Context) error {
if ctx.NArg() != 2 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
var (
key []byte
value []byte
data []byte
err error
)
key, err = parseHexOrString(ctx.Args().Get(0))
if err != nil {
log.Info("Could not decode the key", "error", err)
return err
}
value, err = hexutil.Decode(ctx.Args().Get(1))
if err != nil {
log.Info("Could not decode the value", "error", err)
return err
}
data, err = db.Get(key)
if err == nil {
fmt.Printf("Previous value: %#x\n", data)
}
return db.Put(key, value)
}
// dbDumpTrie shows the key-value slots of a given storage trie
func dbDumpTrie(ctx *cli.Context) error {
if ctx.NArg() < 1 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
var (
root []byte
start []byte
max = int64(-1)
err error
)
if root, err = hexutil.Decode(ctx.Args().Get(0)); err != nil {
log.Info("Could not decode the root", "error", err)
return err
}
stRoot := common.BytesToHash(root)
if ctx.NArg() >= 2 {
if start, err = hexutil.Decode(ctx.Args().Get(1)); err != nil {
log.Info("Could not decode the seek position", "error", err)
return err
}
}
if ctx.NArg() >= 3 {
if max, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
log.Info("Could not decode the max count", "error", err)
return err
}
}
theTrie, err := trie.New(stRoot, trie.NewDatabase(db))
if err != nil {
return err
}
var count int64
it := trie.NewIterator(theTrie.NodeIterator(start))
for it.Next() {
if max > 0 && count == max {
fmt.Printf("Exiting after %d values\n", count)
break
}
fmt.Printf(" %d. key %#x: %#x\n", count, it.Key, it.Value)
count++
}
return it.Err
}
func freezerInspect(ctx *cli.Context) error {
var (
start, end int64
disableSnappy bool
err error
)
if ctx.NArg() < 3 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
kind := ctx.Args().Get(0)
if noSnap, ok := rawdb.FreezerNoSnappy[kind]; !ok {
var options []string
for opt := range rawdb.FreezerNoSnappy {
options = append(options, opt)
}
sort.Strings(options)
return fmt.Errorf("Could read freezer-type '%v'. Available options: %v", kind, options)
} else {
disableSnappy = noSnap
}
if start, err = strconv.ParseInt(ctx.Args().Get(1), 10, 64); err != nil {
log.Info("Could read start-param", "error", err)
return err
}
if end, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
log.Info("Could read count param", "error", err)
return err
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
path := filepath.Join(stack.ResolvePath("chaindata"), "ancient")
log.Info("Opening freezer", "location", path, "name", kind)
if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy, true); err != nil {
return err
} else {
f.DumpIndex(start, end)
}
return nil
}
// parseHexOrString tries to hex-decode str, but if the 0x prefix is missing, it instead just returns the raw bytes
func parseHexOrString(str string) ([]byte, error) {
b, err := hexutil.Decode(str)
if errors.Is(err, hexutil.ErrMissingPrefix) {
return []byte(str), nil
}
return b, err
}
func importLDBdata(ctx *cli.Context) error {
start := 0
switch ctx.NArg() {
case 1:
break
case 2:
s, err := strconv.Atoi(ctx.Args().Get(1))
if err != nil {
return fmt.Errorf("second arg must be an integer: %v", err)
}
start = s
default:
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
var (
fName = ctx.Args().Get(0)
stack, _ = makeConfigNode(ctx)
interrupt = make(chan os.Signal, 1)
stop = make(chan struct{})
)
defer stack.Close()
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(interrupt)
defer close(interrupt)
go func() {
if _, ok := <-interrupt; ok {
log.Info("Interrupted during ldb import, stopping at next batch")
}
close(stop)
}()
db := utils.MakeChainDatabase(ctx, stack, false)
return utils.ImportLDBData(db, fName, int64(start), stop)
}
type preimageIterator struct {
iter ethdb.Iterator
}
func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) {
for iter.iter.Next() {
key := iter.iter.Key()
if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) {
return utils.OpBatchAdd, key, iter.iter.Value(), true
}
}
return 0, nil, nil, false
}
func (iter *preimageIterator) Release() {
iter.iter.Release()
}
type snapshotIterator struct {
init bool
account ethdb.Iterator
storage ethdb.Iterator
}
func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) {
if !iter.init {
iter.init = true
return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true
}
for iter.account.Next() {
key := iter.account.Key()
if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) {
return utils.OpBatchAdd, key, iter.account.Value(), true
}
}
for iter.storage.Next() {
key := iter.storage.Key()
if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) {
return utils.OpBatchAdd, key, iter.storage.Value(), true
}
}
return 0, nil, nil, false
}
func (iter *snapshotIterator) Release() {
iter.account.Release()
iter.storage.Release()
}
// chainExporters defines the export scheme for all exportable chain data.
var chainExporters = map[string]func(db ethdb.Database) utils.ChainDataIterator{
"preimage": func(db ethdb.Database) utils.ChainDataIterator {
iter := db.NewIterator(rawdb.PreimagePrefix, nil)
return &preimageIterator{iter: iter}
},
"snapshot": func(db ethdb.Database) utils.ChainDataIterator {
account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
return &snapshotIterator{account: account, storage: storage}
},
}
func exportChaindata(ctx *cli.Context) error {
if ctx.NArg() < 2 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
// Parse the required chain data type, make sure it's supported.
kind := ctx.Args().Get(0)
kind = strings.ToLower(strings.Trim(kind, " "))
exporter, ok := chainExporters[kind]
if !ok {
var kinds []string
for kind := range chainExporters {
kinds = append(kinds, kind)
}
return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", "))
}
var (
stack, _ = makeConfigNode(ctx)
interrupt = make(chan os.Signal, 1)
stop = make(chan struct{})
)
defer stack.Close()
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(interrupt)
defer close(interrupt)
go func() {
if _, ok := <-interrupt; ok {
log.Info("Interrupted during db export, stopping at next batch")
}
close(stop)
}()
db := utils.MakeChainDatabase(ctx, stack, true)
return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
}
func showMetaData(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
ancients, err := db.Ancients()
if err != nil {
		fmt.Fprintf(os.Stderr, "Error accessing ancients: %v\n", err)
}
pp := func(val *uint64) string {
if val == nil {
return "<nil>"
}
return fmt.Sprintf("%d (0x%x)", *val, *val)
}
data := [][]string{
{"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))},
{"headBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadBlockHash(db))},
{"headFastBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadFastBlockHash(db))},
{"headHeaderHash", fmt.Sprintf("%v", rawdb.ReadHeadHeaderHash(db))}}
if b := rawdb.ReadHeadBlock(db); b != nil {
data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (0x%x)", b.Number(), b.Number())})
}
if b := rawdb.ReadSkeletonSyncStatus(db); b != nil {
data = append(data, []string{"SkeletonSyncStatus", string(b)})
}
if h := rawdb.ReadHeadHeader(db); h != nil {
data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (0x%x)", h.Number, h.Number)})
}
data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)},
{"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))},
{"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotSyncStatus(db)))},
{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))},
{"snapshotDisabled", fmt.Sprintf("%v", rawdb.ReadSnapshotDisabled(db))},
{"snapshotJournal", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotJournal(db)))},
{"snapshotRecoveryNumber", pp(rawdb.ReadSnapshotRecoveryNumber(db))},
{"snapshotRoot", fmt.Sprintf("%v", rawdb.ReadSnapshotRoot(db))},
{"txIndexTail", pp(rawdb.ReadTxIndexTail(db))},
{"fastTxLookupLimit", pp(rawdb.ReadFastTxLookupLimit(db))},
}...)
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Field", "Value"})
table.AppendBulk(data)
table.Render()
return nil
}
func freezerMigrate(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
// Check first block for legacy receipt format
numAncients, err := db.Ancients()
if err != nil {
return err
}
if numAncients < 1 {
log.Info("No receipts in freezer to migrate")
return nil
}
isFirstLegacy, firstIdx, err := dbHasLegacyReceipts(db, 0)
if err != nil {
return err
}
if !isFirstLegacy {
log.Info("No legacy receipts to migrate")
return nil
}
log.Info("Starting migration", "ancients", numAncients, "firstLegacy", firstIdx)
start := time.Now()
if err := db.MigrateTable("receipts", types.ConvertLegacyStoredReceipts); err != nil {
return err
}
if err := db.Close(); err != nil {
return err
}
log.Info("Migration finished", "duration", time.Since(start))
return nil
}
// dbHasLegacyReceipts checks freezer entries for legacy receipts. It stops at the first
// non-empty receipt and checks its format. The index of this first non-empty element is
// the second return parameter.
func dbHasLegacyReceipts(db ethdb.Database, firstIdx uint64) (bool, uint64, error) {
// Check first block for legacy receipt format
numAncients, err := db.Ancients()
if err != nil {
return false, 0, err
}
if numAncients < 1 {
return false, 0, nil
}
if firstIdx >= numAncients {
return false, firstIdx, nil
}
var (
legacy bool
blob []byte
emptyRLPList = []byte{192}
)
// Find first block with non-empty receipt, only if
// the index is not already provided.
if firstIdx == 0 {
for i := uint64(0); i < numAncients; i++ {
blob, err = db.Ancient("receipts", i)
if err != nil {
return false, 0, err
}
if len(blob) == 0 {
continue
}
if !bytes.Equal(blob, emptyRLPList) {
firstIdx = i
break
}
}
}
// Is first non-empty receipt legacy?
first, err := db.Ancient("receipts", firstIdx)
if err != nil {
return false, 0, err
}
legacy, err = types.IsLegacyStoredReceipts(first)
return legacy, firstIdx, err
}
| cmd/geth/dbcmd.go | 1 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.9981046915054321,
0.21320006251335144,
0.00016445496294181794,
0.0007619853131473064,
0.38539329171180725
] |
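The hunk above tracks the commit that promotes parseHexOrString into the common package as common.ParseHexOrString; the helper itself simply falls back to the raw bytes when hexutil.Decode reports a missing 0x prefix. A minimal standalone sketch of that accept-hex-or-raw behavior, using only the standard library (the names here are illustrative, not geth's API):

package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// parseHexOrString hex-decodes 0x-prefixed input and passes anything else
// through as raw bytes, mirroring the geth helper in the row above.
func parseHexOrString(s string) ([]byte, error) {
	if strings.HasPrefix(s, "0x") {
		return hex.DecodeString(s[2:])
	}
	return []byte(s), nil
}

func main() {
	for _, in := range []string{"0xdeadbeef", "plain-key"} {
		b, err := parseHexOrString(in)
		fmt.Printf("%q -> %x (err: %v)\n", in, b, err)
	}
}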
{
"id": 1,
"code_window": [
"\tdefer stack.Close()\n",
"\n",
"\tdb := utils.MakeChainDatabase(ctx, stack, true)\n",
"\tdefer db.Close()\n",
"\n",
"\tkey, err := parseHexOrString(ctx.Args().Get(0))\n",
"\tif err != nil {\n",
"\t\tlog.Info(\"Could not decode the key\", \"error\", err)\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tkey, err := common.ParseHexOrString(ctx.Args().Get(0))\n"
],
"file_path": "cmd/geth/dbcmd.go",
"type": "replace",
"edit_start_line_idx": 420
} | // Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bloombits
import (
"errors"
"github.com/ethereum/go-ethereum/core/types"
)
var (
// errSectionOutOfBounds is returned if the user tried to add more bloom filters
	// to the batch than available space, or tries to retrieve above the capacity.
errSectionOutOfBounds = errors.New("section out of bounds")
	// errBloomBitOutOfBounds is returned if the user tried to retrieve a
	// bloom bit above the capacity.
errBloomBitOutOfBounds = errors.New("bloom bit out of bounds")
)
// Generator takes a number of bloom filters and generates the rotated bloom bits
// to be used for batched filtering.
type Generator struct {
blooms [types.BloomBitLength][]byte // Rotated blooms for per-bit matching
sections uint // Number of sections to batch together
nextSec uint // Next section to set when adding a bloom
}
// NewGenerator creates a rotated bloom generator that can iteratively fill a
// batched bloom filter's bits.
func NewGenerator(sections uint) (*Generator, error) {
if sections%8 != 0 {
return nil, errors.New("section count not multiple of 8")
}
b := &Generator{sections: sections}
for i := 0; i < types.BloomBitLength; i++ {
b.blooms[i] = make([]byte, sections/8)
}
return b, nil
}
// AddBloom takes a single bloom filter and sets the corresponding bit column
// in memory accordingly.
func (b *Generator) AddBloom(index uint, bloom types.Bloom) error {
// Make sure we're not adding more bloom filters than our capacity
if b.nextSec >= b.sections {
return errSectionOutOfBounds
}
if b.nextSec != index {
return errors.New("bloom filter with unexpected index")
}
// Rotate the bloom and insert into our collection
byteIndex := b.nextSec / 8
bitIndex := byte(7 - b.nextSec%8)
for byt := 0; byt < types.BloomByteLength; byt++ {
bloomByte := bloom[types.BloomByteLength-1-byt]
if bloomByte == 0 {
continue
}
base := 8 * byt
b.blooms[base+7][byteIndex] |= ((bloomByte >> 7) & 1) << bitIndex
b.blooms[base+6][byteIndex] |= ((bloomByte >> 6) & 1) << bitIndex
b.blooms[base+5][byteIndex] |= ((bloomByte >> 5) & 1) << bitIndex
b.blooms[base+4][byteIndex] |= ((bloomByte >> 4) & 1) << bitIndex
b.blooms[base+3][byteIndex] |= ((bloomByte >> 3) & 1) << bitIndex
b.blooms[base+2][byteIndex] |= ((bloomByte >> 2) & 1) << bitIndex
b.blooms[base+1][byteIndex] |= ((bloomByte >> 1) & 1) << bitIndex
b.blooms[base][byteIndex] |= (bloomByte & 1) << bitIndex
}
b.nextSec++
return nil
}
// Bitset returns the bit vector belonging to the given bit index after all
// blooms have been added.
func (b *Generator) Bitset(idx uint) ([]byte, error) {
if b.nextSec != b.sections {
return nil, errors.New("bloom not fully generated yet")
}
if idx >= types.BloomBitLength {
return nil, errBloomBitOutOfBounds
}
return b.blooms[idx], nil
}
| core/bloombits/generator.go | 0 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.00025527618709020317,
0.00018015342357102782,
0.000165736346389167,
0.00017346758977510035,
0.000025277919121435843
] |
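The Generator above transposes per-block blooms into per-bit column vectors so that batched filtering can later fetch one compact bitset per queried bloom bit. A toy, self-contained version of that rotation with 8-bit "blooms" and an 8-item section — sizes shrunk purely for illustration, since the real code works on 2048-bit blooms:

package main

import "fmt"

// rotate transposes per-item bitmasks into per-bit membership columns:
// column b records, one bit per item (MSB first), which items had bit b set.
func rotate(blooms [8]byte) (columns [8]byte) {
	for item, bloom := range blooms {
		for bit := 0; bit < 8; bit++ {
			if bloom&(1<<uint(bit)) != 0 {
				columns[bit] |= 1 << uint(7-item)
			}
		}
	}
	return columns
}

func main() {
	blooms := [8]byte{0b00000001, 0b00000011, 0, 0, 0, 0, 0, 0b00000001}
	cols := rotate(blooms)
	fmt.Printf("bit 0 column: %08b\n", cols[0]) // items 0, 1 and 7 set bit 0
	fmt.Printf("bit 1 column: %08b\n", cols[1]) // only item 1 set bit 1
}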
{
"id": 1,
"code_window": [
"\tdefer stack.Close()\n",
"\n",
"\tdb := utils.MakeChainDatabase(ctx, stack, true)\n",
"\tdefer db.Close()\n",
"\n",
"\tkey, err := parseHexOrString(ctx.Args().Get(0))\n",
"\tif err != nil {\n",
"\t\tlog.Info(\"Could not decode the key\", \"error\", err)\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tkey, err := common.ParseHexOrString(ctx.Args().Get(0))\n"
],
"file_path": "cmd/geth/dbcmd.go",
"type": "replace",
"edit_start_line_idx": 420
} | // Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package tests
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
)
// TransactionTest checks RLP decoding and sender derivation of transactions.
type TransactionTest struct {
RLP hexutil.Bytes `json:"rlp"`
Byzantium ttFork
Constantinople ttFork
Istanbul ttFork
EIP150 ttFork
EIP158 ttFork
Frontier ttFork
Homestead ttFork
}
type ttFork struct {
Sender common.UnprefixedAddress `json:"sender"`
Hash common.UnprefixedHash `json:"hash"`
}
func (tt *TransactionTest) Run(config *params.ChainConfig) error {
validateTx := func(rlpData hexutil.Bytes, signer types.Signer, isHomestead bool, isIstanbul bool) (*common.Address, *common.Hash, error) {
tx := new(types.Transaction)
if err := rlp.DecodeBytes(rlpData, tx); err != nil {
return nil, nil, err
}
sender, err := types.Sender(signer, tx)
if err != nil {
return nil, nil, err
}
// Intrinsic gas
requiredGas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, isHomestead, isIstanbul)
if err != nil {
return nil, nil, err
}
if requiredGas > tx.Gas() {
return nil, nil, fmt.Errorf("insufficient gas ( %d < %d )", tx.Gas(), requiredGas)
}
h := tx.Hash()
return &sender, &h, nil
}
for _, testcase := range []struct {
name string
signer types.Signer
fork ttFork
isHomestead bool
isIstanbul bool
}{
{"Frontier", types.FrontierSigner{}, tt.Frontier, false, false},
{"Homestead", types.HomesteadSigner{}, tt.Homestead, true, false},
{"EIP150", types.HomesteadSigner{}, tt.EIP150, true, false},
{"EIP158", types.NewEIP155Signer(config.ChainID), tt.EIP158, true, false},
{"Byzantium", types.NewEIP155Signer(config.ChainID), tt.Byzantium, true, false},
{"Constantinople", types.NewEIP155Signer(config.ChainID), tt.Constantinople, true, false},
{"Istanbul", types.NewEIP155Signer(config.ChainID), tt.Istanbul, true, true},
} {
sender, txhash, err := validateTx(tt.RLP, testcase.signer, testcase.isHomestead, testcase.isIstanbul)
if testcase.fork.Sender == (common.UnprefixedAddress{}) {
if err == nil {
return fmt.Errorf("expected error, got none (address %v)[%v]", sender.String(), testcase.name)
}
continue
}
// Should resolve the right address
if err != nil {
return fmt.Errorf("got error, expected none: %v", err)
}
if sender == nil {
return fmt.Errorf("sender was nil, should be %x", common.Address(testcase.fork.Sender))
}
if *sender != common.Address(testcase.fork.Sender) {
return fmt.Errorf("sender mismatch: got %x, want %x", sender, testcase.fork.Sender)
}
if txhash == nil {
return fmt.Errorf("txhash was nil, should be %x", common.Hash(testcase.fork.Hash))
}
if *txhash != common.Hash(testcase.fork.Hash) {
return fmt.Errorf("hash mismatch: got %x, want %x", *txhash, testcase.fork.Hash)
}
}
return nil
}
| tests/transaction_test_util.go | 0 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.00038740909076295793,
0.00020732333359774202,
0.00016159447841346264,
0.00017363775987178087,
0.00007321371231228113
] |
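The transaction test above applies one validation closure to a table of fork configurations and fails on any sender or hash mismatch. A pared-down sketch of that table-driven fork pattern (the closure body and gas numbers are illustrative stand-ins, not the full intrinsic-gas rules):

package main

import "fmt"

// forkCase mirrors the test table above: a named fork plus the flags the
// shared validation closure needs.
type forkCase struct {
	name        string
	isHomestead bool
}

func main() {
	// Stand-in for validateTx: pick a contract-creation gas cost per fork.
	creationGas := func(isHomestead bool) uint64 {
		if isHomestead {
			return 53000 // contract creation got pricier in Homestead
		}
		return 21000
	}
	for _, tc := range []forkCase{
		{"Frontier", false},
		{"Homestead", true},
	} {
		fmt.Printf("%s: creation gas %d\n", tc.name, creationGas(tc.isHomestead))
	}
}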
{
"id": 1,
"code_window": [
"\tdefer stack.Close()\n",
"\n",
"\tdb := utils.MakeChainDatabase(ctx, stack, true)\n",
"\tdefer db.Close()\n",
"\n",
"\tkey, err := parseHexOrString(ctx.Args().Get(0))\n",
"\tif err != nil {\n",
"\t\tlog.Info(\"Could not decode the key\", \"error\", err)\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tkey, err := common.ParseHexOrString(ctx.Args().Get(0))\n"
],
"file_path": "cmd/geth/dbcmd.go",
"type": "replace",
"edit_start_line_idx": 420
} | {
"0x095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
"balance" : "0x0de0b6b3a7640000",
"code" : "0x6001",
"nonce" : "0x00",
"storage" : {
}
},
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
"balance" : "0x0de0b6b3a7640000",
"code" : "0x",
"nonce" : "0x00",
"storage" : {
}
}
}
| cmd/evm/testdata/23/alloc.json | 0 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.000175683424458839,
0.00017209615907631814,
0.00016850887914188206,
0.00017209615907631814,
0.0000035872726584784687
] |
{
"id": 2,
"code_window": [
"\tdefer stack.Close()\n",
"\n",
"\tdb := utils.MakeChainDatabase(ctx, stack, false)\n",
"\tdefer db.Close()\n",
"\n",
"\tkey, err := parseHexOrString(ctx.Args().Get(0))\n",
"\tif err != nil {\n",
"\t\tlog.Info(\"Could not decode the key\", \"error\", err)\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tkey, err := common.ParseHexOrString(ctx.Args().Get(0))\n"
],
"file_path": "cmd/geth/dbcmd.go",
"type": "replace",
"edit_start_line_idx": 446
} | // Copyright 2020 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bytes"
"errors"
"fmt"
"os"
"os/signal"
"path/filepath"
"sort"
"strconv"
"strings"
"syscall"
"time"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/console/prompt"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie"
"github.com/olekukonko/tablewriter"
"gopkg.in/urfave/cli.v1"
)
var (
removedbCommand = cli.Command{
Action: utils.MigrateFlags(removeDB),
Name: "removedb",
Usage: "Remove blockchain and state databases",
ArgsUsage: "",
Flags: []cli.Flag{
utils.DataDirFlag,
},
Category: "DATABASE COMMANDS",
Description: `
Remove blockchain and state databases`,
}
dbCommand = cli.Command{
Name: "db",
Usage: "Low level database operations",
ArgsUsage: "",
Category: "DATABASE COMMANDS",
Subcommands: []cli.Command{
dbInspectCmd,
dbStatCmd,
dbCompactCmd,
dbGetCmd,
dbDeleteCmd,
dbPutCmd,
dbGetSlotsCmd,
dbDumpFreezerIndex,
dbImportCmd,
dbExportCmd,
dbMetadataCmd,
dbMigrateFreezerCmd,
},
}
dbInspectCmd = cli.Command{
Action: utils.MigrateFlags(inspect),
Name: "inspect",
ArgsUsage: "<prefix> <start>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.AncientFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Usage: "Inspect the storage size for each type of data in the database",
		Description: `This command iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`,
}
dbStatCmd = cli.Command{
Action: utils.MigrateFlags(dbStats),
Name: "stats",
Usage: "Print leveldb statistics",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
}
dbCompactCmd = cli.Command{
Action: utils.MigrateFlags(dbCompact),
Name: "compact",
Usage: "Compact leveldb database. WARNING: May take a very long time",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
utils.CacheFlag,
utils.CacheDatabaseFlag,
},
Description: `This command performs a database compaction.
WARNING: This operation may take a very long time to finish, and may cause database
corruption if it is aborted during execution!`,
}
dbGetCmd = cli.Command{
Action: utils.MigrateFlags(dbGet),
Name: "get",
Usage: "Show the value of a database key",
ArgsUsage: "<hex-encoded key>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "This command looks up the specified database key from the database.",
}
dbDeleteCmd = cli.Command{
Action: utils.MigrateFlags(dbDelete),
Name: "delete",
Usage: "Delete a database key (WARNING: may corrupt your database)",
ArgsUsage: "<hex-encoded key>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: `This command deletes the specified database key from the database.
WARNING: This is a low-level operation which may cause database corruption!`,
}
dbPutCmd = cli.Command{
Action: utils.MigrateFlags(dbPut),
Name: "put",
Usage: "Set the value of a database key (WARNING: may corrupt your database)",
ArgsUsage: "<hex-encoded key> <hex-encoded value>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: `This command sets a given database key to the given value.
WARNING: This is a low-level operation which may cause database corruption!`,
}
dbGetSlotsCmd = cli.Command{
Action: utils.MigrateFlags(dbDumpTrie),
Name: "dumptrie",
Usage: "Show the storage key/values of a given storage trie",
ArgsUsage: "<hex-encoded storage trie root> <hex-encoded start (optional)> <int max elements (optional)>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "This command looks up the specified database key from the database.",
}
dbDumpFreezerIndex = cli.Command{
Action: utils.MigrateFlags(freezerInspect),
Name: "freezer-index",
Usage: "Dump out the index of a given freezer type",
ArgsUsage: "<type> <start (int)> <end (int)>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "This command displays information about the freezer index.",
}
dbImportCmd = cli.Command{
Action: utils.MigrateFlags(importLDBdata),
Name: "import",
Usage: "Imports leveldb-data from an exported RLP dump.",
ArgsUsage: "<dumpfile> <start (optional)",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "The import command imports the specific chain data from an RLP encoded stream.",
}
dbExportCmd = cli.Command{
Action: utils.MigrateFlags(exportChaindata),
Name: "export",
Usage: "Exports the chain data into an RLP dump. If the <dumpfile> has .gz suffix, gzip compression will be used.",
ArgsUsage: "<type> <dumpfile>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
}
dbMetadataCmd = cli.Command{
Action: utils.MigrateFlags(showMetaData),
Name: "metadata",
Usage: "Shows metadata about the chain status.",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "Shows metadata about the chain status.",
}
dbMigrateFreezerCmd = cli.Command{
Action: utils.MigrateFlags(freezerMigrate),
Name: "freezer-migrate",
Usage: "Migrate legacy parts of the freezer. (WARNING: may take a long time)",
ArgsUsage: "",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: `The freezer-migrate command checks your database for receipts in a legacy format and updates those.
WARNING: please back-up the receipt files in your ancients before running this command.`,
}
)
func removeDB(ctx *cli.Context) error {
stack, config := makeConfigNode(ctx)
// Remove the full node state database
path := stack.ResolvePath("chaindata")
if common.FileExist(path) {
confirmAndRemoveDB(path, "full node state database")
} else {
log.Info("Full node state database missing", "path", path)
}
// Remove the full node ancient database
path = config.Eth.DatabaseFreezer
switch {
case path == "":
path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
case !filepath.IsAbs(path):
path = config.Node.ResolvePath(path)
}
if common.FileExist(path) {
confirmAndRemoveDB(path, "full node ancient database")
} else {
log.Info("Full node ancient database missing", "path", path)
}
// Remove the light node database
path = stack.ResolvePath("lightchaindata")
if common.FileExist(path) {
confirmAndRemoveDB(path, "light node database")
} else {
log.Info("Light node database missing", "path", path)
}
return nil
}
// confirmAndRemoveDB prompts the user for a last confirmation and removes the
// folder if accepted.
func confirmAndRemoveDB(database string, kind string) {
confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
switch {
case err != nil:
utils.Fatalf("%v", err)
case !confirm:
log.Info("Database deletion skipped", "path", database)
default:
start := time.Now()
filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
			// If we're at the top level folder, recurse into it
if path == database {
return nil
}
// Delete all the files, but not subfolders
if !info.IsDir() {
os.Remove(path)
return nil
}
return filepath.SkipDir
})
log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
}
}
func inspect(ctx *cli.Context) error {
var (
prefix []byte
start []byte
)
if ctx.NArg() > 2 {
return fmt.Errorf("Max 2 arguments: %v", ctx.Command.ArgsUsage)
}
if ctx.NArg() >= 1 {
if d, err := hexutil.Decode(ctx.Args().Get(0)); err != nil {
return fmt.Errorf("failed to hex-decode 'prefix': %v", err)
} else {
prefix = d
}
}
if ctx.NArg() >= 2 {
if d, err := hexutil.Decode(ctx.Args().Get(1)); err != nil {
return fmt.Errorf("failed to hex-decode 'start': %v", err)
} else {
start = d
}
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
return rawdb.InspectDatabase(db, prefix, start)
}
func showLeveldbStats(db ethdb.Stater) {
if stats, err := db.Stat("leveldb.stats"); err != nil {
log.Warn("Failed to read database stats", "error", err)
} else {
fmt.Println(stats)
}
if ioStats, err := db.Stat("leveldb.iostats"); err != nil {
log.Warn("Failed to read database iostats", "error", err)
} else {
fmt.Println(ioStats)
}
}
func dbStats(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
showLeveldbStats(db)
return nil
}
func dbCompact(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
log.Info("Stats before compaction")
showLeveldbStats(db)
log.Info("Triggering compaction")
if err := db.Compact(nil, nil); err != nil {
log.Info("Compact err", "error", err)
return err
}
log.Info("Stats after compaction")
showLeveldbStats(db)
return nil
}
// dbGet shows the value of a given database key
func dbGet(ctx *cli.Context) error {
if ctx.NArg() != 1 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
key, err := parseHexOrString(ctx.Args().Get(0))
if err != nil {
log.Info("Could not decode the key", "error", err)
return err
}
data, err := db.Get(key)
if err != nil {
log.Info("Get operation failed", "key", fmt.Sprintf("0x%#x", key), "error", err)
return err
}
fmt.Printf("key %#x: %#x\n", key, data)
return nil
}
// dbDelete deletes a key from the database
func dbDelete(ctx *cli.Context) error {
if ctx.NArg() != 1 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
key, err := parseHexOrString(ctx.Args().Get(0))
if err != nil {
log.Info("Could not decode the key", "error", err)
return err
}
data, err := db.Get(key)
if err == nil {
fmt.Printf("Previous value: %#x\n", data)
}
if err = db.Delete(key); err != nil {
log.Info("Delete operation returned an error", "key", fmt.Sprintf("0x%#x", key), "error", err)
return err
}
return nil
}
// dbPut overwrites a value in the database
func dbPut(ctx *cli.Context) error {
if ctx.NArg() != 2 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
var (
key []byte
value []byte
data []byte
err error
)
key, err = parseHexOrString(ctx.Args().Get(0))
if err != nil {
log.Info("Could not decode the key", "error", err)
return err
}
value, err = hexutil.Decode(ctx.Args().Get(1))
if err != nil {
log.Info("Could not decode the value", "error", err)
return err
}
data, err = db.Get(key)
if err == nil {
fmt.Printf("Previous value: %#x\n", data)
}
return db.Put(key, value)
}
// dbDumpTrie shows the key-value slots of a given storage trie
func dbDumpTrie(ctx *cli.Context) error {
if ctx.NArg() < 1 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
var (
root []byte
start []byte
max = int64(-1)
err error
)
if root, err = hexutil.Decode(ctx.Args().Get(0)); err != nil {
log.Info("Could not decode the root", "error", err)
return err
}
stRoot := common.BytesToHash(root)
if ctx.NArg() >= 2 {
if start, err = hexutil.Decode(ctx.Args().Get(1)); err != nil {
log.Info("Could not decode the seek position", "error", err)
return err
}
}
if ctx.NArg() >= 3 {
if max, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
log.Info("Could not decode the max count", "error", err)
return err
}
}
theTrie, err := trie.New(stRoot, trie.NewDatabase(db))
if err != nil {
return err
}
var count int64
it := trie.NewIterator(theTrie.NodeIterator(start))
for it.Next() {
if max > 0 && count == max {
fmt.Printf("Exiting after %d values\n", count)
break
}
fmt.Printf(" %d. key %#x: %#x\n", count, it.Key, it.Value)
count++
}
return it.Err
}
func freezerInspect(ctx *cli.Context) error {
var (
start, end int64
disableSnappy bool
err error
)
if ctx.NArg() < 3 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
kind := ctx.Args().Get(0)
if noSnap, ok := rawdb.FreezerNoSnappy[kind]; !ok {
var options []string
for opt := range rawdb.FreezerNoSnappy {
options = append(options, opt)
}
sort.Strings(options)
return fmt.Errorf("Could read freezer-type '%v'. Available options: %v", kind, options)
} else {
disableSnappy = noSnap
}
if start, err = strconv.ParseInt(ctx.Args().Get(1), 10, 64); err != nil {
log.Info("Could read start-param", "error", err)
return err
}
if end, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
log.Info("Could read count param", "error", err)
return err
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
path := filepath.Join(stack.ResolvePath("chaindata"), "ancient")
log.Info("Opening freezer", "location", path, "name", kind)
if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy, true); err != nil {
return err
} else {
f.DumpIndex(start, end)
}
return nil
}
// parseHexOrString tries to hex-decode str, but if the 0x prefix is missing, it instead just returns the raw bytes
func parseHexOrString(str string) ([]byte, error) {
b, err := hexutil.Decode(str)
if errors.Is(err, hexutil.ErrMissingPrefix) {
return []byte(str), nil
}
return b, err
}
func importLDBdata(ctx *cli.Context) error {
start := 0
switch ctx.NArg() {
case 1:
break
case 2:
s, err := strconv.Atoi(ctx.Args().Get(1))
if err != nil {
return fmt.Errorf("second arg must be an integer: %v", err)
}
start = s
default:
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
var (
fName = ctx.Args().Get(0)
stack, _ = makeConfigNode(ctx)
interrupt = make(chan os.Signal, 1)
stop = make(chan struct{})
)
defer stack.Close()
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(interrupt)
defer close(interrupt)
go func() {
if _, ok := <-interrupt; ok {
log.Info("Interrupted during ldb import, stopping at next batch")
}
close(stop)
}()
db := utils.MakeChainDatabase(ctx, stack, false)
return utils.ImportLDBData(db, fName, int64(start), stop)
}
type preimageIterator struct {
iter ethdb.Iterator
}
func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) {
for iter.iter.Next() {
key := iter.iter.Key()
if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) {
return utils.OpBatchAdd, key, iter.iter.Value(), true
}
}
return 0, nil, nil, false
}
func (iter *preimageIterator) Release() {
iter.iter.Release()
}
type snapshotIterator struct {
init bool
account ethdb.Iterator
storage ethdb.Iterator
}
func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) {
if !iter.init {
iter.init = true
return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true
}
for iter.account.Next() {
key := iter.account.Key()
if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) {
return utils.OpBatchAdd, key, iter.account.Value(), true
}
}
for iter.storage.Next() {
key := iter.storage.Key()
if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) {
return utils.OpBatchAdd, key, iter.storage.Value(), true
}
}
return 0, nil, nil, false
}
func (iter *snapshotIterator) Release() {
iter.account.Release()
iter.storage.Release()
}
// chainExporters defines the export scheme for all exportable chain data.
var chainExporters = map[string]func(db ethdb.Database) utils.ChainDataIterator{
"preimage": func(db ethdb.Database) utils.ChainDataIterator {
iter := db.NewIterator(rawdb.PreimagePrefix, nil)
return &preimageIterator{iter: iter}
},
"snapshot": func(db ethdb.Database) utils.ChainDataIterator {
account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
return &snapshotIterator{account: account, storage: storage}
},
}
func exportChaindata(ctx *cli.Context) error {
if ctx.NArg() < 2 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
// Parse the required chain data type, make sure it's supported.
kind := ctx.Args().Get(0)
kind = strings.ToLower(strings.Trim(kind, " "))
exporter, ok := chainExporters[kind]
if !ok {
var kinds []string
for kind := range chainExporters {
kinds = append(kinds, kind)
}
return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", "))
}
var (
stack, _ = makeConfigNode(ctx)
interrupt = make(chan os.Signal, 1)
stop = make(chan struct{})
)
defer stack.Close()
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(interrupt)
defer close(interrupt)
go func() {
if _, ok := <-interrupt; ok {
log.Info("Interrupted during db export, stopping at next batch")
}
close(stop)
}()
db := utils.MakeChainDatabase(ctx, stack, true)
return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
}
func showMetaData(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
ancients, err := db.Ancients()
if err != nil {
		fmt.Fprintf(os.Stderr, "Error accessing ancients: %v\n", err)
}
pp := func(val *uint64) string {
if val == nil {
return "<nil>"
}
return fmt.Sprintf("%d (0x%x)", *val, *val)
}
data := [][]string{
{"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))},
{"headBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadBlockHash(db))},
{"headFastBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadFastBlockHash(db))},
{"headHeaderHash", fmt.Sprintf("%v", rawdb.ReadHeadHeaderHash(db))}}
if b := rawdb.ReadHeadBlock(db); b != nil {
data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (0x%x)", b.Number(), b.Number())})
}
if b := rawdb.ReadSkeletonSyncStatus(db); b != nil {
data = append(data, []string{"SkeletonSyncStatus", string(b)})
}
if h := rawdb.ReadHeadHeader(db); h != nil {
data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (0x%x)", h.Number, h.Number)})
}
data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)},
{"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))},
{"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotSyncStatus(db)))},
{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))},
{"snapshotDisabled", fmt.Sprintf("%v", rawdb.ReadSnapshotDisabled(db))},
{"snapshotJournal", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotJournal(db)))},
{"snapshotRecoveryNumber", pp(rawdb.ReadSnapshotRecoveryNumber(db))},
{"snapshotRoot", fmt.Sprintf("%v", rawdb.ReadSnapshotRoot(db))},
{"txIndexTail", pp(rawdb.ReadTxIndexTail(db))},
{"fastTxLookupLimit", pp(rawdb.ReadFastTxLookupLimit(db))},
}...)
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Field", "Value"})
table.AppendBulk(data)
table.Render()
return nil
}
func freezerMigrate(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
// Check first block for legacy receipt format
numAncients, err := db.Ancients()
if err != nil {
return err
}
if numAncients < 1 {
log.Info("No receipts in freezer to migrate")
return nil
}
isFirstLegacy, firstIdx, err := dbHasLegacyReceipts(db, 0)
if err != nil {
return err
}
if !isFirstLegacy {
log.Info("No legacy receipts to migrate")
return nil
}
log.Info("Starting migration", "ancients", numAncients, "firstLegacy", firstIdx)
start := time.Now()
if err := db.MigrateTable("receipts", types.ConvertLegacyStoredReceipts); err != nil {
return err
}
if err := db.Close(); err != nil {
return err
}
log.Info("Migration finished", "duration", time.Since(start))
return nil
}
// dbHasLegacyReceipts checks freezer entries for legacy receipts. It stops at the first
// non-empty receipt and checks its format. The index of this first non-empty element is
// the second return parameter.
func dbHasLegacyReceipts(db ethdb.Database, firstIdx uint64) (bool, uint64, error) {
// Check first block for legacy receipt format
numAncients, err := db.Ancients()
if err != nil {
return false, 0, err
}
if numAncients < 1 {
return false, 0, nil
}
if firstIdx >= numAncients {
return false, firstIdx, nil
}
var (
legacy bool
blob []byte
emptyRLPList = []byte{192}
)
// Find first block with non-empty receipt, only if
// the index is not already provided.
if firstIdx == 0 {
for i := uint64(0); i < numAncients; i++ {
blob, err = db.Ancient("receipts", i)
if err != nil {
return false, 0, err
}
if len(blob) == 0 {
continue
}
if !bytes.Equal(blob, emptyRLPList) {
firstIdx = i
break
}
}
}
// Is first non-empty receipt legacy?
first, err := db.Ancient("receipts", firstIdx)
if err != nil {
return false, 0, err
}
legacy, err = types.IsLegacyStoredReceipts(first)
return legacy, firstIdx, err
}
| cmd/geth/dbcmd.go | 1 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.9981074333190918,
0.21164339780807495,
0.00016446906374767423,
0.0007288085762411356,
0.3887900412082672
] |
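dbHasLegacyReceipts above has to skip empty entries before probing a receipt's format: a zero-length blob or the canonical empty RLP list (a single 0xc0 byte) carries no receipts. The scan step in isolation, as a self-contained sketch (the real code pulls each blob via db.Ancient("receipts", i)):

package main

import (
	"bytes"
	"fmt"
)

// firstNonEmpty returns the index of the first blob that is neither empty
// nor the empty RLP list, i.e. the first entry worth format-probing.
func firstNonEmpty(blobs [][]byte) (int, bool) {
	emptyRLPList := []byte{0xc0} // decimal 192, as in the code above
	for i, blob := range blobs {
		if len(blob) == 0 || bytes.Equal(blob, emptyRLPList) {
			continue
		}
		return i, true
	}
	return 0, false
}

func main() {
	blobs := [][]byte{nil, {0xc0}, {0xc0}, {0xf8, 0x01}, nil}
	if idx, ok := firstNonEmpty(blobs); ok {
		fmt.Printf("first candidate receipt at index %d\n", idx) // prints 3
	}
}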
{
"id": 2,
"code_window": [
"\tdefer stack.Close()\n",
"\n",
"\tdb := utils.MakeChainDatabase(ctx, stack, false)\n",
"\tdefer db.Close()\n",
"\n",
"\tkey, err := parseHexOrString(ctx.Args().Get(0))\n",
"\tif err != nil {\n",
"\t\tlog.Info(\"Could not decode the key\", \"error\", err)\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tkey, err := common.ParseHexOrString(ctx.Args().Get(0))\n"
],
"file_path": "cmd/geth/dbcmd.go",
"type": "replace",
"edit_start_line_idx": 446
} | // Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package flowcontrol
import (
"math/rand"
"testing"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
)
type testNode struct {
node *ClientNode
bufLimit, capacity uint64
waitUntil mclock.AbsTime
index, totalCost uint64
}
const (
testMaxCost = 1000000
testLength = 100000
)
// testConstantTotalCapacity simulates multiple request sender nodes and verifies
// whether the total amount of served requests matches the expected value based on
// the total capacity and the duration of the test.
// Some nodes are sending requests occasionally so that their buffer should regularly
// reach the maximum while other nodes (the "max capacity nodes") are sending at the
// maximum permitted rate. The max capacity nodes are changed multiple times during
// a single test.
func TestConstantTotalCapacity(t *testing.T) {
testConstantTotalCapacity(t, 10, 1, 0)
testConstantTotalCapacity(t, 10, 1, 1)
testConstantTotalCapacity(t, 30, 1, 0)
testConstantTotalCapacity(t, 30, 2, 3)
testConstantTotalCapacity(t, 100, 1, 0)
testConstantTotalCapacity(t, 100, 3, 5)
testConstantTotalCapacity(t, 100, 5, 10)
}
func testConstantTotalCapacity(t *testing.T, nodeCount, maxCapacityNodes, randomSend int) {
clock := &mclock.Simulated{}
nodes := make([]*testNode, nodeCount)
var totalCapacity uint64
for i := range nodes {
nodes[i] = &testNode{capacity: uint64(50000 + rand.Intn(100000))}
totalCapacity += nodes[i].capacity
}
m := NewClientManager(PieceWiseLinear{{0, totalCapacity}}, clock)
for _, n := range nodes {
n.bufLimit = n.capacity * 6000
n.node = NewClientNode(m, ServerParams{BufLimit: n.bufLimit, MinRecharge: n.capacity})
}
maxNodes := make([]int, maxCapacityNodes)
for i := range maxNodes {
// we don't care if some indexes are selected multiple times
// in that case we have fewer max nodes
maxNodes[i] = rand.Intn(nodeCount)
}
var sendCount int
for i := 0; i < testLength; i++ {
now := clock.Now()
for _, idx := range maxNodes {
for nodes[idx].send(t, now) {
}
}
if rand.Intn(testLength) < maxCapacityNodes*3 {
maxNodes[rand.Intn(maxCapacityNodes)] = rand.Intn(nodeCount)
}
sendCount += randomSend
failCount := randomSend * 10
for sendCount > 0 && failCount > 0 {
if nodes[rand.Intn(nodeCount)].send(t, now) {
sendCount--
} else {
failCount--
}
}
clock.Run(time.Millisecond)
}
var totalCost uint64
for _, n := range nodes {
totalCost += n.totalCost
}
ratio := float64(totalCost) / float64(totalCapacity) / testLength
if ratio < 0.98 || ratio > 1.02 {
t.Errorf("totalCost/totalCapacity/testLength ratio incorrect (expected: 1, got: %f)", ratio)
}
}
func (n *testNode) send(t *testing.T, now mclock.AbsTime) bool {
if now < n.waitUntil {
return false
}
n.index++
if ok, _, _ := n.node.AcceptRequest(0, n.index, testMaxCost); !ok {
t.Fatalf("Rejected request after expected waiting time has passed")
}
rcost := uint64(rand.Int63n(testMaxCost))
bv := n.node.RequestProcessed(0, n.index, testMaxCost, rcost)
if bv < testMaxCost {
n.waitUntil = now + mclock.AbsTime((testMaxCost-bv)*1001000/n.capacity)
}
n.totalCost += rcost
return true
}
| les/flowcontrol/manager_test.go | 0 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.0010081120999529958,
0.0002374032192165032,
0.0001697088882792741,
0.000173039166838862,
0.0002224980271421373
] |
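The flow-control test above models each client as a recharging buffer: a request of cost c is accepted only once the buffer holds at least c, and the buffer refills at the node's assigned capacity. A minimal token-bucket sketch of that idea (struct and field names are illustrative, not the les/flowcontrol API):

package main

import (
	"fmt"
	"time"
)

// bucket is a toy flow-control buffer: level recharges at rate units per
// second up to limit, and each accepted request spends its cost from level.
type bucket struct {
	level, limit uint64
	rate         uint64
	last         time.Time
}

func (b *bucket) recharge(now time.Time) {
	b.level += uint64(now.Sub(b.last).Seconds() * float64(b.rate))
	if b.level > b.limit {
		b.level = b.limit
	}
	b.last = now
}

// accept recharges the buffer, then deducts cost from it if possible.
func (b *bucket) accept(now time.Time, cost uint64) bool {
	b.recharge(now)
	if b.level < cost {
		return false
	}
	b.level -= cost
	return true
}

func main() {
	now := time.Now()
	b := &bucket{level: 1000, limit: 1000, rate: 500, last: now}
	fmt.Println(b.accept(now, 800))                    // true: buffer starts full
	fmt.Println(b.accept(now, 800))                    // false: only 200 left
	fmt.Println(b.accept(now.Add(2*time.Second), 800)) // true: 2s recharged 1000
}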
{
"id": 2,
"code_window": [
"\tdefer stack.Close()\n",
"\n",
"\tdb := utils.MakeChainDatabase(ctx, stack, false)\n",
"\tdefer db.Close()\n",
"\n",
"\tkey, err := parseHexOrString(ctx.Args().Get(0))\n",
"\tif err != nil {\n",
"\t\tlog.Info(\"Could not decode the key\", \"error\", err)\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tkey, err := common.ParseHexOrString(ctx.Args().Get(0))\n"
],
"file_path": "cmd/geth/dbcmd.go",
"type": "replace",
"edit_start_line_idx": 446
} | KEY-----
MIICXgIBAAKBgQDuLnQAI3mDgey3VBzWnB2L39JUU4txjeVE6myuDqkM/uGlfjb9
SjY1bIw4iAJm2gsvvZhIrCHS3l6afab4pZB
l2+XsDulrKBxKKtD1rGxlG4LjncdabFn9gvLZad2bSysqz/qTAUStTqJIDAQAB
AoGAwr7XOy5tM/V6eZanZzus1sClbjbE6HXnWwbZGOpet
3Zm4vD6MXcjpTLryzTQIvVdfQbRc6+MUVeLwZaTXtdZrhu+Jk7xnTPy8Jcb
uJqFk41Ew+mMogY/xEcfbWd6I+4xqjlLED gbIECQDvn+hgN4H
txxr397vjrIgPJpQvBsafG7b0dA4AFjwVbFLcj2pMQoozvpj1AA/v13/57K9vCxmb8QeD/asysgS5TeuNi8DoUBEmiSJwXvjwjNpNEbc6Iuyt7yMQAIt21su4b3sjXNueLKH85Q+phy2U
fQtuUE9txblTu14qN7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xl/DoCzjA0CQQDU
y2ptGsuSmgUtWj3NM9xuwYPm+Z/F84K6+ARYiZ6PYj013sovGKUFfYAqVXVlxtIX
qyUBnu3Xhs8ZfjLZO7BAkEAlT4R5Yl6cGhYZHOde3JEMhcVFMO8daFeo
f9Oeos0UotgiDktdQHxdNEwLjQflJJBzV+5OtwswCA=----EN RATESTI EY-- | tests/fuzzers/txfetcher/corpus/8a9ebedfbfec584d8b22761e6121dc1ca0248548-4 | 0 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.0009106016368605196,
0.0005380649236030877,
0.00016552819579374045,
0.0005380649236030877,
0.000372536713257432
] |
{
"id": 2,
"code_window": [
"\tdefer stack.Close()\n",
"\n",
"\tdb := utils.MakeChainDatabase(ctx, stack, false)\n",
"\tdefer db.Close()\n",
"\n",
"\tkey, err := parseHexOrString(ctx.Args().Get(0))\n",
"\tif err != nil {\n",
"\t\tlog.Info(\"Could not decode the key\", \"error\", err)\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tkey, err := common.ParseHexOrString(ctx.Args().Get(0))\n"
],
"file_path": "cmd/geth/dbcmd.go",
"type": "replace",
"edit_start_line_idx": 446
} | // Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package enode
import (
"bytes"
"fmt"
"net"
"path/filepath"
"reflect"
"testing"
"time"
)
var keytestID = HexID("51232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439")
func TestDBNodeKey(t *testing.T) {
enc := nodeKey(keytestID)
want := []byte{
'n', ':',
0x51, 0x23, 0x2b, 0x8d, 0x78, 0x21, 0x61, 0x7d, // node id
0x2b, 0x29, 0xb5, 0x4b, 0x81, 0xcd, 0xef, 0xb9, //
0xb3, 0xe9, 0xc3, 0x7d, 0x7f, 0xd5, 0xf6, 0x32, //
0x70, 0xbc, 0xc9, 0xe1, 0xa6, 0xf6, 0xa4, 0x39, //
':', 'v', '4',
}
if !bytes.Equal(enc, want) {
t.Errorf("wrong encoded key:\ngot %q\nwant %q", enc, want)
}
id, _ := splitNodeKey(enc)
if id != keytestID {
t.Errorf("wrong ID from splitNodeKey")
}
}
func TestDBNodeItemKey(t *testing.T) {
wantIP := net.IP{127, 0, 0, 3}
wantField := "foobar"
enc := nodeItemKey(keytestID, wantIP, wantField)
want := []byte{
'n', ':',
0x51, 0x23, 0x2b, 0x8d, 0x78, 0x21, 0x61, 0x7d, // node id
0x2b, 0x29, 0xb5, 0x4b, 0x81, 0xcd, 0xef, 0xb9, //
0xb3, 0xe9, 0xc3, 0x7d, 0x7f, 0xd5, 0xf6, 0x32, //
0x70, 0xbc, 0xc9, 0xe1, 0xa6, 0xf6, 0xa4, 0x39, //
':', 'v', '4', ':',
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // IP
0x00, 0x00, 0xff, 0xff, 0x7f, 0x00, 0x00, 0x03, //
':', 'f', 'o', 'o', 'b', 'a', 'r',
}
if !bytes.Equal(enc, want) {
t.Errorf("wrong encoded key:\ngot %q\nwant %q", enc, want)
}
id, ip, field := splitNodeItemKey(enc)
if id != keytestID {
t.Errorf("splitNodeItemKey returned wrong ID: %v", id)
}
if !ip.Equal(wantIP) {
t.Errorf("splitNodeItemKey returned wrong IP: %v", ip)
}
if field != wantField {
t.Errorf("splitNodeItemKey returned wrong field: %q", field)
}
}
var nodeDBInt64Tests = []struct {
key []byte
value int64
}{
{key: []byte{0x01}, value: 1},
{key: []byte{0x02}, value: 2},
{key: []byte{0x03}, value: 3},
}
func TestDBInt64(t *testing.T) {
db, _ := OpenDB("")
defer db.Close()
tests := nodeDBInt64Tests
for i := 0; i < len(tests); i++ {
// Insert the next value
if err := db.storeInt64(tests[i].key, tests[i].value); err != nil {
t.Errorf("test %d: failed to store value: %v", i, err)
}
		// Check all existing and non-existing values
for j := 0; j < len(tests); j++ {
num := db.fetchInt64(tests[j].key)
switch {
case j <= i && num != tests[j].value:
t.Errorf("test %d, item %d: value mismatch: have %v, want %v", i, j, num, tests[j].value)
case j > i && num != 0:
t.Errorf("test %d, item %d: value mismatch: have %v, want %v", i, j, num, 0)
}
}
}
}
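
// TestDBFetchStore exercises fetch/store round-trips for the ping, pong, and
// findnode-failure helpers as well as for whole node records.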
func TestDBFetchStore(t *testing.T) {
node := NewV4(
hexPubkey("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
net.IP{192, 168, 0, 1},
30303,
30303,
)
inst := time.Now()
num := 314
db, _ := OpenDB("")
defer db.Close()
// Check fetch/store operations on a node ping object
if stored := db.LastPingReceived(node.ID(), node.IP()); stored.Unix() != 0 {
t.Errorf("ping: non-existing object: %v", stored)
}
if err := db.UpdateLastPingReceived(node.ID(), node.IP(), inst); err != nil {
t.Errorf("ping: failed to update: %v", err)
}
if stored := db.LastPingReceived(node.ID(), node.IP()); stored.Unix() != inst.Unix() {
t.Errorf("ping: value mismatch: have %v, want %v", stored, inst)
}
// Check fetch/store operations on a node pong object
if stored := db.LastPongReceived(node.ID(), node.IP()); stored.Unix() != 0 {
t.Errorf("pong: non-existing object: %v", stored)
}
if err := db.UpdateLastPongReceived(node.ID(), node.IP(), inst); err != nil {
t.Errorf("pong: failed to update: %v", err)
}
if stored := db.LastPongReceived(node.ID(), node.IP()); stored.Unix() != inst.Unix() {
t.Errorf("pong: value mismatch: have %v, want %v", stored, inst)
}
// Check fetch/store operations on a node findnode-failure object
if stored := db.FindFails(node.ID(), node.IP()); stored != 0 {
t.Errorf("find-node fails: non-existing object: %v", stored)
}
if err := db.UpdateFindFails(node.ID(), node.IP(), num); err != nil {
t.Errorf("find-node fails: failed to update: %v", err)
}
if stored := db.FindFails(node.ID(), node.IP()); stored != num {
t.Errorf("find-node fails: value mismatch: have %v, want %v", stored, num)
}
// Check fetch/store operations on an actual node object
if stored := db.Node(node.ID()); stored != nil {
t.Errorf("node: non-existing object: %v", stored)
}
if err := db.UpdateNode(node); err != nil {
t.Errorf("node: failed to update: %v", err)
}
if stored := db.Node(node.ID()); stored == nil {
t.Errorf("node: not found")
} else if !reflect.DeepEqual(stored, node) {
t.Errorf("node: data mismatch: have %v, want %v", stored, node)
}
}
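
// Fixture for the seed-query test. Per the entry comments, only the first
// node (stale pong time) is expected to be filtered out; testSeedQuery builds
// its want set from nodeDBSeedQueryNodes[1:].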
var nodeDBSeedQueryNodes = []struct {
node *Node
pong time.Time
}{
// This one should not be in the result set because its last
// pong time is too far in the past.
{
node: NewV4(
hexPubkey("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
net.IP{127, 0, 0, 3},
30303,
30303,
),
pong: time.Now().Add(-3 * time.Hour),
},
	// This entry's ID doubles as the local node's ID, but the query does
	// not filter it out: testSeedQuery expects every entry in
	// nodeDBSeedQueryNodes[1:] to be returned.
{
node: NewV4(
hexPubkey("ff93ff820abacd4351b0f14e47b324bc82ff014c226f3f66a53535734a3c150e7e38ca03ef0964ba55acddc768f5e99cd59dea95ddd4defbab1339c92fa319b2"),
net.IP{127, 0, 0, 3},
30303,
30303,
),
pong: time.Now().Add(-4 * time.Second),
},
// These should be in the result set.
{
node: NewV4(
hexPubkey("c2b5eb3f5dde05f815b63777809ee3e7e0cbb20035a6b00ce327191e6eaa8f26a8d461c9112b7ab94698e7361fa19fd647e603e73239002946d76085b6f928d6"),
net.IP{127, 0, 0, 1},
30303,
30303,
),
pong: time.Now().Add(-2 * time.Second),
},
{
node: NewV4(
hexPubkey("6ca1d400c8ddf8acc94bcb0dd254911ad71a57bed5e0ae5aa205beed59b28c2339908e97990c493499613cff8ecf6c3dc7112a8ead220cdcd00d8847ca3db755"),
net.IP{127, 0, 0, 2},
30303,
30303,
),
pong: time.Now().Add(-3 * time.Second),
},
{
node: NewV4(
hexPubkey("234dc63fe4d131212b38236c4c3411288d7bec61cbf7b120ff12c43dc60c96182882f4291d209db66f8a38e986c9c010ff59231a67f9515c7d1668b86b221a47"),
net.IP{127, 0, 0, 3},
30303,
30303,
),
pong: time.Now().Add(-1 * time.Second),
},
{
node: NewV4(
hexPubkey("c013a50b4d1ebce5c377d8af8cb7114fd933ffc9627f96ad56d90fef5b7253ec736fd07ef9a81dc2955a997e54b7bf50afd0aa9f110595e2bec5bb7ce1657004"),
net.IP{127, 0, 0, 3},
30303,
30303,
),
pong: time.Now().Add(-2 * time.Second),
},
{
node: NewV4(
hexPubkey("f141087e3e08af1aeec261ff75f48b5b1637f594ea9ad670e50051646b0416daa3b134c28788cbe98af26992a47652889cd8577ccc108ac02c6a664db2dc1283"),
net.IP{127, 0, 0, 3},
30303,
30303,
),
pong: time.Now().Add(-2 * time.Second),
},
}

func TestDBSeedQuery(t *testing.T) {
	// Querying seeds uses seeks and might not find all nodes every
	// time when the database is small. Run the test multiple times
	// to avoid flakes.
const attempts = 15
var err error
for i := 0; i < attempts; i++ {
if err = testSeedQuery(); err == nil {
return
}
}
if err != nil {
t.Errorf("no successful run in %d attempts: %v", attempts, err)
}
}

func testSeedQuery() error {
db, _ := OpenDB("")
defer db.Close()
// Insert a batch of nodes for querying
for i, seed := range nodeDBSeedQueryNodes {
if err := db.UpdateNode(seed.node); err != nil {
return fmt.Errorf("node %d: failed to insert: %v", i, err)
}
if err := db.UpdateLastPongReceived(seed.node.ID(), seed.node.IP(), seed.pong); err != nil {
return fmt.Errorf("node %d: failed to insert bondTime: %v", i, err)
}
}
// Retrieve the entire batch and check for duplicates
seeds := db.QuerySeeds(len(nodeDBSeedQueryNodes)*2, time.Hour)
have := make(map[ID]struct{})
for _, seed := range seeds {
have[seed.ID()] = struct{}{}
}
want := make(map[ID]struct{})
for _, seed := range nodeDBSeedQueryNodes[1:] {
want[seed.node.ID()] = struct{}{}
}
if len(seeds) != len(want) {
return fmt.Errorf("seed count mismatch: have %v, want %v", len(seeds), len(want))
}
for id := range have {
if _, ok := want[id]; !ok {
return fmt.Errorf("extra seed: %v", id)
}
}
for id := range want {
if _, ok := have[id]; !ok {
return fmt.Errorf("missing seed: %v", id)
}
}
return nil
}

func TestDBPersistency(t *testing.T) {
root := t.TempDir()
var (
testKey = []byte("somekey")
testInt = int64(314)
)
// Create a persistent database and store some values
db, err := OpenDB(filepath.Join(root, "database"))
if err != nil {
t.Fatalf("failed to create persistent database: %v", err)
}
if err := db.storeInt64(testKey, testInt); err != nil {
t.Fatalf("failed to store value: %v.", err)
}
db.Close()
// Reopen the database and check the value
db, err = OpenDB(filepath.Join(root, "database"))
if err != nil {
t.Fatalf("failed to open persistent database: %v", err)
}
if val := db.fetchInt64(testKey); val != testInt {
t.Fatalf("value mismatch: have %v, want %v", val, testInt)
}
db.Close()
}
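
// Fixture for the expiration test: storeNode controls whether the node record
// itself is written, pong sets the last-pong timestamp relative to
// dbNodeExpiration, and exp records whether expireNodes should drop the entry.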
var nodeDBExpirationNodes = []struct {
node *Node
pong time.Time
storeNode bool
exp bool
}{
// Node has new enough pong time and isn't expired:
{
node: NewV4(
hexPubkey("8d110e2ed4b446d9b5fb50f117e5f37fb7597af455e1dab0e6f045a6eeaa786a6781141659020d38bdc5e698ed3d4d2bafa8b5061810dfa63e8ac038db2e9b67"),
net.IP{127, 0, 0, 1},
30303,
30303,
),
storeNode: true,
pong: time.Now().Add(-dbNodeExpiration + time.Minute),
exp: false,
},
	// Node whose pong time is older than the expiration cutoff is removed:
{
node: NewV4(
hexPubkey("913a205579c32425b220dfba999d215066e5bdbf900226b11da1907eae5e93eb40616d47412cf819664e9eacbdfcca6b0c6e07e09847a38472d4be46ab0c3672"),
net.IP{127, 0, 0, 2},
30303,
30303,
),
storeNode: true,
pong: time.Now().Add(-dbNodeExpiration - time.Minute),
exp: true,
},
// Just pong time, no node stored:
{
node: NewV4(
hexPubkey("b56670e0b6bad2c5dab9f9fe6f061a16cf78d68b6ae2cfda3144262d08d97ce5f46fd8799b6d1f709b1abe718f2863e224488bd7518e5e3b43809ac9bd1138ca"),
net.IP{127, 0, 0, 3},
30303,
30303,
),
storeNode: false,
pong: time.Now().Add(-dbNodeExpiration - time.Minute),
exp: true,
},
// Node with multiple pong times, all older than expiration.
{
node: NewV4(
hexPubkey("29f619cebfd32c9eab34aec797ed5e3fe15b9b45be95b4df3f5fe6a9ae892f433eb08d7698b2ef3621568b0fb70d57b515ab30d4e72583b798298e0f0a66b9d1"),
net.IP{127, 0, 0, 4},
30303,
30303,
),
storeNode: true,
pong: time.Now().Add(-dbNodeExpiration - time.Minute),
exp: true,
},
{
node: NewV4(
hexPubkey("29f619cebfd32c9eab34aec797ed5e3fe15b9b45be95b4df3f5fe6a9ae892f433eb08d7698b2ef3621568b0fb70d57b515ab30d4e72583b798298e0f0a66b9d1"),
net.IP{127, 0, 0, 5},
30303,
30303,
),
storeNode: false,
pong: time.Now().Add(-dbNodeExpiration - 2*time.Minute),
exp: true,
},
// Node with multiple pong times, one newer, one older than expiration.
{
node: NewV4(
hexPubkey("3b73a9e5f4af6c4701c57c73cc8cfa0f4802840b24c11eba92aac3aef65644a3728b4b2aec8199f6d72bd66be2c65861c773129039bd47daa091ca90a6d4c857"),
net.IP{127, 0, 0, 6},
30303,
30303,
),
storeNode: true,
pong: time.Now().Add(-dbNodeExpiration + time.Minute),
exp: false,
},
{
node: NewV4(
hexPubkey("3b73a9e5f4af6c4701c57c73cc8cfa0f4802840b24c11eba92aac3aef65644a3728b4b2aec8199f6d72bd66be2c65861c773129039bd47daa091ca90a6d4c857"),
net.IP{127, 0, 0, 7},
30303,
30303,
),
storeNode: false,
pong: time.Now().Add(-dbNodeExpiration - time.Minute),
exp: true,
},
}
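
// TestDBExpiration stores the fixture above, runs expireNodes once, and
// checks that exactly the entries marked exp were dropped.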
func TestDBExpiration(t *testing.T) {
db, _ := OpenDB("")
defer db.Close()
// Add all the test nodes and set their last pong time.
for i, seed := range nodeDBExpirationNodes {
if seed.storeNode {
if err := db.UpdateNode(seed.node); err != nil {
t.Fatalf("node %d: failed to insert: %v", i, err)
}
}
if err := db.UpdateLastPongReceived(seed.node.ID(), seed.node.IP(), seed.pong); err != nil {
t.Fatalf("node %d: failed to update bondTime: %v", i, err)
}
}
db.expireNodes()
// Check that expired entries have been removed.
unixZeroTime := time.Unix(0, 0)
for i, seed := range nodeDBExpirationNodes {
node := db.Node(seed.node.ID())
pong := db.LastPongReceived(seed.node.ID(), seed.node.IP())
if seed.exp {
if seed.storeNode && node != nil {
t.Errorf("node %d (%s) shouldn't be present after expiration", i, seed.node.ID().TerminalString())
}
if !pong.Equal(unixZeroTime) {
t.Errorf("pong time %d (%s %v) shouldn't be present after expiration", i, seed.node.ID().TerminalString(), seed.node.IP())
}
} else {
if seed.storeNode && node == nil {
t.Errorf("node %d (%s) should be present after expiration", i, seed.node.ID().TerminalString())
}
if !pong.Equal(seed.pong.Truncate(1 * time.Second)) {
t.Errorf("pong time %d (%s) should be %v after expiration, but is %v", i, seed.node.ID().TerminalString(), seed.pong, pong)
}
}
}
}

// This test checks that expiration works when discovery v5 data is present
// in the database.
func TestDBExpireV5(t *testing.T) {
db, _ := OpenDB("")
defer db.Close()
ip := net.IP{127, 0, 0, 1}
db.UpdateFindFailsV5(ID{}, ip, 4)
db.expireNodes()
}
| p2p/enode/nodedb_test.go | 0 | https://github.com/ethereum/go-ethereum/commit/16701c51697e28986feebd122c6a491e4d9ac0e7 | [
0.9904390573501587,
0.18064437806606293,
0.00016451030387543142,
0.00017558127001393586,
0.3659122586250305
] |