Columns:
  hunk              dict
  file              string (length 0 to 11.8M)
  file_path         string (length 2 to 234)
  label             int64 (0 or 1)
  commit_url        string (length 74 to 103)
  dependency_score  sequence (length 5)
{ "id": 9, "code_window": [ "\t\t}\n", "\t\tswitch part {\n", "\t\tcase \"CON\", \"PRN\", \"AUX\", \"NUL\",\n", "\t\t\t\"COM1\", \"COM2\", \"COM3\", \"COM4\", \"COM5\", \"COM6\", \"COM7\", \"COM8\", \"COM9\",\n", "\t\t\t\"LPT1\", \"LPT2\", \"LPT3\", \"LPT4\", \"LPT5\", \"LPT6\", \"LPT7\", \"LPT8\", \"LPT9\":\n", "\t\t\t// These reserved names are not valid.\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tswitch strings.ToUpper(part) {\n" ], "file_path": "lib/fs/util.go", "type": "replace", "edit_start_line_idx": 70 }
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package fs

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"strings"
)

var errNoHome = errors.New("no home directory found - set $HOME (or the platform equivalent)")

func ExpandTilde(path string) (string, error) {
	if path == "~" {
		return getHomeDir()
	}

	path = filepath.FromSlash(path)
	if !strings.HasPrefix(path, fmt.Sprintf("~%c", PathSeparator)) {
		return path, nil
	}

	home, err := getHomeDir()
	if err != nil {
		return "", err
	}

	return filepath.Join(home, path[2:]), nil
}

func getHomeDir() (string, error) {
	if runtime.GOOS == "windows" {
		// Legacy -- we prioritize this for historical reasons, whereas
		// os.UserHomeDir uses %USERPROFILE% always.
		home := filepath.Join(os.Getenv("HomeDrive"), os.Getenv("HomePath"))
		if home != "" {
			return home, nil
		}
	}

	return os.UserHomeDir()
}

var windowsDisallowedCharacters = string([]rune{
	'<', '>', ':', '"', '|', '?', '*',
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
	11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
	21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
	31,
})

func WindowsInvalidFilename(name string) bool {
	// None of the path components should end in space or period, or be a
	// reserved name.
	// (https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file)
	for _, part := range strings.Split(name, `\`) {
		if len(part) == 0 {
			continue
		}
		switch part[len(part)-1] {
		case ' ', '.':
			// Names ending in space or period are not valid.
			return true
		}
		switch part {
		case "CON", "PRN", "AUX", "NUL",
			"COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9",
			"LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9":
			// These reserved names are not valid.
			return true
		}
	}

	// The path must not contain any disallowed characters
	return strings.ContainsAny(name, windowsDisallowedCharacters)
}

// IsParent compares paths purely lexicographically, meaning it returns false
// if path and parent aren't both absolute or relative.
func IsParent(path, parent string) bool {
	if parent == path {
		// Twice the same root on windows would not be caught at the end.
		return false
	}
	if filepath.IsAbs(path) != filepath.IsAbs(parent) {
		return false
	}
	if parent == "" || parent == "." {
		// The empty string is the parent of everything except the empty
		// string and ".". (Avoids panic in the last step.)
		return path != "" && path != "."
	}
	if parent == "/" {
		// The root is the parent of everything except itself, which would
		// not be caught below.
		return path != "/"
	}
	if parent[len(parent)-1] != PathSeparator {
		parent += string(PathSeparator)
	}
	return strings.HasPrefix(path, parent)
}

func CommonPrefix(first, second string) string {
	if filepath.IsAbs(first) != filepath.IsAbs(second) {
		// Whatever
		return ""
	}

	firstParts := strings.Split(filepath.Clean(first), string(PathSeparator))
	secondParts := strings.Split(filepath.Clean(second), string(PathSeparator))

	isAbs := filepath.IsAbs(first) && filepath.IsAbs(second)

	count := len(firstParts)
	if len(secondParts) < len(firstParts) {
		count = len(secondParts)
	}

	common := make([]string, 0, count)
	for i := 0; i < count; i++ {
		if firstParts[i] != secondParts[i] {
			break
		}
		common = append(common, firstParts[i])
	}

	if isAbs {
		if runtime.GOOS == "windows" && isVolumeNameOnly(common) {
			// Because strings.Split strips out path separators, if we're at the volume name, we end up without a separator
			// Wedge an empty element to be joined with.
			common = append(common, "")
		} else if len(common) == 1 {
			// If isAbs on non Windows, first element in both first and second is "", hence joining that returns nothing.
			return string(PathSeparator)
		}
	}

	// This should only be true on Windows when drive letters are different or when paths are relative.
	// In case of UNC paths we should end up with more than a single element hence joining is fine
	if len(common) == 0 {
		return ""
	}

	// This has to be strings.Join, because filepath.Join([]string{"", "", "?", "C:", "Audrius"}...) returns garbage
	result := strings.Join(common, string(PathSeparator))
	return filepath.Clean(result)
}

func isVolumeNameOnly(parts []string) bool {
	isNormalVolumeName := len(parts) == 1 && strings.HasSuffix(parts[0], ":")
	isUNCVolumeName := len(parts) == 4 && strings.HasSuffix(parts[3], ":")
	return isNormalVolumeName || isUNCVolumeName
}
lib/fs/util.go
1
https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb
[ 0.9554246068000793, 0.09605734050273895, 0.00016611875616945326, 0.00017474683409091085, 0.2667904198169708 ]
{ "id": 9, "code_window": [ "\t\t}\n", "\t\tswitch part {\n", "\t\tcase \"CON\", \"PRN\", \"AUX\", \"NUL\",\n", "\t\t\t\"COM1\", \"COM2\", \"COM3\", \"COM4\", \"COM5\", \"COM6\", \"COM7\", \"COM8\", \"COM9\",\n", "\t\t\t\"LPT1\", \"LPT2\", \"LPT3\", \"LPT4\", \"LPT5\", \"LPT6\", \"LPT7\", \"LPT8\", \"LPT9\":\n", "\t\t\t// These reserved names are not valid.\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tswitch strings.ToUpper(part) {\n" ], "file_path": "lib/fs/util.go", "type": "replace", "edit_start_line_idx": 70 }
// Copyright (C) 2017 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

// +build !windows

package syncthing

import (
	"os"
)

func isSuperUser() bool {
	return os.Geteuid() == 0
}
lib/syncthing/superuser_unix.go
0
https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb
[ 0.0001742308959364891, 0.00016831251559779048, 0.00016239413525909185, 0.00016831251559779048, 0.0000059183803386986256 ]
{ "id": 9, "code_window": [ "\t\t}\n", "\t\tswitch part {\n", "\t\tcase \"CON\", \"PRN\", \"AUX\", \"NUL\",\n", "\t\t\t\"COM1\", \"COM2\", \"COM3\", \"COM4\", \"COM5\", \"COM6\", \"COM7\", \"COM8\", \"COM9\",\n", "\t\t\t\"LPT1\", \"LPT2\", \"LPT3\", \"LPT4\", \"LPT5\", \"LPT6\", \"LPT7\", \"LPT8\", \"LPT9\":\n", "\t\t\t// These reserved names are not valid.\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tswitch strings.ToUpper(part) {\n" ], "file_path": "lib/fs/util.go", "type": "replace", "edit_start_line_idx": 70 }
.\" Man page generated from reStructuredText. . .TH "SYNCTHING-REST-API" "7" "Sep 17, 2020" "v1" "Syncthing" .SH NAME syncthing-rest-api \- REST API . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .sp Syncthing exposes a REST interface over HTTP on the GUI port. This is used by the GUI (from Javascript) and can be used by other processes wishing to control Syncthing. In most cases both the input and output data is in JSON format. The interface is subject to change. .SH API KEY .sp To use the REST API an API key must be set and used. The API key can be generated in the GUI, or set in the \fBconfiguration/gui/apikey\fP element in the configuration file. To use an API key, set the request header \fBX\-API\-Key\fP to the API key value. For example, \fBcurl \-X POST \-H "X\-API\-Key: abc123" http://localhost:8384/rest/...\fP can be used to invoke with \fBcurl\fP (add \fB\-k\fP flag when using HTTPS with a Syncthing generated or self signed certificate). .SH SYSTEM ENDPOINTS .SS GET /rest/system/browse .sp Returns a list of directories matching the path given by the optional parameter \fBcurrent\fP\&. The path can use \fI\%patterns as described in Go’s filepath package\fP <\fBhttps://golang.org/pkg/path/filepath/#Match\fP>\&. A ‘*’ will always be appended to the given path (e.g. \fB/tmp/\fP matches all its subdirectories). If the option \fBcurrent\fP is not given, filesystem root paths are returned. .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C $ curl \-H "X\-API\-Key: yourkey" localhost:8384/rest/system/browse | json_pp [ "/" ] $ curl \-H "X\-API\-Key: yourkey" localhost:8384/rest/system/browse?current=/var/ | json_pp [ "/var/backups/", "/var/cache/", "/var/lib/", "/var/local/", "/var/lock/", "/var/log/", "/var/mail/", "/var/opt/", "/var/run/", "/var/spool/", "/var/tmp/" ] $ curl \-H "X\-API\-Key: yourkey" localhost:8384/rest/system/browse?current=/var/*o | json_pp [ "/var/local/", "/var/lock/", "/var/log/", "/var/opt/", "/var/spool/" ] .ft P .fi .UNINDENT .UNINDENT .SS GET /rest/system/config .sp Returns the current configuration. 
.INDENT 0.0 .INDENT 3.5 .sp .nf .ft C { "version": 30, "folders": [ { "id": "GXWxf\-3zgnU", "label": "MyFolder", "filesystemType": "basic", "path": "...", "type": "sendreceive", "devices": [ { "deviceID": "...", "introducedBy": "" } ], "rescanIntervalS": 60, "fsWatcherEnabled": false, "fsWatcherDelayS": 10, "ignorePerms": false, "autoNormalize": true, "minDiskFree": { "value": 1, "unit": "%" }, "versioning": { "type": "simple", "params": { "keep": "5" } }, "copiers": 0, "pullerMaxPendingKiB": 0, "hashers": 0, "order": "random", "ignoreDelete": false, "scanProgressIntervalS": 0, "pullerPauseS": 0, "maxConflicts": 10, "disableSparseFiles": false, "disableTempIndexes": false, "paused": false, "weakHashThresholdPct": 25, "markerName": ".stfolder", "copyOwnershipFromParent": false, "modTimeWindowS": 0 } ], "devices": [ { "deviceID": "...", "name": "Laptop", "addresses": [ "dynamic", "tcp://192.168.1.2:22000" ], "compression": "metadata", "certName": "", "introducer": false, "skipIntroductionRemovals": false, "introducedBy": "", "paused": false, "allowedNetworks": [], "autoAcceptFolders": false, "maxSendKbps": 0, "maxRecvKbps": 0, "ignoredFolders": [], "pendingFolders": [ { "time": "2019\-06\-05T10:21:22+02:00", "id": "cpkn4\-57ysy", "label": "SomeonesFolder" } ], "maxRequestKiB": 0 } ], "gui": { "enabled": true, "address": "127.0.0.1:8384", "user": "Username", "password": "$2a$10$ZFws69T4FlvWwsqeIwL.TOo5zOYqsa/.TxlUnsGYS.j3JvjFTmxo6", "authMode": "static", "useTLS": false, "apiKey": "pGahcht56664QU5eoFQW6szbEG6Ec2Cr", "insecureAdminAccess": false, "theme": "default", "debugging": false, "insecureSkipHostcheck": false, "insecureAllowFrameLoading": false }, "ldap": { "address": "", "bindDN": "", "transport": "plain", "insecureSkipVerify": false }, "options": { "listenAddresses": [ "default" ], "globalAnnounceServers": [ "default" ], "globalAnnounceEnabled": true, "localAnnounceEnabled": true, "localAnnouncePort": 21027, "localAnnounceMCAddr": "[ff12::8384]:21027", "maxSendKbps": 0, "maxRecvKbps": 0, "reconnectionIntervalS": 60, "relaysEnabled": true, "relayReconnectIntervalM": 10, "startBrowser": false, "natEnabled": true, "natLeaseMinutes": 60, "natRenewalMinutes": 30, "natTimeoutSeconds": 10, "urAccepted": \-1, "urSeen": 2, "urUniqueId": "", "urURL": "https://data.syncthing.net/newdata", "urPostInsecurely": false, "urInitialDelayS": 1800, "restartOnWakeup": true, "autoUpgradeIntervalH": 12, "upgradeToPreReleases": false, "keepTemporariesH": 24, "cacheIgnoredFiles": false, "progressUpdateIntervalS": 5, "limitBandwidthInLan": false, "minHomeDiskFree": { "value": 1, "unit": "%" }, "releasesURL": "https://upgrades.syncthing.net/meta.json", "alwaysLocalNets": [], "overwriteRemoteDeviceNamesOnConnect": false, "tempIndexMinBlocks": 10, "unackedNotificationIDs": [], "trafficClass": 0, "defaultFolderPath": "~", "setLowPriority": true, "maxFolderConcurrency": 0, "crURL": "https://crash.syncthing.net/newcrash", "crashReportingEnabled": true, "stunKeepaliveStartS": 180, "stunKeepaliveMinS": 20, "stunServers": [ "default" ], "databaseTuning": "auto", "maxConcurrentIncomingRequestKiB": 0 }, "remoteIgnoredDevices": [], "pendingDevices": [] } .ft P .fi .UNINDENT .UNINDENT .SS GET /rest/system/config/insync .sp Returns whether the config is in sync, i.e. whether the running configuration is the same as that on disk. 
.INDENT 0.0 .INDENT 3.5 .sp .nf .ft C { "configInSync": true } .ft P .fi .UNINDENT .UNINDENT .SS POST /rest/system/config .sp Post the full contents of the configuration, in the same format as returned by the corresponding GET request. When posting the configuration succeeds, the posted configuration is immediately applied, except for changes that require a restart. Query rest\-config\-insync to check if a restart is required. .sp This endpoint is the main point to control Syncthing, even if the change only concerns a very small part of the config: The usual workflow is to get the config, modify the needed parts and post it again. .SS GET /rest/system/connections .sp \fBNOTE:\fP .INDENT 0.0 .INDENT 3.5 Return format changed in 0.13.0. .UNINDENT .UNINDENT .sp Returns the list of configured devices and some metadata associated with them. The list also contains the local device itself as not connected. .sp The connection types are \fBTCP (Client)\fP, \fBTCP (Server)\fP, \fBRelay (Client)\fP and \fBRelay (Server)\fP\&. .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C { "total" : { "paused" : false, "clientVersion" : "", "at" : "2015\-11\-07T17:29:47.691637262+01:00", "connected" : false, "inBytesTotal" : 1479, "type" : "", "outBytesTotal" : 1318, "address" : "" }, "connections" : { "YZJBJFX\-RDBL7WY\-6ZGKJ2D\-4MJB4E7\-ZATSDUY\-LD6Y3L3\-MLFUYWE\-AEMXJAC" : { "connected" : true, "inBytesTotal" : 556, "paused" : false, "at" : "2015\-11\-07T17:29:47.691548971+01:00", "clientVersion" : "v0.12.1", "address" : "127.0.0.1:22002", "type" : "TCP (Client)", "outBytesTotal" : 550 }, "DOVII4U\-SQEEESM\-VZ2CVTC\-CJM4YN5\-QNV7DCU\-5U3ASRL\-YVFG6TH\-W5DV5AA" : { "outBytesTotal" : 0, "type" : "", "address" : "", "at" : "0001\-01\-01T00:00:00Z", "clientVersion" : "", "paused" : false, "inBytesTotal" : 0, "connected" : false }, "UYGDMA4\-TPHOFO5\-2VQYDCC\-7CWX7XW\-INZINQT\-LE4B42N\-4JUZTSM\-IWCSXA4" : { "address" : "", "type" : "", "outBytesTotal" : 0, "connected" : false, "inBytesTotal" : 0, "paused" : false, "at" : "0001\-01\-01T00:00:00Z", "clientVersion" : "" } } } .ft P .fi .UNINDENT .UNINDENT .SS GET /rest/system/debug .sp New in version 0.12.0. .sp Returns the set of debug facilities and which of them are currently enabled. .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C { "enabled": [ "beacon" ], "facilities": { "beacon": "Multicast and broadcast discovery", "config": "Configuration loading and saving", "connections": "Connection handling", "db": "The database layer", "dialer": "Dialing connections", "discover": "Remote device discovery", "events": "Event generation and logging", "http": "REST API", "main": "Main package", "model": "The root hub", "protocol": "The BEP protocol", "relay": "Relay connection handling", "scanner": "File change detection and hashing", "stats": "Persistent device and folder statistics", "sync": "Mutexes", "upgrade": "Binary upgrades", "upnp": "UPnP discovery and port mapping", "versioner": "File versioning" } } .ft P .fi .UNINDENT .UNINDENT .SS POST /rest/system/debug .sp New in version 0.12.0. .sp Enables or disables debugging for specified facilities. Give one or both of \fBenable\fP and \fBdisable\fP query parameters, with comma separated facility names. 
To disable debugging of the beacon and discovery packages, and enable it for config and db: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C $ curl \-H X\-API\-Key:abc123 \-X POST \(aqhttp://localhost:8384/rest/system/debug?disable=beacon,discovery&enable=config,db\(aq .ft P .fi .UNINDENT .UNINDENT .SS GET /rest/system/discovery .sp Returns the contents of the local discovery cache. .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C { "LGFPDIT7SKNNJVJZA4FC7QNCRKCE753K72BW5QD2FOZ7FRFEP57Q": [ "192.162.129.11:22000" ] } .ft P .fi .UNINDENT .UNINDENT .SS POST /rest/system/discovery .sp \fBNOTE:\fP .INDENT 0.0 .INDENT 3.5 Removed in v0.12.0. .UNINDENT .UNINDENT .sp Post with the query parameters \fBdevice\fP and \fBaddr\fP to add entries to the discovery cache. .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C curl \-X POST http://127.0.0.1:8384/rest/system/discovery?device=LGFPDIT7SKNNJVJZA4FC7QNCRKCE753K72BW5QD2FOZ7FRFEP57Q\e&addr=192.162.129.11:22000 # Or with the X\-API\-Key header: curl \-X POST \-\-header "X\-API\-Key: TcE28kVPdtJ8COws1JdM0b2nodj77WeQ" http://127.0.0.1:8384/rest/system/discovery?device=LGFPDIT7SKNNJVJZA4FC7QNCRKCE753K72BW5QD2FOZ7FRFEP57Q\e&addr=192.162.129.11:22000 .ft P .fi .UNINDENT .UNINDENT .SS POST /rest/system/error/clear .sp Post with empty to body to remove all recent errors. .SS GET /rest/system/error .sp \fBNOTE:\fP .INDENT 0.0 .INDENT 3.5 Return format changed in 0.12.0. .UNINDENT .UNINDENT .sp Returns the list of recent errors. .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C { "errors": [ { "when": "2014\-09\-18T12:59:26.549953186+02:00", "message": "This is an error string" } ] } .ft P .fi .UNINDENT .UNINDENT .SS POST /rest/system/error .sp Post with an error message in the body (plain text) to register a new error. The new error will be displayed on any active GUI clients. .SS GET /rest/system/log .sp New in version 0.12.0. .sp Returns the list of recent log entries. .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C { "messages": [ { "when": "2014\-09\-18T12:59:26.549953186+02:00", "message": "This is a log entry" } ] } .ft P .fi .UNINDENT .UNINDENT .SS POST /rest/system/pause .sp Pause the given device or all devices. .sp Takes the optional parameter \fBdevice\fP (device ID). When omitted, pauses all devices. Returns status 200 and no content upon success, or status 500 and a plain text error on failure. .SS GET /rest/system/ping .sp Returns a \fB{"ping": "pong"}\fP object. .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C { "ping": "pong" } .ft P .fi .UNINDENT .UNINDENT .SS POST /rest/system/ping .sp Returns a \fB{"ping": "pong"}\fP object. .SS POST /rest/system/reset .sp Post with empty body to erase the current index database and restart Syncthing. With no query parameters, the entire database is erased from disk. By specifying the \fBfolder\fP parameter with a valid folder ID, only information for that folder will be erased: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C $ curl \-X POST \-H "X\-API\-Key: abc123" http://localhost:8384/rest/system/reset?folder=default .ft P .fi .UNINDENT .UNINDENT .sp \fBCaution\fP: See \fB\-reset\-database\fP for \fB\&.stfolder\fP creation side\-effect and caution regarding mountpoints. .SS POST /rest/system/restart .sp Post with empty body to immediately restart Syncthing. .SS POST /rest/system/resume .sp Resume the given device or all devices. .sp Takes the optional parameter \fBdevice\fP (device ID). When omitted, resumes all devices. Returns status 200 and no content upon success, or status 500 and a plain text error on failure. 
.SS POST /rest/system/shutdown .sp Post with empty body to cause Syncthing to exit and not restart. .SS GET /rest/system/status .sp Returns information about current system status and resource usage. The CPU percent value has been deprecated from the API and will always report 0. .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C { "alloc": 30618136, "connectionServiceStatus": { "dynamic+https://relays.syncthing.net/endpoint": { "error": null, "lanAddresses": [ "relay://23.92.71.120:443/?id=53STGR7\-YBM6FCX\-PAZ2RHM\-YPY6OEJ\-WYHVZO7\-PCKQRCK\-PZLTP7T\-434XCAD&pingInterval=1m0s&networkTimeout=2m0s&sessionLimitBps=0&globalLimitBps=0&statusAddr=:22070&providedBy=canton7" ], "wanAddresses": [ "relay://23.92.71.120:443/?id=53STGR7\-YBM6FCX\-PAZ2RHM\-YPY6OEJ\-WYHVZO7\-PCKQRCK\-PZLTP7T\-434XCAD&pingInterval=1m0s&networkTimeout=2m0s&sessionLimitBps=0&globalLimitBps=0&statusAddr=:22070&providedBy=canton7" ] }, "tcp://0.0.0.0:22000": { "error": null, "lanAddresses": [ "tcp://0.0.0.0:22000" ], "wanAddresses": [ "tcp://0.0.0.0:22000" ] } }, "cpuPercent": 0, "discoveryEnabled": true, "discoveryErrors": { "global@https://discovery\-v4\-1.syncthing.net/v2/": "500 Internal Server Error", "global@https://discovery\-v4\-2.syncthing.net/v2/": "Post https://discovery\-v4\-2.syncthing.net/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)", "global@https://discovery\-v4\-3.syncthing.net/v2/": "Post https://discovery\-v4\-3.syncthing.net/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)", "global@https://discovery\-v6\-1.syncthing.net/v2/": "Post https://discovery\-v6\-1.syncthing.net/v2/: dial tcp [2001:470:28:4d6::5]:443: connect: no route to host", "global@https://discovery\-v6\-2.syncthing.net/v2/": "Post https://discovery\-v6\-2.syncthing.net/v2/: dial tcp [2604:a880:800:10::182:a001]:443: connect: no route to host", "global@https://discovery\-v6\-3.syncthing.net/v2/": "Post https://discovery\-v6\-3.syncthing.net/v2/: dial tcp [2400:6180:0:d0::d9:d001]:443: connect: no route to host" }, "discoveryMethods": 8, "goroutines": 49, "lastDialStatus": { "tcp://10.20.30.40": { "when": "2019\-05\-16T07:41:23Z", "error": "dial tcp 10.20.30.40:22000: i/o timeout" }, "tcp://172.16.33.3:22000": { "when": "2019\-05\-16T07:40:43Z", "ok": true }, "tcp://83.233.120.221:22000": { "when": "2019\-05\-16T07:41:13Z", "error": "dial tcp 83.233.120.221:22000: connect: connection refused" } }, "myID": "P56IOI7\-MZJNU2Y\-IQGDREY\-DM2MGTI\-MGL3BXN\-PQ6W5BM\-TBBZ4TJ\-XZWICQ2", "pathSeparator": "/", "startTime": "2016\-06\-06T19:41:43.039284753+02:00", "sys": 42092792, "themes": [ "default", "dark" ], "tilde": "/Users/jb", "uptime": 2635 } .ft P .fi .UNINDENT .UNINDENT .sp New in version 1.2.0: The \fBlastDialStatus\fP dictionary contains the last error (or \fBnull\fP for success) for each peer address that Syncthing has attempted to contact. The \fBconnectionServiceStatus\fP entries gained \fB"error": null\fP attributes where previously there would be no \fBerror\fP attribute at all in the success case. .SS GET /rest/system/upgrade .sp Checks for a possible upgrade and returns an object describing the newest version and upgrade possibility. .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C { "latest": "v0.14.47", "majorNewer": false, "newer": true, "running": "v0.14.46" } .ft P .fi .UNINDENT .UNINDENT .SS POST /rest/system/upgrade .sp Perform an upgrade to the newest released version and restart. 
Does nothing if there is no newer version than currently running. .SS GET /rest/system/version .sp Returns the current Syncthing version information. .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C { "arch": "amd64", "longVersion": "syncthing v0.10.27+3\-gea8c3de (go1.4 darwin\-amd64 default) jb@syno 2015\-03\-16 11:01:29 UTC", "os": "darwin", "version": "v0.10.27+3\-gea8c3de" } .ft P .fi .UNINDENT .UNINDENT .SH DATABASE ENDPOINTS .SS GET /rest/db/browse .sp Returns the directory tree of the global model. Directories are always JSON objects (map/dictionary), and files are always arrays of modification time and size. The first integer is the files modification time, and the second integer is the file size. .sp The call takes one mandatory \fBfolder\fP parameter and two optional parameters. Optional parameter \fBlevels\fP defines how deep within the tree we want to dwell down (0 based, defaults to unlimited depth) Optional parameter \fBprefix\fP defines a prefix within the tree where to start building the structure. .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C $ curl \-s http://localhost:8384/rest/db/browse?folder=default | json_pp { "directory": { "file": ["2015\-04\-20T22:20:45+09:00", 130940928], "subdirectory": { "another file": ["2015\-04\-20T22:20:45+09:00", 130940928] } }, "rootfile": ["2015\-04\-20T22:20:45+09:00", 130940928] } $ curl \-s http://localhost:8384/rest/db/browse?folder=default&levels=0 | json_pp { "directory": {}, "rootfile": ["2015\-04\-20T22:20:45+09:00", 130940928] } $ curl \-s http://localhost:8384/rest/db/browse?folder=default&levels=1 | json_pp { "directory": { "file": ["2015\-04\-20T22:20:45+09:00", 130940928], "subdirectory": {} }, "rootfile": ["2015\-04\-20T22:20:45+09:00", 130940928] } $ curl \-s http://localhost:8384/rest/db/browse?folder=default&prefix=directory/subdirectory | json_pp { "another file": ["2015\-04\-20T22:20:45+09:00", 130940928] } $ curl \-s http://localhost:8384/rest/db/browse?folder=default&prefix=directory&levels=0 | json_pp { "file": ["2015\-04\-20T22:20:45+09:00", 130940928], "subdirectory": {} } .ft P .fi .UNINDENT .UNINDENT .sp \fBNOTE:\fP .INDENT 0.0 .INDENT 3.5 This is an expensive call, increasing CPU and RAM usage on the device. Use sparingly. .UNINDENT .UNINDENT .SS GET /rest/db/completion .sp Returns the completion percentage (0 to 100) and byte / item counts. Takes optional \fBdevice\fP and \fBfolder\fP parameters: .INDENT 0.0 .IP \(bu 2 \fBfolder\fP specifies the folder ID to calculate completion for. An empty or absent \fBfolder\fP parameter means all folders as an aggregate. .IP \(bu 2 \fBdevice\fP specifies the device ID to calculate completion for. An empty or absent \fBdevice\fP parameter means the local device. .UNINDENT .sp If a device is specified but no folder, completion is calculated for all folders shared with that device. 
.SS Example Queries .sp Completion status for folder \fBabcd\-1234\fP on device \fBI6KAH76\-...\-3PSROAU\fP: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C /rest/db/completion?folder=abcd\-1234&device=I6KAH76\-...\-3PSROAU .ft P .fi .UNINDENT .UNINDENT .sp Aggregated completion status for device \fBI6KAH76\-...\-3PSROAU\fP (all folders shared with them): .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C /rest/db/completion?device=I6KAH76\-...\-3PSROAU .ft P .fi .UNINDENT .UNINDENT .sp Completion status for folder \fBabcd\-1234\fP on the local device: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C /rest/db/completion?folder=abcd\-1234 .ft P .fi .UNINDENT .UNINDENT .sp Aggregated completion status for all folders on the local device: .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C /rest/db/completion .ft P .fi .UNINDENT .UNINDENT .SS Example Response .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C { "completion": 99.9937565835, "globalBytes": 156793013575, "needBytes": 9789241, "globalItems": 7823, "needItems": 412, "needDeletes": 0 } .ft P .fi .UNINDENT .UNINDENT .sp New in version 1.8.0: The ability to aggregate multiple folders by leaving out the folder ID. Querying data for the local device by leaving out the device ID. Returning the \fBglobalItems\fP counter in the response. .SS GET /rest/db/file .sp Returns most data available about a given file, including version and availability. Takes \fBfolder\fP and \fBfile\fP parameters. .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C { "availability": [ { "id": "ITZRNXE\-YNROGBZ\-HXTH5P7\-VK5NYE5\-QHRQGE2\-7JQ6VNJ\-KZUEDIU\-5PPR5AM", "fromTemporary": false } ], "global": { "deleted": false, "ignored": false, "invalid": false, "localFlags": 0, "modified": "2018\-08\-18T12:21:13.836784059+02:00", "modifiedBy": "SYNO4VL", "mustRescan": false, "name": "testfile", "noPermissions": false, "numBlocks": 1, "permissions": "0755", "sequence": 107499, "size": 1234, "type": 0, "version": [ "SYNO4VL:1" ] }, "local": { "deleted": false, "ignored": false, "invalid": false, "localFlags": 0, "modified": "2018\-08\-18T12:21:13.836784059+02:00", "modifiedBy": "SYNO4VL", "mustRescan": false, "name": "testfile", "noPermissions": false, "numBlocks": 1, "permissions": "0755", "sequence": 111038, "size": 1234, "type": 0, "version": [ "SYNO4VL:1" ] } } .ft P .fi .UNINDENT .UNINDENT .SS GET /rest/db/ignores .sp Takes one parameter, \fBfolder\fP, and returns the content of the \fB\&.stignore\fP as the \fBignore\fP field. A second field, \fBexpanded\fP, provides a list of strings which represent globbing patterns described by gobwas/glob (based on standard wildcards) that match the patterns in \fB\&.stignore\fP and all the includes. If appropriate these globs are prepended by the following modifiers: \fB!\fP to negate the glob, \fB(?i)\fP to do case insensitive matching and \fB(?d)\fP to enable removing of ignored files in an otherwise empty directory. .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C { "ignore": [ "(?i)/Backups" ], "expanded": [ "(?i)Backups", "(?i)Backups/**" ] } .ft P .fi .UNINDENT .UNINDENT .SS POST /rest/db/ignores .sp Expects a format similar to the output of \fBGET\fP call, but only containing the \fBignore\fP field (\fBexpanded\fP field should be omitted). It takes one parameter, \fBfolder\fP, and either updates the content of the \fB\&.stignore\fP echoing it back as a response, or returns an error. .SS GET /rest/db/need .sp Takes one mandatory parameter, \fBfolder\fP, and returns lists of files which are needed by this device in order for it to become in sync. 
.sp Furthermore takes an optional \fBpage\fP and \fBperpage\fP arguments for pagination. Pagination happens, across the union of all needed files, that is \- across all 3 sections of the response. For example, given the current need state is as follows: .INDENT 0.0 .IP 1. 3 \fBprogress\fP has 15 items .IP 2. 3 \fBqueued\fP has 3 items .IP 3. 3 \fBrest\fP has 12 items .UNINDENT .sp If you issue a query with \fBpage=1\fP and \fBperpage=10\fP, only the \fBprogress\fP section in the response will have 10 items. If you issue a request query with \fBpage=2\fP and \fBperpage=10\fP, \fBprogress\fP section will have the last 5 items, \fBqueued\fP section will have all 3 items, and \fBrest\fP section will have first 2 items. If you issue a query for \fBpage=3\fP and \fBperpage=10\fP, you will only have the last 10 items of the \fBrest\fP section. .sp In all these calls, \fBtotal\fP will be 30 to indicate the total number of available items. .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C { # Files currently being downloaded "progress": [ { "flags": "0755", "sequence": 6, "modified": "2015\-04\-20T23:06:12+09:00", "name": "ls", "size": 34640, "version": [ "5157751870738175669:1" ] } ], # Files queued to be downloaded next (as per array order) "queued": [ ... ], # Files to be downloaded after all queued files will be downloaded. # This happens when we start downloading files, and new files get added while we are downloading. "rest": [ ... ], "page": 1, "perpage": 100, "total": 2000 } .ft P .fi .UNINDENT .UNINDENT .sp \fBNOTE:\fP .INDENT 0.0 .INDENT 3.5 This is an expensive call, increasing CPU and RAM usage on the device. Use sparingly. .UNINDENT .UNINDENT .SS POST /rest/db/override .sp Request override of a send only folder. Override means to make the local version latest, overriding changes made on other devices. This API call does nothing if the folder is not a send only folder. .sp Takes the mandatory parameter \fIfolder\fP (folder ID). .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C curl \-X POST \-H X\-API\-key:... http://127.0.0.1:8384/rest/db/override?folder=default .ft P .fi .UNINDENT .UNINDENT .SS POST /rest/db/prio .sp Moves the file to the top of the download queue. .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C curl \-X POST http://127.0.0.1:8384/rest/db/prio?folder=default&file=foo/bar .ft P .fi .UNINDENT .UNINDENT .sp Response contains the same output as \fBGET /rest/db/need\fP .SS POST /rest/db/revert .sp New in version 0.14.50. .sp Request revert of a receive only folder. Reverting a folder means to undo all local changes. This API call does nothing if the folder is not a receive only folder. .sp Takes the mandatory parameter \fIfolder\fP (folder ID). .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C curl \-X POST \-H X\-API\-Key:... http://127.0.0.1:8384/rest/db/revert?folder=default .ft P .fi .UNINDENT .UNINDENT .SS POST /rest/db/scan .sp Request immediate scan. Takes the optional parameters \fBfolder\fP (folder ID), \fBsub\fP (path relative to the folder root) and \fBnext\fP (time in seconds). If \fBfolder\fP is omitted or empty all folders are scanned. If \fBsub\fP is given, only this path (and children, in case it’s a directory) is scanned. The \fBnext\fP argument delays Syncthing’s automated rescan interval for a given amount of seconds. .sp Requesting scan of a path that no longer exists, but previously did, is valid and will result in Syncthing noticing the deletion of the path in question. .sp Returns status 200 and no content upon success, or status 500 and a plain text error if an error occurred during scanning. 
.INDENT 0.0 .INDENT 3.5 .sp .nf .ft C curl \-X POST http://127.0.0.1:8384/rest/db/scan?folder=default&sub=foo/bar .ft P .fi .UNINDENT .UNINDENT .SS GET /rest/db/status .sp Returns information about the current status of a folder. .sp Parameters: \fBfolder\fP, the ID of a folder. .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C { "globalBytes": 0, "globalDeleted": 0, "globalDirectories": 0, "globalFiles": 0, "globalSymlinks": 0, "globalTotalItems": 0, "ignorePatterns": false, "inSyncBytes": 0, "inSyncFiles": 0, "invalid": "", "localBytes": 0, "localDeleted": 0, "localDirectories": 0, "localFiles": 0, "localSymlinks": 0, "localTotalItems": 0, "needBytes": 0, "needDeletes": 0, "needDirectories": 0, "needFiles": 0, "needSymlinks": 0, "needTotalItems": 0, "pullErrors": 0, "receiveOnlyChangedBytes": 0, "receiveOnlyChangedDeletes": 0, "receiveOnlyChangedDirectories": 0, "receiveOnlyChangedFiles": 0, "receiveOnlyChangedSymlinks": 0, "receiveOnlyTotalItems": 0, "sequence": 0, "state": "idle", "stateChanged": "2018\-08\-08T07:04:57.301064781+02:00", "version": 0 } .ft P .fi .UNINDENT .UNINDENT .sp The various fields have the following meaning: .INDENT 0.0 .TP .B global*: Data in the cluster latest version. .TP .B inSync*: Data that is locally the same as the cluster latest version. .TP .B local*: Data that is locally present, regardless of whether it’s the same or different version as the cluster latest version. .TP .B need*: Data that is needed to become up to date with the cluster latest version (i.e., data that is out of sync). .TP .B receiveOnlyChanged*: Data that has changed locally in a receive only folder, and thus not been sent to the cluster. .TP .B invalid: Deprecated, always empty. .TP .B pullErrors: The number of files that failed to sync during the last sync operations. .TP .B sequence: The current folder sequence number. .TP .B state: The current folder state. .TP .B stateChanged: When the folder state last changed. .TP .B version: Deprecated, equivalent to the sequence number. .UNINDENT .sp \fBNOTE:\fP .INDENT 0.0 .INDENT 3.5 This is an expensive call, increasing CPU and RAM usage on the device. Use sparingly. .UNINDENT .UNINDENT .SH EVENT ENDPOINTS .SS GET /rest/events .sp To receive events, perform a HTTP GET of \fB/rest/events\fP\&. .sp To filter the event list, in effect creating a specific subscription for only the desired event types, add a parameter \fBevents=EventTypeA,EventTypeB,...\fP where the event types are any of the event\-types\&. .sp The optional parameter \fBsince=<lastSeenID>\fP sets the ID of the last event you’ve already seen. Syncthing returns a JSON encoded array of event objects, starting at the event just after the one with this last seen ID. The default value is 0, which returns all events. There is a limit to the number of events buffered, so if the rate of events is high or the time between polling calls is long some events might be missed. This can be detected by noting a discontinuity in the event IDs. .sp If no new events are produced since \fB<lastSeenID>\fP, the HTTP call blocks and waits for new events to happen before returning. By default it times out after 60 seconds returning an empty array. The time out duration can be customized with the optional parameter \fBtimeout=<seconds>\fP\&. .sp To receive only a limited number of events, add the \fBlimit=<n>\fP parameter with a suitable value for \fBn\fP and only the \fIlast\fP \fBn\fP events will be returned. 
This can be used to catch up with the latest event ID after a disconnection for example: \fB/rest/events?since=0&limit=1\fP\&. .SH STATISTICS ENDPOINTS .SS GET /rest/stats/device .sp Returns general statistics about devices. Currently, only contains the time the device was last seen. .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C $ curl \-s http://localhost:8384/rest/stats/device | json { "P56IOI7\-MZJNU2Y\-IQGDREY\-DM2MGTI\-MGL3BXN\-PQ6W5BM\-TBBZ4TJ\-XZWICQ2": { "lastSeen" : "2015\-04\-18T11:21:31.3256277+01:00" } } .ft P .fi .UNINDENT .UNINDENT .SS GET /rest/stats/folder .sp Returns general statistics about folders. Currently contains the last scan time and the last synced file. .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C $ curl \-s http://localhost:8384/rest/stats/folder | json { "folderid" : { "lastScan": "2016\-06\-02T13:28:01.288181412\-04:00", "lastFile" : { "filename" : "file/name", "at" : "2015\-04\-16T22:04:18.3066971+01:00" } } } .ft P .fi .UNINDENT .UNINDENT .SH MISC SERVICES ENDPOINTS .SS GET /rest/svc/deviceid .sp Verifies and formats a device ID. Accepts all currently valid formats (52 or 56 characters with or without separators, upper or lower case, with trivial substitutions). Takes one parameter, \fBid\fP, and returns either a valid device ID in modern format, or an error. .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C $ curl \-s http://localhost:8384/rest/svc/deviceid?id=1234 | json { "error": "device ID invalid: incorrect length" } $ curl \-s http://localhost:8384/rest/svc/deviceid?id=p56ioi7m\-\-zjnu2iq\-gdr\-eydm\-2mgtmgl3bxnpq6w5btbbz4tjxzwicq | json { "id": "P56IOI7\-MZJNU2Y\-IQGDREY\-DM2MGTI\-MGL3BXN\-PQ6W5BM\-TBBZ4TJ\-XZWICQ2" } .ft P .fi .UNINDENT .UNINDENT .SS GET /rest/svc/lang .sp Returns a list of canonicalized localization codes, as picked up from the \fBAccept\-Language\fP header sent by the browser. .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C ["sv_sv","sv","en_us","en"] .ft P .fi .UNINDENT .UNINDENT .SS GET /rest/svc/random/string .sp Returns a strong random generated string (alphanumeric) of the specified length. Takes the \fBlength\fP parameter. .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C { "random": "FdPaEaZQ56sXEKYNxpgF" } .ft P .fi .UNINDENT .UNINDENT .SS GET /rest/svc/report .sp Returns the data sent in the anonymous usage report. .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C { "folderMaxMiB" : 0, "platform" : "linux\-amd64", "totMiB" : 0, "longVersion" : "syncthing v0.12.2 \e"Beryllium Bedbug\e" (go1.4.3 linux\-amd64 default) unknown\[email protected] 2015\-11\-09 13:23:26 UTC", "upgradeAllowedManual" : true, "totFiles" : 3, "folderUses" : { "ignorePerms" : 0, "autoNormalize" : 0, "sendonly" : 0, "ignoreDelete" : 0 }, "memoryUsageMiB" : 13, "version" : "v0.12.2", "sha256Perf" : 27.28, "numFolders" : 2, "memorySize" : 1992, "announce" : { "defaultServersIP" : 0, "otherServers" : 0, "globalEnabled" : false, "defaultServersDNS" : 1, "localEnabled" : false }, "usesRateLimit" : false, "numCPU" : 2, "uniqueID" : "", "urVersion" : 2, "rescanIntvs" : [ 60, 60 ], "numDevices" : 2, "folderMaxFiles" : 3, "relays" : { "defaultServers" : 1, "enabled" : true, "otherServers" : 0 }, "deviceUses" : { "compressMetadata" : 1, "customCertName" : 0, "staticAddr" : 1, "compressAlways" : 0, "compressNever" : 1, "introducer" : 0, "dynamicAddr" : 1 }, "upgradeAllowedAuto" : false } .ft P .fi .UNINDENT .UNINDENT .SH AUTHOR The Syncthing Authors .SH COPYRIGHT 2014-2019, The Syncthing Authors .\" Generated by docutils manpage writer. .
man/syncthing-rest-api.7
0
https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb
[ 0.0001807703956728801, 0.00016776971460785717, 0.00015913085371721536, 0.00016852769476827234, 0.000004126596650166903 ]
{ "id": 9, "code_window": [ "\t\t}\n", "\t\tswitch part {\n", "\t\tcase \"CON\", \"PRN\", \"AUX\", \"NUL\",\n", "\t\t\t\"COM1\", \"COM2\", \"COM3\", \"COM4\", \"COM5\", \"COM6\", \"COM7\", \"COM8\", \"COM9\",\n", "\t\t\t\"LPT1\", \"LPT2\", \"LPT3\", \"LPT4\", \"LPT5\", \"LPT6\", \"LPT7\", \"LPT8\", \"LPT9\":\n", "\t\t\t// These reserved names are not valid.\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tswitch strings.ToUpper(part) {\n" ], "file_path": "lib/fs/util.go", "type": "replace", "edit_start_line_idx": 70 }
// Copyright (C) 2014 The Syncthing Authors. // // This Source Code Form is subject to the terms of the Mozilla Public // License, v. 2.0. If a copy of the MPL was not distributed with this file, // You can obtain one at https://mozilla.org/MPL/2.0/. // +build ignore package main import ( "encoding/json" "log" "os" "path/filepath" "regexp" "strings" "golang.org/x/net/html" ) var trans = make(map[string]string) var attrRe = regexp.MustCompile(`\{\{'([^']+)'\s+\|\s+translate\}\}`) var attrReCond = regexp.MustCompile(`\{\{.+\s+\?\s+'([^']+)'\s+:\s+'([^']+)'\s+\|\s+translate\}\}`) // exceptions to the untranslated text warning var noStringRe = regexp.MustCompile( `^((\W*\{\{.*?\}\} ?.?\/?.?(bps)?\W*)+(\.stignore)?|[^a-zA-Z]+.?[^a-zA-Z]*|[kMGT]?B|Twitter|JS\W?|DEV|https?://\S+)$`) // exceptions to the untranslated text warning specific to aboutModalView.html var aboutRe = regexp.MustCompile(`^([^/]+/[^/]+|(The Go Pro|Font Awesome ).+|Build \{\{.+\}\}|Copyright .+ the Syncthing Authors\.)$`) func generalNode(n *html.Node, filename string) { translate := false if n.Type == html.ElementNode { if n.Data == "translate" { // for <translate>Text</translate> translate = true } else if n.Data == "style" { return } else { for _, a := range n.Attr { if a.Key == "translate" { translate = true } else if a.Key == "id" && (a.Val == "contributor-list" || a.Val == "copyright-notices") { // Don't translate a list of names and // copyright notices of other projects return } else { for _, matches := range attrRe.FindAllStringSubmatch(a.Val, -1) { translation(matches[1]) } for _, matches := range attrReCond.FindAllStringSubmatch(a.Val, -1) { translation(matches[1]) translation(matches[2]) } if a.Key == "data-content" && !noStringRe.MatchString(a.Val) { log.Println("Untranslated data-content string (" + filename + "):") log.Print("\t" + a.Val) } } } } } else if n.Type == html.TextNode { v := strings.TrimSpace(n.Data) if len(v) > 1 && !noStringRe.MatchString(v) && !(filename == "aboutModalView.html" && aboutRe.MatchString(v)) && !(filename == "logbar.html" && (v == "warn" || v == "errors")) { log.Println("Untranslated text node (" + filename + "):") log.Print("\t" + v) } } for c := n.FirstChild; c != nil; c = c.NextSibling { if translate { inTranslate(c, filename) } else { generalNode(c, filename) } } } func inTranslate(n *html.Node, filename string) { if n.Type == html.TextNode { translation(n.Data) } else { log.Println("translate node with non-text child < (" + filename + ")") log.Println(n) } if n.FirstChild != nil { log.Println("translate node has children (" + filename + "):") log.Println(n.Data) } } func translation(v string) { v = strings.TrimSpace(v) if _, ok := trans[v]; !ok { av := strings.Replace(v, "{%", "{{", -1) av = strings.Replace(av, "%}", "}}", -1) trans[v] = av } } func walkerFor(basePath string) filepath.WalkFunc { return func(name string, info os.FileInfo, err error) error { if err != nil { return err } if filepath.Ext(name) == ".html" && info.Mode().IsRegular() { fd, err := os.Open(name) if err != nil { log.Fatal(err) } doc, err := html.Parse(fd) if err != nil { log.Fatal(err) } fd.Close() generalNode(doc, filepath.Base(name)) } return nil } } func main() { fd, err := os.Open(os.Args[1]) if err != nil { log.Fatal(err) } err = json.NewDecoder(fd).Decode(&trans) if err != nil { log.Fatal(err) } fd.Close() var guiDir = os.Args[2] filepath.Walk(guiDir, walkerFor(guiDir)) bs, err := json.MarshalIndent(trans, "", " ") if err != nil { log.Fatal(err) } os.Stdout.Write(bs) os.Stdout.WriteString("\n") }
script/translate.go
0
https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb
[ 0.00022559995704796165, 0.00017301045591011643, 0.0001618413662072271, 0.0001700441789580509, 0.000014444784937950317 ]
{ "id": 10, "code_window": [ "\t\tcase \"CON\", \"PRN\", \"AUX\", \"NUL\",\n", "\t\t\t\"COM1\", \"COM2\", \"COM3\", \"COM4\", \"COM5\", \"COM6\", \"COM7\", \"COM8\", \"COM9\",\n", "\t\t\t\"LPT1\", \"LPT2\", \"LPT3\", \"LPT4\", \"LPT5\", \"LPT6\", \"LPT7\", \"LPT8\", \"LPT9\":\n", "\t\t\t// These reserved names are not valid.\n", "\t\t\treturn true\n", "\t\t}\n", "\t}\n", "\n", "\t// The path must not contain any disallowed characters\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\treturn errInvalidFilenameWindowsReservedName\n" ], "file_path": "lib/fs/util.go", "type": "replace", "edit_start_line_idx": 75 }
// Copyright (C) 2016 The Syncthing Authors. // // This Source Code Form is subject to the terms of the Mozilla Public // License, v. 2.0. If a copy of the MPL was not distributed with this file, // You can obtain one at https://mozilla.org/MPL/2.0/. package fs import ( "context" "errors" "io" "os" "path/filepath" "strings" "time" ) // The Filesystem interface abstracts access to the file system. type Filesystem interface { Chmod(name string, mode FileMode) error Lchown(name string, uid, gid int) error Chtimes(name string, atime time.Time, mtime time.Time) error Create(name string) (File, error) CreateSymlink(target, name string) error DirNames(name string) ([]string, error) Lstat(name string) (FileInfo, error) Mkdir(name string, perm FileMode) error MkdirAll(name string, perm FileMode) error Open(name string) (File, error) OpenFile(name string, flags int, mode FileMode) (File, error) ReadSymlink(name string) (string, error) Remove(name string) error RemoveAll(name string) error Rename(oldname, newname string) error Stat(name string) (FileInfo, error) SymlinksSupported() bool Walk(name string, walkFn WalkFunc) error // If setup fails, returns non-nil error, and if afterwards a fatal (!) // error occurs, sends that error on the channel. Afterwards this watch // can be considered stopped. Watch(path string, ignore Matcher, ctx context.Context, ignorePerms bool) (<-chan Event, <-chan error, error) Hide(name string) error Unhide(name string) error Glob(pattern string) ([]string, error) Roots() ([]string, error) Usage(name string) (Usage, error) Type() FilesystemType URI() string SameFile(fi1, fi2 FileInfo) bool } // The File interface abstracts access to a regular file, being a somewhat // smaller interface than os.File type File interface { io.Closer io.Reader io.ReaderAt io.Seeker io.Writer io.WriterAt Name() string Truncate(size int64) error Stat() (FileInfo, error) Sync() error } // The FileInfo interface is almost the same as os.FileInfo, but with the // Sys method removed (as we don't want to expose whatever is underlying) // and with a couple of convenience methods added. type FileInfo interface { // Standard things present in os.FileInfo Name() string Mode() FileMode Size() int64 ModTime() time.Time IsDir() bool // Extensions IsRegular() bool IsSymlink() bool Owner() int Group() int } // FileMode is similar to os.FileMode type FileMode uint32 func (fm FileMode) String() string { return os.FileMode(fm).String() } // Usage represents filesystem space usage type Usage struct { Free int64 Total int64 } type Matcher interface { ShouldIgnore(name string) bool SkipIgnoredDirs() bool } type MatchResult interface { IsIgnored() bool } type Event struct { Name string Type EventType } type EventType int const ( NonRemove EventType = 1 + iota Remove Mixed // Should probably not be necessary to be used in filesystem interface implementation ) // Merge returns Mixed, except if evType and other are the same and not Mixed. func (evType EventType) Merge(other EventType) EventType { return evType | other } func (evType EventType) String() string { switch { case evType == NonRemove: return "non-remove" case evType == Remove: return "remove" case evType == Mixed: return "mixed" default: panic("bug: Unknown event type") } } var ErrWatchNotSupported = errors.New("watching is not supported") // Equivalents from os package. 
const ModePerm = FileMode(os.ModePerm) const ModeSetgid = FileMode(os.ModeSetgid) const ModeSetuid = FileMode(os.ModeSetuid) const ModeSticky = FileMode(os.ModeSticky) const ModeSymlink = FileMode(os.ModeSymlink) const ModeType = FileMode(os.ModeType) const PathSeparator = os.PathSeparator const OptAppend = os.O_APPEND const OptCreate = os.O_CREATE const OptExclusive = os.O_EXCL const OptReadOnly = os.O_RDONLY const OptReadWrite = os.O_RDWR const OptSync = os.O_SYNC const OptTruncate = os.O_TRUNC const OptWriteOnly = os.O_WRONLY // SkipDir is used as a return value from WalkFuncs to indicate that // the directory named in the call is to be skipped. It is not returned // as an error by any function. var SkipDir = filepath.SkipDir // IsExist is the equivalent of os.IsExist var IsExist = os.IsExist // IsExist is the equivalent of os.ErrExist var ErrExist = os.ErrExist // IsNotExist is the equivalent of os.IsNotExist var IsNotExist = os.IsNotExist // ErrNotExist is the equivalent of os.ErrNotExist var ErrNotExist = os.ErrNotExist // IsPermission is the equivalent of os.IsPermission var IsPermission = os.IsPermission // IsPathSeparator is the equivalent of os.IsPathSeparator var IsPathSeparator = os.IsPathSeparator type Option func(Filesystem) func NewFilesystem(fsType FilesystemType, uri string, opts ...Option) Filesystem { var fs Filesystem switch fsType { case FilesystemTypeBasic: fs = newBasicFilesystem(uri, opts...) case FilesystemTypeFake: fs = newFakeFilesystem(uri, opts...) default: l.Debugln("Unknown filesystem", fsType, uri) fs = &errorFilesystem{ fsType: fsType, uri: uri, err: errors.New("filesystem with type " + fsType.String() + " does not exist."), } } if l.ShouldDebug("walkfs") { return NewWalkFilesystem(&logFilesystem{fs}) } if l.ShouldDebug("fs") { return &logFilesystem{NewWalkFilesystem(fs)} } return NewWalkFilesystem(fs) } // IsInternal returns true if the file, as a path relative to the folder // root, represents an internal file that should always be ignored. The file // path must be clean (i.e., in canonical shortest form). func IsInternal(file string) bool { // fs cannot import config, so we hard code .stfolder here (config.DefaultMarkerName) internals := []string{".stfolder", ".stignore", ".stversions"} for _, internal := range internals { if file == internal { return true } if IsParent(file, internal) { return true } } return false } // Canonicalize checks that the file path is valid and returns it in the "canonical" form: // - /foo/bar -> foo/bar // - / -> "." func Canonicalize(file string) (string, error) { pathSep := string(PathSeparator) if strings.HasPrefix(file, pathSep+pathSep) { // The relative path may pretend to be an absolute path within // the root, but the double path separator on Windows implies // something else and is out of spec. return "", ErrNotRelative } // The relative path should be clean from internal dotdots and similar // funkyness. file = filepath.Clean(file) // It is not acceptable to attempt to traverse upwards. switch file { case "..": return "", ErrNotRelative } if strings.HasPrefix(file, ".."+pathSep) { return "", ErrNotRelative } if strings.HasPrefix(file, pathSep) { if file == pathSep { return ".", nil } return file[1:], nil } return file, nil } // unwrapFilesystem removes "wrapping" filesystems to expose the underlying filesystem. 
func unwrapFilesystem(fs Filesystem) Filesystem { for { switch sfs := fs.(type) { case *logFilesystem: fs = sfs.Filesystem case *walkFilesystem: fs = sfs.Filesystem case *MtimeFS: fs = sfs.Filesystem default: return sfs } } }
lib/fs/filesystem.go
1
https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb
[ 0.0030292777810245752, 0.00046250474406406283, 0.00016207269800361246, 0.00017809403652790934, 0.0006686918204650283 ]
{ "id": 10, "code_window": [ "\t\tcase \"CON\", \"PRN\", \"AUX\", \"NUL\",\n", "\t\t\t\"COM1\", \"COM2\", \"COM3\", \"COM4\", \"COM5\", \"COM6\", \"COM7\", \"COM8\", \"COM9\",\n", "\t\t\t\"LPT1\", \"LPT2\", \"LPT3\", \"LPT4\", \"LPT5\", \"LPT6\", \"LPT7\", \"LPT8\", \"LPT9\":\n", "\t\t\t// These reserved names are not valid.\n", "\t\t\treturn true\n", "\t\t}\n", "\t}\n", "\n", "\t// The path must not contain any disallowed characters\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\treturn errInvalidFilenameWindowsReservedName\n" ], "file_path": "lib/fs/util.go", "type": "replace", "edit_start_line_idx": 75 }
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

//go:generate go run ../../../script/genassets.go -o gui.files.go ../gui

// Package auto contains auto generated files for web assets.
package auto
cmd/strelaypoolsrv/auto/doc.go
0
https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb
[ 0.00017380081408191472, 0.0001709491916699335, 0.00016809756925795227, 0.0001709491916699335, 0.000002851622411981225 ]
{ "id": 10, "code_window": [ "\t\tcase \"CON\", \"PRN\", \"AUX\", \"NUL\",\n", "\t\t\t\"COM1\", \"COM2\", \"COM3\", \"COM4\", \"COM5\", \"COM6\", \"COM7\", \"COM8\", \"COM9\",\n", "\t\t\t\"LPT1\", \"LPT2\", \"LPT3\", \"LPT4\", \"LPT5\", \"LPT6\", \"LPT7\", \"LPT8\", \"LPT9\":\n", "\t\t\t// These reserved names are not valid.\n", "\t\t\treturn true\n", "\t\t}\n", "\t}\n", "\n", "\t// The path must not contain any disallowed characters\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\treturn errInvalidFilenameWindowsReservedName\n" ], "file_path": "lib/fs/util.go", "type": "replace", "edit_start_line_idx": 75 }
-----BEGIN RSA PRIVATE KEY----- MIIG5AIBAAKCAYEArDOcd5ftR7SnalxF1ckU3lDQpgfMIPhFDU//4dvdSSFevrMu VDTbUYhyCfGtg/g+F5TmKhZgE2peYhllITupz5MP7OHGaO2GHf2XnUDD4QUO3E+K VAUw7dyFSwy09esqApVLzH3+ov+QXyyzmRWPsJe9u18BHU1Hob/RmBhS9m2CAJgz N6EJ8KGjApiW3iR8lD/hjVyiIVde8IRD6qYHEJYiPJuziTVcQpCblVYxTz3ScmmT 190/O9UvViIpcOPQdwgOdewPNNMK35c9Edt0AH5flYp6jgrja9NkLQJ3+KOiro6y l9IUS5w87GMxI8qzI8SgCAZZpYSoLbu1FJPvxV4p5eHwuprBCwmFYZWw6Y7rqH0s N52C+3TeObJCMNP9ilPadqRI+G0Q99TCaloeR022x33r/8D8SIn3FP35zrlFM+Dv qlxoS6glbNb/Bj3p9vN0XONORCuynOGe9F/4h/DaNnrbrRWqJOxBsZTsbbcJaKAT fWU/Z9GcC+pUpPRhAgMBAAECggGAL8+Unc/c3Y/W+7zq1tShqqgdhjub/XtxEKUp kngNFITjXWc6cb7LNfQAVap4Vq/R7ZI15XGY80sRMYODhJqgJzXZshdtkyx/lEwY kFyvBgb1fU3IRlO6phAYIiJBDBZi75ysEvbYgEEcwJAUvWgzIQDAeQmDsbMHNG2h r+zw++Kjua6IaeWYcOsv60Safsr6m96wrSMPENrFTVor0TaPt5c3okRIsMvT9ddY mzn3Lt0nVQTjO4f+SoqCPhP2FZXqksfKlZlKlr6BLxXGt6b49OrLSXM5eQXIcIZn ZDRsO24X5z8156qPgM9cA8oNEjuSdnArUTreBOsTwNoSpf24Qadsv/uTZlaHM19V q6zQvkjH3ERcOpixmg48TKdIj8cPYxezvcbNqSbZmdyQuaVlgDbUxwYI8A4IhhWl 6xhwpX3qPDgw/QHIEngFIWfiIfCk11EPY0SN4cGO6f1rLYug8kqxMPuIQ5Jz9Hhx eFSRnr/fWoJcVYG6bMDKn9YWObQBAoHBAM8NahsLbjl8mdT43LH1Od1tDmDch+0Y JM7TgiIN/GM3piZSpGMOFqToLAqvY+Gf3l4sPgNs10cqdPAEpMk8MJ/IXGmbKq38 iVmMaqHTQorCxyUbc54q9AbFU4HKv//F6ZN6K1wSaJt2RBeZpYI+MyBXr5baFiBZ ddXtXlqoEcCFyNR0DhlXrlZPs+cnyM2ZDp++lpn9Wfy+zkv36+NWpAkXVnARjxdF l6M+L7OlurYAWiyJE4uHUjawAM82i5+w8QKBwQDU6RCN6/AMmVrYqPy+7QcnAq67 tPDv25gzVExeMKLBAMoz1TkMS+jIF1NMp3cYg5GbLqvx8Qd27fjFbWe/GPeZvlgL qdQI/T8J60dHAySMeOFOB2QWXhI1kwh0b2X0SDkTgfdJBKGdrKVcLTuLyVE24exu yRc8cXpYwBtVkXNBYFd7XEM+tC4b1khO23OJXHJUen9+hgsmn8/zUjASAoq3+Zly J+OHwwXcDcTFLeok3kX3A9NuqIV/Fa9DOGYlenECgcEAvO1onDTZ5uqjE4nhFyDE JB+WtxuDi/wz2eV1IM3SNlZY7S8LgLciQmb3iOhxIzdVGGkWTNnLtcwv17LlCho5 5BJXAKXtU8TTLzrJMdArL6J7RIi//tsCwAreH9h5SVG1yDP5zJGfkftgNoikVSuc Sy63sdZdyjbXJtTo+5/QUvPARNuA4e73zRn89jd/Kts2VNz7XpemvND+PKOEQnSU SRdab/gVsQ53RyU/MZVPwTKhFXIeu3pGsk/27RzAWn6BAoHBAMIRYwaKDffd/SHJ /v+lHEThvBXa21c26ae36hhc6q1UI/tVGrfrpVZldIdFilgs7RbvVsmksvIj/gMv M0bL4j0gdC7FcUF0XPaUoBbJdZIZSP0P3ZpJyv1MdYN0WxFsl6IBcD79WrdXPC8m B8XmDgIhsppU77onkaa+DOxVNSJdR8BpG95W7ERxcN14SPrm6ku4kOfqFNXzC+C1 hJ2V9Y22lLiqRUplaLzpS/eTX36VoF6E/T87mtt5D5UNHoaA8QKBwH5sRqZXoatU X+vw1MHU5eptMwG7LXR0gw2xmvG3cCN4hbnnBp5YaXlWPiIMmaWhpvschgBIo1TP qGWUpMEETGES18NenLBym+tWIXlfuyZH3B4NUi4kItiZaKb09LzmTjFvzdfQzun4 HzIeigTNBDHdS0rdicNIn83QLZ4pJaOZJHq79+mFYkp+9It7UUoWsws6DGl/qX8o 0cj4NmJB6QiJa1QCzrGkaajbtThbFoQal9Twk2h3jHgJzX3FbwCpLw== -----END RSA PRIVATE KEY-----
test/h1/key.pem
0
https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb
[ 0.004514969885349274, 0.0026121593546122313, 0.0010352910030633211, 0.0024491886142641306, 0.001384867588058114 ]
{ "id": 10, "code_window": [ "\t\tcase \"CON\", \"PRN\", \"AUX\", \"NUL\",\n", "\t\t\t\"COM1\", \"COM2\", \"COM3\", \"COM4\", \"COM5\", \"COM6\", \"COM7\", \"COM8\", \"COM9\",\n", "\t\t\t\"LPT1\", \"LPT2\", \"LPT3\", \"LPT4\", \"LPT5\", \"LPT6\", \"LPT7\", \"LPT8\", \"LPT9\":\n", "\t\t\t// These reserved names are not valid.\n", "\t\t\treturn true\n", "\t\t}\n", "\t}\n", "\n", "\t// The path must not contain any disallowed characters\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\treturn errInvalidFilenameWindowsReservedName\n" ], "file_path": "lib/fs/util.go", "type": "replace", "edit_start_line_idx": 75 }
// Copyright (C) 2019 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package fs

import (
	"syscall"

	"github.com/syncthing/syncthing/lib/sync"
)

var (
	copyRangeMethods = make(map[CopyRangeMethod]copyRangeImplementation)
	mut              = sync.NewMutex()
)

type copyRangeImplementation func(src, dst File, srcOffset, dstOffset, size int64) error

func registerCopyRangeImplementation(copyMethod CopyRangeMethod, impl copyRangeImplementation) {
	mut.Lock()
	defer mut.Unlock()

	l.Debugln("Registering " + copyMethod.String() + " copyRange method")

	copyRangeMethods[copyMethod] = impl
}

// CopyRange tries to use the specified method to copy data between two files.
// Takes size bytes at offset srcOffset from the source file, and copies the data to destination file at offset
// dstOffset. If required, adjusts the size of the destination file to fit that much data.
//
// On Linux/BSD you can ask it to use ioctl and copy_file_range system calls, which if the underlying filesystem supports
// it tries referencing existing data in the source file, instead of making a copy and taking up additional space.
//
// CopyRange does its best to have no effect on src and dst file offsets (copy operation should not affect it).
func CopyRange(copyMethod CopyRangeMethod, src, dst File, srcOffset, dstOffset, size int64) error {
	if impl, ok := copyRangeMethods[copyMethod]; ok {
		return impl(src, dst, srcOffset, dstOffset, size)
	}

	return syscall.ENOTSUP
}
lib/fs/filesystem_copy_range.go
0
https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb
[ 0.00031618267530575395, 0.00019690848421305418, 0.00016402961045969278, 0.0001665931922616437, 0.00005972829967504367 ]
{ "id": 12, "code_window": [ "\t\t\tl.Debugln(f, \"Handling ignored file\", file)\n", "\t\t\tdbUpdateChan <- dbUpdateJob{file, dbUpdateInvalidate}\n", "\n", "\t\tcase runtime.GOOS == \"windows\" && fs.WindowsInvalidFilename(file.Name):\n", "\t\t\tif file.IsDeleted() {\n", "\t\t\t\t// Just pretend we deleted it, no reason to create an error\n", "\t\t\t\t// about a deleted file that we can't have anyway.\n", "\t\t\t\t// Reason we need it in the first place is, that it was\n", "\t\t\t\t// ignored at some point.\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tcase runtime.GOOS == \"windows\" && fs.WindowsInvalidFilename(file.Name) != nil:\n" ], "file_path": "lib/model/folder_sendrecv.go", "type": "replace", "edit_start_line_idx": 339 }
// Copyright (C) 2016 The Syncthing Authors. // // This Source Code Form is subject to the terms of the Mozilla Public // License, v. 2.0. If a copy of the MPL was not distributed with this file, // You can obtain one at https://mozilla.org/MPL/2.0/. package fs import ( "context" "errors" "io" "os" "path/filepath" "strings" "time" ) // The Filesystem interface abstracts access to the file system. type Filesystem interface { Chmod(name string, mode FileMode) error Lchown(name string, uid, gid int) error Chtimes(name string, atime time.Time, mtime time.Time) error Create(name string) (File, error) CreateSymlink(target, name string) error DirNames(name string) ([]string, error) Lstat(name string) (FileInfo, error) Mkdir(name string, perm FileMode) error MkdirAll(name string, perm FileMode) error Open(name string) (File, error) OpenFile(name string, flags int, mode FileMode) (File, error) ReadSymlink(name string) (string, error) Remove(name string) error RemoveAll(name string) error Rename(oldname, newname string) error Stat(name string) (FileInfo, error) SymlinksSupported() bool Walk(name string, walkFn WalkFunc) error // If setup fails, returns non-nil error, and if afterwards a fatal (!) // error occurs, sends that error on the channel. Afterwards this watch // can be considered stopped. Watch(path string, ignore Matcher, ctx context.Context, ignorePerms bool) (<-chan Event, <-chan error, error) Hide(name string) error Unhide(name string) error Glob(pattern string) ([]string, error) Roots() ([]string, error) Usage(name string) (Usage, error) Type() FilesystemType URI() string SameFile(fi1, fi2 FileInfo) bool } // The File interface abstracts access to a regular file, being a somewhat // smaller interface than os.File type File interface { io.Closer io.Reader io.ReaderAt io.Seeker io.Writer io.WriterAt Name() string Truncate(size int64) error Stat() (FileInfo, error) Sync() error } // The FileInfo interface is almost the same as os.FileInfo, but with the // Sys method removed (as we don't want to expose whatever is underlying) // and with a couple of convenience methods added. type FileInfo interface { // Standard things present in os.FileInfo Name() string Mode() FileMode Size() int64 ModTime() time.Time IsDir() bool // Extensions IsRegular() bool IsSymlink() bool Owner() int Group() int } // FileMode is similar to os.FileMode type FileMode uint32 func (fm FileMode) String() string { return os.FileMode(fm).String() } // Usage represents filesystem space usage type Usage struct { Free int64 Total int64 } type Matcher interface { ShouldIgnore(name string) bool SkipIgnoredDirs() bool } type MatchResult interface { IsIgnored() bool } type Event struct { Name string Type EventType } type EventType int const ( NonRemove EventType = 1 + iota Remove Mixed // Should probably not be necessary to be used in filesystem interface implementation ) // Merge returns Mixed, except if evType and other are the same and not Mixed. func (evType EventType) Merge(other EventType) EventType { return evType | other } func (evType EventType) String() string { switch { case evType == NonRemove: return "non-remove" case evType == Remove: return "remove" case evType == Mixed: return "mixed" default: panic("bug: Unknown event type") } } var ErrWatchNotSupported = errors.New("watching is not supported") // Equivalents from os package. 
const ModePerm = FileMode(os.ModePerm) const ModeSetgid = FileMode(os.ModeSetgid) const ModeSetuid = FileMode(os.ModeSetuid) const ModeSticky = FileMode(os.ModeSticky) const ModeSymlink = FileMode(os.ModeSymlink) const ModeType = FileMode(os.ModeType) const PathSeparator = os.PathSeparator const OptAppend = os.O_APPEND const OptCreate = os.O_CREATE const OptExclusive = os.O_EXCL const OptReadOnly = os.O_RDONLY const OptReadWrite = os.O_RDWR const OptSync = os.O_SYNC const OptTruncate = os.O_TRUNC const OptWriteOnly = os.O_WRONLY // SkipDir is used as a return value from WalkFuncs to indicate that // the directory named in the call is to be skipped. It is not returned // as an error by any function. var SkipDir = filepath.SkipDir // IsExist is the equivalent of os.IsExist var IsExist = os.IsExist // IsExist is the equivalent of os.ErrExist var ErrExist = os.ErrExist // IsNotExist is the equivalent of os.IsNotExist var IsNotExist = os.IsNotExist // ErrNotExist is the equivalent of os.ErrNotExist var ErrNotExist = os.ErrNotExist // IsPermission is the equivalent of os.IsPermission var IsPermission = os.IsPermission // IsPathSeparator is the equivalent of os.IsPathSeparator var IsPathSeparator = os.IsPathSeparator type Option func(Filesystem) func NewFilesystem(fsType FilesystemType, uri string, opts ...Option) Filesystem { var fs Filesystem switch fsType { case FilesystemTypeBasic: fs = newBasicFilesystem(uri, opts...) case FilesystemTypeFake: fs = newFakeFilesystem(uri, opts...) default: l.Debugln("Unknown filesystem", fsType, uri) fs = &errorFilesystem{ fsType: fsType, uri: uri, err: errors.New("filesystem with type " + fsType.String() + " does not exist."), } } if l.ShouldDebug("walkfs") { return NewWalkFilesystem(&logFilesystem{fs}) } if l.ShouldDebug("fs") { return &logFilesystem{NewWalkFilesystem(fs)} } return NewWalkFilesystem(fs) } // IsInternal returns true if the file, as a path relative to the folder // root, represents an internal file that should always be ignored. The file // path must be clean (i.e., in canonical shortest form). func IsInternal(file string) bool { // fs cannot import config, so we hard code .stfolder here (config.DefaultMarkerName) internals := []string{".stfolder", ".stignore", ".stversions"} for _, internal := range internals { if file == internal { return true } if IsParent(file, internal) { return true } } return false } // Canonicalize checks that the file path is valid and returns it in the "canonical" form: // - /foo/bar -> foo/bar // - / -> "." func Canonicalize(file string) (string, error) { pathSep := string(PathSeparator) if strings.HasPrefix(file, pathSep+pathSep) { // The relative path may pretend to be an absolute path within // the root, but the double path separator on Windows implies // something else and is out of spec. return "", ErrNotRelative } // The relative path should be clean from internal dotdots and similar // funkyness. file = filepath.Clean(file) // It is not acceptable to attempt to traverse upwards. switch file { case "..": return "", ErrNotRelative } if strings.HasPrefix(file, ".."+pathSep) { return "", ErrNotRelative } if strings.HasPrefix(file, pathSep) { if file == pathSep { return ".", nil } return file[1:], nil } return file, nil } // unwrapFilesystem removes "wrapping" filesystems to expose the underlying filesystem. 
func unwrapFilesystem(fs Filesystem) Filesystem { for { switch sfs := fs.(type) { case *logFilesystem: fs = sfs.Filesystem case *walkFilesystem: fs = sfs.Filesystem case *MtimeFS: fs = sfs.Filesystem default: return sfs } } }
lib/fs/filesystem.go
1
https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb
[ 0.004609911702573299, 0.0004453944566193968, 0.00016164164117071778, 0.0001724577887216583, 0.0008549284539185464 ]
{ "id": 12, "code_window": [ "\t\t\tl.Debugln(f, \"Handling ignored file\", file)\n", "\t\t\tdbUpdateChan <- dbUpdateJob{file, dbUpdateInvalidate}\n", "\n", "\t\tcase runtime.GOOS == \"windows\" && fs.WindowsInvalidFilename(file.Name):\n", "\t\t\tif file.IsDeleted() {\n", "\t\t\t\t// Just pretend we deleted it, no reason to create an error\n", "\t\t\t\t// about a deleted file that we can't have anyway.\n", "\t\t\t\t// Reason we need it in the first place is, that it was\n", "\t\t\t\t// ignored at some point.\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tcase runtime.GOOS == \"windows\" && fs.WindowsInvalidFilename(file.Name) != nil:\n" ], "file_path": "lib/model/folder_sendrecv.go", "type": "replace", "edit_start_line_idx": 339 }
// Copyright (C) 2017 The Syncthing Authors. // // This Source Code Form is subject to the terms of the Mozilla Public // License, v. 2.0. If a copy of the MPL was not distributed with this file, // You can obtain one at https://mozilla.org/MPL/2.0/. package fs import ( "io/ioutil" "os" "path/filepath" "runtime" "sort" "strings" "testing" "time" "github.com/syncthing/syncthing/lib/rand" ) func setup(t *testing.T) (*BasicFilesystem, string) { t.Helper() dir, err := ioutil.TempDir("", "") if err != nil { t.Fatal(err) } return newBasicFilesystem(dir), dir } func TestChmodFile(t *testing.T) { fs, dir := setup(t) path := filepath.Join(dir, "file") defer os.RemoveAll(dir) defer os.Chmod(path, 0666) fd, err := os.Create(path) if err != nil { t.Error(err) } fd.Close() if err := os.Chmod(path, 0666); err != nil { t.Error(err) } if stat, err := os.Stat(path); err != nil || stat.Mode()&os.ModePerm != 0666 { t.Errorf("wrong perm: %t %#o", err == nil, stat.Mode()&os.ModePerm) } if err := fs.Chmod("file", 0444); err != nil { t.Error(err) } if stat, err := os.Stat(path); err != nil || stat.Mode()&os.ModePerm != 0444 { t.Errorf("wrong perm: %t %#o", err == nil, stat.Mode()&os.ModePerm) } } func TestChownFile(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Not supported on Windows") return } if os.Getuid() != 0 { // We are not root. No expectation of being able to chown. Our tests // typically don't run with CAP_FOWNER. t.Skip("Test not possible") return } fs, dir := setup(t) path := filepath.Join(dir, "file") defer os.RemoveAll(dir) defer os.Chmod(path, 0666) fd, err := os.Create(path) if err != nil { t.Error("Unexpected error:", err) } fd.Close() _, err = fs.Lstat("file") if err != nil { t.Error("Unexpected error:", err) } newUID := 1000 + rand.Intn(30000) newGID := 1000 + rand.Intn(30000) if err := fs.Lchown("file", newUID, newGID); err != nil { t.Error("Unexpected error:", err) } info, err := fs.Lstat("file") if err != nil { t.Error("Unexpected error:", err) } if info.Owner() != newUID { t.Errorf("Incorrect owner, expected %d but got %d", newUID, info.Owner()) } if info.Group() != newGID { t.Errorf("Incorrect group, expected %d but got %d", newGID, info.Group()) } } func TestChmodDir(t *testing.T) { fs, dir := setup(t) path := filepath.Join(dir, "dir") defer os.RemoveAll(dir) mode := os.FileMode(0755) if runtime.GOOS == "windows" { mode = os.FileMode(0777) } defer os.Chmod(path, mode) if err := os.Mkdir(path, mode); err != nil { t.Error(err) } if stat, err := os.Stat(path); err != nil || stat.Mode()&os.ModePerm != mode { t.Errorf("wrong perm: %t %#o", err == nil, stat.Mode()&os.ModePerm) } if err := fs.Chmod("dir", 0555); err != nil { t.Error(err) } if stat, err := os.Stat(path); err != nil || stat.Mode()&os.ModePerm != 0555 { t.Errorf("wrong perm: %t %#o", err == nil, stat.Mode()&os.ModePerm) } } func TestChtimes(t *testing.T) { fs, dir := setup(t) path := filepath.Join(dir, "file") defer os.RemoveAll(dir) fd, err := os.Create(path) if err != nil { t.Error(err) } fd.Close() mtime := time.Now().Add(-time.Hour) fs.Chtimes("file", mtime, mtime) stat, err := os.Stat(path) if err != nil { t.Error(err) } diff := stat.ModTime().Sub(mtime) if diff > 3*time.Second || diff < -3*time.Second { t.Errorf("%s != %s", stat.Mode(), mtime) } } func TestCreate(t *testing.T) { fs, dir := setup(t) path := filepath.Join(dir, "file") defer os.RemoveAll(dir) if _, err := os.Stat(path); err == nil { t.Errorf("exists?") } fd, err := fs.Create("file") if err != nil { t.Error(err) } fd.Close() if _, err := os.Stat(path); 
err != nil { t.Error(err) } } func TestCreateSymlink(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("windows not supported") } fs, dir := setup(t) path := filepath.Join(dir, "file") defer os.RemoveAll(dir) if err := fs.CreateSymlink("blah", "file"); err != nil { t.Error(err) } if target, err := os.Readlink(path); err != nil || target != "blah" { t.Error("target", target, "err", err) } if err := os.Remove(path); err != nil { t.Error(err) } if err := fs.CreateSymlink(filepath.Join("..", "blah"), "file"); err != nil { t.Error(err) } if target, err := os.Readlink(path); err != nil || target != filepath.Join("..", "blah") { t.Error("target", target, "err", err) } } func TestDirNames(t *testing.T) { fs, dir := setup(t) defer os.RemoveAll(dir) // Case differences testCases := []string{ "a", "bC", } sort.Strings(testCases) for _, sub := range testCases { if err := os.Mkdir(filepath.Join(dir, sub), 0777); err != nil { t.Error(err) } } if dirs, err := fs.DirNames("."); err != nil || len(dirs) != len(testCases) { t.Errorf("%s %s %s", err, dirs, testCases) } else { sort.Strings(dirs) for i := range dirs { if dirs[i] != testCases[i] { t.Errorf("%s != %s", dirs[i], testCases[i]) } } } } func TestNames(t *testing.T) { // Tests that all names are without the root directory. fs, dir := setup(t) defer os.RemoveAll(dir) expected := "file" fd, err := fs.Create(expected) if err != nil { t.Error(err) } defer fd.Close() if fd.Name() != expected { t.Errorf("incorrect %s != %s", fd.Name(), expected) } if stat, err := fd.Stat(); err != nil || stat.Name() != expected { t.Errorf("incorrect %s != %s (%v)", stat.Name(), expected, err) } if err := fs.Mkdir("dir", 0777); err != nil { t.Error(err) } expected = filepath.Join("dir", "file") fd, err = fs.Create(expected) if err != nil { t.Error(err) } defer fd.Close() if fd.Name() != expected { t.Errorf("incorrect %s != %s", fd.Name(), expected) } // os.fd.Stat() returns just base, so do we. if stat, err := fd.Stat(); err != nil || stat.Name() != filepath.Base(expected) { t.Errorf("incorrect %s != %s (%v)", stat.Name(), filepath.Base(expected), err) } } func TestGlob(t *testing.T) { // Tests that all names are without the root directory. 
fs, dir := setup(t) defer os.RemoveAll(dir) for _, dirToCreate := range []string{ filepath.Join("a", "test", "b"), filepath.Join("a", "best", "b"), filepath.Join("a", "best", "c"), } { if err := fs.MkdirAll(dirToCreate, 0777); err != nil { t.Error(err) } } testCases := []struct { pattern string matches []string }{ { filepath.Join("a", "?est", "?"), []string{ filepath.Join("a", "test", "b"), filepath.Join("a", "best", "b"), filepath.Join("a", "best", "c"), }, }, { filepath.Join("a", "?est", "b"), []string{ filepath.Join("a", "test", "b"), filepath.Join("a", "best", "b"), }, }, { filepath.Join("a", "best", "?"), []string{ filepath.Join("a", "best", "b"), filepath.Join("a", "best", "c"), }, }, } for _, testCase := range testCases { results, err := fs.Glob(testCase.pattern) sort.Strings(results) sort.Strings(testCase.matches) if err != nil { t.Error(err) } if len(results) != len(testCase.matches) { t.Errorf("result count mismatch") } for i := range testCase.matches { if results[i] != testCase.matches[i] { t.Errorf("%s != %s", results[i], testCase.matches[i]) } } } } func TestUsage(t *testing.T) { fs, dir := setup(t) defer os.RemoveAll(dir) usage, err := fs.Usage(".") if err != nil { if runtime.GOOS == "netbsd" || runtime.GOOS == "openbsd" || runtime.GOOS == "solaris" { t.Skip() } t.Errorf("Unexpected error: %s", err) } if usage.Free < 1 { t.Error("Disk is full?", usage.Free) } } func TestRooted(t *testing.T) { type testcase struct { root string rel string joined string ok bool } cases := []testcase{ // Valid cases {"foo", "bar", "foo/bar", true}, {"foo", "/bar", "foo/bar", true}, {"foo/", "bar", "foo/bar", true}, {"foo/", "/bar", "foo/bar", true}, {"baz/foo", "bar", "baz/foo/bar", true}, {"baz/foo", "/bar", "baz/foo/bar", true}, {"baz/foo/", "bar", "baz/foo/bar", true}, {"baz/foo/", "/bar", "baz/foo/bar", true}, {"foo", "bar/baz", "foo/bar/baz", true}, {"foo", "/bar/baz", "foo/bar/baz", true}, {"foo/", "bar/baz", "foo/bar/baz", true}, {"foo/", "/bar/baz", "foo/bar/baz", true}, {"baz/foo", "bar/baz", "baz/foo/bar/baz", true}, {"baz/foo", "/bar/baz", "baz/foo/bar/baz", true}, {"baz/foo/", "bar/baz", "baz/foo/bar/baz", true}, {"baz/foo/", "/bar/baz", "baz/foo/bar/baz", true}, // Not escape attempts, but oddly formatted relative paths. {"foo", "", "foo", true}, {"foo", "/", "foo", true}, {"foo", "/..", "foo", true}, {"foo", "./bar", "foo/bar", true}, {"foo/", "", "foo", true}, {"foo/", "/", "foo", true}, {"foo/", "/..", "foo", true}, {"foo/", "./bar", "foo/bar", true}, {"baz/foo", "./bar", "baz/foo/bar", true}, {"foo", "./bar/baz", "foo/bar/baz", true}, {"baz/foo", "./bar/baz", "baz/foo/bar/baz", true}, {"baz/foo", "bar/../baz", "baz/foo/baz", true}, {"baz/foo", "/bar/../baz", "baz/foo/baz", true}, {"baz/foo", "./bar/../baz", "baz/foo/baz", true}, // Results in an allowed path, but does it by probing. Disallowed. {"foo", "../foo", "", false}, {"foo", "../foo/bar", "", false}, {"baz/foo", "../foo/bar", "", false}, {"baz/foo", "../../baz/foo/bar", "", false}, {"baz/foo", "bar/../../foo/bar", "", false}, {"baz/foo", "bar/../../../baz/foo/bar", "", false}, // Escape attempts. {"foo", "..", "", false}, {"foo", "../", "", false}, {"foo", "../bar", "", false}, {"foo", "../foobar", "", false}, {"foo/", "../bar", "", false}, {"foo/", "../foobar", "", false}, {"baz/foo", "../bar", "", false}, {"baz/foo", "../foobar", "", false}, {"baz/foo/", "../bar", "", false}, {"baz/foo/", "../foobar", "", false}, {"baz/foo/", "bar/../../quux/baz", "", false}, // Empty root is a misconfiguration. 
{"", "/foo", "", false}, {"", "foo", "", false}, {"", ".", "", false}, {"", "..", "", false}, {"", "/", "", false}, {"", "", "", false}, // Root=/ is valid, and things should be verified as usual. {"/", "foo", "/foo", true}, {"/", "/foo", "/foo", true}, {"/", "../foo", "", false}, {"/", "..", "", false}, {"/", "/", "/", true}, {"/", "", "/", true}, // special case for filesystems to be able to MkdirAll('.') for example {"/", ".", "/", true}, } if runtime.GOOS == "windows" { extraCases := []testcase{ {`c:\`, `foo`, `c:\foo`, true}, {`\\?\c:\`, `foo`, `\\?\c:\foo`, true}, {`c:\`, `\foo`, `c:\foo`, true}, {`\\?\c:\`, `\foo`, `\\?\c:\foo`, true}, {`c:\`, `\\foo`, ``, false}, {`c:\`, ``, `c:\`, true}, {`c:\`, `\`, `c:\`, true}, {`\\?\c:\`, `\\foo`, ``, false}, {`\\?\c:\`, ``, `\\?\c:\`, true}, {`\\?\c:\`, `\`, `\\?\c:\`, true}, {`\\?\c:\test`, `.`, `\\?\c:\test`, true}, {`c:\test`, `.`, `c:\test`, true}, {`\\?\c:\test`, `/`, `\\?\c:\test`, true}, {`c:\test`, ``, `c:\test`, true}, // makes no sense, but will be treated simply as a bad filename {`c:\foo`, `d:\bar`, `c:\foo\d:\bar`, true}, // special case for filesystems to be able to MkdirAll('.') for example {`c:\`, `.`, `c:\`, true}, {`\\?\c:\`, `.`, `\\?\c:\`, true}, } for _, tc := range cases { // Add case where root is backslashed, rel is forward slashed extraCases = append(extraCases, testcase{ root: filepath.FromSlash(tc.root), rel: tc.rel, joined: tc.joined, ok: tc.ok, }) // and the opposite extraCases = append(extraCases, testcase{ root: tc.root, rel: filepath.FromSlash(tc.rel), joined: tc.joined, ok: tc.ok, }) // and both backslashed extraCases = append(extraCases, testcase{ root: filepath.FromSlash(tc.root), rel: filepath.FromSlash(tc.rel), joined: tc.joined, ok: tc.ok, }) } cases = append(cases, extraCases...) 
} for _, tc := range cases { fs := BasicFilesystem{root: tc.root} res, err := fs.rooted(tc.rel) if tc.ok { if err != nil { t.Errorf("Unexpected error for rooted(%q, %q): %v", tc.root, tc.rel, err) continue } exp := filepath.FromSlash(tc.joined) if res != exp { t.Errorf("Unexpected result for rooted(%q, %q): %q != expected %q", tc.root, tc.rel, res, exp) } } else if err == nil { t.Errorf("Unexpected pass for rooted(%q, %q) => %q", tc.root, tc.rel, res) continue } } } func TestNewBasicFilesystem(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("non-windows root paths") } currentDir, err := filepath.Abs(".") if err != nil { t.Fatal(err) } testCases := []struct { input string expectedRoot string expectedURI string }{ {"/foo/bar/baz", "/foo/bar/baz", "/foo/bar/baz"}, {"/foo/bar/baz/", "/foo/bar/baz", "/foo/bar/baz"}, {"", currentDir, currentDir}, {".", currentDir, currentDir}, {"/", "/", "/"}, } for _, testCase := range testCases { fs := newBasicFilesystem(testCase.input) if fs.root != testCase.expectedRoot { t.Errorf("root %q != %q", fs.root, testCase.expectedRoot) } if fs.URI() != testCase.expectedURI { t.Errorf("uri %q != %q", fs.URI(), testCase.expectedURI) } } fs := newBasicFilesystem("relative/path") if fs.root == "relative/path" || !strings.HasPrefix(fs.root, string(PathSeparator)) { t.Errorf(`newBasicFilesystem("relative/path").root == %q, expected absolutification`, fs.root) } } func TestRel(t *testing.T) { testCases := []struct { root string abs string expectedRel string }{ {"/", "/", ""}, {"/", "/test", "test"}, {"/", "/Test", "Test"}, {"/Test", "/Test/test", "test"}, } if runtime.GOOS == "windows" { for i := range testCases { testCases[i].root = filepath.FromSlash(testCases[i].root) testCases[i].abs = filepath.FromSlash(testCases[i].abs) testCases[i].expectedRel = filepath.FromSlash(testCases[i].expectedRel) } } for _, tc := range testCases { if res := rel(tc.abs, tc.root); res != tc.expectedRel { t.Errorf(`rel("%v", "%v") == "%v", expected "%v"`, tc.abs, tc.root, res, tc.expectedRel) } } } func TestBasicWalkSkipSymlink(t *testing.T) { _, dir := setup(t) defer os.RemoveAll(dir) testWalkSkipSymlink(t, FilesystemTypeBasic, dir) } func TestWalkTraverseDirJunct(t *testing.T) { _, dir := setup(t) defer os.RemoveAll(dir) testWalkTraverseDirJunct(t, FilesystemTypeBasic, dir) } func TestWalkInfiniteRecursion(t *testing.T) { _, dir := setup(t) defer os.RemoveAll(dir) testWalkInfiniteRecursion(t, FilesystemTypeBasic, dir) }
lib/fs/basicfs_test.go
0
https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb
[ 0.0026471863966435194, 0.0004927269765175879, 0.0001593609049450606, 0.00017154833767563105, 0.0005932124331593513 ]
{ "id": 12, "code_window": [ "\t\t\tl.Debugln(f, \"Handling ignored file\", file)\n", "\t\t\tdbUpdateChan <- dbUpdateJob{file, dbUpdateInvalidate}\n", "\n", "\t\tcase runtime.GOOS == \"windows\" && fs.WindowsInvalidFilename(file.Name):\n", "\t\t\tif file.IsDeleted() {\n", "\t\t\t\t// Just pretend we deleted it, no reason to create an error\n", "\t\t\t\t// about a deleted file that we can't have anyway.\n", "\t\t\t\t// Reason we need it in the first place is, that it was\n", "\t\t\t\t// ignored at some point.\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tcase runtime.GOOS == \"windows\" && fs.WindowsInvalidFilename(file.Name) != nil:\n" ], "file_path": "lib/model/folder_sendrecv.go", "type": "replace", "edit_start_line_idx": 339 }
{ "A device with that ID is already added.": "Já foi adicionado um dispositivo com esse ID anteriormente.", "A negative number of days doesn't make sense.": "Um número negativo de dias não faz sentido.", "A new major version may not be compatible with previous versions.": "Uma nova versão principal pode não ser compatível com versões anteriores.", "API Key": "Chave da API", "About": "Acerca da aplicação", "Action": "Acção", "Actions": "Acções", "Add": "Adicionar", "Add Device": "Adicionar dispositivo", "Add Folder": "Adicionar pasta", "Add Remote Device": "Adicionar dispositivo remoto", "Add devices from the introducer to our device list, for mutually shared folders.": "Adicione dispositivos do apresentador à nossa lista de dispositivos para ter pastas mutuamente partilhadas.", "Add new folder?": "Adicionar nova pasta?", "Additionally the full rescan interval will be increased (times 60, i.e. new default of 1h). You can also configure it manually for every folder later after choosing No.": "Para além disso o intervalo entre verificações completas irá ser aumentado (vezes 60, ou seja, um novo valor predefinido de 1h). Também o pode configurar manualmente para cada pasta, posteriormente, depois de seleccionar Não.", "Address": "Endereço", "Addresses": "Endereços", "Advanced": "Avançadas", "Advanced Configuration": "Configuração avançada", "All Data": "Todos os dados", "Allow Anonymous Usage Reporting?": "Permitir envio de relatórios anónimos de utilização?", "Allowed Networks": "Redes permitidas", "Alphabetic": "Alfabética", "An external command handles the versioning. It has to remove the file from the shared folder. If the path to the application contains spaces, it should be quoted.": "Um comando externo controla as versões. Esse comando tem que remover o ficheiro da pasta partilhada. Se o caminho para a aplicação contiver espaços, então terá de o escrever entre aspas.", "Anonymous Usage Reporting": "Enviar relatórios anónimos de utilização", "Anonymous usage report format has changed. Would you like to move to the new format?": "O formato do relatório anónimo de utilização foi alterado. 
Gostaria de mudar para o novo formato?", "Are you sure you want to remove device {%name%}?": "Tem a certeza que quer remover o dispositivo {{name}}?", "Are you sure you want to remove folder {%label%}?": "Tem a certeza que quer remover a pasta {{label}}?", "Are you sure you want to restore {%count%} files?": "Tem a certeza que quer restaurar {{count}} ficheiros?", "Are you sure you want to upgrade?": "Tem a certeza que quer actualizar?", "Auto Accept": "Aceitar automaticamente", "Automatic Crash Reporting": "Relatório Automático de Estouro", "Automatic upgrade now offers the choice between stable releases and release candidates.": "A actualização automática agora oferece a escolha entre versões estáveis e candidatas a lançamento.", "Automatic upgrades": "Actualizações automáticas", "Automatic upgrades are always enabled for candidate releases.": "As actualizações automáticas estão sempre activadas nas versões candidatas a lançamento.", "Automatically create or share folders that this device advertises at the default path.": "Criar ou partilhar, de forma automática e no caminho predefinido, pastas que este dispositivo publicita.", "Available debug logging facilities:": "Recursos de registo de depuração disponíveis:", "Be careful!": "Tenha cuidado!", "Bugs": "Erros", "Changelog": "Registo de alterações", "Clean out after": "Esvaziar ao fim de", "Cleaning Versions": "Limpar versões", "Cleanup Interval": "Intervalo entre limpezas", "Click to see discovery failures": "Clique para ver as falhas da pesquisa", "Close": "Fechar", "Command": "Comando", "Comment, when used at the start of a line": "Comentário, quando usado no início de uma linha", "Compression": "Compressão", "Configured": "Configurado", "Connected (Unused)": "Conectado (não usado)", "Connection Error": "Erro de ligação", "Connection Type": "Tipo de ligação", "Connections": "Ligações", "Continuously watching for changes is now available within Syncthing. This will detect changes on disk and issue a scan on only the modified paths. The benefits are that changes are propagated quicker and that less full scans are required.": "A vigilância de alterações contínua está agora disponível dentro do Syncthing. Este sistema irá detectar alterações no disco e efectuar uma verificação apenas nas pastas modificadas. Os benefícios são que as alterações são propagadas mais depressa e são necessárias menos verificações completas.", "Copied from elsewhere": "Copiado doutro sítio", "Copied from original": "Copiado do original", "Copyright © 2014-2019 the following Contributors:": "Copyright © 2014-2019 dos seguintes contribuidores:", "Creating ignore patterns, overwriting an existing file at {%path%}.": "Criando padrões de exclusão, sobrescrevendo um ficheiro existente em {{path}}.", "Currently Shared With Devices": "Dispositivos com os quais está partilhada", "Danger!": "Perigo!", "Debugging Facilities": "Recursos de depuração", "Default Folder Path": "Caminho da pasta predefinida", "Deleted": "Eliminado", "Deselect All": "Retirar todas as selecções", "Deselect devices to stop sharing this folder with.": "Retire a selecção para deixar de partilhar a pasta com esses dispositivos.", "Device": "Dispositivo", "Device \"{%name%}\" ({%device%} at {%address%}) wants to connect. Add new device?": "O dispositivo \"{{name}}\" ({{device}} em {{address}}) quer conectar-se. 
Adiciono este novo dispositivo?", "Device ID": "ID do dispositivo", "Device Identification": "Identificação do dispositivo", "Device Name": "Nome do dispositivo", "Device rate limits": "Limites de velocidade do dispositivo", "Device that last modified the item": "Último dispositivo a modificar o item", "Devices": "Dispositivos", "Disable Crash Reporting": "Desactivar Relatório de Estouro", "Disabled": "Desactivada", "Disabled periodic scanning and disabled watching for changes": "Desactivada a verificação periódica e desactivada a vigilância de alterações", "Disabled periodic scanning and enabled watching for changes": "Desactivada a verificação periódica e desactivada a vigilância de alterações", "Disabled periodic scanning and failed setting up watching for changes, retrying every 1m:": "Desactivada a verificação periódica e falha ao preparar a vigilância de alterações, tentando novamente a cada minuto:", "Disables comparing and syncing file permissions. Useful on systems with nonexistent or custom permissions (e.g. FAT, exFAT, Synology, Android).": "Desactiva a comparação e a sincronização das permissões dos ficheiros. É útil em sistemas onde as permissões não existem ou são personalizadas (ex.: FAT, exFAT, Synology, Android).", "Discard": "Descartar", "Disconnected": "Desconectado", "Disconnected (Unused)": "Desconectado (não usado)", "Discovered": "Descoberto", "Discovery": "Pesquisa", "Discovery Failures": "Falhas da pesquisa", "Do not restore": "Não restaurar", "Do not restore all": "Não restaurar nenhum", "Do you want to enable watching for changes for all your folders?": "Quer activar a vigilância de alterações para todas as suas pastas?", "Documentation": "Documentação", "Download Rate": "Velocidade de recepção", "Downloaded": "Recebido", "Downloading": "Recebendo", "Edit": "Editar", "Edit Device": "Editar dispositivo", "Edit Folder": "Editar pasta", "Editing {%path%}.": "Editando {{path}}.", "Enable Crash Reporting": "Activar Relatório de Estouro", "Enable NAT traversal": "Activar travessia de NAT", "Enable Relaying": "Permitir retransmissão", "Enabled": "Activada", "Enter a non-negative number (e.g., \"2.35\") and select a unit. Percentages are as part of the total disk size.": "Escreva um número positivo (ex.: \"2.35\") e seleccione uma unidade. 
Percentagens são relativas ao tamanho total do disco.", "Enter a non-privileged port number (1024 - 65535).": "Escreva um número de porto não-privilegiado (1024-65535).", "Enter comma separated (\"tcp://ip:port\", \"tcp://host:port\") addresses or \"dynamic\" to perform automatic discovery of the address.": "Introduza endereços separados por vírgulas (\"tcp://ip:porto\", \"tcp://máquina:porto\") ou \"dynamic\" para detectar automaticamente os endereços.", "Enter ignore patterns, one per line.": "Escreva os padrões de exclusão, um por linha.", "Enter up to three octal digits.": "Insira de um a três dígitos em octal.", "Error": "Erro", "External File Versioning": "Externa", "Failed Items": "Itens que falharam", "Failed to setup, retrying": "A preparação falhou, tentando novamente", "Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "São esperadas falhas na ligação a servidores IPv6 se não existir conectividade IPv6.", "File Pull Order": "Ordem de obtenção de ficheiros", "File Versioning": "Gestão de versões de ficheiros", "Files are moved to .stversions directory when replaced or deleted by Syncthing.": "Os ficheiros são movidos para a pasta .stversions ao serem substituídos ou eliminados pelo Syncthing.", "Files are moved to date stamped versions in a .stversions directory when replaced or deleted by Syncthing.": "Os ficheiros são movidos para versões marcadas com data e hora numa pasta .stversions, ao serem substituídos ou eliminados pelo Syncthing.", "Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Os ficheiros estão protegidos contra alterações feitas noutros dispositivos, mas alterações feitas neste dispositivo serão enviadas ao resto do grupo.", "Files are synchronized from the cluster, but any changes made locally will not be sent to other devices.": "Os ficheiros são sincronizados a partir do agrupamento, mas as alterações feitas localmente não serão enviadas para outros dispositivos.", "Filesystem Watcher Errors": "Erros na vigilância do sistema de ficheiros", "Filter by date": "Filtrar por data", "Filter by name": "Filtrar por nome", "Folder": "Pasta", "Folder ID": "ID da pasta", "Folder Label": "Etiqueta da pasta", "Folder Path": "Caminho da pasta", "Folder Type": "Tipo de pasta", "Folders": "Pastas", "For the following folders an error occurred while starting to watch for changes. It will be retried every minute, so the errors might go away soon. If they persist, try to fix the underlying issue and ask for help if you can't.": "Nas pastas seguintes ocorreu um erro durante o arranque da vigilância de alterações. Será tentado novamente a cada minuto, por isso os erros poderão desaparecer brevemente. 
Se persistirem, tente resolver o erro subjacente e, caso não consiga, peça ajuda.", "Full Rescan Interval (s)": "Intervalo entre verificações completas (s)", "GUI": "Interface gráfica", "GUI Authentication Password": "Senha da autenticação na interface gráfica", "GUI Authentication User": "Utilizador da autenticação na interface gráfica", "GUI Authentication: Set User and Password": "Autenticação na interface gráfica: Definir utilizador e senha", "GUI Listen Address": "Endereço de escuta da interface gráfica", "GUI Theme": "Tema gráfico", "General": "Geral", "Generate": "Gerar", "Global Discovery": "Pesquisa global", "Global Discovery Servers": "Servidores de pesquisa global", "Global State": "Estado global", "Help": "Ajuda", "Home page": "Página do projecto", "However, your current settings indicate you might not want it enabled. We have disabled automatic crash reporting for you.": "Contudo, a sua configuração actual indica que pode não a querer activada. Nós desactivámos automaticamente o relatório de estouro para si.", "If you want to prevent other users on this computer from accessing Syncthing and through it your files, consider setting up authentication.": "Se quiser evitar que outros utilizadores neste computador acedam ao Syncthing e, através dele, aos seus ficheiros, considere configurar a autenticação.", "Ignore": "Ignorar", "Ignore Patterns": "Padrões de exclusão", "Ignore Permissions": "Ignorar permissões", "Ignored Devices": "Dispositivos ignorados", "Ignored Folders": "Pastas ignoradas", "Ignored at": "Ignorado em", "Incoming Rate Limit (KiB/s)": "Limite de velocidade de recepção (KiB/s)", "Incorrect configuration may damage your folder contents and render Syncthing inoperable.": "Uma configuração incorrecta pode danificar o conteúdo da pasta e tornar o Syncthing inoperacional.", "Introduced By": "Introduzido por", "Introducer": "Apresentador", "Inversion of the given condition (i.e. do not exclude)": "Inversão de uma dada condição (ou seja, não excluir)", "Keep Versions": "Manter versões", "LDAP": "LDAP", "Largest First": "Primeiro os maiores", "Last Scan": "Última verificação", "Last seen": "Última vez que foi verificado", "Latest Change": "Última alteração", "Learn more": "Saiba mais", "Limit": "Limite", "Listeners": "Auscultadores", "Loading data...": "Carregando dados...", "Loading...": "Carregando...", "Local Additions": "Adições locais", "Local Discovery": "Pesquisa local", "Local State": "Estado local", "Local State (Total)": "Estado local (total)", "Locally Changed Items": "Itens alterados localmente", "Log": "Registo", "Log tailing paused. Scroll to the bottom to continue.": "O acompanhamento do final do registo está em pausa. Desloque para o final para continuar.", "Logs": "Registos", "Major Upgrade": "Actualização importante", "Mass actions": "Acções em massa", "Maximum Age": "Idade máxima", "Metadata Only": "Metadados apenas", "Minimum Free Disk Space": "Espaço livre mínimo no disco", "Mod. Device": "Dispositivo mod.", "Mod. 
Time": "Quando foi mod.", "Move to top of queue": "Mover para o topo da fila", "Multi level wildcard (matches multiple directory levels)": "Símbolo polivalente multi-nível (faz corresponder a vários níveis de pastas)", "Never": "Nunca", "New Device": "Novo dispositivo", "New Folder": "Nova pasta", "Newest First": "Primeiro os mais recentes", "No": "Não", "No File Versioning": "Nenhuma", "No files will be deleted as a result of this operation.": "Nenhum ficheiro será eliminado como resultado desta operação.", "No upgrades": "Sem actualizações", "Notice": "Avisos", "OK": "OK", "Off": "Desligada", "Oldest First": "Primeiro os mais antigos", "Optional descriptive label for the folder. Can be different on each device.": "Etiqueta descritiva opcional para a pasta. Pode ser diferente em cada dispositivo.", "Options": "Opções", "Out of Sync": "Fora de sincronia", "Out of Sync Items": "Itens por sincronizar", "Outgoing Rate Limit (KiB/s)": "Limite da velocidade de envio (KiB/s)", "Override Changes": "Sobrepor alterações", "Path": "Caminho", "Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Caminho para a pasta no computador local. Será criada, caso não exista. O til (~) pode ser utilizado como atalho para", "Path where new auto accepted folders will be created, as well as the default suggested path when adding new folders via the UI. Tilde character (~) expands to {%tilde%}.": "Caminho no qual as novas pastas aceites automaticamente serão criadas, assim como o caminho predefinido sugerido ao adicionar novas pastas através da interface do utilizador. O til (~) expande para {{tilde}}.", "Path where versions should be stored (leave empty for the default .stversions directory in the shared folder).": "Caminho da pasta onde as versões deverão ser guardadas (deixe vazio para ficar a pasta predefinida .stversions dentro da pasta partilhada).", "Pause": "Pausar", "Pause All": "Pausar todas", "Paused": "Em pausa", "Paused (Unused)": "Em pausa (não usado)", "Pending changes": "Alterações pendentes", "Periodic scanning at given interval and disabled watching for changes": "Verificação periódica no intervalo dado e desactivada a vigilância de alterações", "Periodic scanning at given interval and enabled watching for changes": "Verificação periódica no intervalo dado e activada a vigilância de alterações", "Periodic scanning at given interval and failed setting up watching for changes, retrying every 1m:": "Verificação periódica no intervalo dado e falha ao preparar a vigilância de alterações, tentando novamente a cada minuto:", "Permissions": "Permissões", "Please consult the release notes before performing a major upgrade.": "Consulte as notas de lançamento antes de fazer uma actualização importante.", "Please set a GUI Authentication User and Password in the Settings dialog.": "Por favor, defina um utilizador e senha de autenticação para a interface gráfica, nas configurações.", "Please wait": "Aguarde", "Prefix indicating that the file can be deleted if preventing directory removal": "Prefixo para indicar que o ficheiro pode ser eliminado se estiver a impedir a eliminação da pasta", "Prefix indicating that the pattern should be matched without case sensitivity": "Prefixo para indicar que o padrão não diferencia entre maiúsculas e minúsculas", "Preparing to Sync": "Preparando para sincronizar", "Preview": "Previsão", "Preview Usage Report": "Pré-visualizar relatório de utilização", "Quick guide to supported patterns": 
"Guia rápido dos padrões suportados", "Random": "Aleatória", "Receive Only": "Recebe apenas", "Recent Changes": "Alterações recentes", "Reduced by ignore patterns": "Reduzido por padrões de exclusão", "Release Notes": "Notas de lançamento", "Release candidates contain the latest features and fixes. They are similar to the traditional bi-weekly Syncthing releases.": "Versões candidatas a lançamento contêm as funcionalidades e as correcções mais recentes. São semelhantes aos tradicionais lançamentos bi-semanais do Syncthing.", "Remote Devices": "Dispositivos remotos", "Remove": "Remover", "Remove Device": "Remover dispositivo", "Remove Folder": "Remover pasta", "Required identifier for the folder. Must be the same on all cluster devices.": "Identificador obrigatório para a pasta. Tem que ser igual em todos os dispositivos do grupo.", "Rescan": "Verificar agora", "Rescan All": "Verificar todas agora", "Rescans": "Verificações", "Restart": "Reiniciar", "Restart Needed": "É preciso reiniciar", "Restarting": "Reiniciando", "Restore": "Restaurar", "Restore Versions": "Restaurar versões", "Resume": "Retomar", "Resume All": "Retomar todas", "Reused": "Reutilizado", "Revert Local Changes": "Reverter alterações locais", "Save": "Gravar", "Scan Time Remaining": "Tempo restante da verificação", "Scanning": "Verificação de alterações", "See external versioning help for supported templated command line parameters.": "Veja a ajuda externa sobre gestão de versões para ver os modelos suportados de parâmetros para a linha de comandos.", "Select All": "Seleccionar tudo", "Select a version": "Seleccione uma versão", "Select additional devices to share this folder with.": "Seleccione outros dispositivos com os quais também pretende partilhar a pasta.", "Select latest version": "Seleccionar a última versão", "Select oldest version": "Seleccionar a versão mais antiga", "Select the folders to share with this device.": "Seleccione as pastas a partilhar com este dispositivo.", "Send & Receive": "Envia e recebe", "Send Only": "Envia apenas", "Settings": "Configurações", "Share": "Partilhar", "Share Folder": "Partilhar pasta", "Share Folders With Device": "Partilhar pastas com dispositivo", "Share this folder?": "Partilhar esta pasta?", "Shared With": "Partilhada com", "Sharing": "Partilha", "Show ID": "Mostrar ID", "Show QR": "Mostrar QR", "Show diff with previous version": "Mostrar diferenças em relação à versão anterior", "Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.": "Apresentado ao invés do ID do dispositivo no indicador de estado do grupo. Será divulgado aos outros dispositivos como um nome predefinido opcional.", "Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Apresentado ao invés do ID do dispositivo no indicador de estado do grupo. 
Será actualizado para o nome que o dispositivo divulga, se for deixado em branco.", "Shutdown": "Desligar", "Shutdown Complete": "Encerramento completado", "Simple File Versioning": "Simples", "Single level wildcard (matches within a directory only)": "Símbolo polivalente de um só nível (faz corresponder apenas dentro de uma pasta)", "Size": "Tamanho", "Smallest First": "Primeiro os menores", "Some items could not be restored:": "Não foi possível restaurar alguns dos itens:", "Source Code": "Código fonte", "Stable releases and release candidates": "Versões estáveis e versões candidatas a lançamento", "Stable releases are delayed by about two weeks. During this time they go through testing as release candidates.": "Versões estáveis são adiadas por cerca de duas semanas. Durante esse período são submetidas a testes sob a forma de versões candidatas a lançamento.", "Stable releases only": "Somente versões estáveis", "Staggered File Versioning": "Escalonada", "Start Browser": "Iniciar navegador", "Statistics": "Estatísticas", "Stopped": "Parado", "Support": "Suporte", "Support Bundle": "Pacote de suporte", "Sync Protocol Listen Addresses": "Endereços de escuta do protocolo de sincronização", "Syncing": "A Sincronizar", "Syncthing has been shut down.": "O Syncthing foi desligado.", "Syncthing includes the following software or portions thereof:": "O Syncthing inclui as seguintes aplicações ou partes delas:", "Syncthing is Free and Open Source Software licensed as MPL v2.0.": "Syncthing é Software Livre e de Código Aberto licenciado como MPL v2.0.", "Syncthing is restarting.": "O Syncthing está a reiniciar.", "Syncthing is upgrading.": "O Syncthing está a actualizar-se.", "Syncthing now supports automatically reporting crashes to the developers. This feature is enabled by default.": "O Syncthing agora suporta o envio automático de relatórios de estouro para os programadores. Esta funcionalidade vem inicialmente activada.", "Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "O Syncthing parece estar em baixo, ou então existe um problema com a sua ligação à Internet. Tentando novamente...", "Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.": "O Syncthing parece estar com problemas em processar o seu pedido. Tente recarregar a página ou reiniciar o Syncthing, se o problema persistir.", "Take me back": "Voltar atrás", "The GUI address is overridden by startup options. Changes here will not take effect while the override is in place.": "O endereço da interface gráfica é substituído pelas opções de arranque. Alterações feitas aqui não terão efeito enquanto a substituição estiver activa.", "The Syncthing Authors": "Os autores do Syncthing", "The Syncthing admin interface is configured to allow remote access without a password.": "A interface de administração do Syncthing está configurada para permitir o acesso remoto sem pedir senha.", "The aggregated statistics are publicly available at the URL below.": "As estatísticas agregadas estão publicamente disponíveis no URL abaixo.", "The cleanup interval cannot be blank.": "O intervalo entre limpezas não pode estar vazio.", "The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "A configuração foi gravada mas não activada. 
O Syncthing tem que reiniciar para activar a nova configuração.", "The device ID cannot be blank.": "O ID do dispositivo não pode estar vazio.", "The device ID to enter here can be found in the \"Actions > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "O ID do dispositivo a colocar aqui pode ser obtido no menu \"Acções > Mostrar ID\" do outro dispositivo. Espaços e hífenes são opcionais (ignorados).", "The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "O relatório de utilização cifrado é enviado diariamente. É utilizado para rastrear plataformas comuns, tamanhos de pastas e versões da aplicação. Se o tipo de dados do relatório for alterado, será notificado novamente através desta janela.", "The entered device ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "O ID do dispositivo fornecido não parece ser válido. Deveria ter 52 ou 56 caracteres constituídos por letras e números, com espaços e hífenes opcionais.", "The folder ID cannot be blank.": "O ID da pasta não pode estar vazio.", "The folder ID must be unique.": "O ID da pasta tem que ser único.", "The folder path cannot be blank.": "O caminho da pasta não pode estar vazio.", "The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "São utilizados os seguintes intervalos: na primeira hora é guardada uma versão a cada 30 segundos, no primeiro dia é guardada uma versão a cada hora, nos primeiros 30 dias é guardada uma versão por dia e, até que atinja a idade máxima, é guardada uma versão por semana.", "The following items could not be synchronized.": "Não foi possível sincronizar os elementos seguintes.", "The following items were changed locally.": "Os itens seguintes foram alterados localmente.", "The interval must be a positive number of seconds.": "O intervalo tem que ser um número positivo de segundos.", "The interval, in seconds, for running cleanup in the versions directory. Zero to disable periodic cleaning.": "O intervalo, em segundos, para executar limpezas na pasta das versões. Coloque zero para desactivar a limpeza periódica.", "The maximum age must be a number and cannot be blank.": "A idade máxima tem que ser um número e não pode estar vazia.", "The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Tempo máximo, em dias, para manter uma versão (use 0 para manter a versão para sempre).", "The number of days must be a number and cannot be blank.": "O número de dias tem que ser um número e não pode estar em branco.", "The number of days to keep files in the trash can. Zero means forever.": "O número de dias a manter os ficheiros na reciclagem. 
Zero significa para sempre.", "The number of old versions to keep, per file.": "O número de versões antigas a manter, por ficheiro.", "The number of versions must be a number and cannot be blank.": "O número de versões tem que ser um número e não pode estar vazio.", "The path cannot be blank.": "O caminho não pode estar vazio.", "The rate limit must be a non-negative number (0: no limit)": "O limite de velocidade tem que ser um número que não seja negativo (0: sem limite)", "The rescan interval must be a non-negative number of seconds.": "O intervalo entre verificações tem que ser um valor não negativo de segundos.", "There are no devices to share this folder with.": "Não existem quaisquer dispositivos com os quais se possa partilhar esta pasta.", "They are retried automatically and will be synced when the error is resolved.": "Será tentado automaticamente e os itens serão sincronizados assim que o erro seja resolvido.", "This Device": "Este dispositivo", "This can easily give hackers access to read and change any files on your computer.": "Isso facilmente dará acesso aos piratas informáticos para lerem e modificarem quaisquer ficheiros no seu computador.", "This is a major version upgrade.": "Esta é uma actualização para uma versão importante.", "This setting controls the free space required on the home (i.e., index database) disk.": "Este parâmetro controla o espaço livre necessário no disco base (ou seja, o disco da base de dados do índice).", "Time": "Quando", "Time the item was last modified": "Quando o item foi modificado pela última vez", "Trash Can File Versioning": "Reciclagem", "Type": "Tipo", "UNIX Permissions": "Permissões UNIX", "Unavailable": "Indisponível", "Unavailable/Disabled by administrator or maintainer": "Indisponíveis ou desactivadas pelo administrador ou responsável de manutenção", "Undecided (will prompt)": "Não definido (será inquirido na altura)", "Unignore": "Deixar de ignorar", "Unknown": "Desconhecido", "Unshared": "Não partilhada", "Unshared Devices": "Dispositivos sem partilhas", "Up to Date": "Em sincronia", "Updated": "Actualizado", "Upgrade": "Actualizar", "Upgrade To {%version%}": "Actualizar para {{version}}", "Upgrading": "Actualizando", "Upload Rate": "Velocidade de envio", "Uptime": "Tempo em actividade", "Usage reporting is always enabled for candidate releases.": "O relatório de utilização está sempre activado nas versões candidatas a lançamento.", "Use HTTPS for GUI": "Utilizar HTTPS na interface gráfica", "Username/Password has not been set for the GUI authentication. Please consider setting it up.": "O nome de utilizador e a respectiva senha para a autenticação na interface gráfica não foram definidos. 
Considere efectuar essa configuração.", "Version": "Versão", "Versions": "Versões", "Versions Path": "Caminho das versões", "Versions are automatically deleted if they are older than the maximum age or exceed the number of files allowed in an interval.": "As versões são eliminadas automaticamente se forem mais antigas do que a idade máxima ou excederem o número máximo de ficheiros permitido num intervalo.", "Waiting to Clean": "Aguardando a limpeza", "Waiting to Scan": "Aguardando a verificação", "Waiting to Sync": "Aguardando a sincronização", "Warning, this path is a parent directory of an existing folder \"{%otherFolder%}\".": "Aviso: Este caminho é uma pasta mãe duma pasta \"{{otherFolder}}\" já existente.", "Warning, this path is a parent directory of an existing folder \"{%otherFolderLabel%}\" ({%otherFolder%}).": "Aviso: Este caminho é uma pasta mãe duma pasta \"{{otherFolderLabel}}\" ({{otherFolder}}) já existente.", "Warning, this path is a subdirectory of an existing folder \"{%otherFolder%}\".": "Aviso: Este caminho é uma subpasta da pasta \"{{otherFolder}}\" já existente.", "Warning, this path is a subdirectory of an existing folder \"{%otherFolderLabel%}\" ({%otherFolder%}).": "Aviso: Este caminho é uma subpasta da pasta \"{{otherFolderLabel}}\" ({{otherFolder}}) já existente.", "Warning: If you are using an external watcher like {%syncthingInotify%}, you should make sure it is deactivated.": "Aviso: Se estiver a usar um verificador externo, tal como o {{syncthingInotify}}, deve certificar-se que está desactivado.", "Watch for Changes": "Vigiar alterações", "Watching for Changes": "Vigilância de alterações", "Watching for changes discovers most changes without periodic scanning.": "A vigilância de alterações detecta a maior parte das alterações sem a necessidade de fazer uma verificação periódica.", "When adding a new device, keep in mind that this device must be added on the other side too.": "Quando adicionar um novo dispositivo, lembre-se que este dispositivo tem que ser adicionado do outro lado também.", "When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.": "Quando adicionar uma nova pasta, lembre-se que o ID da pasta é utilizado para ligar as pastas entre dispositivos. É sensível às diferenças entre maiúsculas e minúsculas e tem que ter uma correspondência perfeita entre todos os dispositivos.", "Yes": "Sim", "You can also select one of these nearby devices:": "Também pode seleccionar um destes dispositivos que estão próximos:", "You can change your choice at any time in the Settings dialog.": "Pode modificar a sua escolha em qualquer altura nas configurações.", "You can read more about the two release channels at the link below.": "Pode ler mais sobre os dois canais de lançamento na ligação abaixo.", "You have no ignored devices.": "Não tem dispositivos ignorados.", "You have no ignored folders.": "Não tem pastas ignoradas.", "You have unsaved changes. Do you really want to discard them?": "Fez alterações que não foram guardadas. 
Quer mesmo descartá-las?", "You must keep at least one version.": "Tem que manter pelo menos uma versão.", "days": "dias", "directories": "pastas", "files": "ficheiros", "full documentation": "documentação completa", "items": "itens", "seconds": "segundos", "{%device%} wants to share folder \"{%folder%}\".": "{{device}} quer partilhar a pasta \"{{folder}}\".", "{%device%} wants to share folder \"{%folderlabel%}\" ({%folder%}).": "{{device}} quer partilhar a pasta \"{{folderlabel}}\" ({{folder}})." }
gui/default/assets/lang/lang-pt-PT.json
0
https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb
[ 0.00017636905249673873, 0.00016940810019150376, 0.00015851418720558286, 0.00017116058734245598, 0.00000460773890154087 ]
{ "id": 12, "code_window": [ "\t\t\tl.Debugln(f, \"Handling ignored file\", file)\n", "\t\t\tdbUpdateChan <- dbUpdateJob{file, dbUpdateInvalidate}\n", "\n", "\t\tcase runtime.GOOS == \"windows\" && fs.WindowsInvalidFilename(file.Name):\n", "\t\t\tif file.IsDeleted() {\n", "\t\t\t\t// Just pretend we deleted it, no reason to create an error\n", "\t\t\t\t// about a deleted file that we can't have anyway.\n", "\t\t\t\t// Reason we need it in the first place is, that it was\n", "\t\t\t\t// ignored at some point.\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tcase runtime.GOOS == \"windows\" && fs.WindowsInvalidFilename(file.Name) != nil:\n" ], "file_path": "lib/model/folder_sendrecv.go", "type": "replace", "edit_start_line_idx": 339 }
// Copyright (C) 2016 The Protocol Authors. package protocol import ( "context" "crypto/tls" "encoding/binary" "net" "testing" "github.com/syncthing/syncthing/lib/dialer" ) func BenchmarkRequestsRawTCP(b *testing.B) { // Benchmarks the rate at which we can serve requests over a single, // unencrypted TCP channel over the loopback interface. // Get a connected TCP pair conn0, conn1, err := getTCPConnectionPair() if err != nil { b.Fatal(err) } defer conn0.Close() defer conn1.Close() // Bench it benchmarkRequestsConnPair(b, conn0, conn1) } func BenchmarkRequestsTLSoTCP(b *testing.B) { conn0, conn1, err := getTCPConnectionPair() if err != nil { b.Fatal(err) } defer conn0.Close() defer conn1.Close() benchmarkRequestsTLS(b, conn0, conn1) } func benchmarkRequestsTLS(b *testing.B, conn0, conn1 net.Conn) { // Benchmarks the rate at which we can serve requests over a single, // TLS encrypted channel over the loopback interface. // Load a certificate, skipping this benchmark if it doesn't exist cert, err := tls.LoadX509KeyPair("../../test/h1/cert.pem", "../../test/h1/key.pem") if err != nil { b.Skip(err) return } /// TLSify them conn0, conn1 = negotiateTLS(cert, conn0, conn1) // Bench it benchmarkRequestsConnPair(b, conn0, conn1) } func benchmarkRequestsConnPair(b *testing.B, conn0, conn1 net.Conn) { // Start up Connections on them c0 := NewConnection(LocalDeviceID, conn0, conn0, new(fakeModel), "c0", CompressMetadata) c0.Start() c1 := NewConnection(LocalDeviceID, conn1, conn1, new(fakeModel), "c1", CompressMetadata) c1.Start() // Satisfy the assertions in the protocol by sending an initial cluster config c0.ClusterConfig(ClusterConfig{}) c1.ClusterConfig(ClusterConfig{}) // Report some useful stats and reset the timer for the actual test b.ReportAllocs() b.SetBytes(128 << 10) b.ResetTimer() // Request 128 KiB blocks, which will be satisfied by zero copy from the // other side (we'll get back a full block of zeroes). var buf []byte var err error for i := 0; i < b.N; i++ { // Use c0 and c1 for each alternating request, so we get as much // data flowing in both directions. if i%2 == 0 { buf, err = c0.Request(context.Background(), "folder", "file", int64(i), 128<<10, nil, 0, false) } else { buf, err = c1.Request(context.Background(), "folder", "file", int64(i), 128<<10, nil, 0, false) } if err != nil { b.Fatal(err) } if len(buf) != 128<<10 { b.Fatal("Incorrect returned buf length", len(buf), "!=", 128<<10) } // The fake model is supposed to tag the end of the buffer with the // requested offset, so we can verify that we get back data for this // block correctly. if binary.BigEndian.Uint64(buf[128<<10-8:]) != uint64(i) { b.Fatal("Bad data returned") } } } // returns the two endpoints of a TCP connection over lo0 func getTCPConnectionPair() (net.Conn, net.Conn, error) { lst, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { return nil, nil, err } // We run the Accept in the background since it's blocking, and we use // the channel to make the race thingies happy about writing vs reading // conn0 and err0. 
var conn0 net.Conn var err0 error done := make(chan struct{}) go func() { conn0, err0 = lst.Accept() close(done) }() // Dial the connection conn1, err := net.Dial("tcp", lst.Addr().String()) if err != nil { return nil, nil, err } // Check any error from accept <-done if err0 != nil { return nil, nil, err0 } // Set the buffer sizes etc as usual dialer.SetTCPOptions(conn0) dialer.SetTCPOptions(conn1) return conn0, conn1, nil } func negotiateTLS(cert tls.Certificate, conn0, conn1 net.Conn) (net.Conn, net.Conn) { cfg := &tls.Config{ Certificates: []tls.Certificate{cert}, NextProtos: []string{"bep/1.0"}, ClientAuth: tls.RequestClientCert, SessionTicketsDisabled: true, InsecureSkipVerify: true, MinVersion: tls.VersionTLS12, CipherSuites: []uint16{ tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, }, } tlsc0 := tls.Server(conn0, cfg) tlsc1 := tls.Client(conn1, cfg) return tlsc0, tlsc1 } // The fake model does nothing much type fakeModel struct{} func (m *fakeModel) Index(deviceID DeviceID, folder string, files []FileInfo) error { return nil } func (m *fakeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) error { return nil } func (m *fakeModel) Request(deviceID DeviceID, folder, name string, size int32, offset int64, hash []byte, weakHash uint32, fromTemporary bool) (RequestResponse, error) { // We write the offset to the end of the buffer, so the receiver // can verify that it did in fact get some data back over the // connection. buf := make([]byte, size) binary.BigEndian.PutUint64(buf[len(buf)-8:], uint64(offset)) return &fakeRequestResponse{buf}, nil } func (m *fakeModel) ClusterConfig(deviceID DeviceID, config ClusterConfig) error { return nil } func (m *fakeModel) Closed(conn Connection, err error) { } func (m *fakeModel) DownloadProgress(deviceID DeviceID, folder string, updates []FileDownloadProgressUpdate) error { return nil }
lib/protocol/benchmark_test.go
0
https://github.com/syncthing/syncthing/commit/9e0b924d57dd6d296472d6af39878437dd7e10bb
[ 0.0002539379056543112, 0.0001739449508022517, 0.0001623720017960295, 0.00017010961892083287, 0.000018665319657884538 ]
{ "id": 4, "code_window": [ "\t\tmod.Resources[n.Name] = rs\n", "\t}\n", "\trs.Type = n.ResourceType\n", "\trs.Dependencies = n.Dependencies\n", "\n", "\tif n.Tainted != nil && *n.Tainted {\n", "\t\tif n.TaintedIndex != -1 {\n", "\t\t\trs.Tainted[n.TaintedIndex] = *n.State\n", "\t\t} else {\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tprintln(fmt.Sprintf(\"%#v\", rs))\n", "\tprintln(fmt.Sprintf(\"%#v\", *n.State))\n" ], "file_path": "terraform/eval_state.go", "type": "add", "edit_start_line_idx": 105 }
package terraform import ( "fmt" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/dag" ) // ResourceCountTransformer is a GraphTransformer that expands the count // out for a specific resource. type ResourceCountTransformer struct { Resource *config.Resource Destroy bool } func (t *ResourceCountTransformer) Transform(g *Graph) error { // Expand the resource count count, err := t.Resource.Count() if err != nil { return err } // Don't allow the count to be negative if count < 0 { return fmt.Errorf("negative count: %d", count) } // For each count, build and add the node nodes := make([]dag.Vertex, count) for i := 0; i < count; i++ { // Set the index. If our count is 1 we special case it so that // we handle the "resource.0" and "resource" boundary properly. index := i if count == 1 { index = -1 } // Save the node for later so we can do connections. Make the // proper node depending on if we're just a destroy node or if // were a regular node. var node dag.Vertex = &graphNodeExpandedResource{ Index: index, Resource: t.Resource, } if t.Destroy { node = &graphNodeExpandedResourceDestroy{ graphNodeExpandedResource: node.(*graphNodeExpandedResource), } } // Add the node now nodes[i] = node g.Add(nodes[i]) } // Make the dependency connections for _, n := range nodes { // Connect the dependents. We ignore the return value for missing // dependents since that should've been caught at a higher level. g.ConnectDependent(n) } return nil } type graphNodeExpandedResource struct { Index int Resource *config.Resource } func (n *graphNodeExpandedResource) Name() string { if n.Index == -1 { return n.Resource.Id() } return fmt.Sprintf("%s #%d", n.Resource.Id(), n.Index) } // GraphNodeDependable impl. func (n *graphNodeExpandedResource) DependableName() []string { return []string{ n.Resource.Id(), n.stateId(), } } // GraphNodeDependent impl. func (n *graphNodeExpandedResource) DependentOn() []string { config := &GraphNodeConfigResource{Resource: n.Resource} return config.DependentOn() } // GraphNodeProviderConsumer func (n *graphNodeExpandedResource) ProvidedBy() []string { return []string{resourceProvider(n.Resource.Type)} } // GraphNodeEvalable impl. func (n *graphNodeExpandedResource) EvalTree() EvalNode { var diff *InstanceDiff var state *InstanceState // Build the resource. If we aren't part of a multi-resource, then // we still consider ourselves as count index zero. 
index := n.Index if index < 0 { index = 0 } resource := &Resource{CountIndex: index} // Shared node for interpolation of configuration interpolateNode := &EvalInterpolate{ Config: n.Resource.RawConfig, Resource: resource, } seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)} // Validate the resource vseq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)} vseq.Nodes = append(vseq.Nodes, &EvalValidateResource{ Provider: &EvalGetProvider{Name: n.ProvidedBy()[0]}, Config: interpolateNode, ResourceName: n.Resource.Name, ResourceType: n.Resource.Type, }) // Validate all the provisioners for _, p := range n.Resource.Provisioners { vseq.Nodes = append(vseq.Nodes, &EvalValidateProvisioner{ Provisioner: &EvalGetProvisioner{Name: p.Type}, Config: &EvalInterpolate{ Config: p.RawConfig, Resource: resource}, }) } // Add the validation operations seq.Nodes = append(seq.Nodes, &EvalOpFilter{ Ops: []walkOperation{walkValidate}, Node: vseq, }) // Build instance info info := n.instanceInfo() seq.Nodes = append(seq.Nodes, &EvalInstanceInfo{Info: info}) // Refresh the resource seq.Nodes = append(seq.Nodes, &EvalOpFilter{ Ops: []walkOperation{walkRefresh}, Node: &EvalSequence{ Nodes: []EvalNode{ &EvalReadState{ Name: n.stateId(), Output: &state, }, &EvalRefresh{ Info: info, Provider: &EvalGetProvider{Name: n.ProvidedBy()[0]}, State: &state, Output: &state, }, &EvalWriteState{ Name: n.stateId(), ResourceType: n.Resource.Type, Dependencies: n.DependentOn(), State: &state, }, }, }, }) // Diff the resource seq.Nodes = append(seq.Nodes, &EvalOpFilter{ Ops: []walkOperation{walkPlan}, Node: &EvalSequence{ Nodes: []EvalNode{ &EvalDiff{ Info: info, Config: interpolateNode, Provider: &EvalGetProvider{Name: n.ProvidedBy()[0]}, State: &EvalReadState{Name: n.stateId()}, Output: &diff, OutputState: &state, }, &EvalWriteState{ Name: n.stateId(), ResourceType: n.Resource.Type, Dependencies: n.DependentOn(), State: &state, }, &EvalDiffTainted{ Diff: &diff, Name: n.stateId(), }, &EvalWriteDiff{ Name: n.stateId(), Diff: &diff, }, }, }, }) // Diff the resource for destruction seq.Nodes = append(seq.Nodes, &EvalOpFilter{ Ops: []walkOperation{walkPlanDestroy}, Node: &EvalSequence{ Nodes: []EvalNode{ &EvalDiffDestroy{ Info: info, State: &EvalReadState{Name: n.stateId()}, Output: &diff, }, &EvalWriteDiff{ Name: n.stateId(), Diff: &diff, }, }, }, }) // Diff the resource for destruction var provider ResourceProvider var diffApply *InstanceDiff var err error var createNew, tainted bool seq.Nodes = append(seq.Nodes, &EvalOpFilter{ Ops: []walkOperation{walkApply}, Node: &EvalSequence{ Nodes: []EvalNode{ // Get the saved diff for apply &EvalReadDiff{ Name: n.stateId(), Diff: &diffApply, }, // We don't want to do any destroys &EvalIf{ If: func(ctx EvalContext) (bool, error) { if diffApply == nil { return true, EvalEarlyExitError{} } if diffApply.Destroy && len(diffApply.Attributes) == 0 { return true, EvalEarlyExitError{} } diffApply.Destroy = false return true, nil }, Node: EvalNoop{}, }, &EvalIf{ If: func(ctx EvalContext) (bool, error) { return n.Resource.Lifecycle.CreateBeforeDestroy, nil }, Node: &EvalDeposeState{ Name: n.stateId(), }, }, &EvalDiff{ Info: info, Config: interpolateNode, Provider: &EvalGetProvider{Name: n.ProvidedBy()[0]}, State: &EvalReadState{Name: n.stateId()}, Output: &diffApply, }, // Get the saved diff &EvalReadDiff{ Name: n.stateId(), Diff: &diff, }, // Compare the diffs &EvalCompareDiff{ Info: info, One: &diff, Two: &diffApply, }, &EvalGetProvider{ Name: n.ProvidedBy()[0], Output: &provider, }, &EvalReadState{ Name: 
n.stateId(), Output: &state, }, &EvalApply{ Info: info, State: &state, Diff: &diffApply, Provider: &provider, Output: &state, Error: &err, CreateNew: &createNew, }, &EvalWriteState{ Name: n.stateId(), ResourceType: n.Resource.Type, Dependencies: n.DependentOn(), State: &state, }, &EvalApplyProvisioners{ Info: info, State: &state, Resource: n.Resource, InterpResource: resource, CreateNew: &createNew, Tainted: &tainted, Error: &err, }, &EvalIf{ If: func(ctx EvalContext) (bool, error) { return n.Resource.Lifecycle.CreateBeforeDestroy && tainted, nil }, Node: &EvalUndeposeState{ Name: n.stateId(), }, }, &EvalWriteState{ Name: n.stateId(), ResourceType: n.Resource.Type, Dependencies: n.DependentOn(), State: &state, Tainted: &tainted, TaintedIndex: -1, TaintedClearPrimary: !n.Resource.Lifecycle.CreateBeforeDestroy, }, &EvalApplyPost{ Info: info, State: &state, Error: &err, }, }, }, }) return seq } // instanceInfo is used for EvalTree. func (n *graphNodeExpandedResource) instanceInfo() *InstanceInfo { return &InstanceInfo{Id: n.stateId(), Type: n.Resource.Type} } // stateId is the name used for the state key func (n *graphNodeExpandedResource) stateId() string { if n.Index == -1 { return n.Resource.Id() } return fmt.Sprintf("%s.%d", n.Resource.Id(), n.Index) } // GraphNodeStateRepresentative impl. func (n *graphNodeExpandedResource) StateId() []string { return []string{n.stateId()} } // graphNodeExpandedResourceDestroy represents an expanded resource that // is to be destroyed. type graphNodeExpandedResourceDestroy struct { *graphNodeExpandedResource } func (n *graphNodeExpandedResourceDestroy) Name() string { return fmt.Sprintf("%s (destroy)", n.graphNodeExpandedResource.Name()) } // GraphNodeEvalable impl. func (n *graphNodeExpandedResourceDestroy) EvalTree() EvalNode { info := n.instanceInfo() var diffApply *InstanceDiff var provider ResourceProvider var state *InstanceState var err error return &EvalOpFilter{ Ops: []walkOperation{walkApply}, Node: &EvalSequence{ Nodes: []EvalNode{ // Get the saved diff for apply &EvalReadDiff{ Name: n.stateId(), Diff: &diffApply, }, // If we're not destroying, then compare diffs &EvalIf{ If: func(ctx EvalContext) (bool, error) { if diffApply != nil && diffApply.Destroy { return true, nil } return true, EvalEarlyExitError{} }, Node: EvalNoop{}, }, &EvalGetProvider{ Name: n.ProvidedBy()[0], Output: &provider, }, &EvalReadState{ Name: n.stateId(), Output: &state, Tainted: n.Resource.Lifecycle.CreateBeforeDestroy, TaintedIndex: -1, }, &EvalApply{ Info: info, State: &state, Diff: &diffApply, Provider: &provider, Output: &state, Error: &err, }, &EvalWriteState{ Name: n.stateId(), ResourceType: n.Resource.Type, Dependencies: n.DependentOn(), State: &state, }, &EvalApplyPost{ Info: info, State: &state, Error: &err, }, }, }, } }
terraform/transform_resource.go
1
https://github.com/hashicorp/terraform/commit/33cad6d2073170752b1834e1f7ef4c59ff62180b
[ 0.005927502643316984, 0.0008250260725617409, 0.0001672998332651332, 0.00021844159346073866, 0.0013229066971689463 ]
{ "id": 4, "code_window": [ "\t\tmod.Resources[n.Name] = rs\n", "\t}\n", "\trs.Type = n.ResourceType\n", "\trs.Dependencies = n.Dependencies\n", "\n", "\tif n.Tainted != nil && *n.Tainted {\n", "\t\tif n.TaintedIndex != -1 {\n", "\t\t\trs.Tainted[n.TaintedIndex] = *n.State\n", "\t\t} else {\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tprintln(fmt.Sprintf(\"%#v\", rs))\n", "\tprintln(fmt.Sprintf(\"%#v\", *n.State))\n" ], "file_path": "terraform/eval_state.go", "type": "add", "edit_start_line_idx": 105 }
package ssh

import (
	"code.google.com/p/go.crypto/ssh"
	"log"
)

// An implementation of ssh.KeyboardInteractiveChallenge that simply sends
// back the password for all questions. The questions are logged.
func PasswordKeyboardInteractive(password string) ssh.KeyboardInteractiveChallenge {
	return func(user, instruction string, questions []string, echos []bool) ([]string, error) {
		log.Printf("Keyboard interactive challenge: ")
		log.Printf("-- User: %s", user)
		log.Printf("-- Instructions: %s", instruction)
		for i, question := range questions {
			log.Printf("-- Question %d: %s", i+1, question)
		}

		// Just send the password back for all questions
		answers := make([]string, len(questions))
		for i := range answers {
			answers[i] = string(password)
		}

		return answers, nil
	}
}
helper/ssh/password.go
0
https://github.com/hashicorp/terraform/commit/33cad6d2073170752b1834e1f7ef4c59ff62180b
[ 0.00017389828281011432, 0.00017073361959774047, 0.00016629799210932106, 0.0001720046129776165, 0.0000032303407806466566 ]
{ "id": 4, "code_window": [ "\t\tmod.Resources[n.Name] = rs\n", "\t}\n", "\trs.Type = n.ResourceType\n", "\trs.Dependencies = n.Dependencies\n", "\n", "\tif n.Tainted != nil && *n.Tainted {\n", "\t\tif n.TaintedIndex != -1 {\n", "\t\t\trs.Tainted[n.TaintedIndex] = *n.State\n", "\t\t} else {\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tprintln(fmt.Sprintf(\"%#v\", rs))\n", "\tprintln(fmt.Sprintf(\"%#v\", *n.State))\n" ], "file_path": "terraform/eval_state.go", "type": "add", "edit_start_line_idx": 105 }
// The depgraph package is used to create and model a dependency graph // of nouns. Each noun can represent a service, server, application, // network switch, etc. Nouns can depend on other nouns, and provide // versioning constraints. Nouns can also have various meta data that // may be relevant to their construction or configuration. package depgraph import ( "bytes" "fmt" "sort" "strings" "sync" "github.com/hashicorp/terraform/digraph" ) // WalkFunc is the type used for the callback for Walk. type WalkFunc func(*Noun) error // Graph is used to represent a dependency graph. type Graph struct { Name string Meta interface{} Nouns []*Noun Root *Noun } // ValidateError implements the Error interface but provides // additional information on a validation error. type ValidateError struct { // If set, then the graph is missing a single root, on which // there are no depdendencies MissingRoot bool // Unreachable are nodes that could not be reached from // the root noun. Unreachable []*Noun // Cycles are groups of strongly connected nodes, which // form a cycle. This is disallowed. Cycles [][]*Noun } func (v *ValidateError) Error() string { var msgs []string if v.MissingRoot { msgs = append(msgs, "The graph has no single root") } for _, n := range v.Unreachable { msgs = append(msgs, fmt.Sprintf( "Unreachable node: %s", n.Name)) } for _, c := range v.Cycles { cycleNodes := make([]string, len(c)) for i, n := range c { cycleNodes[i] = n.Name } msgs = append(msgs, fmt.Sprintf( "Cycle: %s", strings.Join(cycleNodes, " -> "))) } for i, m := range msgs { msgs[i] = fmt.Sprintf("* %s", m) } return fmt.Sprintf( "The dependency graph is not valid:\n\n%s", strings.Join(msgs, "\n")) } // ConstraintError is used to return detailed violation // information from CheckConstraints type ConstraintError struct { Violations []*Violation } func (c *ConstraintError) Error() string { return fmt.Sprintf("%d constraint violations", len(c.Violations)) } // Violation is used to pass along information about // a constraint violation type Violation struct { Source *Noun Target *Noun Dependency *Dependency Constraint Constraint Err error } func (v *Violation) Error() string { return fmt.Sprintf("Constraint %v between %v and %v violated: %v", v.Constraint, v.Source, v.Target, v.Err) } // CheckConstraints walks the graph and ensures that all // user imposed constraints are satisfied. func (g *Graph) CheckConstraints() error { // Ensure we have a root if g.Root == nil { return fmt.Errorf("Graph must be validated before checking constraint violations") } // Create a constraint error cErr := &ConstraintError{} // Walk from the root digraph.DepthFirstWalk(g.Root, func(n digraph.Node) bool { noun := n.(*Noun) for _, dep := range noun.Deps { target := dep.Target for _, constraint := range dep.Constraints { ok, err := constraint.Satisfied(noun, target) if ok { continue } violation := &Violation{ Source: noun, Target: target, Dependency: dep, Constraint: constraint, Err: err, } cErr.Violations = append(cErr.Violations, violation) } } return true }) if cErr.Violations != nil { return cErr } return nil } // Noun returns the noun with the given name, or nil if it cannot be found. func (g *Graph) Noun(name string) *Noun { for _, n := range g.Nouns { if n.Name == name { return n } } return nil } // String generates a little ASCII string of the graph, useful in // debugging output. 
func (g *Graph) String() string { var buf bytes.Buffer // Alphabetize the output based on the noun name keys := make([]string, 0, len(g.Nouns)) mapping := make(map[string]*Noun) for _, n := range g.Nouns { mapping[n.Name] = n keys = append(keys, n.Name) } sort.Strings(keys) if g.Root != nil { buf.WriteString(fmt.Sprintf("root: %s\n", g.Root.Name)) } else { buf.WriteString("root: <unknown>\n") } for _, k := range keys { n := mapping[k] buf.WriteString(fmt.Sprintf("%s\n", n.Name)) // Alphabetize the dependency names depKeys := make([]string, 0, len(n.Deps)) depMapping := make(map[string]*Dependency) for _, d := range n.Deps { depMapping[d.Target.Name] = d depKeys = append(depKeys, d.Target.Name) } sort.Strings(depKeys) for _, k := range depKeys { dep := depMapping[k] buf.WriteString(fmt.Sprintf( " %s -> %s\n", dep.Source, dep.Target)) } } return buf.String() } // Validate is used to ensure that a few properties of the graph are not violated: // 1) There must be a single "root", or source on which nothing depends. // 2) All nouns in the graph must be reachable from the root // 3) The graph must be cycle free, meaning there are no cicular dependencies func (g *Graph) Validate() error { // Convert to node list nodes := make([]digraph.Node, len(g.Nouns)) for i, n := range g.Nouns { nodes[i] = n } // Create a validate erro vErr := &ValidateError{} // Search for all the sources, if we have only 1, it must be the root if sources := digraph.Sources(nodes); len(sources) != 1 { vErr.MissingRoot = true goto CHECK_CYCLES } else { g.Root = sources[0].(*Noun) } // Check reachability if unreached := digraph.Unreachable(g.Root, nodes); len(unreached) > 0 { vErr.Unreachable = make([]*Noun, len(unreached)) for i, u := range unreached { vErr.Unreachable[i] = u.(*Noun) } } CHECK_CYCLES: // Check for cycles if cycles := digraph.StronglyConnectedComponents(nodes, true); len(cycles) > 0 { vErr.Cycles = make([][]*Noun, len(cycles)) for i, cycle := range cycles { group := make([]*Noun, len(cycle)) for j, n := range cycle { group[j] = n.(*Noun) } vErr.Cycles[i] = group } } // Check for loops to yourself for _, n := range g.Nouns { for _, d := range n.Deps { if d.Source == d.Target { vErr.Cycles = append(vErr.Cycles, []*Noun{n}) } } } // Return the detailed error if vErr.MissingRoot || vErr.Unreachable != nil || vErr.Cycles != nil { return vErr } return nil } // Walk will walk the tree depth-first (dependency first) and call // the callback. // // The callbacks will be called in parallel, so if you need non-parallelism, // then introduce a lock in your callback. func (g *Graph) Walk(fn WalkFunc) error { // Set so we don't callback for a single noun multiple times var seenMapL sync.RWMutex seenMap := make(map[*Noun]chan struct{}) seenMap[g.Root] = make(chan struct{}) // Keep track of what nodes errored. var errMapL sync.RWMutex errMap := make(map[*Noun]struct{}) // Build the list of things to visit tovisit := make([]*Noun, 1, len(g.Nouns)) tovisit[0] = g.Root // Spawn off all our goroutines to walk the tree errCh := make(chan error) for len(tovisit) > 0 { // Grab the current thing to use n := len(tovisit) current := tovisit[n-1] tovisit = tovisit[:n-1] // Go through each dependency and run that first for _, dep := range current.Deps { if _, ok := seenMap[dep.Target]; !ok { seenMapL.Lock() seenMap[dep.Target] = make(chan struct{}) seenMapL.Unlock() tovisit = append(tovisit, dep.Target) } } // Spawn off a goroutine to execute our callback once // all our dependencies are satisfied. 
go func(current *Noun) { seenMapL.RLock() closeCh := seenMap[current] seenMapL.RUnlock() defer close(closeCh) // Wait for all our dependencies for _, dep := range current.Deps { seenMapL.RLock() ch := seenMap[dep.Target] seenMapL.RUnlock() // Wait for the dep to be run <-ch // Check if any dependencies errored. If so, // then return right away, we won't walk it. errMapL.RLock() _, errOk := errMap[dep.Target] errMapL.RUnlock() if errOk { return } } // Call our callback! if err := fn(current); err != nil { errMapL.Lock() errMap[current] = struct{}{} errMapL.Unlock() errCh <- err } }(current) } // Aggregate channel that is closed when all goroutines finish doneCh := make(chan struct{}) go func() { defer close(doneCh) for _, ch := range seenMap { <-ch } }() // Wait for finish OR an error select { case <-doneCh: return nil case err := <-errCh: // Drain the error channel go func() { for _ = range errCh { // Nothing } }() // Wait for the goroutines to end <-doneCh close(errCh) return err } } // DependsOn returns the set of nouns that have a // dependency on a given noun. This can be used to find // the incoming edges to a noun. func (g *Graph) DependsOn(n *Noun) []*Noun { var incoming []*Noun OUTER: for _, other := range g.Nouns { if other == n { continue } for _, d := range other.Deps { if d.Target == n { incoming = append(incoming, other) continue OUTER } } } return incoming }
depgraph/graph.go
0
https://github.com/hashicorp/terraform/commit/33cad6d2073170752b1834e1f7ef4c59ff62180b
[ 0.03442953899502754, 0.002650384558364749, 0.00016176869394257665, 0.00017498110537417233, 0.006500727497041225 ]
{ "id": 4, "code_window": [ "\t\tmod.Resources[n.Name] = rs\n", "\t}\n", "\trs.Type = n.ResourceType\n", "\trs.Dependencies = n.Dependencies\n", "\n", "\tif n.Tainted != nil && *n.Tainted {\n", "\t\tif n.TaintedIndex != -1 {\n", "\t\t\trs.Tainted[n.TaintedIndex] = *n.State\n", "\t\t} else {\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tprintln(fmt.Sprintf(\"%#v\", rs))\n", "\tprintln(fmt.Sprintf(\"%#v\", *n.State))\n" ], "file_path": "terraform/eval_state.go", "type": "add", "edit_start_line_idx": 105 }
variable "amis" { default = { us-east-1 = "foo" us-west-2 = "bar" } } variable "bar" { default = "baz" } variable "foo" {} resource "aws_instance" "foo" { num = "2" bar = "${var.bar}" } resource "aws_instance" "bar" { foo = "${var.foo}" bar = "${lookup(var.amis, var.foo)}" }
terraform/test-fixtures/input-vars/main.tf
0
https://github.com/hashicorp/terraform/commit/33cad6d2073170752b1834e1f7ef4c59ff62180b
[ 0.00017497756925877184, 0.000174511966179125, 0.00017389214190188795, 0.00017466620192863047, 4.563467541629507e-7 ]
{ "id": 0, "code_window": [ "\t\"io/ioutil\"\n", "\t\"log\"\n", "\t\"os\"\n", "\t\"sync/atomic\"\n", "\t\"time\"\n", "\n", "\t\"github.com/hashicorp/terraform/communicator\"\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\"strings\"\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner.go", "type": "add", "edit_start_line_idx": 10 }
package remoteexec import ( "bytes" "context" "fmt" "io" "io/ioutil" "log" "os" "sync/atomic" "time" "github.com/hashicorp/terraform/communicator" "github.com/hashicorp/terraform/communicator/remote" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" "github.com/mitchellh/go-linereader" ) func Provisioner() terraform.ResourceProvisioner { return &schema.Provisioner{ Schema: map[string]*schema.Schema{ "inline": &schema.Schema{ Type: schema.TypeList, Elem: &schema.Schema{Type: schema.TypeString}, PromoteSingle: true, Optional: true, ConflictsWith: []string{"script", "scripts"}, }, "script": &schema.Schema{ Type: schema.TypeString, Optional: true, ConflictsWith: []string{"inline", "scripts"}, }, "scripts": &schema.Schema{ Type: schema.TypeList, Elem: &schema.Schema{Type: schema.TypeString}, Optional: true, ConflictsWith: []string{"script", "inline"}, }, }, ApplyFunc: applyFn, } } // Apply executes the remote exec provisioner func applyFn(ctx context.Context) error { connState := ctx.Value(schema.ProvRawStateKey).(*terraform.InstanceState) data := ctx.Value(schema.ProvConfigDataKey).(*schema.ResourceData) o := ctx.Value(schema.ProvOutputKey).(terraform.UIOutput) // Get a new communicator comm, err := communicator.New(connState) if err != nil { return err } // Collect the scripts scripts, err := collectScripts(data) if err != nil { return err } for _, s := range scripts { defer s.Close() } // Copy and execute each script if err := runScripts(ctx, o, comm, scripts); err != nil { return err } return nil } // generateScripts takes the configuration and creates a script from each inline config func generateScripts(d *schema.ResourceData) ([]string, error) { var scripts []string for _, l := range d.Get("inline").([]interface{}) { scripts = append(scripts, l.(string)) } return scripts, nil } // collectScripts is used to collect all the scripts we need // to execute in preparation for copying them. func collectScripts(d *schema.ResourceData) ([]io.ReadCloser, error) { // Check if inline if _, ok := d.GetOk("inline"); ok { scripts, err := generateScripts(d) if err != nil { return nil, err } var r []io.ReadCloser for _, script := range scripts { r = append(r, ioutil.NopCloser(bytes.NewReader([]byte(script)))) } return r, nil } // Collect scripts var scripts []string if script, ok := d.GetOk("script"); ok { scripts = append(scripts, script.(string)) } if scriptList, ok := d.GetOk("scripts"); ok { for _, script := range scriptList.([]interface{}) { scripts = append(scripts, script.(string)) } } // Open all the scripts var fhs []io.ReadCloser for _, s := range scripts { fh, err := os.Open(s) if err != nil { for _, fh := range fhs { fh.Close() } return nil, fmt.Errorf("Failed to open script '%s': %v", s, err) } fhs = append(fhs, fh) } // Done, return the file handles return fhs, nil } // runScripts is used to copy and execute a set of scripts func runScripts( ctx context.Context, o terraform.UIOutput, comm communicator.Communicator, scripts []io.ReadCloser) error { // Wrap out context in a cancelation function that we use to // kill the connection. 
ctx, cancelFunc := context.WithCancel(ctx) defer cancelFunc() // Wait for the context to end and then disconnect go func() { <-ctx.Done() comm.Disconnect() }() // Wait and retry until we establish the connection err := retryFunc(ctx, comm.Timeout(), func() error { err := comm.Connect(o) return err }) if err != nil { return err } for _, script := range scripts { var cmd *remote.Cmd outR, outW := io.Pipe() errR, errW := io.Pipe() outDoneCh := make(chan struct{}) errDoneCh := make(chan struct{}) go copyOutput(o, outR, outDoneCh) go copyOutput(o, errR, errDoneCh) remotePath := comm.ScriptPath() err = retryFunc(ctx, comm.Timeout(), func() error { if err := comm.UploadScript(remotePath, script); err != nil { return fmt.Errorf("Failed to upload script: %v", err) } cmd = &remote.Cmd{ Command: remotePath, Stdout: outW, Stderr: errW, } if err := comm.Start(cmd); err != nil { return fmt.Errorf("Error starting script: %v", err) } return nil }) if err == nil { cmd.Wait() if cmd.ExitStatus != 0 { err = fmt.Errorf("Script exited with non-zero exit status: %d", cmd.ExitStatus) } } // If we have an error, end our context so the disconnect happens. // This has to happen before the output cleanup below since during // an interrupt this will cause the outputs to end. if err != nil { cancelFunc() } // Wait for output to clean up outW.Close() errW.Close() <-outDoneCh <-errDoneCh // Upload a blank follow up file in the same path to prevent residual // script contents from remaining on remote machine empty := bytes.NewReader([]byte("")) if err := comm.Upload(remotePath, empty); err != nil { // This feature is best-effort. log.Printf("[WARN] Failed to upload empty follow up script: %v", err) } // If we have an error, return it out now that we've cleaned up if err != nil { return err } } return nil } func copyOutput( o terraform.UIOutput, r io.Reader, doneCh chan<- struct{}) { defer close(doneCh) lr := linereader.New(r) for line := range lr.Ch { o.Output(line) } } // retryFunc is used to retry a function for a given duration func retryFunc(ctx context.Context, timeout time.Duration, f func() error) error { // Build a new context with the timeout ctx, done := context.WithTimeout(ctx, timeout) defer done() // Try the function in a goroutine var errVal atomic.Value doneCh := make(chan struct{}) go func() { defer close(doneCh) for { // If our context ended, we want to exit right away. select { case <-ctx.Done(): return default: } // Try the function call err := f() if err == nil { return } log.Printf("Retryable error: %v", err) errVal.Store(err) } }() // Wait for completion select { case <-doneCh: case <-ctx.Done(): } // Check if we have a context error to check if we're interrupted or timeout switch ctx.Err() { case context.Canceled: return fmt.Errorf("interrupted") case context.DeadlineExceeded: return fmt.Errorf("timeout") } // Check if we got an error executing if err, ok := errVal.Load().(error); ok { return err } return nil }
builtin/provisioners/remote-exec/resource_provisioner.go
1
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.02248256281018257, 0.0013461241032928228, 0.00016455957666039467, 0.00016978883650153875, 0.004245911259204149 ]
{ "id": 0, "code_window": [ "\t\"io/ioutil\"\n", "\t\"log\"\n", "\t\"os\"\n", "\t\"sync/atomic\"\n", "\t\"time\"\n", "\n", "\t\"github.com/hashicorp/terraform/communicator\"\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\"strings\"\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner.go", "type": "add", "edit_start_line_idx": 10 }
package backoff

import (
	"runtime"
	"sync"
	"time"
)

// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
//
// Ticks will continue to arrive when the previous operation is still running,
// so operations that take a while to fail could run in quick succession.
type Ticker struct {
	C        <-chan time.Time
	c        chan time.Time
	b        BackOff
	stop     chan struct{}
	stopOnce sync.Once
}

// NewTicker returns a new Ticker containing a channel that will send the time at times
// specified by the BackOff argument. Ticker is guaranteed to tick at least once.
// The channel is closed when Stop method is called or BackOff stops.
func NewTicker(b BackOff) *Ticker {
	c := make(chan time.Time)
	t := &Ticker{
		C:    c,
		c:    c,
		b:    b,
		stop: make(chan struct{}),
	}
	go t.run()
	runtime.SetFinalizer(t, (*Ticker).Stop)
	return t
}

// Stop turns off a ticker. After Stop, no more ticks will be sent.
func (t *Ticker) Stop() {
	t.stopOnce.Do(func() { close(t.stop) })
}

func (t *Ticker) run() {
	c := t.c
	defer close(c)
	t.b.Reset()

	// Ticker is guaranteed to tick at least once.
	afterC := t.send(time.Now())

	for {
		if afterC == nil {
			return
		}

		select {
		case tick := <-afterC:
			afterC = t.send(tick)
		case <-t.stop:
			t.c = nil // Prevent future ticks from being sent to the channel.
			return
		}
	}
}

func (t *Ticker) send(tick time.Time) <-chan time.Time {
	select {
	case t.c <- tick:
	case <-t.stop:
		return nil
	}

	next := t.b.NextBackOff()
	if next == Stop {
		t.Stop()
		return nil
	}

	return time.After(next)
}
vendor/github.com/cenkalti/backoff/ticker.go
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.00017408342682756484, 0.00016888530808500946, 0.00016353641694877297, 0.00016747300105635077, 0.0000036912504128849832 ]
{ "id": 0, "code_window": [ "\t\"io/ioutil\"\n", "\t\"log\"\n", "\t\"os\"\n", "\t\"sync/atomic\"\n", "\t\"time\"\n", "\n", "\t\"github.com/hashicorp/terraform/communicator\"\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\"strings\"\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner.go", "type": "add", "edit_start_line_idx": 10 }
--- layout: "profitbricks" page_title: "ProfitBricks: profitbricks_lan" sidebar_current: "docs-profitbricks-resource-lan" description: |- Creates and manages LAN objects. --- # profitbricks\_lan Manages a LANs on ProfitBricks ## Example Usage ``` resource "profitbricks_lan" "example" { datacenter_id = "${profitbricks_datacenter.example.id}" public = true } ``` ##Argument reference * `datacenter_id` - (Required) [string] * `name` - (Optional) [string] The name of the LAN * `public` - (Optional) [Boolean] indicating if the LAN faces the public Internet or not.
website/source/docs/providers/profitbricks/r/profitbricks_lan.html.markdown
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.00018754905613604933, 0.00017372994625475258, 0.0001664136361796409, 0.00016722710279282182, 0.00000977723811956821 ]
{ "id": 0, "code_window": [ "\t\"io/ioutil\"\n", "\t\"log\"\n", "\t\"os\"\n", "\t\"sync/atomic\"\n", "\t\"time\"\n", "\n", "\t\"github.com/hashicorp/terraform/communicator\"\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\"strings\"\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner.go", "type": "add", "edit_start_line_idx": 10 }
package aws import ( "errors" "fmt" "testing" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/iam" "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" ) func TestValidateIamGroupName(t *testing.T) { validNames := []string{ "test-group", "test_group", "testgroup123", "TestGroup", "Test-Group", "test.group", "test.123,group", "testgroup@hashicorp", "[email protected]", } for _, v := range validNames { _, errs := validateAwsIamGroupName(v, "name") if len(errs) != 0 { t.Fatalf("%q should be a valid IAM Group name: %q", v, errs) } } invalidNames := []string{ "!", "/", " ", ":", ";", "test name", "/slash-at-the-beginning", "slash-at-the-end/", } for _, v := range invalidNames { _, errs := validateAwsIamGroupName(v, "name") if len(errs) == 0 { t.Fatalf("%q should be an invalid IAM Group name", v) } } } func TestAccAWSIAMGroup_basic(t *testing.T) { var conf iam.GetGroupOutput rInt := acctest.RandInt() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSGroupDestroy, Steps: []resource.TestStep{ { Config: testAccAWSGroupConfig(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSGroupExists("aws_iam_group.group", &conf), testAccCheckAWSGroupAttributes(&conf, fmt.Sprintf("test-group-%d", rInt), "/"), ), }, { Config: testAccAWSGroupConfig2(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckAWSGroupExists("aws_iam_group.group2", &conf), testAccCheckAWSGroupAttributes(&conf, fmt.Sprintf("test-group-%d-2", rInt), "/funnypath/"), ), }, }, }) } func testAccCheckAWSGroupDestroy(s *terraform.State) error { iamconn := testAccProvider.Meta().(*AWSClient).iamconn for _, rs := range s.RootModule().Resources { if rs.Type != "aws_iam_group" { continue } // Try to get group _, err := iamconn.GetGroup(&iam.GetGroupInput{ GroupName: aws.String(rs.Primary.ID), }) if err == nil { return errors.New("still exist.") } // Verify the error is what we want ec2err, ok := err.(awserr.Error) if !ok { return err } if ec2err.Code() != "NoSuchEntity" { return err } } return nil } func testAccCheckAWSGroupExists(n string, res *iam.GetGroupOutput) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } if rs.Primary.ID == "" { return errors.New("No Group name is set") } iamconn := testAccProvider.Meta().(*AWSClient).iamconn resp, err := iamconn.GetGroup(&iam.GetGroupInput{ GroupName: aws.String(rs.Primary.ID), }) if err != nil { return err } *res = *resp return nil } } func testAccCheckAWSGroupAttributes(group *iam.GetGroupOutput, name string, path string) resource.TestCheckFunc { return func(s *terraform.State) error { if *group.Group.GroupName != name { return fmt.Errorf("Bad name: %s when %s was expected", *group.Group.GroupName, name) } if *group.Group.Path != path { return fmt.Errorf("Bad path: %s when %s was expected", *group.Group.Path, path) } return nil } } func testAccAWSGroupConfig(rInt int) string { return fmt.Sprintf(` resource "aws_iam_group" "group" { name = "test-group-%d" path = "/" }`, rInt) } func testAccAWSGroupConfig2(rInt int) string { return fmt.Sprintf(` resource "aws_iam_group" "group2" { name = "test-group-%d-2" path = "/funnypath/" }`, rInt) }
builtin/providers/aws/resource_aws_iam_group_test.go
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.0037352018989622593, 0.00045222166227176785, 0.00016496553143952042, 0.0001696542021818459, 0.0008706586668267846 ]
{ "id": 1, "code_window": [ "}\n", "\n", "// generateScripts takes the configuration and creates a script from each inline config\n", "func generateScripts(d *schema.ResourceData) ([]string, error) {\n", "\tvar scripts []string\n", "\tfor _, l := range d.Get(\"inline\").([]interface{}) {\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep" ], "after_edit": [ "\tvar lines []string\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner.go", "type": "replace", "edit_start_line_idx": 80 }
package remoteexec import ( "bytes" "context" "fmt" "io" "io/ioutil" "log" "os" "sync/atomic" "time" "github.com/hashicorp/terraform/communicator" "github.com/hashicorp/terraform/communicator/remote" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" "github.com/mitchellh/go-linereader" ) func Provisioner() terraform.ResourceProvisioner { return &schema.Provisioner{ Schema: map[string]*schema.Schema{ "inline": &schema.Schema{ Type: schema.TypeList, Elem: &schema.Schema{Type: schema.TypeString}, PromoteSingle: true, Optional: true, ConflictsWith: []string{"script", "scripts"}, }, "script": &schema.Schema{ Type: schema.TypeString, Optional: true, ConflictsWith: []string{"inline", "scripts"}, }, "scripts": &schema.Schema{ Type: schema.TypeList, Elem: &schema.Schema{Type: schema.TypeString}, Optional: true, ConflictsWith: []string{"script", "inline"}, }, }, ApplyFunc: applyFn, } } // Apply executes the remote exec provisioner func applyFn(ctx context.Context) error { connState := ctx.Value(schema.ProvRawStateKey).(*terraform.InstanceState) data := ctx.Value(schema.ProvConfigDataKey).(*schema.ResourceData) o := ctx.Value(schema.ProvOutputKey).(terraform.UIOutput) // Get a new communicator comm, err := communicator.New(connState) if err != nil { return err } // Collect the scripts scripts, err := collectScripts(data) if err != nil { return err } for _, s := range scripts { defer s.Close() } // Copy and execute each script if err := runScripts(ctx, o, comm, scripts); err != nil { return err } return nil } // generateScripts takes the configuration and creates a script from each inline config func generateScripts(d *schema.ResourceData) ([]string, error) { var scripts []string for _, l := range d.Get("inline").([]interface{}) { scripts = append(scripts, l.(string)) } return scripts, nil } // collectScripts is used to collect all the scripts we need // to execute in preparation for copying them. func collectScripts(d *schema.ResourceData) ([]io.ReadCloser, error) { // Check if inline if _, ok := d.GetOk("inline"); ok { scripts, err := generateScripts(d) if err != nil { return nil, err } var r []io.ReadCloser for _, script := range scripts { r = append(r, ioutil.NopCloser(bytes.NewReader([]byte(script)))) } return r, nil } // Collect scripts var scripts []string if script, ok := d.GetOk("script"); ok { scripts = append(scripts, script.(string)) } if scriptList, ok := d.GetOk("scripts"); ok { for _, script := range scriptList.([]interface{}) { scripts = append(scripts, script.(string)) } } // Open all the scripts var fhs []io.ReadCloser for _, s := range scripts { fh, err := os.Open(s) if err != nil { for _, fh := range fhs { fh.Close() } return nil, fmt.Errorf("Failed to open script '%s': %v", s, err) } fhs = append(fhs, fh) } // Done, return the file handles return fhs, nil } // runScripts is used to copy and execute a set of scripts func runScripts( ctx context.Context, o terraform.UIOutput, comm communicator.Communicator, scripts []io.ReadCloser) error { // Wrap out context in a cancelation function that we use to // kill the connection. 
ctx, cancelFunc := context.WithCancel(ctx) defer cancelFunc() // Wait for the context to end and then disconnect go func() { <-ctx.Done() comm.Disconnect() }() // Wait and retry until we establish the connection err := retryFunc(ctx, comm.Timeout(), func() error { err := comm.Connect(o) return err }) if err != nil { return err } for _, script := range scripts { var cmd *remote.Cmd outR, outW := io.Pipe() errR, errW := io.Pipe() outDoneCh := make(chan struct{}) errDoneCh := make(chan struct{}) go copyOutput(o, outR, outDoneCh) go copyOutput(o, errR, errDoneCh) remotePath := comm.ScriptPath() err = retryFunc(ctx, comm.Timeout(), func() error { if err := comm.UploadScript(remotePath, script); err != nil { return fmt.Errorf("Failed to upload script: %v", err) } cmd = &remote.Cmd{ Command: remotePath, Stdout: outW, Stderr: errW, } if err := comm.Start(cmd); err != nil { return fmt.Errorf("Error starting script: %v", err) } return nil }) if err == nil { cmd.Wait() if cmd.ExitStatus != 0 { err = fmt.Errorf("Script exited with non-zero exit status: %d", cmd.ExitStatus) } } // If we have an error, end our context so the disconnect happens. // This has to happen before the output cleanup below since during // an interrupt this will cause the outputs to end. if err != nil { cancelFunc() } // Wait for output to clean up outW.Close() errW.Close() <-outDoneCh <-errDoneCh // Upload a blank follow up file in the same path to prevent residual // script contents from remaining on remote machine empty := bytes.NewReader([]byte("")) if err := comm.Upload(remotePath, empty); err != nil { // This feature is best-effort. log.Printf("[WARN] Failed to upload empty follow up script: %v", err) } // If we have an error, return it out now that we've cleaned up if err != nil { return err } } return nil } func copyOutput( o terraform.UIOutput, r io.Reader, doneCh chan<- struct{}) { defer close(doneCh) lr := linereader.New(r) for line := range lr.Ch { o.Output(line) } } // retryFunc is used to retry a function for a given duration func retryFunc(ctx context.Context, timeout time.Duration, f func() error) error { // Build a new context with the timeout ctx, done := context.WithTimeout(ctx, timeout) defer done() // Try the function in a goroutine var errVal atomic.Value doneCh := make(chan struct{}) go func() { defer close(doneCh) for { // If our context ended, we want to exit right away. select { case <-ctx.Done(): return default: } // Try the function call err := f() if err == nil { return } log.Printf("Retryable error: %v", err) errVal.Store(err) } }() // Wait for completion select { case <-doneCh: case <-ctx.Done(): } // Check if we have a context error to check if we're interrupted or timeout switch ctx.Err() { case context.Canceled: return fmt.Errorf("interrupted") case context.DeadlineExceeded: return fmt.Errorf("timeout") } // Check if we got an error executing if err, ok := errVal.Load().(error); ok { return err } return nil }
builtin/provisioners/remote-exec/resource_provisioner.go
1
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.9989187717437744, 0.24260897934436798, 0.00016770469665061682, 0.0005836967029608786, 0.4229618310928345 ]
{ "id": 1, "code_window": [ "}\n", "\n", "// generateScripts takes the configuration and creates a script from each inline config\n", "func generateScripts(d *schema.ResourceData) ([]string, error) {\n", "\tvar scripts []string\n", "\tfor _, l := range d.Get(\"inline\").([]interface{}) {\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep" ], "after_edit": [ "\tvar lines []string\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner.go", "type": "replace", "edit_start_line_idx": 80 }
// generated by 'go run gen.go'; do not edit package oid const ( T_bool Oid = 16 T_bytea Oid = 17 T_char Oid = 18 T_name Oid = 19 T_int8 Oid = 20 T_int2 Oid = 21 T_int2vector Oid = 22 T_int4 Oid = 23 T_regproc Oid = 24 T_text Oid = 25 T_oid Oid = 26 T_tid Oid = 27 T_xid Oid = 28 T_cid Oid = 29 T_oidvector Oid = 30 T_pg_type Oid = 71 T_pg_attribute Oid = 75 T_pg_proc Oid = 81 T_pg_class Oid = 83 T_json Oid = 114 T_xml Oid = 142 T__xml Oid = 143 T_pg_node_tree Oid = 194 T__json Oid = 199 T_smgr Oid = 210 T_point Oid = 600 T_lseg Oid = 601 T_path Oid = 602 T_box Oid = 603 T_polygon Oid = 604 T_line Oid = 628 T__line Oid = 629 T_cidr Oid = 650 T__cidr Oid = 651 T_float4 Oid = 700 T_float8 Oid = 701 T_abstime Oid = 702 T_reltime Oid = 703 T_tinterval Oid = 704 T_unknown Oid = 705 T_circle Oid = 718 T__circle Oid = 719 T_money Oid = 790 T__money Oid = 791 T_macaddr Oid = 829 T_inet Oid = 869 T__bool Oid = 1000 T__bytea Oid = 1001 T__char Oid = 1002 T__name Oid = 1003 T__int2 Oid = 1005 T__int2vector Oid = 1006 T__int4 Oid = 1007 T__regproc Oid = 1008 T__text Oid = 1009 T__tid Oid = 1010 T__xid Oid = 1011 T__cid Oid = 1012 T__oidvector Oid = 1013 T__bpchar Oid = 1014 T__varchar Oid = 1015 T__int8 Oid = 1016 T__point Oid = 1017 T__lseg Oid = 1018 T__path Oid = 1019 T__box Oid = 1020 T__float4 Oid = 1021 T__float8 Oid = 1022 T__abstime Oid = 1023 T__reltime Oid = 1024 T__tinterval Oid = 1025 T__polygon Oid = 1027 T__oid Oid = 1028 T_aclitem Oid = 1033 T__aclitem Oid = 1034 T__macaddr Oid = 1040 T__inet Oid = 1041 T_bpchar Oid = 1042 T_varchar Oid = 1043 T_date Oid = 1082 T_time Oid = 1083 T_timestamp Oid = 1114 T__timestamp Oid = 1115 T__date Oid = 1182 T__time Oid = 1183 T_timestamptz Oid = 1184 T__timestamptz Oid = 1185 T_interval Oid = 1186 T__interval Oid = 1187 T__numeric Oid = 1231 T_pg_database Oid = 1248 T__cstring Oid = 1263 T_timetz Oid = 1266 T__timetz Oid = 1270 T_bit Oid = 1560 T__bit Oid = 1561 T_varbit Oid = 1562 T__varbit Oid = 1563 T_numeric Oid = 1700 T_refcursor Oid = 1790 T__refcursor Oid = 2201 T_regprocedure Oid = 2202 T_regoper Oid = 2203 T_regoperator Oid = 2204 T_regclass Oid = 2205 T_regtype Oid = 2206 T__regprocedure Oid = 2207 T__regoper Oid = 2208 T__regoperator Oid = 2209 T__regclass Oid = 2210 T__regtype Oid = 2211 T_record Oid = 2249 T_cstring Oid = 2275 T_any Oid = 2276 T_anyarray Oid = 2277 T_void Oid = 2278 T_trigger Oid = 2279 T_language_handler Oid = 2280 T_internal Oid = 2281 T_opaque Oid = 2282 T_anyelement Oid = 2283 T__record Oid = 2287 T_anynonarray Oid = 2776 T_pg_authid Oid = 2842 T_pg_auth_members Oid = 2843 T__txid_snapshot Oid = 2949 T_uuid Oid = 2950 T__uuid Oid = 2951 T_txid_snapshot Oid = 2970 T_fdw_handler Oid = 3115 T_anyenum Oid = 3500 T_tsvector Oid = 3614 T_tsquery Oid = 3615 T_gtsvector Oid = 3642 T__tsvector Oid = 3643 T__gtsvector Oid = 3644 T__tsquery Oid = 3645 T_regconfig Oid = 3734 T__regconfig Oid = 3735 T_regdictionary Oid = 3769 T__regdictionary Oid = 3770 T_anyrange Oid = 3831 T_event_trigger Oid = 3838 T_int4range Oid = 3904 T__int4range Oid = 3905 T_numrange Oid = 3906 T__numrange Oid = 3907 T_tsrange Oid = 3908 T__tsrange Oid = 3909 T_tstzrange Oid = 3910 T__tstzrange Oid = 3911 T_daterange Oid = 3912 T__daterange Oid = 3913 T_int8range Oid = 3926 T__int8range Oid = 3927 )
vendor/github.com/lib/pq/oid/types.go
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.0001772409596014768, 0.00017243657202925533, 0.00016854985733516514, 0.00017264002235606313, 0.000002395904630247969 ]
{ "id": 1, "code_window": [ "}\n", "\n", "// generateScripts takes the configuration and creates a script from each inline config\n", "func generateScripts(d *schema.ResourceData) ([]string, error) {\n", "\tvar scripts []string\n", "\tfor _, l := range d.Get(\"inline\").([]interface{}) {\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep" ], "after_edit": [ "\tvar lines []string\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner.go", "type": "replace", "edit_start_line_idx": 80 }
Mozilla Public License, version 2.0 1. Definitions 1.1. “Contributor” means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. 1.2. “Contributor Version” means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor’s Contribution. 1.3. “Contribution” means Covered Software of a particular Contributor. 1.4. “Covered Software” means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. 1.5. “Incompatible With Secondary Licenses” means a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or b. that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License. 1.6. “Executable Form” means any form of the work other than Source Code Form. 1.7. “Larger Work” means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. 1.8. “License” means this document. 1.9. “Licensable” means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License. 1.10. “Modifications” means any of the following: a. any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or b. any new file in Source Code Form that contains any Covered Software. 1.11. “Patent Claims” of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version. 1.12. “Secondary License” means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. 1.13. “Source Code Form” means the form of the work preferred for making modifications. 1.14. “You” (or “Your”) means an individual or a legal entity exercising rights under this License. For legal entities, “You” includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, “control” means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. 2. License Grants and Conditions 2.1. Grants Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: a. under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and b. under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version. 2.2. 
Effective Date The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution. 2.3. Limitations on Grant Scope The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor: a. for any code that a Contributor has removed from Covered Software; or b. for infringements caused by: (i) Your and any other third party’s modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or c. under Patent Claims infringed by Covered Software in the absence of its Contributions. This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4). 2.4. Subsequent Licenses No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3). 2.5. Representation Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License. 2.6. Fair Use This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents. 2.7. Conditions Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1. 3. Responsibilities 3.1. Distribution of Source Form All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients’ rights in the Source Code Form. 3.2. Distribution of Executable Form If You distribute Covered Software in Executable Form then: a. such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and b. You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients’ rights in the Source Code Form under this License. 3.3. Distribution of a Larger Work You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. 
If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s). 3.4. Notices You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies. 3.5. Application of Additional Terms You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction. 4. Inability to Comply Due to Statute or Regulation If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. 5. Termination 5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice. 5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate. 5.3. 
In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination. 6. Disclaimer of Warranty Covered Software is provided under this License on an “as is” basis, without warranty of any kind, either expressed, implied, or statutory, including, without limitation, warranties that the Covered Software is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire risk as to the quality and performance of the Covered Software is with You. Should any Covered Software prove defective in any respect, You (not any Contributor) assume the cost of any necessary servicing, repair, or correction. This disclaimer of warranty constitutes an essential part of this License. No use of any Covered Software is authorized under this License except under this disclaimer. 7. Limitation of Liability Under no circumstances and under no legal theory, whether tort (including negligence), contract, or otherwise, shall any Contributor, or anyone who distributes Covered Software as permitted above, be liable to You for any direct, indirect, special, incidental, or consequential damages of any character including, without limitation, damages for lost profits, loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability shall not apply to liability for death or personal injury resulting from such party’s negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You. 8. Litigation Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party’s ability to bring cross-claims or counter-claims. 9. Miscellaneous This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor. 10. Versions of the License 10.1. New Versions Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number. 10.2. Effect of New Versions You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward. 10.3. 
Modified Versions If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License). 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached. Exhibit A - Source Code Form License Notice This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice. You may add additional accurate notices of copyright ownership. Exhibit B - “Incompatible With Secondary Licenses” Notice This Source Code Form is “Incompatible With Secondary Licenses”, as defined by the Mozilla Public License, v. 2.0.
LICENSE
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.0001777049619704485, 0.00017405941616743803, 0.00016936440079007298, 0.00017387623665854335, 0.0000019511905975377886 ]
{ "id": 1, "code_window": [ "}\n", "\n", "// generateScripts takes the configuration and creates a script from each inline config\n", "func generateScripts(d *schema.ResourceData) ([]string, error) {\n", "\tvar scripts []string\n", "\tfor _, l := range d.Get(\"inline\").([]interface{}) {\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep" ], "after_edit": [ "\tvar lines []string\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner.go", "type": "replace", "edit_start_line_idx": 80 }
--- layout: "dyn" page_title: "Provider: Dyn" sidebar_current: "docs-dyn-index" description: |- The Dyn provider is used to interact with the resources supported by Dyn. The provider needs to be configured with the proper credentials before it can be used. --- # Dyn Provider The Dyn provider is used to interact with the resources supported by Dyn. The provider needs to be configured with the proper credentials before it can be used. Use the navigation to the left to read about the available resources. ## Example Usage ``` # Configure the Dyn provider provider "dyn" { customer_name = "${var.dyn_customer_name}" username = "${var.dyn_username}" password = "${var.dyn_password}" } # Create a record resource "dyn_record" "www" { ... } ``` ## Argument Reference The following arguments are supported: * `customer_name` - (Required) The Dyn customer name. It must be provided, but it can also be sourced from the `DYN_CUSTOMER_NAME` environment variable. * `username` - (Required) The Dyn username. It must be provided, but it can also be sourced from the `DYN_USERNAME` environment variable. * `password` - (Required) The Dyn password. It must be provided, but it can also be sourced from the `DYN_PASSWORD` environment variable.
website/source/docs/providers/dyn/index.html.markdown
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.00017070195463020355, 0.00016711086209397763, 0.0001647214376134798, 0.0001665100280661136, 0.0000022150679797050543 ]
{ "id": 2, "code_window": [ "\tfor _, l := range d.Get(\"inline\").([]interface{}) {\n", "\t\tscripts = append(scripts, l.(string))\n", "\t}\n" ], "labels": [ "keep", "replace", "keep" ], "after_edit": [ "\t\tlines = append(lines, l.(string))\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner.go", "type": "replace", "edit_start_line_idx": 82 }
package remoteexec import ( "bytes" "io" "strings" "testing" "reflect" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" ) func TestResourceProvider_Validate_good(t *testing.T) { c := testConfig(t, map[string]interface{}{ "inline": "echo foo", }) p := Provisioner() warn, errs := p.Validate(c) if len(warn) > 0 { t.Fatalf("Warnings: %v", warn) } if len(errs) > 0 { t.Fatalf("Errors: %v", errs) } } func TestResourceProvider_Validate_bad(t *testing.T) { c := testConfig(t, map[string]interface{}{ "invalid": "nope", }) p := Provisioner() warn, errs := p.Validate(c) if len(warn) > 0 { t.Fatalf("Warnings: %v", warn) } if len(errs) == 0 { t.Fatalf("Should have errors") } } var expectedScriptOut = `cd /tmp wget http://foobar exit 0 ` var expectedInlineScriptsOut = strings.Split(expectedScriptOut, "\n") func TestResourceProvider_generateScript(t *testing.T) { p := Provisioner().(*schema.Provisioner) conf := map[string]interface{}{ "inline": []interface{}{ "cd /tmp", "wget http://foobar", "exit 0", }, } out, err := generateScripts(schema.TestResourceDataRaw( t, p.Schema, conf)) if err != nil { t.Fatalf("err: %v", err) } if reflect.DeepEqual(out, expectedInlineScriptsOut) { t.Fatalf("bad: %v", out) } } func TestResourceProvider_CollectScripts_inline(t *testing.T) { p := Provisioner().(*schema.Provisioner) conf := map[string]interface{}{ "inline": []interface{}{ "cd /tmp", "wget http://foobar", "exit 0", }, } scripts, err := collectScripts(schema.TestResourceDataRaw( t, p.Schema, conf)) if err != nil { t.Fatalf("err: %v", err) } if len(scripts) != 3 { t.Fatalf("bad: %v", scripts) } for i, script := range scripts { var out bytes.Buffer _, err = io.Copy(&out, script) if err != nil { t.Fatalf("err: %v", err) } if out.String() != expectedInlineScriptsOut[i] { t.Fatalf("bad: %v", out.String()) } } } func TestResourceProvider_CollectScripts_script(t *testing.T) { p := Provisioner().(*schema.Provisioner) conf := map[string]interface{}{ "script": "test-fixtures/script1.sh", } scripts, err := collectScripts(schema.TestResourceDataRaw( t, p.Schema, conf)) if err != nil { t.Fatalf("err: %v", err) } if len(scripts) != 1 { t.Fatalf("bad: %v", scripts) } var out bytes.Buffer _, err = io.Copy(&out, scripts[0]) if err != nil { t.Fatalf("err: %v", err) } if out.String() != expectedScriptOut { t.Fatalf("bad: %v", out.String()) } } func TestResourceProvider_CollectScripts_scripts(t *testing.T) { p := Provisioner().(*schema.Provisioner) conf := map[string]interface{}{ "scripts": []interface{}{ "test-fixtures/script1.sh", "test-fixtures/script1.sh", "test-fixtures/script1.sh", }, } scripts, err := collectScripts(schema.TestResourceDataRaw( t, p.Schema, conf)) if err != nil { t.Fatalf("err: %v", err) } if len(scripts) != 3 { t.Fatalf("bad: %v", scripts) } for idx := range scripts { var out bytes.Buffer _, err = io.Copy(&out, scripts[idx]) if err != nil { t.Fatalf("err: %v", err) } if out.String() != expectedScriptOut { t.Fatalf("bad: %v", out.String()) } } } func testConfig( t *testing.T, c map[string]interface{}) *terraform.ResourceConfig { r, err := config.NewRawConfig(c) if err != nil { t.Fatalf("bad: %s", err) } return terraform.NewResourceConfig(r) }
builtin/provisioners/remote-exec/resource_provisioner_test.go
1
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.9926562309265137, 0.2260279655456543, 0.000168900573044084, 0.0038997074589133263, 0.4084044396877289 ]
{ "id": 2, "code_window": [ "\tfor _, l := range d.Get(\"inline\").([]interface{}) {\n", "\t\tscripts = append(scripts, l.(string))\n", "\t}\n" ], "labels": [ "keep", "replace", "keep" ], "after_edit": [ "\t\tlines = append(lines, l.(string))\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner.go", "type": "replace", "edit_start_line_idx": 82 }
--- layout: "docs" page_title: "Configuring Atlas" sidebar_current: "docs-config-atlas" description: |- Atlas is the ideal way to use Terraform in a team environment. Atlas will run Terraform for you, safely handle parallelization across different team members, save run history along with plans, and more. --- # Atlas Configuration Terraform can be configured to be able to upload to HashiCorp's [Atlas](https://atlas.hashicorp.com). This configuration doesn't change the behavior of Terraform itself, it only configures your Terraform configuration to support being uploaded to Atlas via the [push command](/docs/commands/push.html). For more information on the benefits of uploading your Terraform configuration to Atlas, please see the [push command documentation](/docs/commands/push.html). This page assumes you're familiar with the [configuration syntax](/docs/configuration/syntax.html) already. ## Example Atlas configuration looks like the following: ``` atlas { name = "mitchellh/production-example" } ``` ## Description The `atlas` block configures the settings when Terraform is [pushed](/docs/commands/push.html) to Atlas. Only one `atlas` block is allowed. Within the block (the `{ }`) is configuration for Atlas uploading. No keys are required, but the key typically set is `name`. **No value within the `atlas` block can use interpolations.** Due to the nature of this configuration, interpolations are not possible. If you want to parameterize these settings, use the Atlas block to set defaults, then use the command-line flags of the [push command](/docs/commands/push.html) to override. ## Syntax The full syntax is: ``` atlas { name = VALUE } ```
website/source/docs/configuration/atlas.html.md
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.0001734045217745006, 0.00016902123752515763, 0.00016579673683736473, 0.0001692485820967704, 0.0000025608142095734365 ]
{ "id": 2, "code_window": [ "\tfor _, l := range d.Get(\"inline\").([]interface{}) {\n", "\t\tscripts = append(scripts, l.(string))\n", "\t}\n" ], "labels": [ "keep", "replace", "keep" ], "after_edit": [ "\t\tlines = append(lines, l.(string))\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner.go", "type": "replace", "edit_start_line_idx": 82 }
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package inspector import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) // Amazon Inspector enables you to analyze the behavior of your AWS resources // and to identify potential security issues. For more information, see Amazon // Inspector User Guide (http://docs.aws.amazon.com/inspector/latest/userguide/inspector_introduction.html). // The service client's operations are safe to be used concurrently. // It is not safe to mutate any of the client's properties though. // Please also see https://docs.aws.amazon.com/goto/WebAPI/inspector-2016-02-16 type Inspector struct { *client.Client } // Used for custom client initialization logic var initClient func(*client.Client) // Used for custom request initialization logic var initRequest func(*request.Request) // Service information constants const ( ServiceName = "inspector" // Service endpoint prefix API calls made to. EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. ) // New creates a new instance of the Inspector client with a session. // If additional configuration is needed for the client instance use the optional // aws.Config parameter to add your extra config. // // Example: // // Create a Inspector client from just a session. // svc := inspector.New(mySession) // // // Create a Inspector client with additional configuration // svc := inspector.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Inspector { c := p.ClientConfig(EndpointsID, cfgs...) return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Inspector { svc := &Inspector{ Client: client.New( cfg, metadata.ClientInfo{ ServiceName: ServiceName, SigningName: signingName, SigningRegion: signingRegion, Endpoint: endpoint, APIVersion: "2016-02-16", JSONVersion: "1.1", TargetPrefix: "InspectorService", }, handlers, ), } // Handlers svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) // Run custom client initialization if present if initClient != nil { initClient(svc.Client) } return svc } // newRequest creates a new request for a Inspector operation and runs any // custom request initialization. func (c *Inspector) newRequest(op *request.Operation, params, data interface{}) *request.Request { req := c.NewRequest(op, params, data) // Run custom request initialization if present if initRequest != nil { initRequest(req) } return req }
vendor/github.com/aws/aws-sdk-go/service/inspector/service.go
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.00017582502914592624, 0.0001695698592811823, 0.0001645663141971454, 0.00016942454385571182, 0.0000032638220091030234 ]
{ "id": 2, "code_window": [ "\tfor _, l := range d.Get(\"inline\").([]interface{}) {\n", "\t\tscripts = append(scripts, l.(string))\n", "\t}\n" ], "labels": [ "keep", "replace", "keep" ], "after_edit": [ "\t\tlines = append(lines, l.(string))\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner.go", "type": "replace", "edit_start_line_idx": 82 }
package client import ( "fmt" "net/http/httputil" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" ) // A Config provides configuration to a service client instance. type Config struct { Config *aws.Config Handlers request.Handlers Endpoint string SigningRegion string SigningName string } // ConfigProvider provides a generic way for a service client to receive // the ClientConfig without circular dependencies. type ConfigProvider interface { ClientConfig(serviceName string, cfgs ...*aws.Config) Config } // A Client implements the base client request and response handling // used by all service clients. type Client struct { request.Retryer metadata.ClientInfo Config aws.Config Handlers request.Handlers } // New will return a pointer to a new initialized service client. func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { svc := &Client{ Config: cfg, ClientInfo: info, Handlers: handlers, } switch retryer, ok := cfg.Retryer.(request.Retryer); { case ok: svc.Retryer = retryer case cfg.Retryer != nil && cfg.Logger != nil: s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) cfg.Logger.Log(s) fallthrough default: maxRetries := aws.IntValue(cfg.MaxRetries) if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { maxRetries = 3 } svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} } svc.AddDebugHandlers() for _, option := range options { option(svc) } return svc } // NewRequest returns a new Request pointer for the service API // operation and parameters. func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) } // AddDebugHandlers injects debug logging handlers into the service to log request // debug information. func (c *Client) AddDebugHandlers() { if !c.Config.LogLevel.AtLeast(aws.LogDebug) { return } c.Handlers.Send.PushFront(logRequest) c.Handlers.Send.PushBack(logResponse) } const logReqMsg = `DEBUG: Request %s/%s Details: ---[ REQUEST POST-SIGN ]----------------------------- %s -----------------------------------------------------` const logReqErrMsg = `DEBUG ERROR: Request %s/%s: ---[ REQUEST DUMP ERROR ]----------------------------- %s -----------------------------------------------------` func logRequest(r *request.Request) { logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) if err != nil { r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err)) return } if logBody { // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's // Body as a NoOpCloser and will not be reset after read by the HTTP // client reader. 
r.ResetBody() } r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) } const logRespMsg = `DEBUG: Response %s/%s Details: ---[ RESPONSE ]-------------------------------------- %s -----------------------------------------------------` const logRespErrMsg = `DEBUG ERROR: Response %s/%s: ---[ RESPONSE DUMP ERROR ]----------------------------- %s -----------------------------------------------------` func logResponse(r *request.Request) { var msg = "no response data" if r.HTTPResponse != nil { logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) dumpedBody, err := httputil.DumpResponse(r.HTTPResponse, logBody) if err != nil { r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err)) return } msg = string(dumpedBody) } else if r.Error != nil { msg = r.Error.Error() } r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg)) }
vendor/github.com/aws/aws-sdk-go/aws/client/client.go
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.0012990389950573444, 0.00025255067157559097, 0.00016440100444015115, 0.00016736533143557608, 0.00029065701528452337 ]
{ "id": 3, "code_window": [ "\t}\n", "\treturn scripts, nil\n", "}\n", "\n", "// collectScripts is used to collect all the scripts we need\n", "// to execute in preparation for copying them.\n", "func collectScripts(d *schema.ResourceData) ([]io.ReadCloser, error) {\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tlines = append(lines, \"\")\n", "\n", "\treturn []string{strings.Join(lines, \"\\n\")}, nil\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner.go", "type": "replace", "edit_start_line_idx": 84 }
package remoteexec import ( "bytes" "io" "strings" "testing" "reflect" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" ) func TestResourceProvider_Validate_good(t *testing.T) { c := testConfig(t, map[string]interface{}{ "inline": "echo foo", }) p := Provisioner() warn, errs := p.Validate(c) if len(warn) > 0 { t.Fatalf("Warnings: %v", warn) } if len(errs) > 0 { t.Fatalf("Errors: %v", errs) } } func TestResourceProvider_Validate_bad(t *testing.T) { c := testConfig(t, map[string]interface{}{ "invalid": "nope", }) p := Provisioner() warn, errs := p.Validate(c) if len(warn) > 0 { t.Fatalf("Warnings: %v", warn) } if len(errs) == 0 { t.Fatalf("Should have errors") } } var expectedScriptOut = `cd /tmp wget http://foobar exit 0 ` var expectedInlineScriptsOut = strings.Split(expectedScriptOut, "\n") func TestResourceProvider_generateScript(t *testing.T) { p := Provisioner().(*schema.Provisioner) conf := map[string]interface{}{ "inline": []interface{}{ "cd /tmp", "wget http://foobar", "exit 0", }, } out, err := generateScripts(schema.TestResourceDataRaw( t, p.Schema, conf)) if err != nil { t.Fatalf("err: %v", err) } if reflect.DeepEqual(out, expectedInlineScriptsOut) { t.Fatalf("bad: %v", out) } } func TestResourceProvider_CollectScripts_inline(t *testing.T) { p := Provisioner().(*schema.Provisioner) conf := map[string]interface{}{ "inline": []interface{}{ "cd /tmp", "wget http://foobar", "exit 0", }, } scripts, err := collectScripts(schema.TestResourceDataRaw( t, p.Schema, conf)) if err != nil { t.Fatalf("err: %v", err) } if len(scripts) != 3 { t.Fatalf("bad: %v", scripts) } for i, script := range scripts { var out bytes.Buffer _, err = io.Copy(&out, script) if err != nil { t.Fatalf("err: %v", err) } if out.String() != expectedInlineScriptsOut[i] { t.Fatalf("bad: %v", out.String()) } } } func TestResourceProvider_CollectScripts_script(t *testing.T) { p := Provisioner().(*schema.Provisioner) conf := map[string]interface{}{ "script": "test-fixtures/script1.sh", } scripts, err := collectScripts(schema.TestResourceDataRaw( t, p.Schema, conf)) if err != nil { t.Fatalf("err: %v", err) } if len(scripts) != 1 { t.Fatalf("bad: %v", scripts) } var out bytes.Buffer _, err = io.Copy(&out, scripts[0]) if err != nil { t.Fatalf("err: %v", err) } if out.String() != expectedScriptOut { t.Fatalf("bad: %v", out.String()) } } func TestResourceProvider_CollectScripts_scripts(t *testing.T) { p := Provisioner().(*schema.Provisioner) conf := map[string]interface{}{ "scripts": []interface{}{ "test-fixtures/script1.sh", "test-fixtures/script1.sh", "test-fixtures/script1.sh", }, } scripts, err := collectScripts(schema.TestResourceDataRaw( t, p.Schema, conf)) if err != nil { t.Fatalf("err: %v", err) } if len(scripts) != 3 { t.Fatalf("bad: %v", scripts) } for idx := range scripts { var out bytes.Buffer _, err = io.Copy(&out, scripts[idx]) if err != nil { t.Fatalf("err: %v", err) } if out.String() != expectedScriptOut { t.Fatalf("bad: %v", out.String()) } } } func testConfig( t *testing.T, c map[string]interface{}) *terraform.ResourceConfig { r, err := config.NewRawConfig(c) if err != nil { t.Fatalf("bad: %s", err) } return terraform.NewResourceConfig(r) }
builtin/provisioners/remote-exec/resource_provisioner_test.go
1
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.999350368976593, 0.2995012402534485, 0.00016848479572217911, 0.042022816836833954, 0.3954034447669983 ]
{ "id": 3, "code_window": [ "\t}\n", "\treturn scripts, nil\n", "}\n", "\n", "// collectScripts is used to collect all the scripts we need\n", "// to execute in preparation for copying them.\n", "func collectScripts(d *schema.ResourceData) ([]io.ReadCloser, error) {\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tlines = append(lines, \"\")\n", "\n", "\treturn []string{strings.Join(lines, \"\\n\")}, nil\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner.go", "type": "replace", "edit_start_line_idx": 84 }
{ "resource" : { "aws_security_group" : { "allow_external_http_https" : { "tags" : { "Name" : "allow_external_http_https" } } } } }
config/test-fixtures/resource-no-name.tf.json
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.00017338634643238038, 0.0001712751400191337, 0.00016916394815780222, 0.0001712751400191337, 0.000002111199137289077 ]
{ "id": 3, "code_window": [ "\t}\n", "\treturn scripts, nil\n", "}\n", "\n", "// collectScripts is used to collect all the scripts we need\n", "// to execute in preparation for copying them.\n", "func collectScripts(d *schema.ResourceData) ([]io.ReadCloser, error) {\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tlines = append(lines, \"\")\n", "\n", "\treturn []string{strings.Join(lines, \"\\n\")}, nil\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner.go", "type": "replace", "edit_start_line_idx": 84 }
package client const ( GLOBAL_LOAD_BALANCER_TYPE = "globalLoadBalancer" ) type GlobalLoadBalancer struct { Resource AccountId string `json:"accountId,omitempty" yaml:"account_id,omitempty"` Created string `json:"created,omitempty" yaml:"created,omitempty"` Data map[string]interface{} `json:"data,omitempty" yaml:"data,omitempty"` Description string `json:"description,omitempty" yaml:"description,omitempty"` GlobalLoadBalancerHealthCheck []interface{} `json:"globalLoadBalancerHealthCheck,omitempty" yaml:"global_load_balancer_health_check,omitempty"` GlobalLoadBalancerPolicy []interface{} `json:"globalLoadBalancerPolicy,omitempty" yaml:"global_load_balancer_policy,omitempty"` Kind string `json:"kind,omitempty" yaml:"kind,omitempty"` Name string `json:"name,omitempty" yaml:"name,omitempty"` RemoveTime string `json:"removeTime,omitempty" yaml:"remove_time,omitempty"` Removed string `json:"removed,omitempty" yaml:"removed,omitempty"` State string `json:"state,omitempty" yaml:"state,omitempty"` Transitioning string `json:"transitioning,omitempty" yaml:"transitioning,omitempty"` TransitioningMessage string `json:"transitioningMessage,omitempty" yaml:"transitioning_message,omitempty"` TransitioningProgress int64 `json:"transitioningProgress,omitempty" yaml:"transitioning_progress,omitempty"` Uuid string `json:"uuid,omitempty" yaml:"uuid,omitempty"` } type GlobalLoadBalancerCollection struct { Collection Data []GlobalLoadBalancer `json:"data,omitempty"` } type GlobalLoadBalancerClient struct { rancherClient *RancherClient } type GlobalLoadBalancerOperations interface { List(opts *ListOpts) (*GlobalLoadBalancerCollection, error) Create(opts *GlobalLoadBalancer) (*GlobalLoadBalancer, error) Update(existing *GlobalLoadBalancer, updates interface{}) (*GlobalLoadBalancer, error) ById(id string) (*GlobalLoadBalancer, error) Delete(container *GlobalLoadBalancer) error ActionAddloadbalancer(*GlobalLoadBalancer, *AddLoadBalancerInput) (*GlobalLoadBalancer, error) ActionCreate(*GlobalLoadBalancer) (*GlobalLoadBalancer, error) ActionRemove(*GlobalLoadBalancer) (*GlobalLoadBalancer, error) ActionRemoveloadbalancer(*GlobalLoadBalancer, *RemoveLoadBalancerInput) (*GlobalLoadBalancer, error) } func newGlobalLoadBalancerClient(rancherClient *RancherClient) *GlobalLoadBalancerClient { return &GlobalLoadBalancerClient{ rancherClient: rancherClient, } } func (c *GlobalLoadBalancerClient) Create(container *GlobalLoadBalancer) (*GlobalLoadBalancer, error) { resp := &GlobalLoadBalancer{} err := c.rancherClient.doCreate(GLOBAL_LOAD_BALANCER_TYPE, container, resp) return resp, err } func (c *GlobalLoadBalancerClient) Update(existing *GlobalLoadBalancer, updates interface{}) (*GlobalLoadBalancer, error) { resp := &GlobalLoadBalancer{} err := c.rancherClient.doUpdate(GLOBAL_LOAD_BALANCER_TYPE, &existing.Resource, updates, resp) return resp, err } func (c *GlobalLoadBalancerClient) List(opts *ListOpts) (*GlobalLoadBalancerCollection, error) { resp := &GlobalLoadBalancerCollection{} err := c.rancherClient.doList(GLOBAL_LOAD_BALANCER_TYPE, opts, resp) return resp, err } func (c *GlobalLoadBalancerClient) ById(id string) (*GlobalLoadBalancer, error) { resp := &GlobalLoadBalancer{} err := c.rancherClient.doById(GLOBAL_LOAD_BALANCER_TYPE, id, resp) if apiError, ok := err.(*ApiError); ok { if apiError.StatusCode == 404 { return nil, nil } } return resp, err } func (c *GlobalLoadBalancerClient) Delete(container *GlobalLoadBalancer) error { return c.rancherClient.doResourceDelete(GLOBAL_LOAD_BALANCER_TYPE, &container.Resource) } func 
(c *GlobalLoadBalancerClient) ActionAddloadbalancer(resource *GlobalLoadBalancer, input *AddLoadBalancerInput) (*GlobalLoadBalancer, error) { resp := &GlobalLoadBalancer{} err := c.rancherClient.doAction(GLOBAL_LOAD_BALANCER_TYPE, "addloadbalancer", &resource.Resource, input, resp) return resp, err } func (c *GlobalLoadBalancerClient) ActionCreate(resource *GlobalLoadBalancer) (*GlobalLoadBalancer, error) { resp := &GlobalLoadBalancer{} err := c.rancherClient.doAction(GLOBAL_LOAD_BALANCER_TYPE, "create", &resource.Resource, nil, resp) return resp, err } func (c *GlobalLoadBalancerClient) ActionRemove(resource *GlobalLoadBalancer) (*GlobalLoadBalancer, error) { resp := &GlobalLoadBalancer{} err := c.rancherClient.doAction(GLOBAL_LOAD_BALANCER_TYPE, "remove", &resource.Resource, nil, resp) return resp, err } func (c *GlobalLoadBalancerClient) ActionRemoveloadbalancer(resource *GlobalLoadBalancer, input *RemoveLoadBalancerInput) (*GlobalLoadBalancer, error) { resp := &GlobalLoadBalancer{} err := c.rancherClient.doAction(GLOBAL_LOAD_BALANCER_TYPE, "removeloadbalancer", &resource.Resource, input, resp) return resp, err }
vendor/github.com/rancher/go-rancher/client/generated_global_load_balancer.go
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.0003617508918978274, 0.00020492561452556401, 0.00016329552454408258, 0.00018630977137945592, 0.000052600687922677025 ]
{ "id": 3, "code_window": [ "\t}\n", "\treturn scripts, nil\n", "}\n", "\n", "// collectScripts is used to collect all the scripts we need\n", "// to execute in preparation for copying them.\n", "func collectScripts(d *schema.ResourceData) ([]io.ReadCloser, error) {\n" ], "labels": [ "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tlines = append(lines, \"\")\n", "\n", "\treturn []string{strings.Join(lines, \"\\n\")}, nil\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner.go", "type": "replace", "edit_start_line_idx": 84 }
// Copyright 2015 go-dockerclient authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package docker import ( "encoding/json" "errors" "fmt" "io" "math" "net" "net/http" "net/http/httputil" "sync" "sync/atomic" "time" ) // APIEvents represents events coming from the Docker API // The fields in the Docker API changed in API version 1.22, and // events for more than images and containers are now fired off. // To maintain forward and backward compatibility, go-dockerclient // replicates the event in both the new and old format as faithfully as possible. // // For events that only exist in 1.22 in later, `Status` is filled in as // `"Type:Action"` instead of just `Action` to allow for older clients to // differentiate and not break if they rely on the pre-1.22 Status types. // // The transformEvent method can be consulted for more information about how // events are translated from new/old API formats type APIEvents struct { // New API Fields in 1.22 Action string `json:"action,omitempty"` Type string `json:"type,omitempty"` Actor APIActor `json:"actor,omitempty"` // Old API fields for < 1.22 Status string `json:"status,omitempty"` ID string `json:"id,omitempty"` From string `json:"from,omitempty"` // Fields in both Time int64 `json:"time,omitempty"` TimeNano int64 `json:"timeNano,omitempty"` } // APIActor represents an actor that accomplishes something for an event type APIActor struct { ID string `json:"id,omitempty"` Attributes map[string]string `json:"attributes,omitempty"` } type eventMonitoringState struct { sync.RWMutex sync.WaitGroup enabled bool lastSeen int64 C chan *APIEvents errC chan error listeners []chan<- *APIEvents } const ( maxMonitorConnRetries = 5 retryInitialWaitTime = 10. ) var ( // ErrNoListeners is the error returned when no listeners are available // to receive an event. ErrNoListeners = errors.New("no listeners present to receive event") // ErrListenerAlreadyExists is the error returned when the listerner already // exists. ErrListenerAlreadyExists = errors.New("listener already exists for docker events") // EOFEvent is sent when the event listener receives an EOF error. EOFEvent = &APIEvents{ Type: "EOF", Status: "EOF", } ) // AddEventListener adds a new listener to container events in the Docker API. // // The parameter is a channel through which events will be sent. func (c *Client) AddEventListener(listener chan<- *APIEvents) error { var err error if !c.eventMonitor.isEnabled() { err = c.eventMonitor.enableEventMonitoring(c) if err != nil { return err } } err = c.eventMonitor.addListener(listener) if err != nil { return err } return nil } // RemoveEventListener removes a listener from the monitor. 
func (c *Client) RemoveEventListener(listener chan *APIEvents) error { err := c.eventMonitor.removeListener(listener) if err != nil { return err } if len(c.eventMonitor.listeners) == 0 { c.eventMonitor.disableEventMonitoring() } return nil } func (eventState *eventMonitoringState) addListener(listener chan<- *APIEvents) error { eventState.Lock() defer eventState.Unlock() if listenerExists(listener, &eventState.listeners) { return ErrListenerAlreadyExists } eventState.Add(1) eventState.listeners = append(eventState.listeners, listener) return nil } func (eventState *eventMonitoringState) removeListener(listener chan<- *APIEvents) error { eventState.Lock() defer eventState.Unlock() if listenerExists(listener, &eventState.listeners) { var newListeners []chan<- *APIEvents for _, l := range eventState.listeners { if l != listener { newListeners = append(newListeners, l) } } eventState.listeners = newListeners eventState.Add(-1) } return nil } func (eventState *eventMonitoringState) closeListeners() { for _, l := range eventState.listeners { close(l) eventState.Add(-1) } eventState.listeners = nil } func listenerExists(a chan<- *APIEvents, list *[]chan<- *APIEvents) bool { for _, b := range *list { if b == a { return true } } return false } func (eventState *eventMonitoringState) enableEventMonitoring(c *Client) error { eventState.Lock() defer eventState.Unlock() if !eventState.enabled { eventState.enabled = true atomic.StoreInt64(&eventState.lastSeen, 0) eventState.C = make(chan *APIEvents, 100) eventState.errC = make(chan error, 1) go eventState.monitorEvents(c) } return nil } func (eventState *eventMonitoringState) disableEventMonitoring() error { eventState.Lock() defer eventState.Unlock() eventState.closeListeners() eventState.Wait() if eventState.enabled { eventState.enabled = false close(eventState.C) close(eventState.errC) } return nil } func (eventState *eventMonitoringState) monitorEvents(c *Client) { var err error for eventState.noListeners() { time.Sleep(10 * time.Millisecond) } if err = eventState.connectWithRetry(c); err != nil { // terminate if connect failed eventState.disableEventMonitoring() return } for eventState.isEnabled() { timeout := time.After(100 * time.Millisecond) select { case ev, ok := <-eventState.C: if !ok { return } if ev == EOFEvent { eventState.disableEventMonitoring() return } eventState.updateLastSeen(ev) go eventState.sendEvent(ev) case err = <-eventState.errC: if err == ErrNoListeners { eventState.disableEventMonitoring() return } else if err != nil { defer func() { go eventState.monitorEvents(c) }() return } case <-timeout: continue } } } func (eventState *eventMonitoringState) connectWithRetry(c *Client) error { var retries int eventState.RLock() eventChan := eventState.C errChan := eventState.errC eventState.RUnlock() err := c.eventHijack(atomic.LoadInt64(&eventState.lastSeen), eventChan, errChan) for ; err != nil && retries < maxMonitorConnRetries; retries++ { waitTime := int64(retryInitialWaitTime * math.Pow(2, float64(retries))) time.Sleep(time.Duration(waitTime) * time.Millisecond) eventState.RLock() eventChan = eventState.C errChan = eventState.errC eventState.RUnlock() err = c.eventHijack(atomic.LoadInt64(&eventState.lastSeen), eventChan, errChan) } return err } func (eventState *eventMonitoringState) noListeners() bool { eventState.RLock() defer eventState.RUnlock() return len(eventState.listeners) == 0 } func (eventState *eventMonitoringState) isEnabled() bool { eventState.RLock() defer eventState.RUnlock() return eventState.enabled } func 
(eventState *eventMonitoringState) sendEvent(event *APIEvents) { eventState.RLock() defer eventState.RUnlock() eventState.Add(1) defer eventState.Done() if eventState.enabled { if len(eventState.listeners) == 0 { eventState.errC <- ErrNoListeners return } for _, listener := range eventState.listeners { listener <- event } } } func (eventState *eventMonitoringState) updateLastSeen(e *APIEvents) { eventState.Lock() defer eventState.Unlock() if atomic.LoadInt64(&eventState.lastSeen) < e.Time { atomic.StoreInt64(&eventState.lastSeen, e.Time) } } func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan chan error) error { uri := "/events" if startTime != 0 { uri += fmt.Sprintf("?since=%d", startTime) } protocol := c.endpointURL.Scheme address := c.endpointURL.Path if protocol != "unix" { protocol = "tcp" address = c.endpointURL.Host } var dial net.Conn var err error if c.TLSConfig == nil { dial, err = c.Dialer.Dial(protocol, address) } else { dial, err = tlsDialWithDialer(c.Dialer, protocol, address, c.TLSConfig) } if err != nil { return err } conn := httputil.NewClientConn(dial, nil) req, err := http.NewRequest("GET", uri, nil) if err != nil { return err } res, err := conn.Do(req) if err != nil { return err } go func(res *http.Response, conn *httputil.ClientConn) { defer conn.Close() defer res.Body.Close() decoder := json.NewDecoder(res.Body) for { var event APIEvents if err = decoder.Decode(&event); err != nil { if err == io.EOF || err == io.ErrUnexpectedEOF { c.eventMonitor.RLock() if c.eventMonitor.enabled && c.eventMonitor.C == eventChan { // Signal that we're exiting. eventChan <- EOFEvent } c.eventMonitor.RUnlock() break } errChan <- err } if event.Time == 0 { continue } if !c.eventMonitor.isEnabled() || c.eventMonitor.C != eventChan { return } transformEvent(&event) eventChan <- &event } }(res, conn) return nil } // transformEvent takes an event and determines what version it is from // then populates both versions of the event func transformEvent(event *APIEvents) { // if event version is <= 1.21 there will be no Action and no Type if event.Action == "" && event.Type == "" { event.Action = event.Status event.Actor.ID = event.ID event.Actor.Attributes = map[string]string{} switch event.Status { case "delete", "import", "pull", "push", "tag", "untag": event.Type = "image" default: event.Type = "container" if event.From != "" { event.Actor.Attributes["image"] = event.From } } } else { if event.Status == "" { if event.Type == "image" || event.Type == "container" { event.Status = event.Action } else { // Because just the Status has been overloaded with different Types // if an event is not for an image or a container, we prepend the type // to avoid problems for people relying on actions being only for // images and containers event.Status = event.Type + ":" + event.Action } } if event.ID == "" { event.ID = event.Actor.ID } if event.From == "" { event.From = event.Actor.Attributes["image"] } } }
vendor/github.com/fsouza/go-dockerclient/event.go
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.0008307092939503491, 0.00019213849736843258, 0.00016637174121569842, 0.00017044480773620307, 0.00010621104593155906 ]
{ "id": 4, "code_window": [ "package remoteexec\n", "\n", "import (\n", "\t\"bytes\"\n", "\t\"io\"\n", "\t\"strings\"\n", "\t\"testing\"\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep" ], "after_edit": [], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 5 }
package remoteexec import ( "bytes" "context" "fmt" "io" "io/ioutil" "log" "os" "sync/atomic" "time" "github.com/hashicorp/terraform/communicator" "github.com/hashicorp/terraform/communicator/remote" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" "github.com/mitchellh/go-linereader" ) func Provisioner() terraform.ResourceProvisioner { return &schema.Provisioner{ Schema: map[string]*schema.Schema{ "inline": &schema.Schema{ Type: schema.TypeList, Elem: &schema.Schema{Type: schema.TypeString}, PromoteSingle: true, Optional: true, ConflictsWith: []string{"script", "scripts"}, }, "script": &schema.Schema{ Type: schema.TypeString, Optional: true, ConflictsWith: []string{"inline", "scripts"}, }, "scripts": &schema.Schema{ Type: schema.TypeList, Elem: &schema.Schema{Type: schema.TypeString}, Optional: true, ConflictsWith: []string{"script", "inline"}, }, }, ApplyFunc: applyFn, } } // Apply executes the remote exec provisioner func applyFn(ctx context.Context) error { connState := ctx.Value(schema.ProvRawStateKey).(*terraform.InstanceState) data := ctx.Value(schema.ProvConfigDataKey).(*schema.ResourceData) o := ctx.Value(schema.ProvOutputKey).(terraform.UIOutput) // Get a new communicator comm, err := communicator.New(connState) if err != nil { return err } // Collect the scripts scripts, err := collectScripts(data) if err != nil { return err } for _, s := range scripts { defer s.Close() } // Copy and execute each script if err := runScripts(ctx, o, comm, scripts); err != nil { return err } return nil } // generateScripts takes the configuration and creates a script from each inline config func generateScripts(d *schema.ResourceData) ([]string, error) { var scripts []string for _, l := range d.Get("inline").([]interface{}) { scripts = append(scripts, l.(string)) } return scripts, nil } // collectScripts is used to collect all the scripts we need // to execute in preparation for copying them. func collectScripts(d *schema.ResourceData) ([]io.ReadCloser, error) { // Check if inline if _, ok := d.GetOk("inline"); ok { scripts, err := generateScripts(d) if err != nil { return nil, err } var r []io.ReadCloser for _, script := range scripts { r = append(r, ioutil.NopCloser(bytes.NewReader([]byte(script)))) } return r, nil } // Collect scripts var scripts []string if script, ok := d.GetOk("script"); ok { scripts = append(scripts, script.(string)) } if scriptList, ok := d.GetOk("scripts"); ok { for _, script := range scriptList.([]interface{}) { scripts = append(scripts, script.(string)) } } // Open all the scripts var fhs []io.ReadCloser for _, s := range scripts { fh, err := os.Open(s) if err != nil { for _, fh := range fhs { fh.Close() } return nil, fmt.Errorf("Failed to open script '%s': %v", s, err) } fhs = append(fhs, fh) } // Done, return the file handles return fhs, nil } // runScripts is used to copy and execute a set of scripts func runScripts( ctx context.Context, o terraform.UIOutput, comm communicator.Communicator, scripts []io.ReadCloser) error { // Wrap out context in a cancelation function that we use to // kill the connection. 
ctx, cancelFunc := context.WithCancel(ctx) defer cancelFunc() // Wait for the context to end and then disconnect go func() { <-ctx.Done() comm.Disconnect() }() // Wait and retry until we establish the connection err := retryFunc(ctx, comm.Timeout(), func() error { err := comm.Connect(o) return err }) if err != nil { return err } for _, script := range scripts { var cmd *remote.Cmd outR, outW := io.Pipe() errR, errW := io.Pipe() outDoneCh := make(chan struct{}) errDoneCh := make(chan struct{}) go copyOutput(o, outR, outDoneCh) go copyOutput(o, errR, errDoneCh) remotePath := comm.ScriptPath() err = retryFunc(ctx, comm.Timeout(), func() error { if err := comm.UploadScript(remotePath, script); err != nil { return fmt.Errorf("Failed to upload script: %v", err) } cmd = &remote.Cmd{ Command: remotePath, Stdout: outW, Stderr: errW, } if err := comm.Start(cmd); err != nil { return fmt.Errorf("Error starting script: %v", err) } return nil }) if err == nil { cmd.Wait() if cmd.ExitStatus != 0 { err = fmt.Errorf("Script exited with non-zero exit status: %d", cmd.ExitStatus) } } // If we have an error, end our context so the disconnect happens. // This has to happen before the output cleanup below since during // an interrupt this will cause the outputs to end. if err != nil { cancelFunc() } // Wait for output to clean up outW.Close() errW.Close() <-outDoneCh <-errDoneCh // Upload a blank follow up file in the same path to prevent residual // script contents from remaining on remote machine empty := bytes.NewReader([]byte("")) if err := comm.Upload(remotePath, empty); err != nil { // This feature is best-effort. log.Printf("[WARN] Failed to upload empty follow up script: %v", err) } // If we have an error, return it out now that we've cleaned up if err != nil { return err } } return nil } func copyOutput( o terraform.UIOutput, r io.Reader, doneCh chan<- struct{}) { defer close(doneCh) lr := linereader.New(r) for line := range lr.Ch { o.Output(line) } } // retryFunc is used to retry a function for a given duration func retryFunc(ctx context.Context, timeout time.Duration, f func() error) error { // Build a new context with the timeout ctx, done := context.WithTimeout(ctx, timeout) defer done() // Try the function in a goroutine var errVal atomic.Value doneCh := make(chan struct{}) go func() { defer close(doneCh) for { // If our context ended, we want to exit right away. select { case <-ctx.Done(): return default: } // Try the function call err := f() if err == nil { return } log.Printf("Retryable error: %v", err) errVal.Store(err) } }() // Wait for completion select { case <-doneCh: case <-ctx.Done(): } // Check if we have a context error to check if we're interrupted or timeout switch ctx.Err() { case context.Canceled: return fmt.Errorf("interrupted") case context.DeadlineExceeded: return fmt.Errorf("timeout") } // Check if we got an error executing if err, ok := errVal.Load().(error); ok { return err } return nil }
builtin/provisioners/remote-exec/resource_provisioner.go
1
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.006301620043814182, 0.00039328334969468415, 0.0001664598094066605, 0.00017304957145825028, 0.0011167842894792557 ]
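Hunks 1 through 3 above all describe the same rewrite of generateScripts in builtin/provisioners/remote-exec/resource_provisioner.go: instead of returning one script per inline entry, the inline lines are collected and joined into a single script. As a minimal sketch, assembling the unchanged context lines with the after_edit lines from those hunks (and assuming the "strings" package gets added to the provisioner's imports, which these hunks do not show), the resulting function would look roughly like this:

```
// generateScripts takes the configuration and creates a script from each inline config.
// Assumes "strings" is imported; that import is not part of the hunks shown here.
func generateScripts(d *schema.ResourceData) ([]string, error) {
	var lines []string
	for _, l := range d.Get("inline").([]interface{}) {
		lines = append(lines, l.(string))
	}
	lines = append(lines, "")

	return []string{strings.Join(lines, "\n")}, nil
}
```

The trailing empty element makes the joined script end with a newline, matching the trailing newline in expectedScriptOut in the test file shown earlier.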
{ "id": 4, "code_window": [ "package remoteexec\n", "\n", "import (\n", "\t\"bytes\"\n", "\t\"io\"\n", "\t\"strings\"\n", "\t\"testing\"\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep" ], "after_edit": [], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 5 }
package librato import ( "fmt" "net/http" ) // AlertsService handles communication with the Librato API methods related to // alerts. type AlertsService struct { client *Client } // Alert represents a Librato Alert. type Alert struct { Name *string `json:"name"` ID *uint `json:"id,omitempty"` Conditions []AlertCondition `json:"conditions,omitempty"` // These are interface{} because the Librato API asks for integers // on Create and returns hashes on Get Services interface{} `json:"services,omitempty"` Attributes *AlertAttributes `json:"attributes,omitempty"` Description *string `json:"description,omitempty"` Active *bool `json:"active,omitempty"` RearmSeconds *uint `json:"rearm_seconds,omitempty"` } func (a Alert) String() string { return Stringify(a) } // AlertCondition represents an alert trigger condition. type AlertCondition struct { Type *string `json:"type,omitempty"` MetricName *string `json:"metric_name,omitempty"` Source *string `json:"source,omitempty"` DetectReset *bool `json:"detect_reset,omitempty"` Threshold *float64 `json:"threshold,omitempty"` SummaryFunction *string `json:"summary_function,omitempty"` Duration *uint `json:"duration,omitempty"` } // AlertAttributes represents the attributes of an alert. type AlertAttributes struct { RunbookURL *string `json:"runbook_url,omitempty"` } // Get an alert by ID // // Librato API docs: https://www.librato.com/docs/api/#retrieve-alert-by-id func (a *AlertsService) Get(id uint) (*Alert, *http.Response, error) { urlStr := fmt.Sprintf("alerts/%d", id) req, err := a.client.NewRequest("GET", urlStr, nil) if err != nil { return nil, nil, err } alert := new(Alert) resp, err := a.client.Do(req, alert) if err != nil { return nil, resp, err } return alert, resp, err } // Create an alert // // Librato API docs: https://www.librato.com/docs/api/?shell#create-an-alert func (a *AlertsService) Create(alert *Alert) (*Alert, *http.Response, error) { req, err := a.client.NewRequest("POST", "alerts", alert) if err != nil { return nil, nil, err } al := new(Alert) resp, err := a.client.Do(req, al) if err != nil { return nil, resp, err } return al, resp, err } // Edit an alert. // // Librato API docs: https://www.librato.com/docs/api/?shell#update-alert func (a *AlertsService) Edit(alertID uint, alert *Alert) (*http.Response, error) { u := fmt.Sprintf("alerts/%d", alertID) req, err := a.client.NewRequest("PUT", u, alert) if err != nil { return nil, err } return a.client.Do(req, nil) } // Delete an alert // // Librato API docs: https://www.librato.com/docs/api/?shell#delete-alert func (a *AlertsService) Delete(id uint) (*http.Response, error) { u := fmt.Sprintf("alerts/%d", id) req, err := a.client.NewRequest("DELETE", u, nil) if err != nil { return nil, err } return a.client.Do(req, nil) }
vendor/github.com/henrikhodne/go-librato/librato/alerts.go
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.00018000105046667159, 0.00017185689648613334, 0.0001679639535723254, 0.00017046925495378673, 0.0000035532655147108017 ]
{ "id": 4, "code_window": [ "package remoteexec\n", "\n", "import (\n", "\t\"bytes\"\n", "\t\"io\"\n", "\t\"strings\"\n", "\t\"testing\"\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep" ], "after_edit": [], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 5 }
package rabbithole import ( "encoding/json" "net/http" "net/url" ) // // GET /api/bindings // // Example response: // // [ // { // "source": "", // "vhost": "\/", // "destination": "amq.gen-Dzw36tPTm_VsmILY9oTG9w", // "destination_type": "queue", // "routing_key": "amq.gen-Dzw36tPTm_VsmILY9oTG9w", // "arguments": { // // }, // "properties_key": "amq.gen-Dzw36tPTm_VsmILY9oTG9w" // } // ] type BindingInfo struct { // Binding source (exchange name) Source string `json:"source"` Vhost string `json:"vhost"` // Binding destination (queue or exchange name) Destination string `json:"destination"` // Destination type, either "queue" or "exchange" DestinationType string `json:"destination_type"` RoutingKey string `json:"routing_key"` Arguments map[string]interface{} `json:"arguments"` PropertiesKey string `json:"properties_key"` } // Returns all bindings func (c *Client) ListBindings() (rec []BindingInfo, err error) { req, err := newGETRequest(c, "bindings/") if err != nil { return []BindingInfo{}, err } if err = executeAndParseRequest(c, req, &rec); err != nil { return []BindingInfo{}, err } return rec, nil } // // GET /api/bindings/{vhost} // // Returns all bindings in a virtual host. func (c *Client) ListBindingsIn(vhost string) (rec []BindingInfo, err error) { req, err := newGETRequest(c, "bindings/"+url.QueryEscape(vhost)) if err != nil { return []BindingInfo{}, err } if err = executeAndParseRequest(c, req, &rec); err != nil { return []BindingInfo{}, err } return rec, nil } // // GET /api/queues/{vhost}/{queue}/bindings // // Example response: // [ // {"source":"", // "vhost":"/", // "destination":"amq.gen-H0tnavWatL7g7uU2q5cAPA", // "destination_type":"queue", // "routing_key":"amq.gen-H0tnavWatL7g7uU2q5cAPA", // "arguments":{}, // "properties_key":"amq.gen-H0tnavWatL7g7uU2q5cAPA"}, // {"source":"temp", // "vhost":"/", // "destination":"amq.gen-H0tnavWatL7g7uU2q5cAPA", // "destination_type":"queue", // "routing_key":"", // "arguments":{}, // "properties_key":"~"} // ] // Returns all bindings of individual queue. 
func (c *Client) ListQueueBindings(vhost, queue string) (rec []BindingInfo, err error) { req, err := newGETRequest(c, "queues/"+url.QueryEscape(vhost)+"/"+url.QueryEscape(queue)+"/bindings") if err != nil { return []BindingInfo{}, err } if err = executeAndParseRequest(c, req, &rec); err != nil { return []BindingInfo{}, err } return rec, nil } // // POST /api/bindings/{vhost}/e/{source}/{destination_type}/{destination} // // DeclareBinding updates information about a binding between a source and a target func (c *Client) DeclareBinding(vhost string, info BindingInfo) (res *http.Response, err error) { info.Vhost = vhost if info.Arguments == nil { info.Arguments = make(map[string]interface{}) } body, err := json.Marshal(info) if err != nil { return nil, err } req, err := newRequestWithBody(c, "POST", "bindings/"+url.QueryEscape(vhost)+"/e/"+url.QueryEscape(info.Source)+"/"+url.QueryEscape(string(info.DestinationType[0]))+"/"+url.QueryEscape(info.Destination), body) if err != nil { return nil, err } res, err = executeRequest(c, req) if err != nil { return nil, err } return res, nil } // // DELETE /api/bindings/{vhost}/e/{source}/{destination_type}/{destination}/{props} // // DeleteBinding delets an individual binding func (c *Client) DeleteBinding(vhost string, info BindingInfo) (res *http.Response, err error) { req, err := newRequestWithBody(c, "DELETE", "bindings/"+url.QueryEscape(vhost)+"/e/"+url.QueryEscape(info.Source)+"/"+url.QueryEscape(string(info.DestinationType[0]))+"/"+url.QueryEscape(info.Destination)+"/"+url.QueryEscape(info.PropertiesKey), nil) if err != nil { return nil, err } res, err = executeRequest(c, req) if err != nil { return nil, err } return res, nil }
vendor/github.com/michaelklishin/rabbit-hole/bindings.go
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.0001774521660991013, 0.00017190570360980928, 0.00016628387675154954, 0.00017168952035717666, 0.0000032391667446063366 ]
{ "id": 4, "code_window": [ "package remoteexec\n", "\n", "import (\n", "\t\"bytes\"\n", "\t\"io\"\n", "\t\"strings\"\n", "\t\"testing\"\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep", "keep" ], "after_edit": [], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 5 }
resource "aws_instance" "foo" { num = "2" } resource "aws_instance" "bar" { foo = "${aws_instance.foo.num}" }
terraform/test-fixtures/plan-taint/main.tf
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.0001731035445118323, 0.0001731035445118323, 0.0001731035445118323, 0.0001731035445118323, 0 ]
{ "id": 5, "code_window": [ "\t\"testing\"\n", "\n", "\t\"reflect\"\n", "\n", "\t\"github.com/hashicorp/terraform/config\"\n", "\t\"github.com/hashicorp/terraform/helper/schema\"\n", "\t\"github.com/hashicorp/terraform/terraform\"\n", ")\n", "\n" ], "labels": [ "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 8 }
package remoteexec

import (
	"bytes"
	"io"
	"strings"
	"testing"

	"reflect"

	"github.com/hashicorp/terraform/config"
	"github.com/hashicorp/terraform/helper/schema"
	"github.com/hashicorp/terraform/terraform"
)

func TestResourceProvider_Validate_good(t *testing.T) {
	c := testConfig(t, map[string]interface{}{
		"inline": "echo foo",
	})
	p := Provisioner()
	warn, errs := p.Validate(c)
	if len(warn) > 0 {
		t.Fatalf("Warnings: %v", warn)
	}
	if len(errs) > 0 {
		t.Fatalf("Errors: %v", errs)
	}
}

func TestResourceProvider_Validate_bad(t *testing.T) {
	c := testConfig(t, map[string]interface{}{
		"invalid": "nope",
	})
	p := Provisioner()
	warn, errs := p.Validate(c)
	if len(warn) > 0 {
		t.Fatalf("Warnings: %v", warn)
	}
	if len(errs) == 0 {
		t.Fatalf("Should have errors")
	}
}

var expectedScriptOut = `cd /tmp
wget http://foobar
exit 0
`

var expectedInlineScriptsOut = strings.Split(expectedScriptOut, "\n")

func TestResourceProvider_generateScript(t *testing.T) {
	p := Provisioner().(*schema.Provisioner)
	conf := map[string]interface{}{
		"inline": []interface{}{
			"cd /tmp",
			"wget http://foobar",
			"exit 0",
		},
	}

	out, err := generateScripts(schema.TestResourceDataRaw(
		t, p.Schema, conf))
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// The generated scripts should match the expected inline script lines.
	if !reflect.DeepEqual(out, expectedInlineScriptsOut) {
		t.Fatalf("bad: %v", out)
	}
}

func TestResourceProvider_CollectScripts_inline(t *testing.T) {
	p := Provisioner().(*schema.Provisioner)
	conf := map[string]interface{}{
		"inline": []interface{}{
			"cd /tmp",
			"wget http://foobar",
			"exit 0",
		},
	}

	scripts, err := collectScripts(schema.TestResourceDataRaw(
		t, p.Schema, conf))
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if len(scripts) != 3 {
		t.Fatalf("bad: %v", scripts)
	}

	for i, script := range scripts {
		var out bytes.Buffer
		_, err = io.Copy(&out, script)
		if err != nil {
			t.Fatalf("err: %v", err)
		}

		if out.String() != expectedInlineScriptsOut[i] {
			t.Fatalf("bad: %v", out.String())
		}
	}
}

func TestResourceProvider_CollectScripts_script(t *testing.T) {
	p := Provisioner().(*schema.Provisioner)
	conf := map[string]interface{}{
		"script": "test-fixtures/script1.sh",
	}

	scripts, err := collectScripts(schema.TestResourceDataRaw(
		t, p.Schema, conf))
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if len(scripts) != 1 {
		t.Fatalf("bad: %v", scripts)
	}

	var out bytes.Buffer
	_, err = io.Copy(&out, scripts[0])
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if out.String() != expectedScriptOut {
		t.Fatalf("bad: %v", out.String())
	}
}

func TestResourceProvider_CollectScripts_scripts(t *testing.T) {
	p := Provisioner().(*schema.Provisioner)
	conf := map[string]interface{}{
		"scripts": []interface{}{
			"test-fixtures/script1.sh",
			"test-fixtures/script1.sh",
			"test-fixtures/script1.sh",
		},
	}

	scripts, err := collectScripts(schema.TestResourceDataRaw(
		t, p.Schema, conf))
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if len(scripts) != 3 {
		t.Fatalf("bad: %v", scripts)
	}

	for idx := range scripts {
		var out bytes.Buffer
		_, err = io.Copy(&out, scripts[idx])
		if err != nil {
			t.Fatalf("err: %v", err)
		}

		if out.String() != expectedScriptOut {
			t.Fatalf("bad: %v", out.String())
		}
	}
}

func testConfig(
	t *testing.T, c map[string]interface{}) *terraform.ResourceConfig {
	r, err := config.NewRawConfig(c)
	if err != nil {
		t.Fatalf("bad: %s", err)
	}

	return terraform.NewResourceConfig(r)
}
builtin/provisioners/remote-exec/resource_provisioner_test.go
1
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.01463373564183712, 0.0009734281920827925, 0.0001638698304304853, 0.00017074262723326683, 0.00331311346963048 ]
{ "id": 5, "code_window": [ "\t\"testing\"\n", "\n", "\t\"reflect\"\n", "\n", "\t\"github.com/hashicorp/terraform/config\"\n", "\t\"github.com/hashicorp/terraform/helper/schema\"\n", "\t\"github.com/hashicorp/terraform/terraform\"\n", ")\n", "\n" ], "labels": [ "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 8 }
// Package objects contains functionality for working with Object Storage
// object resources. An object is a resource that represents and contains data
// - such as documents, images, and so on. You can also store custom metadata
// with an object.
package objects
vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/doc.go
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.00015848971088416874, 0.00015848971088416874, 0.00015848971088416874, 0.00015848971088416874, 0 ]
{ "id": 5, "code_window": [ "\t\"testing\"\n", "\n", "\t\"reflect\"\n", "\n", "\t\"github.com/hashicorp/terraform/config\"\n", "\t\"github.com/hashicorp/terraform/helper/schema\"\n", "\t\"github.com/hashicorp/terraform/terraform\"\n", ")\n", "\n" ], "labels": [ "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 8 }
// // Copyright 2016, Sander van Harmelen // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package cloudstack import ( "encoding/json" "fmt" "net/url" "strconv" "strings" ) type AttachIsoParams struct { p map[string]interface{} } func (p *AttachIsoParams) toURLValues() url.Values { u := url.Values{} if p.p == nil { return u } if v, found := p.p["id"]; found { u.Set("id", v.(string)) } if v, found := p.p["virtualmachineid"]; found { u.Set("virtualmachineid", v.(string)) } return u } func (p *AttachIsoParams) SetId(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["id"] = v return } func (p *AttachIsoParams) SetVirtualmachineid(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["virtualmachineid"] = v return } // You should always use this function to get a new AttachIsoParams instance, // as then you are sure you have configured all required params func (s *ISOService) NewAttachIsoParams(id string, virtualmachineid string) *AttachIsoParams { p := &AttachIsoParams{} p.p = make(map[string]interface{}) p.p["id"] = id p.p["virtualmachineid"] = virtualmachineid return p } // Attaches an ISO to a virtual machine. func (s *ISOService) AttachIso(p *AttachIsoParams) (*AttachIsoResponse, error) { resp, err := s.cs.newRequest("attachIso", p.toURLValues()) if err != nil { return nil, err } var r AttachIsoResponse if err := json.Unmarshal(resp, &r); err != nil { return nil, err } // If we have a async client, we need to wait for the async result if s.cs.async { b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) if err != nil { if err == AsyncTimeoutErr { return &r, err } return nil, err } b, err = getRawValue(b) if err != nil { return nil, err } if err := json.Unmarshal(b, &r); err != nil { return nil, err } } return &r, nil } type AttachIsoResponse struct { JobID string `json:"jobid,omitempty"` Account string `json:"account,omitempty"` Affinitygroup []struct { Account string `json:"account,omitempty"` Description string `json:"description,omitempty"` Domain string `json:"domain,omitempty"` Domainid string `json:"domainid,omitempty"` Id string `json:"id,omitempty"` Name string `json:"name,omitempty"` Project string `json:"project,omitempty"` Projectid string `json:"projectid,omitempty"` Type string `json:"type,omitempty"` VirtualmachineIds []string `json:"virtualmachineIds,omitempty"` } `json:"affinitygroup,omitempty"` Cpunumber int `json:"cpunumber,omitempty"` Cpuspeed int `json:"cpuspeed,omitempty"` Cpuused string `json:"cpuused,omitempty"` Created string `json:"created,omitempty"` Details map[string]string `json:"details,omitempty"` Diskioread int64 `json:"diskioread,omitempty"` Diskiowrite int64 `json:"diskiowrite,omitempty"` Diskkbsread int64 `json:"diskkbsread,omitempty"` Diskkbswrite int64 `json:"diskkbswrite,omitempty"` Diskofferingid string `json:"diskofferingid,omitempty"` Diskofferingname string `json:"diskofferingname,omitempty"` Displayname string `json:"displayname,omitempty"` Displayvm bool `json:"displayvm,omitempty"` Domain string 
`json:"domain,omitempty"` Domainid string `json:"domainid,omitempty"` Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"` Group string `json:"group,omitempty"` Groupid string `json:"groupid,omitempty"` Guestosid string `json:"guestosid,omitempty"` Haenable bool `json:"haenable,omitempty"` Hostid string `json:"hostid,omitempty"` Hostname string `json:"hostname,omitempty"` Hypervisor string `json:"hypervisor,omitempty"` Id string `json:"id,omitempty"` Instancename string `json:"instancename,omitempty"` Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` Isodisplaytext string `json:"isodisplaytext,omitempty"` Isoid string `json:"isoid,omitempty"` Isoname string `json:"isoname,omitempty"` Keypair string `json:"keypair,omitempty"` Memory int `json:"memory,omitempty"` Name string `json:"name,omitempty"` Networkkbsread int64 `json:"networkkbsread,omitempty"` Networkkbswrite int64 `json:"networkkbswrite,omitempty"` Nic []struct { Broadcasturi string `json:"broadcasturi,omitempty"` Deviceid string `json:"deviceid,omitempty"` Gateway string `json:"gateway,omitempty"` Id string `json:"id,omitempty"` Ip6address string `json:"ip6address,omitempty"` Ip6cidr string `json:"ip6cidr,omitempty"` Ip6gateway string `json:"ip6gateway,omitempty"` Ipaddress string `json:"ipaddress,omitempty"` Isdefault bool `json:"isdefault,omitempty"` Isolationuri string `json:"isolationuri,omitempty"` Macaddress string `json:"macaddress,omitempty"` Netmask string `json:"netmask,omitempty"` Networkid string `json:"networkid,omitempty"` Networkname string `json:"networkname,omitempty"` Secondaryip []struct { Id string `json:"id,omitempty"` Ipaddress string `json:"ipaddress,omitempty"` } `json:"secondaryip,omitempty"` Traffictype string `json:"traffictype,omitempty"` Type string `json:"type,omitempty"` Virtualmachineid string `json:"virtualmachineid,omitempty"` } `json:"nic,omitempty"` Ostypeid int64 `json:"ostypeid,omitempty"` Password string `json:"password,omitempty"` Passwordenabled bool `json:"passwordenabled,omitempty"` Project string `json:"project,omitempty"` Projectid string `json:"projectid,omitempty"` Publicip string `json:"publicip,omitempty"` Publicipid string `json:"publicipid,omitempty"` Rootdeviceid int64 `json:"rootdeviceid,omitempty"` Rootdevicetype string `json:"rootdevicetype,omitempty"` Securitygroup []struct { Account string `json:"account,omitempty"` Description string `json:"description,omitempty"` Domain string `json:"domain,omitempty"` Domainid string `json:"domainid,omitempty"` Egressrule []struct { Account string `json:"account,omitempty"` Cidr string `json:"cidr,omitempty"` Endport int `json:"endport,omitempty"` Icmpcode int `json:"icmpcode,omitempty"` Icmptype int `json:"icmptype,omitempty"` Protocol string `json:"protocol,omitempty"` Ruleid string `json:"ruleid,omitempty"` Securitygroupname string `json:"securitygroupname,omitempty"` Startport int `json:"startport,omitempty"` Tags []struct { Account string `json:"account,omitempty"` Customer string `json:"customer,omitempty"` Domain string `json:"domain,omitempty"` Domainid string `json:"domainid,omitempty"` Key string `json:"key,omitempty"` Project string `json:"project,omitempty"` Projectid string `json:"projectid,omitempty"` Resourceid string `json:"resourceid,omitempty"` Resourcetype string `json:"resourcetype,omitempty"` Value string `json:"value,omitempty"` } `json:"tags,omitempty"` } `json:"egressrule,omitempty"` Id string `json:"id,omitempty"` Ingressrule []struct { Account string `json:"account,omitempty"` Cidr string 
`json:"cidr,omitempty"` Endport int `json:"endport,omitempty"` Icmpcode int `json:"icmpcode,omitempty"` Icmptype int `json:"icmptype,omitempty"` Protocol string `json:"protocol,omitempty"` Ruleid string `json:"ruleid,omitempty"` Securitygroupname string `json:"securitygroupname,omitempty"` Startport int `json:"startport,omitempty"` Tags []struct { Account string `json:"account,omitempty"` Customer string `json:"customer,omitempty"` Domain string `json:"domain,omitempty"` Domainid string `json:"domainid,omitempty"` Key string `json:"key,omitempty"` Project string `json:"project,omitempty"` Projectid string `json:"projectid,omitempty"` Resourceid string `json:"resourceid,omitempty"` Resourcetype string `json:"resourcetype,omitempty"` Value string `json:"value,omitempty"` } `json:"tags,omitempty"` } `json:"ingressrule,omitempty"` Name string `json:"name,omitempty"` Project string `json:"project,omitempty"` Projectid string `json:"projectid,omitempty"` Tags []struct { Account string `json:"account,omitempty"` Customer string `json:"customer,omitempty"` Domain string `json:"domain,omitempty"` Domainid string `json:"domainid,omitempty"` Key string `json:"key,omitempty"` Project string `json:"project,omitempty"` Projectid string `json:"projectid,omitempty"` Resourceid string `json:"resourceid,omitempty"` Resourcetype string `json:"resourcetype,omitempty"` Value string `json:"value,omitempty"` } `json:"tags,omitempty"` Virtualmachinecount int `json:"virtualmachinecount,omitempty"` Virtualmachineids []string `json:"virtualmachineids,omitempty"` } `json:"securitygroup,omitempty"` Serviceofferingid string `json:"serviceofferingid,omitempty"` Serviceofferingname string `json:"serviceofferingname,omitempty"` Servicestate string `json:"servicestate,omitempty"` State string `json:"state,omitempty"` Tags []struct { Account string `json:"account,omitempty"` Customer string `json:"customer,omitempty"` Domain string `json:"domain,omitempty"` Domainid string `json:"domainid,omitempty"` Key string `json:"key,omitempty"` Project string `json:"project,omitempty"` Projectid string `json:"projectid,omitempty"` Resourceid string `json:"resourceid,omitempty"` Resourcetype string `json:"resourcetype,omitempty"` Value string `json:"value,omitempty"` } `json:"tags,omitempty"` Templatedisplaytext string `json:"templatedisplaytext,omitempty"` Templateid string `json:"templateid,omitempty"` Templatename string `json:"templatename,omitempty"` Userid string `json:"userid,omitempty"` Username string `json:"username,omitempty"` Vgpu string `json:"vgpu,omitempty"` Zoneid string `json:"zoneid,omitempty"` Zonename string `json:"zonename,omitempty"` } type DetachIsoParams struct { p map[string]interface{} } func (p *DetachIsoParams) toURLValues() url.Values { u := url.Values{} if p.p == nil { return u } if v, found := p.p["virtualmachineid"]; found { u.Set("virtualmachineid", v.(string)) } return u } func (p *DetachIsoParams) SetVirtualmachineid(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["virtualmachineid"] = v return } // You should always use this function to get a new DetachIsoParams instance, // as then you are sure you have configured all required params func (s *ISOService) NewDetachIsoParams(virtualmachineid string) *DetachIsoParams { p := &DetachIsoParams{} p.p = make(map[string]interface{}) p.p["virtualmachineid"] = virtualmachineid return p } // Detaches any ISO file (if any) currently attached to a virtual machine. 
func (s *ISOService) DetachIso(p *DetachIsoParams) (*DetachIsoResponse, error) { resp, err := s.cs.newRequest("detachIso", p.toURLValues()) if err != nil { return nil, err } var r DetachIsoResponse if err := json.Unmarshal(resp, &r); err != nil { return nil, err } // If we have a async client, we need to wait for the async result if s.cs.async { b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) if err != nil { if err == AsyncTimeoutErr { return &r, err } return nil, err } b, err = getRawValue(b) if err != nil { return nil, err } if err := json.Unmarshal(b, &r); err != nil { return nil, err } } return &r, nil } type DetachIsoResponse struct { JobID string `json:"jobid,omitempty"` Account string `json:"account,omitempty"` Affinitygroup []struct { Account string `json:"account,omitempty"` Description string `json:"description,omitempty"` Domain string `json:"domain,omitempty"` Domainid string `json:"domainid,omitempty"` Id string `json:"id,omitempty"` Name string `json:"name,omitempty"` Project string `json:"project,omitempty"` Projectid string `json:"projectid,omitempty"` Type string `json:"type,omitempty"` VirtualmachineIds []string `json:"virtualmachineIds,omitempty"` } `json:"affinitygroup,omitempty"` Cpunumber int `json:"cpunumber,omitempty"` Cpuspeed int `json:"cpuspeed,omitempty"` Cpuused string `json:"cpuused,omitempty"` Created string `json:"created,omitempty"` Details map[string]string `json:"details,omitempty"` Diskioread int64 `json:"diskioread,omitempty"` Diskiowrite int64 `json:"diskiowrite,omitempty"` Diskkbsread int64 `json:"diskkbsread,omitempty"` Diskkbswrite int64 `json:"diskkbswrite,omitempty"` Diskofferingid string `json:"diskofferingid,omitempty"` Diskofferingname string `json:"diskofferingname,omitempty"` Displayname string `json:"displayname,omitempty"` Displayvm bool `json:"displayvm,omitempty"` Domain string `json:"domain,omitempty"` Domainid string `json:"domainid,omitempty"` Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"` Group string `json:"group,omitempty"` Groupid string `json:"groupid,omitempty"` Guestosid string `json:"guestosid,omitempty"` Haenable bool `json:"haenable,omitempty"` Hostid string `json:"hostid,omitempty"` Hostname string `json:"hostname,omitempty"` Hypervisor string `json:"hypervisor,omitempty"` Id string `json:"id,omitempty"` Instancename string `json:"instancename,omitempty"` Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` Isodisplaytext string `json:"isodisplaytext,omitempty"` Isoid string `json:"isoid,omitempty"` Isoname string `json:"isoname,omitempty"` Keypair string `json:"keypair,omitempty"` Memory int `json:"memory,omitempty"` Name string `json:"name,omitempty"` Networkkbsread int64 `json:"networkkbsread,omitempty"` Networkkbswrite int64 `json:"networkkbswrite,omitempty"` Nic []struct { Broadcasturi string `json:"broadcasturi,omitempty"` Deviceid string `json:"deviceid,omitempty"` Gateway string `json:"gateway,omitempty"` Id string `json:"id,omitempty"` Ip6address string `json:"ip6address,omitempty"` Ip6cidr string `json:"ip6cidr,omitempty"` Ip6gateway string `json:"ip6gateway,omitempty"` Ipaddress string `json:"ipaddress,omitempty"` Isdefault bool `json:"isdefault,omitempty"` Isolationuri string `json:"isolationuri,omitempty"` Macaddress string `json:"macaddress,omitempty"` Netmask string `json:"netmask,omitempty"` Networkid string `json:"networkid,omitempty"` Networkname string `json:"networkname,omitempty"` Secondaryip []struct { Id string `json:"id,omitempty"` Ipaddress string 
`json:"ipaddress,omitempty"` } `json:"secondaryip,omitempty"` Traffictype string `json:"traffictype,omitempty"` Type string `json:"type,omitempty"` Virtualmachineid string `json:"virtualmachineid,omitempty"` } `json:"nic,omitempty"` Ostypeid int64 `json:"ostypeid,omitempty"` Password string `json:"password,omitempty"` Passwordenabled bool `json:"passwordenabled,omitempty"` Project string `json:"project,omitempty"` Projectid string `json:"projectid,omitempty"` Publicip string `json:"publicip,omitempty"` Publicipid string `json:"publicipid,omitempty"` Rootdeviceid int64 `json:"rootdeviceid,omitempty"` Rootdevicetype string `json:"rootdevicetype,omitempty"` Securitygroup []struct { Account string `json:"account,omitempty"` Description string `json:"description,omitempty"` Domain string `json:"domain,omitempty"` Domainid string `json:"domainid,omitempty"` Egressrule []struct { Account string `json:"account,omitempty"` Cidr string `json:"cidr,omitempty"` Endport int `json:"endport,omitempty"` Icmpcode int `json:"icmpcode,omitempty"` Icmptype int `json:"icmptype,omitempty"` Protocol string `json:"protocol,omitempty"` Ruleid string `json:"ruleid,omitempty"` Securitygroupname string `json:"securitygroupname,omitempty"` Startport int `json:"startport,omitempty"` Tags []struct { Account string `json:"account,omitempty"` Customer string `json:"customer,omitempty"` Domain string `json:"domain,omitempty"` Domainid string `json:"domainid,omitempty"` Key string `json:"key,omitempty"` Project string `json:"project,omitempty"` Projectid string `json:"projectid,omitempty"` Resourceid string `json:"resourceid,omitempty"` Resourcetype string `json:"resourcetype,omitempty"` Value string `json:"value,omitempty"` } `json:"tags,omitempty"` } `json:"egressrule,omitempty"` Id string `json:"id,omitempty"` Ingressrule []struct { Account string `json:"account,omitempty"` Cidr string `json:"cidr,omitempty"` Endport int `json:"endport,omitempty"` Icmpcode int `json:"icmpcode,omitempty"` Icmptype int `json:"icmptype,omitempty"` Protocol string `json:"protocol,omitempty"` Ruleid string `json:"ruleid,omitempty"` Securitygroupname string `json:"securitygroupname,omitempty"` Startport int `json:"startport,omitempty"` Tags []struct { Account string `json:"account,omitempty"` Customer string `json:"customer,omitempty"` Domain string `json:"domain,omitempty"` Domainid string `json:"domainid,omitempty"` Key string `json:"key,omitempty"` Project string `json:"project,omitempty"` Projectid string `json:"projectid,omitempty"` Resourceid string `json:"resourceid,omitempty"` Resourcetype string `json:"resourcetype,omitempty"` Value string `json:"value,omitempty"` } `json:"tags,omitempty"` } `json:"ingressrule,omitempty"` Name string `json:"name,omitempty"` Project string `json:"project,omitempty"` Projectid string `json:"projectid,omitempty"` Tags []struct { Account string `json:"account,omitempty"` Customer string `json:"customer,omitempty"` Domain string `json:"domain,omitempty"` Domainid string `json:"domainid,omitempty"` Key string `json:"key,omitempty"` Project string `json:"project,omitempty"` Projectid string `json:"projectid,omitempty"` Resourceid string `json:"resourceid,omitempty"` Resourcetype string `json:"resourcetype,omitempty"` Value string `json:"value,omitempty"` } `json:"tags,omitempty"` Virtualmachinecount int `json:"virtualmachinecount,omitempty"` Virtualmachineids []string `json:"virtualmachineids,omitempty"` } `json:"securitygroup,omitempty"` Serviceofferingid string `json:"serviceofferingid,omitempty"` 
Serviceofferingname string `json:"serviceofferingname,omitempty"` Servicestate string `json:"servicestate,omitempty"` State string `json:"state,omitempty"` Tags []struct { Account string `json:"account,omitempty"` Customer string `json:"customer,omitempty"` Domain string `json:"domain,omitempty"` Domainid string `json:"domainid,omitempty"` Key string `json:"key,omitempty"` Project string `json:"project,omitempty"` Projectid string `json:"projectid,omitempty"` Resourceid string `json:"resourceid,omitempty"` Resourcetype string `json:"resourcetype,omitempty"` Value string `json:"value,omitempty"` } `json:"tags,omitempty"` Templatedisplaytext string `json:"templatedisplaytext,omitempty"` Templateid string `json:"templateid,omitempty"` Templatename string `json:"templatename,omitempty"` Userid string `json:"userid,omitempty"` Username string `json:"username,omitempty"` Vgpu string `json:"vgpu,omitempty"` Zoneid string `json:"zoneid,omitempty"` Zonename string `json:"zonename,omitempty"` } type ListIsosParams struct { p map[string]interface{} } func (p *ListIsosParams) toURLValues() url.Values { u := url.Values{} if p.p == nil { return u } if v, found := p.p["account"]; found { u.Set("account", v.(string)) } if v, found := p.p["bootable"]; found { vv := strconv.FormatBool(v.(bool)) u.Set("bootable", vv) } if v, found := p.p["domainid"]; found { u.Set("domainid", v.(string)) } if v, found := p.p["hypervisor"]; found { u.Set("hypervisor", v.(string)) } if v, found := p.p["id"]; found { u.Set("id", v.(string)) } if v, found := p.p["isofilter"]; found { u.Set("isofilter", v.(string)) } if v, found := p.p["ispublic"]; found { vv := strconv.FormatBool(v.(bool)) u.Set("ispublic", vv) } if v, found := p.p["isready"]; found { vv := strconv.FormatBool(v.(bool)) u.Set("isready", vv) } if v, found := p.p["isrecursive"]; found { vv := strconv.FormatBool(v.(bool)) u.Set("isrecursive", vv) } if v, found := p.p["keyword"]; found { u.Set("keyword", v.(string)) } if v, found := p.p["listall"]; found { vv := strconv.FormatBool(v.(bool)) u.Set("listall", vv) } if v, found := p.p["name"]; found { u.Set("name", v.(string)) } if v, found := p.p["page"]; found { vv := strconv.Itoa(v.(int)) u.Set("page", vv) } if v, found := p.p["pagesize"]; found { vv := strconv.Itoa(v.(int)) u.Set("pagesize", vv) } if v, found := p.p["projectid"]; found { u.Set("projectid", v.(string)) } if v, found := p.p["showremoved"]; found { vv := strconv.FormatBool(v.(bool)) u.Set("showremoved", vv) } if v, found := p.p["tags"]; found { i := 0 for k, vv := range v.(map[string]string) { u.Set(fmt.Sprintf("tags[%d].key", i), k) u.Set(fmt.Sprintf("tags[%d].value", i), vv) i++ } } if v, found := p.p["zoneid"]; found { u.Set("zoneid", v.(string)) } return u } func (p *ListIsosParams) SetAccount(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["account"] = v return } func (p *ListIsosParams) SetBootable(v bool) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["bootable"] = v return } func (p *ListIsosParams) SetDomainid(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["domainid"] = v return } func (p *ListIsosParams) SetHypervisor(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["hypervisor"] = v return } func (p *ListIsosParams) SetId(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["id"] = v return } func (p *ListIsosParams) SetIsofilter(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["isofilter"] = v return } func (p *ListIsosParams) 
SetIspublic(v bool) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["ispublic"] = v return } func (p *ListIsosParams) SetIsready(v bool) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["isready"] = v return } func (p *ListIsosParams) SetIsrecursive(v bool) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["isrecursive"] = v return } func (p *ListIsosParams) SetKeyword(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["keyword"] = v return } func (p *ListIsosParams) SetListall(v bool) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["listall"] = v return } func (p *ListIsosParams) SetName(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["name"] = v return } func (p *ListIsosParams) SetPage(v int) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["page"] = v return } func (p *ListIsosParams) SetPagesize(v int) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["pagesize"] = v return } func (p *ListIsosParams) SetProjectid(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["projectid"] = v return } func (p *ListIsosParams) SetShowremoved(v bool) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["showremoved"] = v return } func (p *ListIsosParams) SetTags(v map[string]string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["tags"] = v return } func (p *ListIsosParams) SetZoneid(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["zoneid"] = v return } // You should always use this function to get a new ListIsosParams instance, // as then you are sure you have configured all required params func (s *ISOService) NewListIsosParams() *ListIsosParams { p := &ListIsosParams{} p.p = make(map[string]interface{}) return p } // This is a courtesy helper function, which in some cases may not work as expected! func (s *ISOService) GetIsoID(name string, isofilter string, zoneid string, opts ...OptionFunc) (string, int, error) { p := &ListIsosParams{} p.p = make(map[string]interface{}) p.p["name"] = name p.p["isofilter"] = isofilter p.p["zoneid"] = zoneid for _, fn := range opts { if err := fn(s.cs, p); err != nil { return "", -1, err } } l, err := s.ListIsos(p) if err != nil { return "", -1, err } if l.Count == 0 { return "", l.Count, fmt.Errorf("No match found for %s: %+v", name, l) } if l.Count == 1 { return l.Isos[0].Id, l.Count, nil } if l.Count > 1 { for _, v := range l.Isos { if v.Name == name { return v.Id, l.Count, nil } } } return "", l.Count, fmt.Errorf("Could not find an exact match for %s: %+v", name, l) } // This is a courtesy helper function, which in some cases may not work as expected! func (s *ISOService) GetIsoByName(name string, isofilter string, zoneid string, opts ...OptionFunc) (*Iso, int, error) { id, count, err := s.GetIsoID(name, isofilter, zoneid, opts...) if err != nil { return nil, count, err } r, count, err := s.GetIsoByID(id, opts...) if err != nil { return nil, count, err } return r, count, nil } // This is a courtesy helper function, which in some cases may not work as expected! 
func (s *ISOService) GetIsoByID(id string, opts ...OptionFunc) (*Iso, int, error) { p := &ListIsosParams{} p.p = make(map[string]interface{}) p.p["id"] = id for _, fn := range opts { if err := fn(s.cs, p); err != nil { return nil, -1, err } } l, err := s.ListIsos(p) if err != nil { if strings.Contains(err.Error(), fmt.Sprintf( "Invalid parameter id value=%s due to incorrect long value format, "+ "or entity does not exist", id)) { return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) } return nil, -1, err } if l.Count == 0 { return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) } if l.Count == 1 { return l.Isos[0], l.Count, nil } return nil, l.Count, fmt.Errorf("There is more then one result for Iso UUID: %s!", id) } // Lists all available ISO files. func (s *ISOService) ListIsos(p *ListIsosParams) (*ListIsosResponse, error) { resp, err := s.cs.newRequest("listIsos", p.toURLValues()) if err != nil { return nil, err } var r ListIsosResponse if err := json.Unmarshal(resp, &r); err != nil { return nil, err } return &r, nil } type ListIsosResponse struct { Count int `json:"count"` Isos []*Iso `json:"iso"` } type Iso struct { Account string `json:"account,omitempty"` Accountid string `json:"accountid,omitempty"` Bootable bool `json:"bootable,omitempty"` Checksum string `json:"checksum,omitempty"` Created string `json:"created,omitempty"` CrossZones bool `json:"crossZones,omitempty"` Details map[string]string `json:"details,omitempty"` Displaytext string `json:"displaytext,omitempty"` Domain string `json:"domain,omitempty"` Domainid string `json:"domainid,omitempty"` Format string `json:"format,omitempty"` Hostid string `json:"hostid,omitempty"` Hostname string `json:"hostname,omitempty"` Hypervisor string `json:"hypervisor,omitempty"` Id string `json:"id,omitempty"` Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` Isextractable bool `json:"isextractable,omitempty"` Isfeatured bool `json:"isfeatured,omitempty"` Ispublic bool `json:"ispublic,omitempty"` Isready bool `json:"isready,omitempty"` Name string `json:"name,omitempty"` Ostypeid string `json:"ostypeid,omitempty"` Ostypename string `json:"ostypename,omitempty"` Passwordenabled bool `json:"passwordenabled,omitempty"` Project string `json:"project,omitempty"` Projectid string `json:"projectid,omitempty"` Removed string `json:"removed,omitempty"` Size int64 `json:"size,omitempty"` Sourcetemplateid string `json:"sourcetemplateid,omitempty"` Sshkeyenabled bool `json:"sshkeyenabled,omitempty"` Status string `json:"status,omitempty"` Tags []struct { Account string `json:"account,omitempty"` Customer string `json:"customer,omitempty"` Domain string `json:"domain,omitempty"` Domainid string `json:"domainid,omitempty"` Key string `json:"key,omitempty"` Project string `json:"project,omitempty"` Projectid string `json:"projectid,omitempty"` Resourceid string `json:"resourceid,omitempty"` Resourcetype string `json:"resourcetype,omitempty"` Value string `json:"value,omitempty"` } `json:"tags,omitempty"` Templatetag string `json:"templatetag,omitempty"` Templatetype string `json:"templatetype,omitempty"` Zoneid string `json:"zoneid,omitempty"` Zonename string `json:"zonename,omitempty"` } type RegisterIsoParams struct { p map[string]interface{} } func (p *RegisterIsoParams) toURLValues() url.Values { u := url.Values{} if p.p == nil { return u } if v, found := p.p["account"]; found { u.Set("account", v.(string)) } if v, found := p.p["bootable"]; found { vv := strconv.FormatBool(v.(bool)) u.Set("bootable", vv) } if 
v, found := p.p["checksum"]; found { u.Set("checksum", v.(string)) } if v, found := p.p["displaytext"]; found { u.Set("displaytext", v.(string)) } if v, found := p.p["domainid"]; found { u.Set("domainid", v.(string)) } if v, found := p.p["imagestoreuuid"]; found { u.Set("imagestoreuuid", v.(string)) } if v, found := p.p["isdynamicallyscalable"]; found { vv := strconv.FormatBool(v.(bool)) u.Set("isdynamicallyscalable", vv) } if v, found := p.p["isextractable"]; found { vv := strconv.FormatBool(v.(bool)) u.Set("isextractable", vv) } if v, found := p.p["isfeatured"]; found { vv := strconv.FormatBool(v.(bool)) u.Set("isfeatured", vv) } if v, found := p.p["ispublic"]; found { vv := strconv.FormatBool(v.(bool)) u.Set("ispublic", vv) } if v, found := p.p["name"]; found { u.Set("name", v.(string)) } if v, found := p.p["ostypeid"]; found { u.Set("ostypeid", v.(string)) } if v, found := p.p["projectid"]; found { u.Set("projectid", v.(string)) } if v, found := p.p["url"]; found { u.Set("url", v.(string)) } if v, found := p.p["zoneid"]; found { u.Set("zoneid", v.(string)) } return u } func (p *RegisterIsoParams) SetAccount(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["account"] = v return } func (p *RegisterIsoParams) SetBootable(v bool) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["bootable"] = v return } func (p *RegisterIsoParams) SetChecksum(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["checksum"] = v return } func (p *RegisterIsoParams) SetDisplaytext(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["displaytext"] = v return } func (p *RegisterIsoParams) SetDomainid(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["domainid"] = v return } func (p *RegisterIsoParams) SetImagestoreuuid(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["imagestoreuuid"] = v return } func (p *RegisterIsoParams) SetIsdynamicallyscalable(v bool) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["isdynamicallyscalable"] = v return } func (p *RegisterIsoParams) SetIsextractable(v bool) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["isextractable"] = v return } func (p *RegisterIsoParams) SetIsfeatured(v bool) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["isfeatured"] = v return } func (p *RegisterIsoParams) SetIspublic(v bool) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["ispublic"] = v return } func (p *RegisterIsoParams) SetName(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["name"] = v return } func (p *RegisterIsoParams) SetOstypeid(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["ostypeid"] = v return } func (p *RegisterIsoParams) SetProjectid(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["projectid"] = v return } func (p *RegisterIsoParams) SetUrl(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["url"] = v return } func (p *RegisterIsoParams) SetZoneid(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["zoneid"] = v return } // You should always use this function to get a new RegisterIsoParams instance, // as then you are sure you have configured all required params func (s *ISOService) NewRegisterIsoParams(displaytext string, name string, url string, zoneid string) *RegisterIsoParams { p := &RegisterIsoParams{} p.p = make(map[string]interface{}) p.p["displaytext"] = displaytext p.p["name"] = name p.p["url"] = url p.p["zoneid"] = zoneid return p 
} // Registers an existing ISO into the CloudStack Cloud. func (s *ISOService) RegisterIso(p *RegisterIsoParams) (*RegisterIsoResponse, error) { resp, err := s.cs.newRequest("registerIso", p.toURLValues()) if err != nil { return nil, err } var r RegisterIsoResponse if err := json.Unmarshal(resp, &r); err != nil { return nil, err } return &r, nil } type RegisterIsoResponse struct { Account string `json:"account,omitempty"` Accountid string `json:"accountid,omitempty"` Bootable bool `json:"bootable,omitempty"` Checksum string `json:"checksum,omitempty"` Created string `json:"created,omitempty"` CrossZones bool `json:"crossZones,omitempty"` Details map[string]string `json:"details,omitempty"` Displaytext string `json:"displaytext,omitempty"` Domain string `json:"domain,omitempty"` Domainid string `json:"domainid,omitempty"` Format string `json:"format,omitempty"` Hostid string `json:"hostid,omitempty"` Hostname string `json:"hostname,omitempty"` Hypervisor string `json:"hypervisor,omitempty"` Id string `json:"id,omitempty"` Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` Isextractable bool `json:"isextractable,omitempty"` Isfeatured bool `json:"isfeatured,omitempty"` Ispublic bool `json:"ispublic,omitempty"` Isready bool `json:"isready,omitempty"` Name string `json:"name,omitempty"` Ostypeid string `json:"ostypeid,omitempty"` Ostypename string `json:"ostypename,omitempty"` Passwordenabled bool `json:"passwordenabled,omitempty"` Project string `json:"project,omitempty"` Projectid string `json:"projectid,omitempty"` Removed string `json:"removed,omitempty"` Size int64 `json:"size,omitempty"` Sourcetemplateid string `json:"sourcetemplateid,omitempty"` Sshkeyenabled bool `json:"sshkeyenabled,omitempty"` Status string `json:"status,omitempty"` Tags []struct { Account string `json:"account,omitempty"` Customer string `json:"customer,omitempty"` Domain string `json:"domain,omitempty"` Domainid string `json:"domainid,omitempty"` Key string `json:"key,omitempty"` Project string `json:"project,omitempty"` Projectid string `json:"projectid,omitempty"` Resourceid string `json:"resourceid,omitempty"` Resourcetype string `json:"resourcetype,omitempty"` Value string `json:"value,omitempty"` } `json:"tags,omitempty"` Templatetag string `json:"templatetag,omitempty"` Templatetype string `json:"templatetype,omitempty"` Zoneid string `json:"zoneid,omitempty"` Zonename string `json:"zonename,omitempty"` } type UpdateIsoParams struct { p map[string]interface{} } func (p *UpdateIsoParams) toURLValues() url.Values { u := url.Values{} if p.p == nil { return u } if v, found := p.p["bootable"]; found { vv := strconv.FormatBool(v.(bool)) u.Set("bootable", vv) } if v, found := p.p["details"]; found { i := 0 for k, vv := range v.(map[string]string) { u.Set(fmt.Sprintf("details[%d].key", i), k) u.Set(fmt.Sprintf("details[%d].value", i), vv) i++ } } if v, found := p.p["displaytext"]; found { u.Set("displaytext", v.(string)) } if v, found := p.p["format"]; found { u.Set("format", v.(string)) } if v, found := p.p["id"]; found { u.Set("id", v.(string)) } if v, found := p.p["isdynamicallyscalable"]; found { vv := strconv.FormatBool(v.(bool)) u.Set("isdynamicallyscalable", vv) } if v, found := p.p["isrouting"]; found { vv := strconv.FormatBool(v.(bool)) u.Set("isrouting", vv) } if v, found := p.p["name"]; found { u.Set("name", v.(string)) } if v, found := p.p["ostypeid"]; found { u.Set("ostypeid", v.(string)) } if v, found := p.p["passwordenabled"]; found { vv := strconv.FormatBool(v.(bool)) 
u.Set("passwordenabled", vv) } if v, found := p.p["requireshvm"]; found { vv := strconv.FormatBool(v.(bool)) u.Set("requireshvm", vv) } if v, found := p.p["sortkey"]; found { vv := strconv.Itoa(v.(int)) u.Set("sortkey", vv) } return u } func (p *UpdateIsoParams) SetBootable(v bool) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["bootable"] = v return } func (p *UpdateIsoParams) SetDetails(v map[string]string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["details"] = v return } func (p *UpdateIsoParams) SetDisplaytext(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["displaytext"] = v return } func (p *UpdateIsoParams) SetFormat(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["format"] = v return } func (p *UpdateIsoParams) SetId(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["id"] = v return } func (p *UpdateIsoParams) SetIsdynamicallyscalable(v bool) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["isdynamicallyscalable"] = v return } func (p *UpdateIsoParams) SetIsrouting(v bool) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["isrouting"] = v return } func (p *UpdateIsoParams) SetName(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["name"] = v return } func (p *UpdateIsoParams) SetOstypeid(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["ostypeid"] = v return } func (p *UpdateIsoParams) SetPasswordenabled(v bool) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["passwordenabled"] = v return } func (p *UpdateIsoParams) SetRequireshvm(v bool) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["requireshvm"] = v return } func (p *UpdateIsoParams) SetSortkey(v int) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["sortkey"] = v return } // You should always use this function to get a new UpdateIsoParams instance, // as then you are sure you have configured all required params func (s *ISOService) NewUpdateIsoParams(id string) *UpdateIsoParams { p := &UpdateIsoParams{} p.p = make(map[string]interface{}) p.p["id"] = id return p } // Updates an ISO file. 
func (s *ISOService) UpdateIso(p *UpdateIsoParams) (*UpdateIsoResponse, error) { resp, err := s.cs.newRequest("updateIso", p.toURLValues()) if err != nil { return nil, err } var r UpdateIsoResponse if err := json.Unmarshal(resp, &r); err != nil { return nil, err } return &r, nil } type UpdateIsoResponse struct { Account string `json:"account,omitempty"` Accountid string `json:"accountid,omitempty"` Bootable bool `json:"bootable,omitempty"` Checksum string `json:"checksum,omitempty"` Created string `json:"created,omitempty"` CrossZones bool `json:"crossZones,omitempty"` Details map[string]string `json:"details,omitempty"` Displaytext string `json:"displaytext,omitempty"` Domain string `json:"domain,omitempty"` Domainid string `json:"domainid,omitempty"` Format string `json:"format,omitempty"` Hostid string `json:"hostid,omitempty"` Hostname string `json:"hostname,omitempty"` Hypervisor string `json:"hypervisor,omitempty"` Id string `json:"id,omitempty"` Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` Isextractable bool `json:"isextractable,omitempty"` Isfeatured bool `json:"isfeatured,omitempty"` Ispublic bool `json:"ispublic,omitempty"` Isready bool `json:"isready,omitempty"` Name string `json:"name,omitempty"` Ostypeid string `json:"ostypeid,omitempty"` Ostypename string `json:"ostypename,omitempty"` Passwordenabled bool `json:"passwordenabled,omitempty"` Project string `json:"project,omitempty"` Projectid string `json:"projectid,omitempty"` Removed string `json:"removed,omitempty"` Size int64 `json:"size,omitempty"` Sourcetemplateid string `json:"sourcetemplateid,omitempty"` Sshkeyenabled bool `json:"sshkeyenabled,omitempty"` Status string `json:"status,omitempty"` Tags []struct { Account string `json:"account,omitempty"` Customer string `json:"customer,omitempty"` Domain string `json:"domain,omitempty"` Domainid string `json:"domainid,omitempty"` Key string `json:"key,omitempty"` Project string `json:"project,omitempty"` Projectid string `json:"projectid,omitempty"` Resourceid string `json:"resourceid,omitempty"` Resourcetype string `json:"resourcetype,omitempty"` Value string `json:"value,omitempty"` } `json:"tags,omitempty"` Templatetag string `json:"templatetag,omitempty"` Templatetype string `json:"templatetype,omitempty"` Zoneid string `json:"zoneid,omitempty"` Zonename string `json:"zonename,omitempty"` } type DeleteIsoParams struct { p map[string]interface{} } func (p *DeleteIsoParams) toURLValues() url.Values { u := url.Values{} if p.p == nil { return u } if v, found := p.p["id"]; found { u.Set("id", v.(string)) } if v, found := p.p["zoneid"]; found { u.Set("zoneid", v.(string)) } return u } func (p *DeleteIsoParams) SetId(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["id"] = v return } func (p *DeleteIsoParams) SetZoneid(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["zoneid"] = v return } // You should always use this function to get a new DeleteIsoParams instance, // as then you are sure you have configured all required params func (s *ISOService) NewDeleteIsoParams(id string) *DeleteIsoParams { p := &DeleteIsoParams{} p.p = make(map[string]interface{}) p.p["id"] = id return p } // Deletes an ISO file. 
func (s *ISOService) DeleteIso(p *DeleteIsoParams) (*DeleteIsoResponse, error) { resp, err := s.cs.newRequest("deleteIso", p.toURLValues()) if err != nil { return nil, err } var r DeleteIsoResponse if err := json.Unmarshal(resp, &r); err != nil { return nil, err } // If we have a async client, we need to wait for the async result if s.cs.async { b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) if err != nil { if err == AsyncTimeoutErr { return &r, err } return nil, err } if err := json.Unmarshal(b, &r); err != nil { return nil, err } } return &r, nil } type DeleteIsoResponse struct { JobID string `json:"jobid,omitempty"` Displaytext string `json:"displaytext,omitempty"` Success bool `json:"success,omitempty"` } type CopyIsoParams struct { p map[string]interface{} } func (p *CopyIsoParams) toURLValues() url.Values { u := url.Values{} if p.p == nil { return u } if v, found := p.p["destzoneid"]; found { u.Set("destzoneid", v.(string)) } if v, found := p.p["id"]; found { u.Set("id", v.(string)) } if v, found := p.p["sourcezoneid"]; found { u.Set("sourcezoneid", v.(string)) } return u } func (p *CopyIsoParams) SetDestzoneid(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["destzoneid"] = v return } func (p *CopyIsoParams) SetId(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["id"] = v return } func (p *CopyIsoParams) SetSourcezoneid(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["sourcezoneid"] = v return } // You should always use this function to get a new CopyIsoParams instance, // as then you are sure you have configured all required params func (s *ISOService) NewCopyIsoParams(destzoneid string, id string) *CopyIsoParams { p := &CopyIsoParams{} p.p = make(map[string]interface{}) p.p["destzoneid"] = destzoneid p.p["id"] = id return p } // Copies an ISO from one zone to another. 
func (s *ISOService) CopyIso(p *CopyIsoParams) (*CopyIsoResponse, error) { resp, err := s.cs.newRequest("copyIso", p.toURLValues()) if err != nil { return nil, err } var r CopyIsoResponse if err := json.Unmarshal(resp, &r); err != nil { return nil, err } // If we have a async client, we need to wait for the async result if s.cs.async { b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) if err != nil { if err == AsyncTimeoutErr { return &r, err } return nil, err } b, err = getRawValue(b) if err != nil { return nil, err } if err := json.Unmarshal(b, &r); err != nil { return nil, err } } return &r, nil } type CopyIsoResponse struct { JobID string `json:"jobid,omitempty"` Account string `json:"account,omitempty"` Accountid string `json:"accountid,omitempty"` Bootable bool `json:"bootable,omitempty"` Checksum string `json:"checksum,omitempty"` Created string `json:"created,omitempty"` CrossZones bool `json:"crossZones,omitempty"` Details map[string]string `json:"details,omitempty"` Displaytext string `json:"displaytext,omitempty"` Domain string `json:"domain,omitempty"` Domainid string `json:"domainid,omitempty"` Format string `json:"format,omitempty"` Hostid string `json:"hostid,omitempty"` Hostname string `json:"hostname,omitempty"` Hypervisor string `json:"hypervisor,omitempty"` Id string `json:"id,omitempty"` Isdynamicallyscalable bool `json:"isdynamicallyscalable,omitempty"` Isextractable bool `json:"isextractable,omitempty"` Isfeatured bool `json:"isfeatured,omitempty"` Ispublic bool `json:"ispublic,omitempty"` Isready bool `json:"isready,omitempty"` Name string `json:"name,omitempty"` Ostypeid string `json:"ostypeid,omitempty"` Ostypename string `json:"ostypename,omitempty"` Passwordenabled bool `json:"passwordenabled,omitempty"` Project string `json:"project,omitempty"` Projectid string `json:"projectid,omitempty"` Removed string `json:"removed,omitempty"` Size int64 `json:"size,omitempty"` Sourcetemplateid string `json:"sourcetemplateid,omitempty"` Sshkeyenabled bool `json:"sshkeyenabled,omitempty"` Status string `json:"status,omitempty"` Tags []struct { Account string `json:"account,omitempty"` Customer string `json:"customer,omitempty"` Domain string `json:"domain,omitempty"` Domainid string `json:"domainid,omitempty"` Key string `json:"key,omitempty"` Project string `json:"project,omitempty"` Projectid string `json:"projectid,omitempty"` Resourceid string `json:"resourceid,omitempty"` Resourcetype string `json:"resourcetype,omitempty"` Value string `json:"value,omitempty"` } `json:"tags,omitempty"` Templatetag string `json:"templatetag,omitempty"` Templatetype string `json:"templatetype,omitempty"` Zoneid string `json:"zoneid,omitempty"` Zonename string `json:"zonename,omitempty"` } type UpdateIsoPermissionsParams struct { p map[string]interface{} } func (p *UpdateIsoPermissionsParams) toURLValues() url.Values { u := url.Values{} if p.p == nil { return u } if v, found := p.p["accounts"]; found { vv := strings.Join(v.([]string), ",") u.Set("accounts", vv) } if v, found := p.p["id"]; found { u.Set("id", v.(string)) } if v, found := p.p["isextractable"]; found { vv := strconv.FormatBool(v.(bool)) u.Set("isextractable", vv) } if v, found := p.p["isfeatured"]; found { vv := strconv.FormatBool(v.(bool)) u.Set("isfeatured", vv) } if v, found := p.p["ispublic"]; found { vv := strconv.FormatBool(v.(bool)) u.Set("ispublic", vv) } if v, found := p.p["op"]; found { u.Set("op", v.(string)) } if v, found := p.p["projectids"]; found { vv := strings.Join(v.([]string), ",") u.Set("projectids", 
vv) } return u } func (p *UpdateIsoPermissionsParams) SetAccounts(v []string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["accounts"] = v return } func (p *UpdateIsoPermissionsParams) SetId(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["id"] = v return } func (p *UpdateIsoPermissionsParams) SetIsextractable(v bool) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["isextractable"] = v return } func (p *UpdateIsoPermissionsParams) SetIsfeatured(v bool) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["isfeatured"] = v return } func (p *UpdateIsoPermissionsParams) SetIspublic(v bool) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["ispublic"] = v return } func (p *UpdateIsoPermissionsParams) SetOp(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["op"] = v return } func (p *UpdateIsoPermissionsParams) SetProjectids(v []string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["projectids"] = v return } // You should always use this function to get a new UpdateIsoPermissionsParams instance, // as then you are sure you have configured all required params func (s *ISOService) NewUpdateIsoPermissionsParams(id string) *UpdateIsoPermissionsParams { p := &UpdateIsoPermissionsParams{} p.p = make(map[string]interface{}) p.p["id"] = id return p } // Updates ISO permissions func (s *ISOService) UpdateIsoPermissions(p *UpdateIsoPermissionsParams) (*UpdateIsoPermissionsResponse, error) { resp, err := s.cs.newRequest("updateIsoPermissions", p.toURLValues()) if err != nil { return nil, err } var r UpdateIsoPermissionsResponse if err := json.Unmarshal(resp, &r); err != nil { return nil, err } return &r, nil } type UpdateIsoPermissionsResponse struct { Displaytext string `json:"displaytext,omitempty"` Success string `json:"success,omitempty"` } type ListIsoPermissionsParams struct { p map[string]interface{} } func (p *ListIsoPermissionsParams) toURLValues() url.Values { u := url.Values{} if p.p == nil { return u } if v, found := p.p["id"]; found { u.Set("id", v.(string)) } return u } func (p *ListIsoPermissionsParams) SetId(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["id"] = v return } // You should always use this function to get a new ListIsoPermissionsParams instance, // as then you are sure you have configured all required params func (s *ISOService) NewListIsoPermissionsParams(id string) *ListIsoPermissionsParams { p := &ListIsoPermissionsParams{} p.p = make(map[string]interface{}) p.p["id"] = id return p } // This is a courtesy helper function, which in some cases may not work as expected! func (s *ISOService) GetIsoPermissionByID(id string, opts ...OptionFunc) (*IsoPermission, int, error) { p := &ListIsoPermissionsParams{} p.p = make(map[string]interface{}) p.p["id"] = id for _, fn := range opts { if err := fn(s.cs, p); err != nil { return nil, -1, err } } l, err := s.ListIsoPermissions(p) if err != nil { if strings.Contains(err.Error(), fmt.Sprintf( "Invalid parameter id value=%s due to incorrect long value format, "+ "or entity does not exist", id)) { return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l) } return nil, -1, err } if l.Count == 0 { return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l) } if l.Count == 1 { return l.IsoPermissions[0], l.Count, nil } return nil, l.Count, fmt.Errorf("There is more then one result for IsoPermission UUID: %s!", id) } // List iso visibility and all accounts that have permissions to view this iso. 
func (s *ISOService) ListIsoPermissions(p *ListIsoPermissionsParams) (*ListIsoPermissionsResponse, error) { resp, err := s.cs.newRequest("listIsoPermissions", p.toURLValues()) if err != nil { return nil, err } var r ListIsoPermissionsResponse if err := json.Unmarshal(resp, &r); err != nil { return nil, err } return &r, nil } type ListIsoPermissionsResponse struct { Count int `json:"count"` IsoPermissions []*IsoPermission `json:"isopermission"` } type IsoPermission struct { Account []string `json:"account,omitempty"` Domainid string `json:"domainid,omitempty"` Id string `json:"id,omitempty"` Ispublic bool `json:"ispublic,omitempty"` Projectids []string `json:"projectids,omitempty"` } type ExtractIsoParams struct { p map[string]interface{} } func (p *ExtractIsoParams) toURLValues() url.Values { u := url.Values{} if p.p == nil { return u } if v, found := p.p["id"]; found { u.Set("id", v.(string)) } if v, found := p.p["mode"]; found { u.Set("mode", v.(string)) } if v, found := p.p["url"]; found { u.Set("url", v.(string)) } if v, found := p.p["zoneid"]; found { u.Set("zoneid", v.(string)) } return u } func (p *ExtractIsoParams) SetId(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["id"] = v return } func (p *ExtractIsoParams) SetMode(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["mode"] = v return } func (p *ExtractIsoParams) SetUrl(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["url"] = v return } func (p *ExtractIsoParams) SetZoneid(v string) { if p.p == nil { p.p = make(map[string]interface{}) } p.p["zoneid"] = v return } // You should always use this function to get a new ExtractIsoParams instance, // as then you are sure you have configured all required params func (s *ISOService) NewExtractIsoParams(id string, mode string) *ExtractIsoParams { p := &ExtractIsoParams{} p.p = make(map[string]interface{}) p.p["id"] = id p.p["mode"] = mode return p } // Extracts an ISO func (s *ISOService) ExtractIso(p *ExtractIsoParams) (*ExtractIsoResponse, error) { resp, err := s.cs.newRequest("extractIso", p.toURLValues()) if err != nil { return nil, err } var r ExtractIsoResponse if err := json.Unmarshal(resp, &r); err != nil { return nil, err } // If we have a async client, we need to wait for the async result if s.cs.async { b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout) if err != nil { if err == AsyncTimeoutErr { return &r, err } return nil, err } b, err = getRawValue(b) if err != nil { return nil, err } if err := json.Unmarshal(b, &r); err != nil { return nil, err } } return &r, nil } type ExtractIsoResponse struct { JobID string `json:"jobid,omitempty"` Accountid string `json:"accountid,omitempty"` Created string `json:"created,omitempty"` ExtractId string `json:"extractId,omitempty"` ExtractMode string `json:"extractMode,omitempty"` Id string `json:"id,omitempty"` Name string `json:"name,omitempty"` Resultstring string `json:"resultstring,omitempty"` State string `json:"state,omitempty"` Status string `json:"status,omitempty"` Storagetype string `json:"storagetype,omitempty"` Uploadpercentage int `json:"uploadpercentage,omitempty"` Url string `json:"url,omitempty"` Zoneid string `json:"zoneid,omitempty"` Zonename string `json:"zonename,omitempty"` }
vendor/github.com/xanzy/go-cloudstack/cloudstack/ISOService.go
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.0002467263548169285, 0.0001711606018943712, 0.0001616169756744057, 0.00017068698070943356, 0.0000070027722358645406 ]
{ "id": 5, "code_window": [ "\t\"testing\"\n", "\n", "\t\"reflect\"\n", "\n", "\t\"github.com/hashicorp/terraform/config\"\n", "\t\"github.com/hashicorp/terraform/helper/schema\"\n", "\t\"github.com/hashicorp/terraform/terraform\"\n", ")\n", "\n" ], "labels": [ "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 8 }
variable "foo" {} resource "test_instance" "foo" {} atlas { name = "foo" }
command/test-fixtures/push-input/main.tf
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.00017430350999347866, 0.00017430350999347866, 0.00017430350999347866, 0.00017430350999347866, 0 ]
{ "id": 6, "code_window": [ "var expectedScriptOut = `cd /tmp\n", "wget http://foobar\n", "exit 0\n", "`\n", "\n", "var expectedInlineScriptsOut = strings.Split(expectedScriptOut, \"\\n\")\n", "\n", "func TestResourceProvider_generateScript(t *testing.T) {\n", "\tp := Provisioner().(*schema.Provisioner)\n", "\tconf := map[string]interface{}{\n", "\t\t\"inline\": []interface{}{\n", "\t\t\t\"cd /tmp\",\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 48 }
package remoteexec import ( "bytes" "context" "fmt" "io" "io/ioutil" "log" "os" "sync/atomic" "time" "github.com/hashicorp/terraform/communicator" "github.com/hashicorp/terraform/communicator/remote" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" "github.com/mitchellh/go-linereader" ) func Provisioner() terraform.ResourceProvisioner { return &schema.Provisioner{ Schema: map[string]*schema.Schema{ "inline": &schema.Schema{ Type: schema.TypeList, Elem: &schema.Schema{Type: schema.TypeString}, PromoteSingle: true, Optional: true, ConflictsWith: []string{"script", "scripts"}, }, "script": &schema.Schema{ Type: schema.TypeString, Optional: true, ConflictsWith: []string{"inline", "scripts"}, }, "scripts": &schema.Schema{ Type: schema.TypeList, Elem: &schema.Schema{Type: schema.TypeString}, Optional: true, ConflictsWith: []string{"script", "inline"}, }, }, ApplyFunc: applyFn, } } // Apply executes the remote exec provisioner func applyFn(ctx context.Context) error { connState := ctx.Value(schema.ProvRawStateKey).(*terraform.InstanceState) data := ctx.Value(schema.ProvConfigDataKey).(*schema.ResourceData) o := ctx.Value(schema.ProvOutputKey).(terraform.UIOutput) // Get a new communicator comm, err := communicator.New(connState) if err != nil { return err } // Collect the scripts scripts, err := collectScripts(data) if err != nil { return err } for _, s := range scripts { defer s.Close() } // Copy and execute each script if err := runScripts(ctx, o, comm, scripts); err != nil { return err } return nil } // generateScripts takes the configuration and creates a script from each inline config func generateScripts(d *schema.ResourceData) ([]string, error) { var scripts []string for _, l := range d.Get("inline").([]interface{}) { scripts = append(scripts, l.(string)) } return scripts, nil } // collectScripts is used to collect all the scripts we need // to execute in preparation for copying them. func collectScripts(d *schema.ResourceData) ([]io.ReadCloser, error) { // Check if inline if _, ok := d.GetOk("inline"); ok { scripts, err := generateScripts(d) if err != nil { return nil, err } var r []io.ReadCloser for _, script := range scripts { r = append(r, ioutil.NopCloser(bytes.NewReader([]byte(script)))) } return r, nil } // Collect scripts var scripts []string if script, ok := d.GetOk("script"); ok { scripts = append(scripts, script.(string)) } if scriptList, ok := d.GetOk("scripts"); ok { for _, script := range scriptList.([]interface{}) { scripts = append(scripts, script.(string)) } } // Open all the scripts var fhs []io.ReadCloser for _, s := range scripts { fh, err := os.Open(s) if err != nil { for _, fh := range fhs { fh.Close() } return nil, fmt.Errorf("Failed to open script '%s': %v", s, err) } fhs = append(fhs, fh) } // Done, return the file handles return fhs, nil } // runScripts is used to copy and execute a set of scripts func runScripts( ctx context.Context, o terraform.UIOutput, comm communicator.Communicator, scripts []io.ReadCloser) error { // Wrap out context in a cancelation function that we use to // kill the connection. 
ctx, cancelFunc := context.WithCancel(ctx) defer cancelFunc() // Wait for the context to end and then disconnect go func() { <-ctx.Done() comm.Disconnect() }() // Wait and retry until we establish the connection err := retryFunc(ctx, comm.Timeout(), func() error { err := comm.Connect(o) return err }) if err != nil { return err } for _, script := range scripts { var cmd *remote.Cmd outR, outW := io.Pipe() errR, errW := io.Pipe() outDoneCh := make(chan struct{}) errDoneCh := make(chan struct{}) go copyOutput(o, outR, outDoneCh) go copyOutput(o, errR, errDoneCh) remotePath := comm.ScriptPath() err = retryFunc(ctx, comm.Timeout(), func() error { if err := comm.UploadScript(remotePath, script); err != nil { return fmt.Errorf("Failed to upload script: %v", err) } cmd = &remote.Cmd{ Command: remotePath, Stdout: outW, Stderr: errW, } if err := comm.Start(cmd); err != nil { return fmt.Errorf("Error starting script: %v", err) } return nil }) if err == nil { cmd.Wait() if cmd.ExitStatus != 0 { err = fmt.Errorf("Script exited with non-zero exit status: %d", cmd.ExitStatus) } } // If we have an error, end our context so the disconnect happens. // This has to happen before the output cleanup below since during // an interrupt this will cause the outputs to end. if err != nil { cancelFunc() } // Wait for output to clean up outW.Close() errW.Close() <-outDoneCh <-errDoneCh // Upload a blank follow up file in the same path to prevent residual // script contents from remaining on remote machine empty := bytes.NewReader([]byte("")) if err := comm.Upload(remotePath, empty); err != nil { // This feature is best-effort. log.Printf("[WARN] Failed to upload empty follow up script: %v", err) } // If we have an error, return it out now that we've cleaned up if err != nil { return err } } return nil } func copyOutput( o terraform.UIOutput, r io.Reader, doneCh chan<- struct{}) { defer close(doneCh) lr := linereader.New(r) for line := range lr.Ch { o.Output(line) } } // retryFunc is used to retry a function for a given duration func retryFunc(ctx context.Context, timeout time.Duration, f func() error) error { // Build a new context with the timeout ctx, done := context.WithTimeout(ctx, timeout) defer done() // Try the function in a goroutine var errVal atomic.Value doneCh := make(chan struct{}) go func() { defer close(doneCh) for { // If our context ended, we want to exit right away. select { case <-ctx.Done(): return default: } // Try the function call err := f() if err == nil { return } log.Printf("Retryable error: %v", err) errVal.Store(err) } }() // Wait for completion select { case <-doneCh: case <-ctx.Done(): } // Check if we have a context error to check if we're interrupted or timeout switch ctx.Err() { case context.Canceled: return fmt.Errorf("interrupted") case context.DeadlineExceeded: return fmt.Errorf("timeout") } // Check if we got an error executing if err, ok := errVal.Load().(error); ok { return err } return nil }
builtin/provisioners/remote-exec/resource_provisioner.go
1
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.005158083513379097, 0.00045737114851363003, 0.00016445275105070323, 0.00017232820391654968, 0.0009344529244117439 ]
{ "id": 6, "code_window": [ "var expectedScriptOut = `cd /tmp\n", "wget http://foobar\n", "exit 0\n", "`\n", "\n", "var expectedInlineScriptsOut = strings.Split(expectedScriptOut, \"\\n\")\n", "\n", "func TestResourceProvider_generateScript(t *testing.T) {\n", "\tp := Provisioner().(*schema.Provisioner)\n", "\tconf := map[string]interface{}{\n", "\t\t\"inline\": []interface{}{\n", "\t\t\t\"cd /tmp\",\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 48 }
// mksysnum_darwin.pl /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.10.sdk/usr/include/sys/syscall.h // MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT // +build 386,darwin package unix const ( SYS_SYSCALL = 0 SYS_EXIT = 1 SYS_FORK = 2 SYS_READ = 3 SYS_WRITE = 4 SYS_OPEN = 5 SYS_CLOSE = 6 SYS_WAIT4 = 7 SYS_LINK = 9 SYS_UNLINK = 10 SYS_CHDIR = 12 SYS_FCHDIR = 13 SYS_MKNOD = 14 SYS_CHMOD = 15 SYS_CHOWN = 16 SYS_GETFSSTAT = 18 SYS_GETPID = 20 SYS_SETUID = 23 SYS_GETUID = 24 SYS_GETEUID = 25 SYS_PTRACE = 26 SYS_RECVMSG = 27 SYS_SENDMSG = 28 SYS_RECVFROM = 29 SYS_ACCEPT = 30 SYS_GETPEERNAME = 31 SYS_GETSOCKNAME = 32 SYS_ACCESS = 33 SYS_CHFLAGS = 34 SYS_FCHFLAGS = 35 SYS_SYNC = 36 SYS_KILL = 37 SYS_GETPPID = 39 SYS_DUP = 41 SYS_PIPE = 42 SYS_GETEGID = 43 SYS_SIGACTION = 46 SYS_GETGID = 47 SYS_SIGPROCMASK = 48 SYS_GETLOGIN = 49 SYS_SETLOGIN = 50 SYS_ACCT = 51 SYS_SIGPENDING = 52 SYS_SIGALTSTACK = 53 SYS_IOCTL = 54 SYS_REBOOT = 55 SYS_REVOKE = 56 SYS_SYMLINK = 57 SYS_READLINK = 58 SYS_EXECVE = 59 SYS_UMASK = 60 SYS_CHROOT = 61 SYS_MSYNC = 65 SYS_VFORK = 66 SYS_MUNMAP = 73 SYS_MPROTECT = 74 SYS_MADVISE = 75 SYS_MINCORE = 78 SYS_GETGROUPS = 79 SYS_SETGROUPS = 80 SYS_GETPGRP = 81 SYS_SETPGID = 82 SYS_SETITIMER = 83 SYS_SWAPON = 85 SYS_GETITIMER = 86 SYS_GETDTABLESIZE = 89 SYS_DUP2 = 90 SYS_FCNTL = 92 SYS_SELECT = 93 SYS_FSYNC = 95 SYS_SETPRIORITY = 96 SYS_SOCKET = 97 SYS_CONNECT = 98 SYS_GETPRIORITY = 100 SYS_BIND = 104 SYS_SETSOCKOPT = 105 SYS_LISTEN = 106 SYS_SIGSUSPEND = 111 SYS_GETTIMEOFDAY = 116 SYS_GETRUSAGE = 117 SYS_GETSOCKOPT = 118 SYS_READV = 120 SYS_WRITEV = 121 SYS_SETTIMEOFDAY = 122 SYS_FCHOWN = 123 SYS_FCHMOD = 124 SYS_SETREUID = 126 SYS_SETREGID = 127 SYS_RENAME = 128 SYS_FLOCK = 131 SYS_MKFIFO = 132 SYS_SENDTO = 133 SYS_SHUTDOWN = 134 SYS_SOCKETPAIR = 135 SYS_MKDIR = 136 SYS_RMDIR = 137 SYS_UTIMES = 138 SYS_FUTIMES = 139 SYS_ADJTIME = 140 SYS_GETHOSTUUID = 142 SYS_SETSID = 147 SYS_GETPGID = 151 SYS_SETPRIVEXEC = 152 SYS_PREAD = 153 SYS_PWRITE = 154 SYS_NFSSVC = 155 SYS_STATFS = 157 SYS_FSTATFS = 158 SYS_UNMOUNT = 159 SYS_GETFH = 161 SYS_QUOTACTL = 165 SYS_MOUNT = 167 SYS_CSOPS = 169 SYS_CSOPS_AUDITTOKEN = 170 SYS_WAITID = 173 SYS_KDEBUG_TRACE64 = 179 SYS_KDEBUG_TRACE = 180 SYS_SETGID = 181 SYS_SETEGID = 182 SYS_SETEUID = 183 SYS_SIGRETURN = 184 SYS_CHUD = 185 SYS_FDATASYNC = 187 SYS_STAT = 188 SYS_FSTAT = 189 SYS_LSTAT = 190 SYS_PATHCONF = 191 SYS_FPATHCONF = 192 SYS_GETRLIMIT = 194 SYS_SETRLIMIT = 195 SYS_GETDIRENTRIES = 196 SYS_MMAP = 197 SYS_LSEEK = 199 SYS_TRUNCATE = 200 SYS_FTRUNCATE = 201 SYS_SYSCTL = 202 SYS_MLOCK = 203 SYS_MUNLOCK = 204 SYS_UNDELETE = 205 SYS_OPEN_DPROTECTED_NP = 216 SYS_GETATTRLIST = 220 SYS_SETATTRLIST = 221 SYS_GETDIRENTRIESATTR = 222 SYS_EXCHANGEDATA = 223 SYS_SEARCHFS = 225 SYS_DELETE = 226 SYS_COPYFILE = 227 SYS_FGETATTRLIST = 228 SYS_FSETATTRLIST = 229 SYS_POLL = 230 SYS_WATCHEVENT = 231 SYS_WAITEVENT = 232 SYS_MODWATCH = 233 SYS_GETXATTR = 234 SYS_FGETXATTR = 235 SYS_SETXATTR = 236 SYS_FSETXATTR = 237 SYS_REMOVEXATTR = 238 SYS_FREMOVEXATTR = 239 SYS_LISTXATTR = 240 SYS_FLISTXATTR = 241 SYS_FSCTL = 242 SYS_INITGROUPS = 243 SYS_POSIX_SPAWN = 244 SYS_FFSCTL = 245 SYS_NFSCLNT = 247 SYS_FHOPEN = 248 SYS_MINHERIT = 250 SYS_SEMSYS = 251 SYS_MSGSYS = 252 SYS_SHMSYS = 253 SYS_SEMCTL = 254 SYS_SEMGET = 255 SYS_SEMOP = 256 SYS_MSGCTL = 258 SYS_MSGGET = 259 SYS_MSGSND = 260 SYS_MSGRCV = 261 SYS_SHMAT = 262 SYS_SHMCTL = 263 SYS_SHMDT = 264 SYS_SHMGET = 265 SYS_SHM_OPEN = 266 SYS_SHM_UNLINK = 267 SYS_SEM_OPEN = 268 
SYS_SEM_CLOSE = 269 SYS_SEM_UNLINK = 270 SYS_SEM_WAIT = 271 SYS_SEM_TRYWAIT = 272 SYS_SEM_POST = 273 SYS_SYSCTLBYNAME = 274 SYS_OPEN_EXTENDED = 277 SYS_UMASK_EXTENDED = 278 SYS_STAT_EXTENDED = 279 SYS_LSTAT_EXTENDED = 280 SYS_FSTAT_EXTENDED = 281 SYS_CHMOD_EXTENDED = 282 SYS_FCHMOD_EXTENDED = 283 SYS_ACCESS_EXTENDED = 284 SYS_SETTID = 285 SYS_GETTID = 286 SYS_SETSGROUPS = 287 SYS_GETSGROUPS = 288 SYS_SETWGROUPS = 289 SYS_GETWGROUPS = 290 SYS_MKFIFO_EXTENDED = 291 SYS_MKDIR_EXTENDED = 292 SYS_IDENTITYSVC = 293 SYS_SHARED_REGION_CHECK_NP = 294 SYS_VM_PRESSURE_MONITOR = 296 SYS_PSYNCH_RW_LONGRDLOCK = 297 SYS_PSYNCH_RW_YIELDWRLOCK = 298 SYS_PSYNCH_RW_DOWNGRADE = 299 SYS_PSYNCH_RW_UPGRADE = 300 SYS_PSYNCH_MUTEXWAIT = 301 SYS_PSYNCH_MUTEXDROP = 302 SYS_PSYNCH_CVBROAD = 303 SYS_PSYNCH_CVSIGNAL = 304 SYS_PSYNCH_CVWAIT = 305 SYS_PSYNCH_RW_RDLOCK = 306 SYS_PSYNCH_RW_WRLOCK = 307 SYS_PSYNCH_RW_UNLOCK = 308 SYS_PSYNCH_RW_UNLOCK2 = 309 SYS_GETSID = 310 SYS_SETTID_WITH_PID = 311 SYS_PSYNCH_CVCLRPREPOST = 312 SYS_AIO_FSYNC = 313 SYS_AIO_RETURN = 314 SYS_AIO_SUSPEND = 315 SYS_AIO_CANCEL = 316 SYS_AIO_ERROR = 317 SYS_AIO_READ = 318 SYS_AIO_WRITE = 319 SYS_LIO_LISTIO = 320 SYS_IOPOLICYSYS = 322 SYS_PROCESS_POLICY = 323 SYS_MLOCKALL = 324 SYS_MUNLOCKALL = 325 SYS_ISSETUGID = 327 SYS___PTHREAD_KILL = 328 SYS___PTHREAD_SIGMASK = 329 SYS___SIGWAIT = 330 SYS___DISABLE_THREADSIGNAL = 331 SYS___PTHREAD_MARKCANCEL = 332 SYS___PTHREAD_CANCELED = 333 SYS___SEMWAIT_SIGNAL = 334 SYS_PROC_INFO = 336 SYS_SENDFILE = 337 SYS_STAT64 = 338 SYS_FSTAT64 = 339 SYS_LSTAT64 = 340 SYS_STAT64_EXTENDED = 341 SYS_LSTAT64_EXTENDED = 342 SYS_FSTAT64_EXTENDED = 343 SYS_GETDIRENTRIES64 = 344 SYS_STATFS64 = 345 SYS_FSTATFS64 = 346 SYS_GETFSSTAT64 = 347 SYS___PTHREAD_CHDIR = 348 SYS___PTHREAD_FCHDIR = 349 SYS_AUDIT = 350 SYS_AUDITON = 351 SYS_GETAUID = 353 SYS_SETAUID = 354 SYS_GETAUDIT_ADDR = 357 SYS_SETAUDIT_ADDR = 358 SYS_AUDITCTL = 359 SYS_BSDTHREAD_CREATE = 360 SYS_BSDTHREAD_TERMINATE = 361 SYS_KQUEUE = 362 SYS_KEVENT = 363 SYS_LCHOWN = 364 SYS_STACK_SNAPSHOT = 365 SYS_BSDTHREAD_REGISTER = 366 SYS_WORKQ_OPEN = 367 SYS_WORKQ_KERNRETURN = 368 SYS_KEVENT64 = 369 SYS___OLD_SEMWAIT_SIGNAL = 370 SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371 SYS_THREAD_SELFID = 372 SYS_LEDGER = 373 SYS___MAC_EXECVE = 380 SYS___MAC_SYSCALL = 381 SYS___MAC_GET_FILE = 382 SYS___MAC_SET_FILE = 383 SYS___MAC_GET_LINK = 384 SYS___MAC_SET_LINK = 385 SYS___MAC_GET_PROC = 386 SYS___MAC_SET_PROC = 387 SYS___MAC_GET_FD = 388 SYS___MAC_SET_FD = 389 SYS___MAC_GET_PID = 390 SYS___MAC_GET_LCID = 391 SYS___MAC_GET_LCTX = 392 SYS___MAC_SET_LCTX = 393 SYS_SETLCID = 394 SYS_GETLCID = 395 SYS_READ_NOCANCEL = 396 SYS_WRITE_NOCANCEL = 397 SYS_OPEN_NOCANCEL = 398 SYS_CLOSE_NOCANCEL = 399 SYS_WAIT4_NOCANCEL = 400 SYS_RECVMSG_NOCANCEL = 401 SYS_SENDMSG_NOCANCEL = 402 SYS_RECVFROM_NOCANCEL = 403 SYS_ACCEPT_NOCANCEL = 404 SYS_MSYNC_NOCANCEL = 405 SYS_FCNTL_NOCANCEL = 406 SYS_SELECT_NOCANCEL = 407 SYS_FSYNC_NOCANCEL = 408 SYS_CONNECT_NOCANCEL = 409 SYS_SIGSUSPEND_NOCANCEL = 410 SYS_READV_NOCANCEL = 411 SYS_WRITEV_NOCANCEL = 412 SYS_SENDTO_NOCANCEL = 413 SYS_PREAD_NOCANCEL = 414 SYS_PWRITE_NOCANCEL = 415 SYS_WAITID_NOCANCEL = 416 SYS_POLL_NOCANCEL = 417 SYS_MSGSND_NOCANCEL = 418 SYS_MSGRCV_NOCANCEL = 419 SYS_SEM_WAIT_NOCANCEL = 420 SYS_AIO_SUSPEND_NOCANCEL = 421 SYS___SIGWAIT_NOCANCEL = 422 SYS___SEMWAIT_SIGNAL_NOCANCEL = 423 SYS___MAC_MOUNT = 424 SYS___MAC_GET_MOUNT = 425 SYS___MAC_GETFSSTAT = 426 SYS_FSGETPATH = 427 SYS_AUDIT_SESSION_SELF = 428 SYS_AUDIT_SESSION_JOIN = 429 
SYS_FILEPORT_MAKEPORT = 430 SYS_FILEPORT_MAKEFD = 431 SYS_AUDIT_SESSION_PORT = 432 SYS_PID_SUSPEND = 433 SYS_PID_RESUME = 434 SYS_PID_HIBERNATE = 435 SYS_PID_SHUTDOWN_SOCKETS = 436 SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438 SYS_KAS_INFO = 439 SYS_MEMORYSTATUS_CONTROL = 440 SYS_GUARDED_OPEN_NP = 441 SYS_GUARDED_CLOSE_NP = 442 SYS_GUARDED_KQUEUE_NP = 443 SYS_CHANGE_FDGUARD_NP = 444 SYS_PROC_RLIMIT_CONTROL = 446 SYS_CONNECTX = 447 SYS_DISCONNECTX = 448 SYS_PEELOFF = 449 SYS_SOCKET_DELEGATE = 450 SYS_TELEMETRY = 451 SYS_PROC_UUID_POLICY = 452 SYS_MEMORYSTATUS_GET_LEVEL = 453 SYS_SYSTEM_OVERRIDE = 454 SYS_VFS_PURGE = 455 SYS_SFI_CTL = 456 SYS_SFI_PIDCTL = 457 SYS_COALITION = 458 SYS_COALITION_INFO = 459 SYS_NECP_MATCH_POLICY = 460 SYS_GETATTRLISTBULK = 461 SYS_OPENAT = 463 SYS_OPENAT_NOCANCEL = 464 SYS_RENAMEAT = 465 SYS_FACCESSAT = 466 SYS_FCHMODAT = 467 SYS_FCHOWNAT = 468 SYS_FSTATAT = 469 SYS_FSTATAT64 = 470 SYS_LINKAT = 471 SYS_UNLINKAT = 472 SYS_READLINKAT = 473 SYS_SYMLINKAT = 474 SYS_MKDIRAT = 475 SYS_GETATTRLISTAT = 476 SYS_PROC_TRACE_LOG = 477 SYS_BSDTHREAD_CTL = 478 SYS_OPENBYID_NP = 479 SYS_RECVMSG_X = 480 SYS_SENDMSG_X = 481 SYS_THREAD_SELFUSAGE = 482 SYS_CSRCTL = 483 SYS_GUARDED_OPEN_DPROTECTED_NP = 484 SYS_GUARDED_WRITE_NP = 485 SYS_GUARDED_PWRITE_NP = 486 SYS_GUARDED_WRITEV_NP = 487 SYS_RENAME_EXT = 488 SYS_MREMAP_ENCRYPTED = 489 SYS_MAXSYSCALL = 490 )
vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.0001706373441265896, 0.00016613275511190295, 0.00016341358423233032, 0.00016581418458372355, 0.00000170087264450558 ]
{ "id": 6, "code_window": [ "var expectedScriptOut = `cd /tmp\n", "wget http://foobar\n", "exit 0\n", "`\n", "\n", "var expectedInlineScriptsOut = strings.Split(expectedScriptOut, \"\\n\")\n", "\n", "func TestResourceProvider_generateScript(t *testing.T) {\n", "\tp := Provisioner().(*schema.Provisioner)\n", "\tconf := map[string]interface{}{\n", "\t\t\"inline\": []interface{}{\n", "\t\t\t\"cd /tmp\",\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 48 }
package floatingips

import "github.com/gophercloud/gophercloud"

const resourcePath = "floatingips"

func rootURL(c *gophercloud.ServiceClient) string {
	return c.ServiceURL(resourcePath)
}

func resourceURL(c *gophercloud.ServiceClient, id string) string {
	return c.ServiceURL(resourcePath, id)
}
vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/urls.go
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.00017315174045506865, 0.00017235599807463586, 0.00017156025569420308, 0.00017235599807463586, 7.957423804327846e-7 ]
{ "id": 6, "code_window": [ "var expectedScriptOut = `cd /tmp\n", "wget http://foobar\n", "exit 0\n", "`\n", "\n", "var expectedInlineScriptsOut = strings.Split(expectedScriptOut, \"\\n\")\n", "\n", "func TestResourceProvider_generateScript(t *testing.T) {\n", "\tp := Provisioner().(*schema.Provisioner)\n", "\tconf := map[string]interface{}{\n", "\t\t\"inline\": []interface{}{\n", "\t\t\t\"cd /tmp\",\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 48 }
--- layout: "inner" page_title: "Community" description: |- Terraform is a new project with a growing community. Despite this, there are active, dedicated users willing to help you through various mediums. --- <h1>Community</h1> <p> Terraform is a new project with a growing community. Despite this, there are active, dedicated users willing to help you through various mediums. </p> <p> <strong>Gitter:</strong> <a href="https://gitter.im/hashicorp-terraform/Lobby">Terraform Gitter Room</a> </p> <p> <strong>IRC:</strong> Use the <a href="https://irc.gitter.im">Gitter IRC bridge</a> </p> <p> <strong>Mailing list:</strong> <a href="https://groups.google.com/group/terraform-tool">Terraform Google Group</a> </p> <p> <strong>Bug Tracker:</strong> <a href="https://github.com/hashicorp/terraform/issues">Issue tracker on GitHub</a>. Please only use this for reporting bugs. Do not ask for general help here. Use IRC or the mailing list for that. </p> <p> <strong>Training:</strong> Paid <a href="https://www.hashicorp.com/training.html">HashiCorp training courses</a> are also available in a city near you. Private training courses are also available. </p> <h1>People</h1> <p> The following people are some of the faces behind Terraform. They each contribute to Terraform in some core way. Over time, faces may appear and disappear from this list as contributors come and go. </p> <div class="people"> <div class="person"> <img class="pull-left" src="https://www.gravatar.com/avatar/54079122b67de9677c1f93933ce8b63a.png?s=125"> <div class="bio"> <h3>Mitchell Hashimoto (<a href="https://github.com/mitchellh">@mitchellh</a>)</h3> <p> Mitchell Hashimoto is the creator of Terraform and works on all layers of Terraform from the core to providers. In addition to Terraform, Mitchell is the creator of <a href="https://www.vagrantup.com">Vagrant</a>, <a href="https://www.packer.io">Packer</a>, and <a href="https://www.consul.io">Consul</a>. </p> </div> </div> <div class="person"> <img class="pull-left" src="https://www.gravatar.com/avatar/2fafdc19b0f7248e9a1e1e07d5a8b678.png?s=125"> <div class="bio"> <h3>Paul Hinze (<a href="https://github.com/phinze">@phinze</a>)</h3> <p> Paul Hinze is the Project Lead of Terraform. He helps organize the team of HashiCorp employees and community members that work on Terraform day-to-day. He works on Terraform's core and providers. </p> </div> </div> <div class="person"> <img class="pull-left" src="https://www.gravatar.com/avatar/dfb3948650131e4f0385c3328187cfca.png?s=125"> <div class="bio"> <h3>Clint Shryock (<a href="https://github.com/catsby">@catsby</a>)</h3> <p> Clint Shryock is a HashiCorp Engineer working on Terraform. He is the primary maintainer of the AWS provider, and works across all providers. Clint is also the primary author of the Fastly provider. </p> </div> </div> <div class="person"> <img class="pull-left" src="https://www.gravatar.com/avatar/61334cbecffccf24984b9a64530a7508.png?s=125"> <div class="bio"> <h3>James Nugent (<a href="https://github.com/jen20">@jen20</a>)</h3> <p> James Nugent is a HashiCorp Engineer working on Terraform. He is one of the principal developers working in Terraform's core, though he can also be found working on providers from time to time as well. 
</p> </div> </div> <div class="person"> <img class="pull-left" src="https://www.gravatar.com/avatar/7a508a1329768332def9abdec9efa6fc?s=125"> <div class="bio"> <h3>Jake Champlin (<a href="https://github.com/grubernaut">@grubernaut</a>)</h3> <p> Jake Champlin is a HashiCorp Engineer working on Terraform. His primary focus will be the Terraform Provider Ecosystem. </p> </div> </div> <div class="person"> <img class="pull-left" src="https://www.gravatar.com/avatar/66fc33d259df37b17198d0d76e0ac169?s=125"> <div class="bio"> <h3>Radek Simko (<a href="https://github.com/radeksimko">@radeksimko</a>)</h3> <p> Radek Simko is a HashiCorp Engineer working on Terraform. His primary focus is the Terraform Provider Ecosystem, but you can find him working on some core features from time to time as well. </p> </div> </div> <div class="person"> <img class="pull-left" src="https://en.gravatar.com/userimage/69428860/3cff3bc89fd8344b831ab955d49d754a.jpg?size=125"> <div class="bio"> <h3>Matthew Frahry (<a href="https://github.com/mbfrahry">@mbfrahry</a>)</h3> <p> Matthew Frahry is a HashiCorp Engineer working on Terraform with an emphasis on the Terraform Provider Ecosystem. </p> </div> </div> <div class="clearfix"></div> </div>
website/source/community.html.erb
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.0001748563372530043, 0.000173368607647717, 0.00017042644321918488, 0.0001738760620355606, 0.0000012914512126371847 ]
{ "id": 7, "code_window": [ "\tif err != nil {\n", "\t\tt.Fatalf(\"err: %v\", err)\n", "\t}\n", "\n", "\tif reflect.DeepEqual(out, expectedInlineScriptsOut) {\n", "\t\tt.Fatalf(\"bad: %v\", out)\n", "\t}\n", "}\n", "\n", "func TestResourceProvider_CollectScripts_inline(t *testing.T) {\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tif len(out) != 1 {\n", "\t\tt.Fatal(\"expected 1 out\")\n", "\t}\n", "\n", "\tif out[0] != expectedScriptOut {\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 65 }
package remoteexec import ( "bytes" "context" "fmt" "io" "io/ioutil" "log" "os" "sync/atomic" "time" "github.com/hashicorp/terraform/communicator" "github.com/hashicorp/terraform/communicator/remote" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" "github.com/mitchellh/go-linereader" ) func Provisioner() terraform.ResourceProvisioner { return &schema.Provisioner{ Schema: map[string]*schema.Schema{ "inline": &schema.Schema{ Type: schema.TypeList, Elem: &schema.Schema{Type: schema.TypeString}, PromoteSingle: true, Optional: true, ConflictsWith: []string{"script", "scripts"}, }, "script": &schema.Schema{ Type: schema.TypeString, Optional: true, ConflictsWith: []string{"inline", "scripts"}, }, "scripts": &schema.Schema{ Type: schema.TypeList, Elem: &schema.Schema{Type: schema.TypeString}, Optional: true, ConflictsWith: []string{"script", "inline"}, }, }, ApplyFunc: applyFn, } } // Apply executes the remote exec provisioner func applyFn(ctx context.Context) error { connState := ctx.Value(schema.ProvRawStateKey).(*terraform.InstanceState) data := ctx.Value(schema.ProvConfigDataKey).(*schema.ResourceData) o := ctx.Value(schema.ProvOutputKey).(terraform.UIOutput) // Get a new communicator comm, err := communicator.New(connState) if err != nil { return err } // Collect the scripts scripts, err := collectScripts(data) if err != nil { return err } for _, s := range scripts { defer s.Close() } // Copy and execute each script if err := runScripts(ctx, o, comm, scripts); err != nil { return err } return nil } // generateScripts takes the configuration and creates a script from each inline config func generateScripts(d *schema.ResourceData) ([]string, error) { var scripts []string for _, l := range d.Get("inline").([]interface{}) { scripts = append(scripts, l.(string)) } return scripts, nil } // collectScripts is used to collect all the scripts we need // to execute in preparation for copying them. func collectScripts(d *schema.ResourceData) ([]io.ReadCloser, error) { // Check if inline if _, ok := d.GetOk("inline"); ok { scripts, err := generateScripts(d) if err != nil { return nil, err } var r []io.ReadCloser for _, script := range scripts { r = append(r, ioutil.NopCloser(bytes.NewReader([]byte(script)))) } return r, nil } // Collect scripts var scripts []string if script, ok := d.GetOk("script"); ok { scripts = append(scripts, script.(string)) } if scriptList, ok := d.GetOk("scripts"); ok { for _, script := range scriptList.([]interface{}) { scripts = append(scripts, script.(string)) } } // Open all the scripts var fhs []io.ReadCloser for _, s := range scripts { fh, err := os.Open(s) if err != nil { for _, fh := range fhs { fh.Close() } return nil, fmt.Errorf("Failed to open script '%s': %v", s, err) } fhs = append(fhs, fh) } // Done, return the file handles return fhs, nil } // runScripts is used to copy and execute a set of scripts func runScripts( ctx context.Context, o terraform.UIOutput, comm communicator.Communicator, scripts []io.ReadCloser) error { // Wrap out context in a cancelation function that we use to // kill the connection. 
ctx, cancelFunc := context.WithCancel(ctx) defer cancelFunc() // Wait for the context to end and then disconnect go func() { <-ctx.Done() comm.Disconnect() }() // Wait and retry until we establish the connection err := retryFunc(ctx, comm.Timeout(), func() error { err := comm.Connect(o) return err }) if err != nil { return err } for _, script := range scripts { var cmd *remote.Cmd outR, outW := io.Pipe() errR, errW := io.Pipe() outDoneCh := make(chan struct{}) errDoneCh := make(chan struct{}) go copyOutput(o, outR, outDoneCh) go copyOutput(o, errR, errDoneCh) remotePath := comm.ScriptPath() err = retryFunc(ctx, comm.Timeout(), func() error { if err := comm.UploadScript(remotePath, script); err != nil { return fmt.Errorf("Failed to upload script: %v", err) } cmd = &remote.Cmd{ Command: remotePath, Stdout: outW, Stderr: errW, } if err := comm.Start(cmd); err != nil { return fmt.Errorf("Error starting script: %v", err) } return nil }) if err == nil { cmd.Wait() if cmd.ExitStatus != 0 { err = fmt.Errorf("Script exited with non-zero exit status: %d", cmd.ExitStatus) } } // If we have an error, end our context so the disconnect happens. // This has to happen before the output cleanup below since during // an interrupt this will cause the outputs to end. if err != nil { cancelFunc() } // Wait for output to clean up outW.Close() errW.Close() <-outDoneCh <-errDoneCh // Upload a blank follow up file in the same path to prevent residual // script contents from remaining on remote machine empty := bytes.NewReader([]byte("")) if err := comm.Upload(remotePath, empty); err != nil { // This feature is best-effort. log.Printf("[WARN] Failed to upload empty follow up script: %v", err) } // If we have an error, return it out now that we've cleaned up if err != nil { return err } } return nil } func copyOutput( o terraform.UIOutput, r io.Reader, doneCh chan<- struct{}) { defer close(doneCh) lr := linereader.New(r) for line := range lr.Ch { o.Output(line) } } // retryFunc is used to retry a function for a given duration func retryFunc(ctx context.Context, timeout time.Duration, f func() error) error { // Build a new context with the timeout ctx, done := context.WithTimeout(ctx, timeout) defer done() // Try the function in a goroutine var errVal atomic.Value doneCh := make(chan struct{}) go func() { defer close(doneCh) for { // If our context ended, we want to exit right away. select { case <-ctx.Done(): return default: } // Try the function call err := f() if err == nil { return } log.Printf("Retryable error: %v", err) errVal.Store(err) } }() // Wait for completion select { case <-doneCh: case <-ctx.Done(): } // Check if we have a context error to check if we're interrupted or timeout switch ctx.Err() { case context.Canceled: return fmt.Errorf("interrupted") case context.DeadlineExceeded: return fmt.Errorf("timeout") } // Check if we got an error executing if err, ok := errVal.Load().(error); ok { return err } return nil }
builtin/provisioners/remote-exec/resource_provisioner.go
1
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.0011582236038520932, 0.0002497624373063445, 0.00016227620653808117, 0.00017084524733945727, 0.0002083016443066299 ]
{ "id": 7, "code_window": [ "\tif err != nil {\n", "\t\tt.Fatalf(\"err: %v\", err)\n", "\t}\n", "\n", "\tif reflect.DeepEqual(out, expectedInlineScriptsOut) {\n", "\t\tt.Fatalf(\"bad: %v\", out)\n", "\t}\n", "}\n", "\n", "func TestResourceProvider_CollectScripts_inline(t *testing.T) {\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tif len(out) != 1 {\n", "\t\tt.Fatal(\"expected 1 out\")\n", "\t}\n", "\n", "\tif out[0] != expectedScriptOut {\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 65 }
package aws import ( "bytes" "fmt" "log" "sort" "strconv" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" ) func resourceAwsNetworkAcl() *schema.Resource { return &schema.Resource{ Create: resourceAwsNetworkAclCreate, Read: resourceAwsNetworkAclRead, Delete: resourceAwsNetworkAclDelete, Update: resourceAwsNetworkAclUpdate, Importer: &schema.ResourceImporter{ State: resourceAwsNetworkAclImportState, }, Schema: map[string]*schema.Schema{ "vpc_id": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, Computed: false, }, "subnet_id": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, Computed: false, Deprecated: "Attribute subnet_id is deprecated on network_acl resources. Use subnet_ids instead", }, "subnet_ids": &schema.Schema{ Type: schema.TypeSet, Optional: true, Computed: true, ConflictsWith: []string{"subnet_id"}, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, "ingress": &schema.Schema{ Type: schema.TypeSet, Required: false, Optional: true, Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "from_port": &schema.Schema{ Type: schema.TypeInt, Required: true, }, "to_port": &schema.Schema{ Type: schema.TypeInt, Required: true, }, "rule_no": &schema.Schema{ Type: schema.TypeInt, Required: true, }, "action": &schema.Schema{ Type: schema.TypeString, Required: true, }, "protocol": &schema.Schema{ Type: schema.TypeString, Required: true, }, "cidr_block": &schema.Schema{ Type: schema.TypeString, Optional: true, }, "icmp_type": &schema.Schema{ Type: schema.TypeInt, Optional: true, }, "icmp_code": &schema.Schema{ Type: schema.TypeInt, Optional: true, }, }, }, Set: resourceAwsNetworkAclEntryHash, }, "egress": &schema.Schema{ Type: schema.TypeSet, Required: false, Optional: true, Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "from_port": &schema.Schema{ Type: schema.TypeInt, Required: true, }, "to_port": &schema.Schema{ Type: schema.TypeInt, Required: true, }, "rule_no": &schema.Schema{ Type: schema.TypeInt, Required: true, }, "action": &schema.Schema{ Type: schema.TypeString, Required: true, }, "protocol": &schema.Schema{ Type: schema.TypeString, Required: true, }, "cidr_block": &schema.Schema{ Type: schema.TypeString, Optional: true, }, "icmp_type": &schema.Schema{ Type: schema.TypeInt, Optional: true, }, "icmp_code": &schema.Schema{ Type: schema.TypeInt, Optional: true, }, }, }, Set: resourceAwsNetworkAclEntryHash, }, "tags": tagsSchema(), }, } } func resourceAwsNetworkAclCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn // Create the Network Acl createOpts := &ec2.CreateNetworkAclInput{ VpcId: aws.String(d.Get("vpc_id").(string)), } log.Printf("[DEBUG] Network Acl create config: %#v", createOpts) resp, err := conn.CreateNetworkAcl(createOpts) if err != nil { return fmt.Errorf("Error creating network acl: %s", err) } // Get the ID and store it networkAcl := resp.NetworkAcl d.SetId(*networkAcl.NetworkAclId) log.Printf("[INFO] Network Acl ID: %s", *networkAcl.NetworkAclId) // Update rules and subnet association once acl is created return resourceAwsNetworkAclUpdate(d, meta) } func resourceAwsNetworkAclRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn resp, err := 
conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{ NetworkAclIds: []*string{aws.String(d.Id())}, }) if err != nil { if ec2err, ok := err.(awserr.Error); ok { if ec2err.Code() == "InvalidNetworkAclID.NotFound" { log.Printf("[DEBUG] Network ACL (%s) not found", d.Id()) d.SetId("") return nil } } return err } if resp == nil { return nil } networkAcl := resp.NetworkAcls[0] var ingressEntries []*ec2.NetworkAclEntry var egressEntries []*ec2.NetworkAclEntry // separate the ingress and egress rules for _, e := range networkAcl.Entries { // Skip the default rules added by AWS. They can be neither // configured or deleted by users. if *e.RuleNumber == awsDefaultAclRuleNumber { continue } if *e.Egress == true { egressEntries = append(egressEntries, e) } else { ingressEntries = append(ingressEntries, e) } } d.Set("vpc_id", networkAcl.VpcId) d.Set("tags", tagsToMap(networkAcl.Tags)) var s []string for _, a := range networkAcl.Associations { s = append(s, *a.SubnetId) } sort.Strings(s) if err := d.Set("subnet_ids", s); err != nil { return err } if err := d.Set("ingress", networkAclEntriesToMapList(ingressEntries)); err != nil { return err } if err := d.Set("egress", networkAclEntriesToMapList(egressEntries)); err != nil { return err } return nil } func resourceAwsNetworkAclUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn d.Partial(true) if d.HasChange("ingress") { err := updateNetworkAclEntries(d, "ingress", conn) if err != nil { return err } } if d.HasChange("egress") { err := updateNetworkAclEntries(d, "egress", conn) if err != nil { return err } } if d.HasChange("subnet_id") { //associate new subnet with the acl. _, n := d.GetChange("subnet_id") newSubnet := n.(string) association, err := findNetworkAclAssociation(newSubnet, conn) if err != nil { return fmt.Errorf("Failed to update acl %s with subnet %s: %s", d.Id(), newSubnet, err) } _, err = conn.ReplaceNetworkAclAssociation(&ec2.ReplaceNetworkAclAssociationInput{ AssociationId: association.NetworkAclAssociationId, NetworkAclId: aws.String(d.Id()), }) if err != nil { return err } } if d.HasChange("subnet_ids") { o, n := d.GetChange("subnet_ids") if o == nil { o = new(schema.Set) } if n == nil { n = new(schema.Set) } os := o.(*schema.Set) ns := n.(*schema.Set) remove := os.Difference(ns).List() add := ns.Difference(os).List() if len(remove) > 0 { // A Network ACL is required for each subnet. In order to disassociate a // subnet from this ACL, we must associate it with the default ACL. 
defaultAcl, err := getDefaultNetworkAcl(d.Get("vpc_id").(string), conn) if err != nil { return fmt.Errorf("Failed to find Default ACL for VPC %s", d.Get("vpc_id").(string)) } for _, r := range remove { association, err := findNetworkAclAssociation(r.(string), conn) if err != nil { return fmt.Errorf("Failed to find acl association: acl %s with subnet %s: %s", d.Id(), r, err) } log.Printf("DEBUG] Replacing Network Acl Association (%s) with Default Network ACL ID (%s)", *association.NetworkAclAssociationId, *defaultAcl.NetworkAclId) _, err = conn.ReplaceNetworkAclAssociation(&ec2.ReplaceNetworkAclAssociationInput{ AssociationId: association.NetworkAclAssociationId, NetworkAclId: defaultAcl.NetworkAclId, }) if err != nil { return err } } } if len(add) > 0 { for _, a := range add { association, err := findNetworkAclAssociation(a.(string), conn) if err != nil { return fmt.Errorf("Failed to find acl association: acl %s with subnet %s: %s", d.Id(), a, err) } _, err = conn.ReplaceNetworkAclAssociation(&ec2.ReplaceNetworkAclAssociationInput{ AssociationId: association.NetworkAclAssociationId, NetworkAclId: aws.String(d.Id()), }) if err != nil { return err } } } } if err := setTags(conn, d); err != nil { return err } else { d.SetPartial("tags") } d.Partial(false) return resourceAwsNetworkAclRead(d, meta) } func updateNetworkAclEntries(d *schema.ResourceData, entryType string, conn *ec2.EC2) error { if d.HasChange(entryType) { o, n := d.GetChange(entryType) if o == nil { o = new(schema.Set) } if n == nil { n = new(schema.Set) } os := o.(*schema.Set) ns := n.(*schema.Set) toBeDeleted, err := expandNetworkAclEntries(os.Difference(ns).List(), entryType) if err != nil { return err } for _, remove := range toBeDeleted { // AWS includes default rules with all network ACLs that can be // neither modified nor destroyed. They have a custom rule // number that is out of bounds for any other rule. If we // encounter it, just continue. There's no work to be done. if *remove.RuleNumber == awsDefaultAclRuleNumber { continue } // Delete old Acl log.Printf("[DEBUG] Destroying Network ACL Entry number (%d)", int(*remove.RuleNumber)) _, err := conn.DeleteNetworkAclEntry(&ec2.DeleteNetworkAclEntryInput{ NetworkAclId: aws.String(d.Id()), RuleNumber: remove.RuleNumber, Egress: remove.Egress, }) if err != nil { return fmt.Errorf("Error deleting %s entry: %s", entryType, err) } } toBeCreated, err := expandNetworkAclEntries(ns.Difference(os).List(), entryType) if err != nil { return err } for _, add := range toBeCreated { // Protocol -1 rules don't store ports in AWS. Thus, they'll always // hash differently when being read out of the API. Force the user // to set from_port and to_port to 0 for these rules, to keep the // hashing consistent. if *add.Protocol == "-1" { to := *add.PortRange.To from := *add.PortRange.From expected := &expectedPortPair{ to_port: 0, from_port: 0, } if ok := validatePorts(to, from, *expected); !ok { return fmt.Errorf( "to_port (%d) and from_port (%d) must both be 0 to use the the 'all' \"-1\" protocol!", to, from) } } // AWS mutates the CIDR block into a network implied by the IP and // mask provided. This results in hashing inconsistencies between // the local config file and the state returned by the API. 
Error // if the user provides a CIDR block with an inappropriate mask if err := validateCIDRBlock(*add.CidrBlock); err != nil { return err } // Add new Acl entry _, connErr := conn.CreateNetworkAclEntry(&ec2.CreateNetworkAclEntryInput{ NetworkAclId: aws.String(d.Id()), CidrBlock: add.CidrBlock, Egress: add.Egress, PortRange: add.PortRange, Protocol: add.Protocol, RuleAction: add.RuleAction, RuleNumber: add.RuleNumber, IcmpTypeCode: add.IcmpTypeCode, }) if connErr != nil { return fmt.Errorf("Error creating %s entry: %s", entryType, connErr) } } } return nil } func resourceAwsNetworkAclDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn log.Printf("[INFO] Deleting Network Acl: %s", d.Id()) retryErr := resource.Retry(5*time.Minute, func() *resource.RetryError { _, err := conn.DeleteNetworkAcl(&ec2.DeleteNetworkAclInput{ NetworkAclId: aws.String(d.Id()), }) if err != nil { ec2err := err.(awserr.Error) switch ec2err.Code() { case "InvalidNetworkAclID.NotFound": return nil case "DependencyViolation": // In case of dependency violation, we remove the association between subnet and network acl. // This means the subnet is attached to default acl of vpc. var associations []*ec2.NetworkAclAssociation if v, ok := d.GetOk("subnet_id"); ok { a, err := findNetworkAclAssociation(v.(string), conn) if err != nil { return resource.NonRetryableError(err) } associations = append(associations, a) } else if v, ok := d.GetOk("subnet_ids"); ok { ids := v.(*schema.Set).List() for _, i := range ids { a, err := findNetworkAclAssociation(i.(string), conn) if err != nil { return resource.NonRetryableError(err) } associations = append(associations, a) } } log.Printf("[DEBUG] Replacing network associations for Network ACL (%s): %s", d.Id(), associations) defaultAcl, err := getDefaultNetworkAcl(d.Get("vpc_id").(string), conn) if err != nil { return resource.NonRetryableError(err) } for _, a := range associations { log.Printf("DEBUG] Replacing Network Acl Association (%s) with Default Network ACL ID (%s)", *a.NetworkAclAssociationId, *defaultAcl.NetworkAclId) _, replaceErr := conn.ReplaceNetworkAclAssociation(&ec2.ReplaceNetworkAclAssociationInput{ AssociationId: a.NetworkAclAssociationId, NetworkAclId: defaultAcl.NetworkAclId, }) if replaceErr != nil { if replaceEc2err, ok := replaceErr.(awserr.Error); ok { // It's possible that during an attempt to replace this // association, the Subnet in question has already been moved to // another ACL. This can happen if you're destroying a network acl // and simultaneously re-associating it's subnet(s) with another // ACL; Terraform may have already re-associated the subnet(s) by // the time we attempt to destroy them, even between the time we // list them and then try to destroy them. In this case, the // association we're trying to replace will no longer exist and // this call will fail. Here we trap that error and fail // gracefully; the association we tried to replace gone, we trust // someone else has taken ownership. 
if replaceEc2err.Code() == "InvalidAssociationID.NotFound" { log.Printf("[WARN] Network Association (%s) no longer found; Network Association likely updated or removed externally, removing from state", *a.NetworkAclAssociationId) continue } } log.Printf("[ERR] Non retry-able error in replacing associations for Network ACL (%s): %s", d.Id(), replaceErr) return resource.NonRetryableError(replaceErr) } } return resource.RetryableError(fmt.Errorf("Dependencies found and cleaned up, retrying")) default: // Any other error, we want to quit the retry loop immediately return resource.NonRetryableError(err) } } log.Printf("[Info] Deleted network ACL %s successfully", d.Id()) return nil }) if retryErr != nil { return fmt.Errorf("[ERR] Error destroying Network ACL (%s): %s", d.Id(), retryErr) } return nil } func resourceAwsNetworkAclEntryHash(v interface{}) int { var buf bytes.Buffer m := v.(map[string]interface{}) buf.WriteString(fmt.Sprintf("%d-", m["from_port"].(int))) buf.WriteString(fmt.Sprintf("%d-", m["to_port"].(int))) buf.WriteString(fmt.Sprintf("%d-", m["rule_no"].(int))) buf.WriteString(fmt.Sprintf("%s-", m["action"].(string))) // The AWS network ACL API only speaks protocol numbers, and that's // all we store. Never hash a protocol name. protocol := m["protocol"].(string) if _, err := strconv.Atoi(m["protocol"].(string)); err != nil { // We're a protocol name. Look up the number. buf.WriteString(fmt.Sprintf("%d-", protocolIntegers()[protocol])) } else { // We're a protocol number. Pass the value through. buf.WriteString(fmt.Sprintf("%s-", protocol)) } buf.WriteString(fmt.Sprintf("%s-", m["cidr_block"].(string))) if v, ok := m["ssl_certificate_id"]; ok { buf.WriteString(fmt.Sprintf("%s-", v.(string))) } if v, ok := m["icmp_type"]; ok { buf.WriteString(fmt.Sprintf("%d-", v.(int))) } if v, ok := m["icmp_code"]; ok { buf.WriteString(fmt.Sprintf("%d-", v.(int))) } return hashcode.String(buf.String()) } func getDefaultNetworkAcl(vpc_id string, conn *ec2.EC2) (defaultAcl *ec2.NetworkAcl, err error) { resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{ Filters: []*ec2.Filter{ &ec2.Filter{ Name: aws.String("default"), Values: []*string{aws.String("true")}, }, &ec2.Filter{ Name: aws.String("vpc-id"), Values: []*string{aws.String(vpc_id)}, }, }, }) if err != nil { return nil, err } return resp.NetworkAcls[0], nil } func findNetworkAclAssociation(subnetId string, conn *ec2.EC2) (networkAclAssociation *ec2.NetworkAclAssociation, err error) { resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{ Filters: []*ec2.Filter{ &ec2.Filter{ Name: aws.String("association.subnet-id"), Values: []*string{aws.String(subnetId)}, }, }, }) if err != nil { return nil, err } if resp.NetworkAcls != nil && len(resp.NetworkAcls) > 0 { for _, association := range resp.NetworkAcls[0].Associations { if *association.SubnetId == subnetId { return association, nil } } } return nil, fmt.Errorf("could not find association for subnet: %s ", subnetId) } // networkAclEntriesToMapList turns ingress/egress rules read from AWS into a list // of maps. func networkAclEntriesToMapList(networkAcls []*ec2.NetworkAclEntry) []map[string]interface{} { result := make([]map[string]interface{}, 0, len(networkAcls)) for _, entry := range networkAcls { acl := make(map[string]interface{}) acl["rule_no"] = *entry.RuleNumber acl["action"] = *entry.RuleAction acl["cidr_block"] = *entry.CidrBlock // The AWS network ACL API only speaks protocol numbers, and // that's all we record. 
if _, err := strconv.Atoi(*entry.Protocol); err != nil { // We're a protocol name. Look up the number. acl["protocol"] = protocolIntegers()[*entry.Protocol] } else { // We're a protocol number. Pass through. acl["protocol"] = *entry.Protocol } acl["protocol"] = *entry.Protocol if entry.PortRange != nil { acl["from_port"] = *entry.PortRange.From acl["to_port"] = *entry.PortRange.To } if entry.IcmpTypeCode != nil { acl["icmp_type"] = *entry.IcmpTypeCode.Type acl["icmp_code"] = *entry.IcmpTypeCode.Code } result = append(result, acl) } return result }
builtin/providers/aws/resource_aws_network_acl.go
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.002320887055248022, 0.00022833225375507027, 0.0001598922535777092, 0.0001726145128486678, 0.0002870876342058182 ]
{ "id": 7, "code_window": [ "\tif err != nil {\n", "\t\tt.Fatalf(\"err: %v\", err)\n", "\t}\n", "\n", "\tif reflect.DeepEqual(out, expectedInlineScriptsOut) {\n", "\t\tt.Fatalf(\"bad: %v\", out)\n", "\t}\n", "}\n", "\n", "func TestResourceProvider_CollectScripts_inline(t *testing.T) {\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tif len(out) != 1 {\n", "\t\tt.Fatal(\"expected 1 out\")\n", "\t}\n", "\n", "\tif out[0] != expectedScriptOut {\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 65 }
package aws import ( "fmt" "testing" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/route53" ) func TestAccAWSRoute53ZoneAssociation_basic(t *testing.T) { var zone route53.HostedZone resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckRoute53ZoneAssociationDestroy, Steps: []resource.TestStep{ resource.TestStep{ Config: testAccRoute53ZoneAssociationConfig, Check: resource.ComposeTestCheckFunc( testAccCheckRoute53ZoneAssociationExists("aws_route53_zone_association.foobar", &zone), ), }, }, }) } func TestAccAWSRoute53ZoneAssociation_region(t *testing.T) { var zone route53.HostedZone // record the initialized providers so that we can use them to // check for the instances in each region var providers []*schema.Provider providerFactories := map[string]terraform.ResourceProviderFactory{ "aws": func() (terraform.ResourceProvider, error) { p := Provider() providers = append(providers, p.(*schema.Provider)) return p, nil }, } resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, ProviderFactories: providerFactories, CheckDestroy: testAccCheckRoute53ZoneAssociationDestroyWithProviders(&providers), Steps: []resource.TestStep{ resource.TestStep{ Config: testAccRoute53ZoneAssociationRegionConfig, Check: resource.ComposeTestCheckFunc( testAccCheckRoute53ZoneAssociationExistsWithProviders("aws_route53_zone_association.foobar", &zone, &providers), ), }, }, }) } func testAccCheckRoute53ZoneAssociationDestroy(s *terraform.State) error { return testAccCheckRoute53ZoneAssociationDestroyWithProvider(s, testAccProvider) } func testAccCheckRoute53ZoneAssociationDestroyWithProviders(providers *[]*schema.Provider) resource.TestCheckFunc { return func(s *terraform.State) error { for _, provider := range *providers { if provider.Meta() == nil { continue } if err := testAccCheckRoute53ZoneAssociationDestroyWithProvider(s, provider); err != nil { return err } } return nil } } func testAccCheckRoute53ZoneAssociationDestroyWithProvider(s *terraform.State, provider *schema.Provider) error { conn := provider.Meta().(*AWSClient).r53conn for _, rs := range s.RootModule().Resources { if rs.Type != "aws_route53_zone_association" { continue } zone_id, vpc_id := resourceAwsRoute53ZoneAssociationParseId(rs.Primary.ID) resp, err := conn.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(zone_id)}) if err != nil { exists := false for _, vpc := range resp.VPCs { if vpc_id == *vpc.VPCId { exists = true } } if exists { return fmt.Errorf("VPC: %v is still associated to HostedZone: %v", vpc_id, zone_id) } } } return nil } func testAccCheckRoute53ZoneAssociationExists(n string, zone *route53.HostedZone) resource.TestCheckFunc { return func(s *terraform.State) error { return testAccCheckRoute53ZoneAssociationExistsWithProvider(s, n, zone, testAccProvider) } } func testAccCheckRoute53ZoneAssociationExistsWithProviders(n string, zone *route53.HostedZone, providers *[]*schema.Provider) resource.TestCheckFunc { return func(s *terraform.State) error { for _, provider := range *providers { if provider.Meta() == nil { continue } if err := testAccCheckRoute53ZoneAssociationExistsWithProvider(s, n, zone, provider); err != nil { return err } } return nil } } func testAccCheckRoute53ZoneAssociationExistsWithProvider(s *terraform.State, n string, zone *route53.HostedZone, provider 
*schema.Provider) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } if rs.Primary.ID == "" { return fmt.Errorf("No zone association ID is set") } zone_id, vpc_id := resourceAwsRoute53ZoneAssociationParseId(rs.Primary.ID) conn := provider.Meta().(*AWSClient).r53conn resp, err := conn.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(zone_id)}) if err != nil { return fmt.Errorf("Hosted zone err: %v", err) } exists := false for _, vpc := range resp.VPCs { if vpc_id == *vpc.VPCId { exists = true } } if !exists { return fmt.Errorf("Hosted zone association not found") } *zone = *resp.HostedZone return nil } const testAccRoute53ZoneAssociationConfig = ` resource "aws_vpc" "foo" { cidr_block = "10.6.0.0/16" enable_dns_hostnames = true enable_dns_support = true } resource "aws_vpc" "bar" { cidr_block = "10.7.0.0/16" enable_dns_hostnames = true enable_dns_support = true } resource "aws_route53_zone" "foo" { name = "foo.com" vpc_id = "${aws_vpc.foo.id}" } resource "aws_route53_zone_association" "foobar" { zone_id = "${aws_route53_zone.foo.id}" vpc_id = "${aws_vpc.bar.id}" } ` const testAccRoute53ZoneAssociationRegionConfig = ` provider "aws" { alias = "west" region = "us-west-2" } provider "aws" { alias = "east" region = "us-east-1" } resource "aws_vpc" "foo" { provider = "aws.west" cidr_block = "10.6.0.0/16" enable_dns_hostnames = true enable_dns_support = true } resource "aws_vpc" "bar" { provider = "aws.east" cidr_block = "10.7.0.0/16" enable_dns_hostnames = true enable_dns_support = true } resource "aws_route53_zone" "foo" { provider = "aws.west" name = "foo.com" vpc_id = "${aws_vpc.foo.id}" } resource "aws_route53_zone_association" "foobar" { provider = "aws.west" zone_id = "${aws_route53_zone.foo.id}" vpc_id = "${aws_vpc.bar.id}" vpc_region = "us-east-1" } `
builtin/providers/aws/resource_aws_route53_zone_association_test.go
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.0005966852768324316, 0.0001966750860447064, 0.00016199404490180314, 0.00017318851314485073, 0.00008959099068306386 ]
{ "id": 7, "code_window": [ "\tif err != nil {\n", "\t\tt.Fatalf(\"err: %v\", err)\n", "\t}\n", "\n", "\tif reflect.DeepEqual(out, expectedInlineScriptsOut) {\n", "\t\tt.Fatalf(\"bad: %v\", out)\n", "\t}\n", "}\n", "\n", "func TestResourceProvider_CollectScripts_inline(t *testing.T) {\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tif len(out) != 1 {\n", "\t\tt.Fatal(\"expected 1 out\")\n", "\t}\n", "\n", "\tif out[0] != expectedScriptOut {\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 65 }
package dns import "github.com/jen20/riviera/azure" type MXRecord struct { Preference string `json:"preference" mapstructure:"preference"` //*Why* is this a string in the API?! Exchange string `json:"exchange" mapstructure:"exchange"` } type CreateMXRecordSetResponse struct { ID string `mapstructure:"id"` Name string `mapstructure:"name"` Location string `mapstructure:"location"` Tags map[string]*string `mapstructure:"tags"` TTL *int `mapstructure:"TTL"` MXRecords []MXRecord `mapstructure:"MXRecords"` } type CreateMXRecordSet struct { Name string `json:"-"` ResourceGroupName string `json:"-"` ZoneName string `json:"-"` Location string `json:"-" riviera:"location"` Tags map[string]*string `json:"-" riviera:"tags"` TTL int `json:"TTL"` MXRecords []MXRecord `json:"MXRecords"` } func (command CreateMXRecordSet) APIInfo() azure.APIInfo { return azure.APIInfo{ APIVersion: apiVersion, Method: "PUT", URLPathFunc: dnsRecordSetDefaultURLPathFunc(command.ResourceGroupName, command.ZoneName, "MX", command.Name), ResponseTypeFunc: func() interface{} { return &CreateMXRecordSetResponse{} }, } }
vendor/github.com/jen20/riviera/dns/create_dns_mx_recordset.go
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.00017337509780190885, 0.00017147725156974047, 0.00016912200953811407, 0.00017170593491755426, 0.0000015646212432329776 ]
{ "id": 8, "code_window": [ "\tif err != nil {\n", "\t\tt.Fatalf(\"err: %v\", err)\n", "\t}\n", "\n", "\tif len(scripts) != 3 {\n", "\t\tt.Fatalf(\"bad: %v\", scripts)\n", "\t}\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tif len(scripts) != 1 {\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 86 }
package remoteexec import ( "bytes" "io" "strings" "testing" "reflect" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" ) func TestResourceProvider_Validate_good(t *testing.T) { c := testConfig(t, map[string]interface{}{ "inline": "echo foo", }) p := Provisioner() warn, errs := p.Validate(c) if len(warn) > 0 { t.Fatalf("Warnings: %v", warn) } if len(errs) > 0 { t.Fatalf("Errors: %v", errs) } } func TestResourceProvider_Validate_bad(t *testing.T) { c := testConfig(t, map[string]interface{}{ "invalid": "nope", }) p := Provisioner() warn, errs := p.Validate(c) if len(warn) > 0 { t.Fatalf("Warnings: %v", warn) } if len(errs) == 0 { t.Fatalf("Should have errors") } } var expectedScriptOut = `cd /tmp wget http://foobar exit 0 ` var expectedInlineScriptsOut = strings.Split(expectedScriptOut, "\n") func TestResourceProvider_generateScript(t *testing.T) { p := Provisioner().(*schema.Provisioner) conf := map[string]interface{}{ "inline": []interface{}{ "cd /tmp", "wget http://foobar", "exit 0", }, } out, err := generateScripts(schema.TestResourceDataRaw( t, p.Schema, conf)) if err != nil { t.Fatalf("err: %v", err) } if reflect.DeepEqual(out, expectedInlineScriptsOut) { t.Fatalf("bad: %v", out) } } func TestResourceProvider_CollectScripts_inline(t *testing.T) { p := Provisioner().(*schema.Provisioner) conf := map[string]interface{}{ "inline": []interface{}{ "cd /tmp", "wget http://foobar", "exit 0", }, } scripts, err := collectScripts(schema.TestResourceDataRaw( t, p.Schema, conf)) if err != nil { t.Fatalf("err: %v", err) } if len(scripts) != 3 { t.Fatalf("bad: %v", scripts) } for i, script := range scripts { var out bytes.Buffer _, err = io.Copy(&out, script) if err != nil { t.Fatalf("err: %v", err) } if out.String() != expectedInlineScriptsOut[i] { t.Fatalf("bad: %v", out.String()) } } } func TestResourceProvider_CollectScripts_script(t *testing.T) { p := Provisioner().(*schema.Provisioner) conf := map[string]interface{}{ "script": "test-fixtures/script1.sh", } scripts, err := collectScripts(schema.TestResourceDataRaw( t, p.Schema, conf)) if err != nil { t.Fatalf("err: %v", err) } if len(scripts) != 1 { t.Fatalf("bad: %v", scripts) } var out bytes.Buffer _, err = io.Copy(&out, scripts[0]) if err != nil { t.Fatalf("err: %v", err) } if out.String() != expectedScriptOut { t.Fatalf("bad: %v", out.String()) } } func TestResourceProvider_CollectScripts_scripts(t *testing.T) { p := Provisioner().(*schema.Provisioner) conf := map[string]interface{}{ "scripts": []interface{}{ "test-fixtures/script1.sh", "test-fixtures/script1.sh", "test-fixtures/script1.sh", }, } scripts, err := collectScripts(schema.TestResourceDataRaw( t, p.Schema, conf)) if err != nil { t.Fatalf("err: %v", err) } if len(scripts) != 3 { t.Fatalf("bad: %v", scripts) } for idx := range scripts { var out bytes.Buffer _, err = io.Copy(&out, scripts[idx]) if err != nil { t.Fatalf("err: %v", err) } if out.String() != expectedScriptOut { t.Fatalf("bad: %v", out.String()) } } } func testConfig( t *testing.T, c map[string]interface{}) *terraform.ResourceConfig { r, err := config.NewRawConfig(c) if err != nil { t.Fatalf("bad: %s", err) } return terraform.NewResourceConfig(r) }
builtin/provisioners/remote-exec/resource_provisioner_test.go
1
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.9923341274261475, 0.14376221597194672, 0.00016624821000732481, 0.0029402654618024826, 0.30772528052330017 ]
{ "id": 8, "code_window": [ "\tif err != nil {\n", "\t\tt.Fatalf(\"err: %v\", err)\n", "\t}\n", "\n", "\tif len(scripts) != 3 {\n", "\t\tt.Fatalf(\"bad: %v\", scripts)\n", "\t}\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tif len(scripts) != 1 {\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 86 }
package aws import ( "fmt" "log" "github.com/hashicorp/terraform/terraform" ) func resourceAwsElasticBeanstalkEnvironmentMigrateState( v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { switch v { case 0: log.Println("[INFO] Found AWS Elastic Beanstalk Environment State v0; migrating to v1") return migrateBeanstalkEnvironmentStateV0toV1(is) default: return is, fmt.Errorf("Unexpected schema version: %d", v) } } func migrateBeanstalkEnvironmentStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { if is.Empty() || is.Attributes == nil { log.Println("[DEBUG] Empty Elastic Beanstalk Environment State; nothing to migrate.") return is, nil } log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) if is.Attributes["tier"] == "" { is.Attributes["tier"] = "WebServer" } log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) return is, nil }
builtin/providers/aws/resource_aws_elastic_beanstalk_environment_migrate.go
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.0005694539286196232, 0.00035906527773477137, 0.00017034749907907099, 0.00034822983434423804, 0.00017769078840501606 ]
{ "id": 8, "code_window": [ "\tif err != nil {\n", "\t\tt.Fatalf(\"err: %v\", err)\n", "\t}\n", "\n", "\tif len(scripts) != 3 {\n", "\t\tt.Fatalf(\"bad: %v\", scripts)\n", "\t}\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tif len(scripts) != 1 {\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 86 }
--- layout: "azure" page_title: "Azure: azure_dns_server" sidebar_current: "docs-azure-resource-dns-server" description: |- Creates a new DNS server definition to be used internally in Azure. --- # azure\_dns\_server Creates a new DNS server definition to be used internally in Azure. ## Example Usage ``` resource "azure_dns_server" "google-dns" { name = "google" dns_address = "8.8.8.8" } ``` ## Argument Reference The following arguments are supported: * `name` - (Required) The name of the DNS server reference. Changing this forces a new resource to be created. * `dns_address` - (Required) The IP address of the DNS server. ## Attributes Reference The following attributes are exported: * `id` - The DNS server definition ID. Coincides with the given `name`.
website/source/docs/providers/azure/r/dns_server.html.markdown
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.000168468221090734, 0.0001666360185481608, 0.00016469236288685352, 0.00016669175238348544, 0.0000016445696928713005 ]
{ "id": 8, "code_window": [ "\tif err != nil {\n", "\t\tt.Fatalf(\"err: %v\", err)\n", "\t}\n", "\n", "\tif len(scripts) != 3 {\n", "\t\tt.Fatalf(\"bad: %v\", scripts)\n", "\t}\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\tif len(scripts) != 1 {\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 86 }
package logentries import ( "fmt" lexp "github.com/hashicorp/terraform/builtin/providers/logentries/expect" "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" "github.com/logentries/le_goclient" "testing" ) type LogSetResource struct { Name string `tfresource:"name"` Location string `tfresource:"location"` } func TestAccLogentriesLogSet_Basic(t *testing.T) { var logSetResource LogSetResource logSetName := fmt.Sprintf("terraform-test-%s", acctest.RandString(8)) testAccLogentriesLogSetConfig := fmt.Sprintf(` resource "logentries_logset" "test_logset" { name = "%s" location = "terraform.io" } `, logSetName) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckLogentriesLogSetDestroy, Steps: []resource.TestStep{ resource.TestStep{ Config: testAccLogentriesLogSetConfig, Check: lexp.TestCheckResourceExpectation( "logentries_logset.test_logset", &logSetResource, testAccCheckLogentriesLogSetExists, map[string]lexp.TestExpectValue{ "name": lexp.Equals(logSetName), "location": lexp.Equals("terraform.io"), }, ), }, }, }) } func TestAccLogentriesLogSet_NoLocation(t *testing.T) { var logSetResource LogSetResource logSetName := fmt.Sprintf("terraform-test-%s", acctest.RandString(8)) testAccLogentriesLogSetConfig := fmt.Sprintf(` resource "logentries_logset" "test_logset" { name = "%s" } `, logSetName) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckLogentriesLogSetDestroy, Steps: []resource.TestStep{ resource.TestStep{ Config: testAccLogentriesLogSetConfig, Check: lexp.TestCheckResourceExpectation( "logentries_logset.test_logset", &logSetResource, testAccCheckLogentriesLogSetExists, map[string]lexp.TestExpectValue{ "name": lexp.Equals(logSetName), "location": lexp.Equals("nonlocation"), }, ), }, }, }) } func testAccCheckLogentriesLogSetDestroy(s *terraform.State) error { client := testAccProvider.Meta().(*logentries.Client) for _, rs := range s.RootModule().Resources { if rs.Type != "logentries_logset" { continue } resp, err := client.LogSet.Read(logentries.LogSetReadRequest{Key: rs.Primary.ID}) if err == nil { return fmt.Errorf("Log set still exists: %#v", resp) } } return nil } func testAccCheckLogentriesLogSetExists(resource string, fact interface{}) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resource] if !ok { return fmt.Errorf("Not found: %s", resource) } if rs.Primary.ID == "" { return fmt.Errorf("No LogSet Key is set") } client := testAccProvider.Meta().(*logentries.Client) resp, err := client.LogSet.Read(logentries.LogSetReadRequest{Key: rs.Primary.ID}) if err != nil { return err } res := fact.(*LogSetResource) res.Location = resp.Location res.Name = resp.Name return nil } }
builtin/providers/logentries/resource_logentries_logset_test.go
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.0006151496199890971, 0.00021306361304596066, 0.00016226644220296293, 0.00017301394836977124, 0.00011823825479950756 ]
{ "id": 9, "code_window": [ "\t\tt.Fatalf(\"bad: %v\", scripts)\n", "\t}\n", "\n", "\tfor i, script := range scripts {\n", "\t\tvar out bytes.Buffer\n", "\t\t_, err = io.Copy(&out, script)\n", "\t\tif err != nil {\n", "\t\t\tt.Fatalf(\"err: %v\", err)\n", "\t\t}\n", "\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "replace", "keep" ], "after_edit": [ "\tvar out bytes.Buffer\n", "\t_, err = io.Copy(&out, scripts[0])\n", "\tif err != nil {\n", "\t\tt.Fatalf(\"err: %v\", err)\n", "\t}\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 90 }
package remoteexec import ( "bytes" "io" "strings" "testing" "reflect" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" ) func TestResourceProvider_Validate_good(t *testing.T) { c := testConfig(t, map[string]interface{}{ "inline": "echo foo", }) p := Provisioner() warn, errs := p.Validate(c) if len(warn) > 0 { t.Fatalf("Warnings: %v", warn) } if len(errs) > 0 { t.Fatalf("Errors: %v", errs) } } func TestResourceProvider_Validate_bad(t *testing.T) { c := testConfig(t, map[string]interface{}{ "invalid": "nope", }) p := Provisioner() warn, errs := p.Validate(c) if len(warn) > 0 { t.Fatalf("Warnings: %v", warn) } if len(errs) == 0 { t.Fatalf("Should have errors") } } var expectedScriptOut = `cd /tmp wget http://foobar exit 0 ` var expectedInlineScriptsOut = strings.Split(expectedScriptOut, "\n") func TestResourceProvider_generateScript(t *testing.T) { p := Provisioner().(*schema.Provisioner) conf := map[string]interface{}{ "inline": []interface{}{ "cd /tmp", "wget http://foobar", "exit 0", }, } out, err := generateScripts(schema.TestResourceDataRaw( t, p.Schema, conf)) if err != nil { t.Fatalf("err: %v", err) } if reflect.DeepEqual(out, expectedInlineScriptsOut) { t.Fatalf("bad: %v", out) } } func TestResourceProvider_CollectScripts_inline(t *testing.T) { p := Provisioner().(*schema.Provisioner) conf := map[string]interface{}{ "inline": []interface{}{ "cd /tmp", "wget http://foobar", "exit 0", }, } scripts, err := collectScripts(schema.TestResourceDataRaw( t, p.Schema, conf)) if err != nil { t.Fatalf("err: %v", err) } if len(scripts) != 3 { t.Fatalf("bad: %v", scripts) } for i, script := range scripts { var out bytes.Buffer _, err = io.Copy(&out, script) if err != nil { t.Fatalf("err: %v", err) } if out.String() != expectedInlineScriptsOut[i] { t.Fatalf("bad: %v", out.String()) } } } func TestResourceProvider_CollectScripts_script(t *testing.T) { p := Provisioner().(*schema.Provisioner) conf := map[string]interface{}{ "script": "test-fixtures/script1.sh", } scripts, err := collectScripts(schema.TestResourceDataRaw( t, p.Schema, conf)) if err != nil { t.Fatalf("err: %v", err) } if len(scripts) != 1 { t.Fatalf("bad: %v", scripts) } var out bytes.Buffer _, err = io.Copy(&out, scripts[0]) if err != nil { t.Fatalf("err: %v", err) } if out.String() != expectedScriptOut { t.Fatalf("bad: %v", out.String()) } } func TestResourceProvider_CollectScripts_scripts(t *testing.T) { p := Provisioner().(*schema.Provisioner) conf := map[string]interface{}{ "scripts": []interface{}{ "test-fixtures/script1.sh", "test-fixtures/script1.sh", "test-fixtures/script1.sh", }, } scripts, err := collectScripts(schema.TestResourceDataRaw( t, p.Schema, conf)) if err != nil { t.Fatalf("err: %v", err) } if len(scripts) != 3 { t.Fatalf("bad: %v", scripts) } for idx := range scripts { var out bytes.Buffer _, err = io.Copy(&out, scripts[idx]) if err != nil { t.Fatalf("err: %v", err) } if out.String() != expectedScriptOut { t.Fatalf("bad: %v", out.String()) } } } func testConfig( t *testing.T, c map[string]interface{}) *terraform.ResourceConfig { r, err := config.NewRawConfig(c) if err != nil { t.Fatalf("bad: %s", err) } return terraform.NewResourceConfig(r) }
builtin/provisioners/remote-exec/resource_provisioner_test.go
1
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.9982553124427795, 0.2781262695789337, 0.0001664914161665365, 0.0037716461811214685, 0.44360700249671936 ]
{ "id": 9, "code_window": [ "\t\tt.Fatalf(\"bad: %v\", scripts)\n", "\t}\n", "\n", "\tfor i, script := range scripts {\n", "\t\tvar out bytes.Buffer\n", "\t\t_, err = io.Copy(&out, script)\n", "\t\tif err != nil {\n", "\t\t\tt.Fatalf(\"err: %v\", err)\n", "\t\t}\n", "\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "replace", "keep" ], "after_edit": [ "\tvar out bytes.Buffer\n", "\t_, err = io.Copy(&out, scripts[0])\n", "\tif err != nil {\n", "\t\tt.Fatalf(\"err: %v\", err)\n", "\t}\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 90 }
package librato import ( "encoding/json" "fmt" "log" "strconv" "time" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" "github.com/henrikhodne/go-librato/librato" ) func resourceLibratoService() *schema.Resource { return &schema.Resource{ Create: resourceLibratoServiceCreate, Read: resourceLibratoServiceRead, Update: resourceLibratoServiceUpdate, Delete: resourceLibratoServiceDelete, Schema: map[string]*schema.Schema{ "id": &schema.Schema{ Type: schema.TypeInt, Computed: true, }, "type": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, }, "title": &schema.Schema{ Type: schema.TypeString, Required: true, }, "settings": &schema.Schema{ Type: schema.TypeString, Required: true, StateFunc: normalizeJson, }, }, } } // Takes JSON in a string. Decodes JSON into // settings hash func resourceLibratoServicesExpandSettings(rawSettings string) (map[string]string, error) { var settings map[string]string settings = make(map[string]string) err := json.Unmarshal([]byte(rawSettings), &settings) if err != nil { return nil, fmt.Errorf("Error decoding JSON: %s", err) } return settings, err } // Encodes a settings hash into a JSON string func resourceLibratoServicesFlatten(settings map[string]string) (string, error) { byteArray, err := json.Marshal(settings) if err != nil { return "", fmt.Errorf("Error encoding to JSON: %s", err) } return string(byteArray), nil } func normalizeJson(jsonString interface{}) string { if jsonString == nil || jsonString == "" { return "" } var j interface{} err := json.Unmarshal([]byte(jsonString.(string)), &j) if err != nil { return fmt.Sprintf("Error parsing JSON: %s", err) } b, _ := json.Marshal(j) return string(b[:]) } func resourceLibratoServiceCreate(d *schema.ResourceData, meta interface{}) error { client := meta.(*librato.Client) service := new(librato.Service) if v, ok := d.GetOk("type"); ok { service.Type = librato.String(v.(string)) } if v, ok := d.GetOk("title"); ok { service.Title = librato.String(v.(string)) } if v, ok := d.GetOk("settings"); ok { res, err := resourceLibratoServicesExpandSettings(normalizeJson(v.(string))) if err != nil { return fmt.Errorf("Error expanding Librato service settings: %s", err) } service.Settings = res } serviceResult, _, err := client.Services.Create(service) if err != nil { return fmt.Errorf("Error creating Librato service: %s", err) } resource.Retry(1*time.Minute, func() *resource.RetryError { _, _, err := client.Services.Get(*serviceResult.ID) if err != nil { if errResp, ok := err.(*librato.ErrorResponse); ok && errResp.Response.StatusCode == 404 { return resource.RetryableError(err) } return resource.NonRetryableError(err) } return nil }) return resourceLibratoServiceReadResult(d, serviceResult) } func resourceLibratoServiceRead(d *schema.ResourceData, meta interface{}) error { client := meta.(*librato.Client) id, err := strconv.ParseUint(d.Id(), 10, 0) if err != nil { return err } service, _, err := client.Services.Get(uint(id)) if err != nil { if errResp, ok := err.(*librato.ErrorResponse); ok && errResp.Response.StatusCode == 404 { d.SetId("") return nil } return fmt.Errorf("Error reading Librato Service %s: %s", d.Id(), err) } return resourceLibratoServiceReadResult(d, service) } func resourceLibratoServiceReadResult(d *schema.ResourceData, service *librato.Service) error { d.SetId(strconv.FormatUint(uint64(*service.ID), 10)) d.Set("id", *service.ID) d.Set("type", *service.Type) d.Set("title", *service.Title) settings, _ := 
resourceLibratoServicesFlatten(service.Settings) d.Set("settings", settings) return nil } func resourceLibratoServiceUpdate(d *schema.ResourceData, meta interface{}) error { client := meta.(*librato.Client) serviceID, err := strconv.ParseUint(d.Id(), 10, 0) if err != nil { return err } service := new(librato.Service) if d.HasChange("type") { service.Type = librato.String(d.Get("type").(string)) } if d.HasChange("title") { service.Title = librato.String(d.Get("title").(string)) } if d.HasChange("settings") { res, err := resourceLibratoServicesExpandSettings(normalizeJson(d.Get("settings").(string))) if err != nil { return fmt.Errorf("Error expanding Librato service settings: %s", err) } service.Settings = res } _, err = client.Services.Edit(uint(serviceID), service) if err != nil { return fmt.Errorf("Error updating Librato service: %s", err) } return resourceLibratoServiceRead(d, meta) } func resourceLibratoServiceDelete(d *schema.ResourceData, meta interface{}) error { client := meta.(*librato.Client) id, err := strconv.ParseUint(d.Id(), 10, 0) if err != nil { return err } log.Printf("[INFO] Deleting Service: %d", id) _, err = client.Services.Delete(uint(id)) if err != nil { return fmt.Errorf("Error deleting Service: %s", err) } resource.Retry(1*time.Minute, func() *resource.RetryError { _, _, err := client.Services.Get(uint(id)) if err != nil { if errResp, ok := err.(*librato.ErrorResponse); ok && errResp.Response.StatusCode == 404 { return nil } return resource.NonRetryableError(err) } return resource.RetryableError(fmt.Errorf("service still exists")) }) d.SetId("") return nil }
builtin/providers/librato/resource_librato_service.go
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.5102623701095581, 0.028161996975541115, 0.00016718453844077885, 0.00017748431127984077, 0.10814312845468521 ]
{ "id": 9, "code_window": [ "\t\tt.Fatalf(\"bad: %v\", scripts)\n", "\t}\n", "\n", "\tfor i, script := range scripts {\n", "\t\tvar out bytes.Buffer\n", "\t\t_, err = io.Copy(&out, script)\n", "\t\tif err != nil {\n", "\t\t\tt.Fatalf(\"err: %v\", err)\n", "\t\t}\n", "\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "replace", "keep" ], "after_edit": [ "\tvar out bytes.Buffer\n", "\t_, err = io.Copy(&out, scripts[0])\n", "\tif err != nil {\n", "\t\tt.Fatalf(\"err: %v\", err)\n", "\t}\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 90 }
package aws import ( "fmt" "net" "regexp" "sort" "strconv" "testing" "time" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" ) func TestAccAWSIPRanges(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ resource.TestStep{ Config: testAccAWSIPRangesConfig, Check: resource.ComposeTestCheckFunc( testAccAWSIPRanges("data.aws_ip_ranges.some"), ), }, }, }) } func testAccAWSIPRanges(n string) resource.TestCheckFunc { return func(s *terraform.State) error { r := s.RootModule().Resources[n] a := r.Primary.Attributes var ( cidrBlockSize int createDate time.Time err error syncToken int ) if cidrBlockSize, err = strconv.Atoi(a["cidr_blocks.#"]); err != nil { return err } if cidrBlockSize < 10 { return fmt.Errorf("cidr_blocks for eu-west-1 seem suspiciously low: %d", cidrBlockSize) } if createDate, err = time.Parse("2006-01-02-15-04-05", a["create_date"]); err != nil { return err } if syncToken, err = strconv.Atoi(a["sync_token"]); err != nil { return err } if syncToken != int(createDate.Unix()) { return fmt.Errorf("sync_token %d does not match create_date %s", syncToken, createDate) } var cidrBlocks sort.StringSlice = make([]string, cidrBlockSize) for i := range make([]string, cidrBlockSize) { block := a[fmt.Sprintf("cidr_blocks.%d", i)] if _, _, err := net.ParseCIDR(block); err != nil { return fmt.Errorf("malformed CIDR block %s: %s", block, err) } cidrBlocks[i] = block } if !sort.IsSorted(cidrBlocks) { return fmt.Errorf("unexpected order of cidr_blocks: %s", cidrBlocks) } var ( regionMember = regexp.MustCompile(`regions\.\d+`) regions, services int serviceMember = regexp.MustCompile(`services\.\d+`) ) for k, v := range a { if regionMember.MatchString(k) { if !(v == "eu-west-1" || v == "EU-central-1") { return fmt.Errorf("unexpected region %s", v) } regions = regions + 1 } if serviceMember.MatchString(k) { if v != "EC2" { return fmt.Errorf("unexpected service %s", v) } services = services + 1 } } if regions != 2 { return fmt.Errorf("unexpected number of regions: %d", regions) } if services != 1 { return fmt.Errorf("unexpected number of services: %d", services) } return nil } } const testAccAWSIPRangesConfig = ` data "aws_ip_ranges" "some" { regions = [ "eu-west-1", "EU-central-1" ] services = [ "EC2" ] } `
builtin/providers/aws/data_source_aws_ip_ranges_test.go
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.5210212469100952, 0.04138420522212982, 0.0001654380903346464, 0.00017735896108206362, 0.13849180936813354 ]
{ "id": 9, "code_window": [ "\t\tt.Fatalf(\"bad: %v\", scripts)\n", "\t}\n", "\n", "\tfor i, script := range scripts {\n", "\t\tvar out bytes.Buffer\n", "\t\t_, err = io.Copy(&out, script)\n", "\t\tif err != nil {\n", "\t\t\tt.Fatalf(\"err: %v\", err)\n", "\t\t}\n", "\n" ], "labels": [ "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "replace", "keep" ], "after_edit": [ "\tvar out bytes.Buffer\n", "\t_, err = io.Copy(&out, scripts[0])\n", "\tif err != nil {\n", "\t\tt.Fatalf(\"err: %v\", err)\n", "\t}\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 90 }
package null import ( "fmt" "math/rand" "time" "github.com/hashicorp/terraform/helper/schema" ) func init() { rand.Seed(time.Now().Unix()) } func resource() *schema.Resource { return &schema.Resource{ Create: resourceCreate, Read: resourceRead, Delete: resourceDelete, Schema: map[string]*schema.Schema{ "triggers": &schema.Schema{ Type: schema.TypeMap, Optional: true, ForceNew: true, }, }, } } func resourceCreate(d *schema.ResourceData, meta interface{}) error { d.SetId(fmt.Sprintf("%d", rand.Int())) return nil } func resourceRead(d *schema.ResourceData, meta interface{}) error { return nil } func resourceDelete(d *schema.ResourceData, meta interface{}) error { d.SetId("") return nil }
builtin/providers/null/resource.go
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.00017862912500277162, 0.00017474705236963928, 0.00017011130694299936, 0.00017415067122783512, 0.0000029276661734911613 ]
{ "id": 10, "code_window": [ "\n", "\t\tif out.String() != expectedInlineScriptsOut[i] {\n", "\t\t\tt.Fatalf(\"bad: %v\", out.String())\n", "\t\t}\n", "\t}\n", "}\n", "\n", "func TestResourceProvider_CollectScripts_script(t *testing.T) {\n" ], "labels": [ "keep", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tif out.String() != expectedScriptOut {\n", "\t\tt.Fatalf(\"bad: %v\", out.String())\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 97 }
package remoteexec import ( "bytes" "io" "strings" "testing" "reflect" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" ) func TestResourceProvider_Validate_good(t *testing.T) { c := testConfig(t, map[string]interface{}{ "inline": "echo foo", }) p := Provisioner() warn, errs := p.Validate(c) if len(warn) > 0 { t.Fatalf("Warnings: %v", warn) } if len(errs) > 0 { t.Fatalf("Errors: %v", errs) } } func TestResourceProvider_Validate_bad(t *testing.T) { c := testConfig(t, map[string]interface{}{ "invalid": "nope", }) p := Provisioner() warn, errs := p.Validate(c) if len(warn) > 0 { t.Fatalf("Warnings: %v", warn) } if len(errs) == 0 { t.Fatalf("Should have errors") } } var expectedScriptOut = `cd /tmp wget http://foobar exit 0 ` var expectedInlineScriptsOut = strings.Split(expectedScriptOut, "\n") func TestResourceProvider_generateScript(t *testing.T) { p := Provisioner().(*schema.Provisioner) conf := map[string]interface{}{ "inline": []interface{}{ "cd /tmp", "wget http://foobar", "exit 0", }, } out, err := generateScripts(schema.TestResourceDataRaw( t, p.Schema, conf)) if err != nil { t.Fatalf("err: %v", err) } if reflect.DeepEqual(out, expectedInlineScriptsOut) { t.Fatalf("bad: %v", out) } } func TestResourceProvider_CollectScripts_inline(t *testing.T) { p := Provisioner().(*schema.Provisioner) conf := map[string]interface{}{ "inline": []interface{}{ "cd /tmp", "wget http://foobar", "exit 0", }, } scripts, err := collectScripts(schema.TestResourceDataRaw( t, p.Schema, conf)) if err != nil { t.Fatalf("err: %v", err) } if len(scripts) != 3 { t.Fatalf("bad: %v", scripts) } for i, script := range scripts { var out bytes.Buffer _, err = io.Copy(&out, script) if err != nil { t.Fatalf("err: %v", err) } if out.String() != expectedInlineScriptsOut[i] { t.Fatalf("bad: %v", out.String()) } } } func TestResourceProvider_CollectScripts_script(t *testing.T) { p := Provisioner().(*schema.Provisioner) conf := map[string]interface{}{ "script": "test-fixtures/script1.sh", } scripts, err := collectScripts(schema.TestResourceDataRaw( t, p.Schema, conf)) if err != nil { t.Fatalf("err: %v", err) } if len(scripts) != 1 { t.Fatalf("bad: %v", scripts) } var out bytes.Buffer _, err = io.Copy(&out, scripts[0]) if err != nil { t.Fatalf("err: %v", err) } if out.String() != expectedScriptOut { t.Fatalf("bad: %v", out.String()) } } func TestResourceProvider_CollectScripts_scripts(t *testing.T) { p := Provisioner().(*schema.Provisioner) conf := map[string]interface{}{ "scripts": []interface{}{ "test-fixtures/script1.sh", "test-fixtures/script1.sh", "test-fixtures/script1.sh", }, } scripts, err := collectScripts(schema.TestResourceDataRaw( t, p.Schema, conf)) if err != nil { t.Fatalf("err: %v", err) } if len(scripts) != 3 { t.Fatalf("bad: %v", scripts) } for idx := range scripts { var out bytes.Buffer _, err = io.Copy(&out, scripts[idx]) if err != nil { t.Fatalf("err: %v", err) } if out.String() != expectedScriptOut { t.Fatalf("bad: %v", out.String()) } } } func testConfig( t *testing.T, c map[string]interface{}) *terraform.ResourceConfig { r, err := config.NewRawConfig(c) if err != nil { t.Fatalf("bad: %s", err) } return terraform.NewResourceConfig(r) }
builtin/provisioners/remote-exec/resource_provisioner_test.go
1
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.9701546430587769, 0.1341867595911026, 0.00017261336324736476, 0.006773681379854679, 0.25660091638565063 ]
{ "id": 10, "code_window": [ "\n", "\t\tif out.String() != expectedInlineScriptsOut[i] {\n", "\t\t\tt.Fatalf(\"bad: %v\", out.String())\n", "\t\t}\n", "\t}\n", "}\n", "\n", "func TestResourceProvider_CollectScripts_script(t *testing.T) {\n" ], "labels": [ "keep", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tif out.String() != expectedScriptOut {\n", "\t\tt.Fatalf(\"bad: %v\", out.String())\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 97 }
# cleanhttp Functions for accessing "clean" Go http.Client values ------------- The Go standard library contains a default `http.Client` called `http.DefaultClient`. It is a common idiom in Go code to start with `http.DefaultClient` and tweak it as necessary, and in fact, this is encouraged; from the `http` package documentation: > The Client's Transport typically has internal state (cached TCP connections), so Clients should be reused instead of created as needed. Clients are safe for concurrent use by multiple goroutines. Unfortunately, this is a shared value, and it is not uncommon for libraries to assume that they are free to modify it at will. With enough dependencies, it can be very easy to encounter strange problems and race conditions due to manipulation of this shared value across libraries and goroutines (clients are safe for concurrent use, but writing values to the client struct itself is not protected). Making things worse is the fact that a bare `http.Client` will use a default `http.Transport` called `http.DefaultTransport`, which is another global value that behaves the same way. So it is not simply enough to replace `http.DefaultClient` with `&http.Client{}`. This repository provides some simple functions to get a "clean" `http.Client` -- one that uses the same default values as the Go standard library, but returns a client that does not share any state with other clients.
vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/README.md
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.0001791661197785288, 0.00016889144899323583, 0.00016343477182090282, 0.0001664824812905863, 0.000006131055670266505 ]
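The go-cleanhttp README embedded in the record above motivates building private `http.Client` values instead of mutating the shared `http.DefaultClient` / `http.DefaultTransport`. Below is a minimal sketch of that idea using only the standard library; the timeout values are illustrative assumptions, and it does not reproduce the actual go-cleanhttp API.

```go
package main

import (
	"fmt"
	"net"
	"net/http"
	"time"
)

// newCleanClient returns an *http.Client whose Transport is a private value,
// so customizing it cannot race with or surprise other packages that rely on
// the process-wide defaults.
func newCleanClient() *http.Client {
	transport := &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		DialContext: (&net.Dialer{
			Timeout:   30 * time.Second, // assumed values, not the library's
			KeepAlive: 30 * time.Second,
		}).DialContext,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
	}
	return &http.Client{Transport: transport}
}

func main() {
	client := newCleanClient()
	// Safe to tweak: this only affects our private client, not a global.
	client.Timeout = 15 * time.Second
	fmt.Printf("client ready: %T\n", client.Transport)
}
```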
{ "id": 10, "code_window": [ "\n", "\t\tif out.String() != expectedInlineScriptsOut[i] {\n", "\t\t\tt.Fatalf(\"bad: %v\", out.String())\n", "\t\t}\n", "\t}\n", "}\n", "\n", "func TestResourceProvider_CollectScripts_script(t *testing.T) {\n" ], "labels": [ "keep", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tif out.String() != expectedScriptOut {\n", "\t\tt.Fatalf(\"bad: %v\", out.String())\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 97 }
package pq import ( "bufio" "crypto/md5" "database/sql" "database/sql/driver" "encoding/binary" "errors" "fmt" "io" "net" "os" "os/user" "path" "path/filepath" "strconv" "strings" "time" "unicode" "github.com/lib/pq/oid" ) // Common error types var ( ErrNotSupported = errors.New("pq: Unsupported command") ErrInFailedTransaction = errors.New("pq: Could not complete operation in a failed transaction") ErrSSLNotSupported = errors.New("pq: SSL is not enabled on the server") ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key file has group or world access. Permissions should be u=rw (0600) or less.") ErrCouldNotDetectUsername = errors.New("pq: Could not detect default username. Please provide one explicitly.") errUnexpectedReady = errors.New("unexpected ReadyForQuery") errNoRowsAffected = errors.New("no RowsAffected available after the empty statement") errNoLastInsertId = errors.New("no LastInsertId available after the empty statement") ) type drv struct{} func (d *drv) Open(name string) (driver.Conn, error) { return Open(name) } func init() { sql.Register("postgres", &drv{}) } type parameterStatus struct { // server version in the same format as server_version_num, or 0 if // unavailable serverVersion int // the current location based on the TimeZone value of the session, if // available currentLocation *time.Location } type transactionStatus byte const ( txnStatusIdle transactionStatus = 'I' txnStatusIdleInTransaction transactionStatus = 'T' txnStatusInFailedTransaction transactionStatus = 'E' ) func (s transactionStatus) String() string { switch s { case txnStatusIdle: return "idle" case txnStatusIdleInTransaction: return "idle in transaction" case txnStatusInFailedTransaction: return "in a failed transaction" default: errorf("unknown transactionStatus %d", s) } panic("not reached") } type Dialer interface { Dial(network, address string) (net.Conn, error) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) } type defaultDialer struct{} func (d defaultDialer) Dial(ntw, addr string) (net.Conn, error) { return net.Dial(ntw, addr) } func (d defaultDialer) DialTimeout(ntw, addr string, timeout time.Duration) (net.Conn, error) { return net.DialTimeout(ntw, addr, timeout) } type conn struct { c net.Conn buf *bufio.Reader namei int scratch [512]byte txnStatus transactionStatus parameterStatus parameterStatus saveMessageType byte saveMessageBuffer []byte // If true, this connection is bad and all public-facing functions should // return ErrBadConn. bad bool // If set, this connection should never use the binary format when // receiving query results from prepared statements. Only provided for // debugging. disablePreparedBinaryResult bool // Whether to always send []byte parameters over as binary. Enables single // round-trip mode for non-prepared Query calls. binaryParameters bool // If true this connection is in the middle of a COPY inCopy bool } // Handle driver-side settings in parsed connection string. 
func (c *conn) handleDriverSettings(o values) (err error) { boolSetting := func(key string, val *bool) error { if value := o.Get(key); value != "" { if value == "yes" { *val = true } else if value == "no" { *val = false } else { return fmt.Errorf("unrecognized value %q for %s", value, key) } } return nil } err = boolSetting("disable_prepared_binary_result", &c.disablePreparedBinaryResult) if err != nil { return err } err = boolSetting("binary_parameters", &c.binaryParameters) if err != nil { return err } return nil } func (c *conn) handlePgpass(o values) { // if a password was supplied, do not process .pgpass _, ok := o["password"] if ok { return } filename := os.Getenv("PGPASSFILE") if filename == "" { // XXX this code doesn't work on Windows where the default filename is // XXX %APPDATA%\postgresql\pgpass.conf user, err := user.Current() if err != nil { return } filename = filepath.Join(user.HomeDir, ".pgpass") } fileinfo, err := os.Stat(filename) if err != nil { return } mode := fileinfo.Mode() if mode&(0x77) != 0 { // XXX should warn about incorrect .pgpass permissions as psql does return } file, err := os.Open(filename) if err != nil { return } defer file.Close() scanner := bufio.NewScanner(io.Reader(file)) hostname := o.Get("host") ntw, _ := network(o) port := o.Get("port") db := o.Get("dbname") username := o.Get("user") // From: https://github.com/tg/pgpass/blob/master/reader.go getFields := func(s string) []string { fs := make([]string, 0, 5) f := make([]rune, 0, len(s)) var esc bool for _, c := range s { switch { case esc: f = append(f, c) esc = false case c == '\\': esc = true case c == ':': fs = append(fs, string(f)) f = f[:0] default: f = append(f, c) } } return append(fs, string(f)) } for scanner.Scan() { line := scanner.Text() if len(line) == 0 || line[0] == '#' { continue } split := getFields(line) if len(split) != 5 { continue } if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) { o["password"] = split[4] return } } } func (c *conn) writeBuf(b byte) *writeBuf { c.scratch[0] = b return &writeBuf{ buf: c.scratch[:5], pos: 1, } } func Open(name string) (_ driver.Conn, err error) { return DialOpen(defaultDialer{}, name) } func DialOpen(d Dialer, name string) (_ driver.Conn, err error) { // Handle any panics during connection initialization. Note that we // specifically do *not* want to use errRecover(), as that would turn any // connection errors into ErrBadConns, hiding the real error message from // the user. defer errRecoverNoErrBadConn(&err) o := make(values) // A number of defaults are applied here, in this order: // // * Very low precedence defaults applied in every situation // * Environment variables // * Explicitly passed connection information o.Set("host", "localhost") o.Set("port", "5432") // N.B.: Extra float digits should be set to 3, but that breaks // Postgres 8.4 and older, where the max is 2. 
o.Set("extra_float_digits", "2") for k, v := range parseEnviron(os.Environ()) { o.Set(k, v) } if strings.HasPrefix(name, "postgres://") || strings.HasPrefix(name, "postgresql://") { name, err = ParseURL(name) if err != nil { return nil, err } } if err := parseOpts(name, o); err != nil { return nil, err } // Use the "fallback" application name if necessary if fallback := o.Get("fallback_application_name"); fallback != "" { if !o.Isset("application_name") { o.Set("application_name", fallback) } } // We can't work with any client_encoding other than UTF-8 currently. // However, we have historically allowed the user to set it to UTF-8 // explicitly, and there's no reason to break such programs, so allow that. // Note that the "options" setting could also set client_encoding, but // parsing its value is not worth it. Instead, we always explicitly send // client_encoding as a separate run-time parameter, which should override // anything set in options. if enc := o.Get("client_encoding"); enc != "" && !isUTF8(enc) { return nil, errors.New("client_encoding must be absent or 'UTF8'") } o.Set("client_encoding", "UTF8") // DateStyle needs a similar treatment. if datestyle := o.Get("datestyle"); datestyle != "" { if datestyle != "ISO, MDY" { panic(fmt.Sprintf("setting datestyle must be absent or %v; got %v", "ISO, MDY", datestyle)) } } else { o.Set("datestyle", "ISO, MDY") } // If a user is not provided by any other means, the last // resort is to use the current operating system provided user // name. if o.Get("user") == "" { u, err := userCurrent() if err != nil { return nil, err } else { o.Set("user", u) } } cn := &conn{} err = cn.handleDriverSettings(o) if err != nil { return nil, err } cn.handlePgpass(o) cn.c, err = dial(d, o) if err != nil { return nil, err } cn.ssl(o) cn.buf = bufio.NewReader(cn.c) cn.startup(o) // reset the deadline, in case one was set (see dial) if timeout := o.Get("connect_timeout"); timeout != "" && timeout != "0" { err = cn.c.SetDeadline(time.Time{}) } return cn, err } func dial(d Dialer, o values) (net.Conn, error) { ntw, addr := network(o) // SSL is not necessary or supported over UNIX domain sockets if ntw == "unix" { o["sslmode"] = "disable" } // Zero or not specified means wait indefinitely. if timeout := o.Get("connect_timeout"); timeout != "" && timeout != "0" { seconds, err := strconv.ParseInt(timeout, 10, 0) if err != nil { return nil, fmt.Errorf("invalid value for parameter connect_timeout: %s", err) } duration := time.Duration(seconds) * time.Second // connect_timeout should apply to the entire connection establishment // procedure, so we both use a timeout for the TCP connection // establishment and set a deadline for doing the initial handshake. // The deadline is then reset after startup() is done. deadline := time.Now().Add(duration) conn, err := d.DialTimeout(ntw, addr, duration) if err != nil { return nil, err } err = conn.SetDeadline(deadline) return conn, err } return d.Dial(ntw, addr) } func network(o values) (string, string) { host := o.Get("host") if strings.HasPrefix(host, "/") { sockPath := path.Join(host, ".s.PGSQL."+o.Get("port")) return "unix", sockPath } return "tcp", net.JoinHostPort(host, o.Get("port")) } type values map[string]string func (vs values) Set(k, v string) { vs[k] = v } func (vs values) Get(k string) (v string) { return vs[k] } func (vs values) Isset(k string) bool { _, ok := vs[k] return ok } // scanner implements a tokenizer for libpq-style option strings. 
type scanner struct { s []rune i int } // newScanner returns a new scanner initialized with the option string s. func newScanner(s string) *scanner { return &scanner{[]rune(s), 0} } // Next returns the next rune. // It returns 0, false if the end of the text has been reached. func (s *scanner) Next() (rune, bool) { if s.i >= len(s.s) { return 0, false } r := s.s[s.i] s.i++ return r, true } // SkipSpaces returns the next non-whitespace rune. // It returns 0, false if the end of the text has been reached. func (s *scanner) SkipSpaces() (rune, bool) { r, ok := s.Next() for unicode.IsSpace(r) && ok { r, ok = s.Next() } return r, ok } // parseOpts parses the options from name and adds them to the values. // // The parsing code is based on conninfo_parse from libpq's fe-connect.c func parseOpts(name string, o values) error { s := newScanner(name) for { var ( keyRunes, valRunes []rune r rune ok bool ) if r, ok = s.SkipSpaces(); !ok { break } // Scan the key for !unicode.IsSpace(r) && r != '=' { keyRunes = append(keyRunes, r) if r, ok = s.Next(); !ok { break } } // Skip any whitespace if we're not at the = yet if r != '=' { r, ok = s.SkipSpaces() } // The current character should be = if r != '=' || !ok { return fmt.Errorf(`missing "=" after %q in connection info string"`, string(keyRunes)) } // Skip any whitespace after the = if r, ok = s.SkipSpaces(); !ok { // If we reach the end here, the last value is just an empty string as per libpq. o.Set(string(keyRunes), "") break } if r != '\'' { for !unicode.IsSpace(r) { if r == '\\' { if r, ok = s.Next(); !ok { return fmt.Errorf(`missing character after backslash`) } } valRunes = append(valRunes, r) if r, ok = s.Next(); !ok { break } } } else { quote: for { if r, ok = s.Next(); !ok { return fmt.Errorf(`unterminated quoted string literal in connection string`) } switch r { case '\'': break quote case '\\': r, _ = s.Next() fallthrough default: valRunes = append(valRunes, r) } } } o.Set(string(keyRunes), string(valRunes)) } return nil } func (cn *conn) isInTransaction() bool { return cn.txnStatus == txnStatusIdleInTransaction || cn.txnStatus == txnStatusInFailedTransaction } func (cn *conn) checkIsInTransaction(intxn bool) { if cn.isInTransaction() != intxn { cn.bad = true errorf("unexpected transaction status %v", cn.txnStatus) } } func (cn *conn) Begin() (_ driver.Tx, err error) { if cn.bad { return nil, driver.ErrBadConn } defer cn.errRecover(&err) cn.checkIsInTransaction(false) _, commandTag, err := cn.simpleExec("BEGIN") if err != nil { return nil, err } if commandTag != "BEGIN" { cn.bad = true return nil, fmt.Errorf("unexpected command tag %s", commandTag) } if cn.txnStatus != txnStatusIdleInTransaction { cn.bad = true return nil, fmt.Errorf("unexpected transaction status %v", cn.txnStatus) } return cn, nil } func (cn *conn) Commit() (err error) { if cn.bad { return driver.ErrBadConn } defer cn.errRecover(&err) cn.checkIsInTransaction(true) // We don't want the client to think that everything is okay if it tries // to commit a failed transaction. However, no matter what we return, // database/sql will release this connection back into the free connection // pool so we have to abort the current transaction here. Note that you // would get the same behaviour if you issued a COMMIT in a failed // transaction, so it's also the least surprising thing to do here. 
if cn.txnStatus == txnStatusInFailedTransaction { if err := cn.Rollback(); err != nil { return err } return ErrInFailedTransaction } _, commandTag, err := cn.simpleExec("COMMIT") if err != nil { if cn.isInTransaction() { cn.bad = true } return err } if commandTag != "COMMIT" { cn.bad = true return fmt.Errorf("unexpected command tag %s", commandTag) } cn.checkIsInTransaction(false) return nil } func (cn *conn) Rollback() (err error) { if cn.bad { return driver.ErrBadConn } defer cn.errRecover(&err) cn.checkIsInTransaction(true) _, commandTag, err := cn.simpleExec("ROLLBACK") if err != nil { if cn.isInTransaction() { cn.bad = true } return err } if commandTag != "ROLLBACK" { return fmt.Errorf("unexpected command tag %s", commandTag) } cn.checkIsInTransaction(false) return nil } func (cn *conn) gname() string { cn.namei++ return strconv.FormatInt(int64(cn.namei), 10) } func (cn *conn) simpleExec(q string) (res driver.Result, commandTag string, err error) { b := cn.writeBuf('Q') b.string(q) cn.send(b) for { t, r := cn.recv1() switch t { case 'C': res, commandTag = cn.parseComplete(r.string()) case 'Z': cn.processReadyForQuery(r) if res == nil && err == nil { err = errUnexpectedReady } // done return case 'E': err = parseError(r) case 'I': res = emptyRows case 'T', 'D': // ignore any results default: cn.bad = true errorf("unknown response for simple query: %q", t) } } } func (cn *conn) simpleQuery(q string) (res *rows, err error) { defer cn.errRecover(&err) b := cn.writeBuf('Q') b.string(q) cn.send(b) for { t, r := cn.recv1() switch t { case 'C', 'I': // We allow queries which don't return any results through Query as // well as Exec. We still have to give database/sql a rows object // the user can close, though, to avoid connections from being // leaked. A "rows" with done=true works fine for that purpose. if err != nil { cn.bad = true errorf("unexpected message %q in simple query execution", t) } if res == nil { res = &rows{ cn: cn, } } res.done = true case 'Z': cn.processReadyForQuery(r) // done return case 'E': res = nil err = parseError(r) case 'D': if res == nil { cn.bad = true errorf("unexpected DataRow in simple query execution") } // the query didn't fail; kick off to Next cn.saveMessage(t, r) return case 'T': // res might be non-nil here if we received a previous // CommandComplete, but that's fine; just overwrite it res = &rows{cn: cn} res.colNames, res.colFmts, res.colTyps = parsePortalRowDescribe(r) // To work around a bug in QueryRow in Go 1.2 and earlier, wait // until the first DataRow has been received. default: cn.bad = true errorf("unknown response for simple query: %q", t) } } } type noRows struct{} var emptyRows noRows var _ driver.Result = noRows{} func (noRows) LastInsertId() (int64, error) { return 0, errNoLastInsertId } func (noRows) RowsAffected() (int64, error) { return 0, errNoRowsAffected } // Decides which column formats to use for a prepared statement. The input is // an array of type oids, one element per result column. func decideColumnFormats(colTyps []oid.Oid, forceText bool) (colFmts []format, colFmtData []byte) { if len(colTyps) == 0 { return nil, colFmtDataAllText } colFmts = make([]format, len(colTyps)) if forceText { return colFmts, colFmtDataAllText } allBinary := true allText := true for i, o := range colTyps { switch o { // This is the list of types to use binary mode for when receiving them // through a prepared statement. If a type appears in this list, it // must also be implemented in binaryDecode in encode.go. 
case oid.T_bytea: fallthrough case oid.T_int8: fallthrough case oid.T_int4: fallthrough case oid.T_int2: fallthrough case oid.T_uuid: colFmts[i] = formatBinary allText = false default: allBinary = false } } if allBinary { return colFmts, colFmtDataAllBinary } else if allText { return colFmts, colFmtDataAllText } else { colFmtData = make([]byte, 2+len(colFmts)*2) binary.BigEndian.PutUint16(colFmtData, uint16(len(colFmts))) for i, v := range colFmts { binary.BigEndian.PutUint16(colFmtData[2+i*2:], uint16(v)) } return colFmts, colFmtData } } func (cn *conn) prepareTo(q, stmtName string) *stmt { st := &stmt{cn: cn, name: stmtName} b := cn.writeBuf('P') b.string(st.name) b.string(q) b.int16(0) b.next('D') b.byte('S') b.string(st.name) b.next('S') cn.send(b) cn.readParseResponse() st.paramTyps, st.colNames, st.colTyps = cn.readStatementDescribeResponse() st.colFmts, st.colFmtData = decideColumnFormats(st.colTyps, cn.disablePreparedBinaryResult) cn.readReadyForQuery() return st } func (cn *conn) Prepare(q string) (_ driver.Stmt, err error) { if cn.bad { return nil, driver.ErrBadConn } defer cn.errRecover(&err) if len(q) >= 4 && strings.EqualFold(q[:4], "COPY") { s, err := cn.prepareCopyIn(q) if err == nil { cn.inCopy = true } return s, err } return cn.prepareTo(q, cn.gname()), nil } func (cn *conn) Close() (err error) { // Skip cn.bad return here because we always want to close a connection. defer cn.errRecover(&err) // Ensure that cn.c.Close is always run. Since error handling is done with // panics and cn.errRecover, the Close must be in a defer. defer func() { cerr := cn.c.Close() if err == nil { err = cerr } }() // Don't go through send(); ListenerConn relies on us not scribbling on the // scratch buffer of this connection. return cn.sendSimpleMessage('X') } // Implement the "Queryer" interface func (cn *conn) Query(query string, args []driver.Value) (_ driver.Rows, err error) { if cn.bad { return nil, driver.ErrBadConn } if cn.inCopy { return nil, errCopyInProgress } defer cn.errRecover(&err) // Check to see if we can use the "simpleQuery" interface, which is // *much* faster than going through prepare/exec if len(args) == 0 { return cn.simpleQuery(query) } if cn.binaryParameters { cn.sendBinaryModeQuery(query, args) cn.readParseResponse() cn.readBindResponse() rows := &rows{cn: cn} rows.colNames, rows.colFmts, rows.colTyps = cn.readPortalDescribeResponse() cn.postExecuteWorkaround() return rows, nil } else { st := cn.prepareTo(query, "") st.exec(args) return &rows{ cn: cn, colNames: st.colNames, colTyps: st.colTyps, colFmts: st.colFmts, }, nil } } // Implement the optional "Execer" interface for one-shot queries func (cn *conn) Exec(query string, args []driver.Value) (res driver.Result, err error) { if cn.bad { return nil, driver.ErrBadConn } defer cn.errRecover(&err) // Check to see if we can use the "simpleExec" interface, which is // *much* faster than going through prepare/exec if len(args) == 0 { // ignore commandTag, our caller doesn't care r, _, err := cn.simpleExec(query) return r, err } if cn.binaryParameters { cn.sendBinaryModeQuery(query, args) cn.readParseResponse() cn.readBindResponse() cn.readPortalDescribeResponse() cn.postExecuteWorkaround() res, _, err = cn.readExecuteResponse("Execute") return res, err } else { // Use the unnamed statement to defer planning until bind // time, or else value-based selectivity estimates cannot be // used. 
st := cn.prepareTo(query, "") r, err := st.Exec(args) if err != nil { panic(err) } return r, err } } func (cn *conn) send(m *writeBuf) { _, err := cn.c.Write(m.wrap()) if err != nil { panic(err) } } func (cn *conn) sendStartupPacket(m *writeBuf) { // sanity check if m.buf[0] != 0 { panic("oops") } _, err := cn.c.Write((m.wrap())[1:]) if err != nil { panic(err) } } // Send a message of type typ to the server on the other end of cn. The // message should have no payload. This method does not use the scratch // buffer. func (cn *conn) sendSimpleMessage(typ byte) (err error) { _, err = cn.c.Write([]byte{typ, '\x00', '\x00', '\x00', '\x04'}) return err } // saveMessage memorizes a message and its buffer in the conn struct. // recvMessage will then return these values on the next call to it. This // method is useful in cases where you have to see what the next message is // going to be (e.g. to see whether it's an error or not) but you can't handle // the message yourself. func (cn *conn) saveMessage(typ byte, buf *readBuf) { if cn.saveMessageType != 0 { cn.bad = true errorf("unexpected saveMessageType %d", cn.saveMessageType) } cn.saveMessageType = typ cn.saveMessageBuffer = *buf } // recvMessage receives any message from the backend, or returns an error if // a problem occurred while reading the message. func (cn *conn) recvMessage(r *readBuf) (byte, error) { // workaround for a QueryRow bug, see exec if cn.saveMessageType != 0 { t := cn.saveMessageType *r = cn.saveMessageBuffer cn.saveMessageType = 0 cn.saveMessageBuffer = nil return t, nil } x := cn.scratch[:5] _, err := io.ReadFull(cn.buf, x) if err != nil { return 0, err } // read the type and length of the message that follows t := x[0] n := int(binary.BigEndian.Uint32(x[1:])) - 4 var y []byte if n <= len(cn.scratch) { y = cn.scratch[:n] } else { y = make([]byte, n) } _, err = io.ReadFull(cn.buf, y) if err != nil { return 0, err } *r = y return t, nil } // recv receives a message from the backend, but if an error happened while // reading the message or the received message was an ErrorResponse, it panics. // NoticeResponses are ignored. This function should generally be used only // during the startup sequence. func (cn *conn) recv() (t byte, r *readBuf) { for { var err error r = &readBuf{} t, err = cn.recvMessage(r) if err != nil { panic(err) } switch t { case 'E': panic(parseError(r)) case 'N': // ignore default: return } } } // recv1Buf is exactly equivalent to recv1, except it uses a buffer supplied by // the caller to avoid an allocation. func (cn *conn) recv1Buf(r *readBuf) byte { for { t, err := cn.recvMessage(r) if err != nil { panic(err) } switch t { case 'A', 'N': // ignore case 'S': cn.processParameterStatus(r) default: return t } } } // recv1 receives a message from the backend, panicking if an error occurs // while attempting to read it. All asynchronous messages are ignored, with // the exception of ErrorResponse. func (cn *conn) recv1() (t byte, r *readBuf) { r = &readBuf{} t = cn.recv1Buf(r) return t, r } func (cn *conn) ssl(o values) { upgrade := ssl(o) if upgrade == nil { // Nothing to do return } w := cn.writeBuf(0) w.int32(80877103) cn.sendStartupPacket(w) b := cn.scratch[:1] _, err := io.ReadFull(cn.c, b) if err != nil { panic(err) } if b[0] != 'S' { panic(ErrSSLNotSupported) } cn.c = upgrade(cn.c) } // isDriverSetting returns true iff a setting is purely for configuring the // driver's options and should not be sent to the server in the connection // startup packet. 
func isDriverSetting(key string) bool { switch key { case "host", "port": return true case "password": return true case "sslmode", "sslcert", "sslkey", "sslrootcert": return true case "fallback_application_name": return true case "connect_timeout": return true case "disable_prepared_binary_result": return true case "binary_parameters": return true default: return false } } func (cn *conn) startup(o values) { w := cn.writeBuf(0) w.int32(196608) // Send the backend the name of the database we want to connect to, and the // user we want to connect as. Additionally, we send over any run-time // parameters potentially included in the connection string. If the server // doesn't recognize any of them, it will reply with an error. for k, v := range o { if isDriverSetting(k) { // skip options which can't be run-time parameters continue } // The protocol requires us to supply the database name as "database" // instead of "dbname". if k == "dbname" { k = "database" } w.string(k) w.string(v) } w.string("") cn.sendStartupPacket(w) for { t, r := cn.recv() switch t { case 'K': case 'S': cn.processParameterStatus(r) case 'R': cn.auth(r, o) case 'Z': cn.processReadyForQuery(r) return default: errorf("unknown response for startup: %q", t) } } } func (cn *conn) auth(r *readBuf, o values) { switch code := r.int32(); code { case 0: // OK case 3: w := cn.writeBuf('p') w.string(o.Get("password")) cn.send(w) t, r := cn.recv() if t != 'R' { errorf("unexpected password response: %q", t) } if r.int32() != 0 { errorf("unexpected authentication response: %q", t) } case 5: s := string(r.next(4)) w := cn.writeBuf('p') w.string("md5" + md5s(md5s(o.Get("password")+o.Get("user"))+s)) cn.send(w) t, r := cn.recv() if t != 'R' { errorf("unexpected password response: %q", t) } if r.int32() != 0 { errorf("unexpected authentication response: %q", t) } default: errorf("unknown authentication response: %d", code) } } type format int const formatText format = 0 const formatBinary format = 1 // One result-column format code with the value 1 (i.e. all binary). var colFmtDataAllBinary []byte = []byte{0, 1, 0, 1} // No result-column format codes (i.e. all text). 
var colFmtDataAllText []byte = []byte{0, 0} type stmt struct { cn *conn name string colNames []string colFmts []format colFmtData []byte colTyps []oid.Oid paramTyps []oid.Oid closed bool } func (st *stmt) Close() (err error) { if st.closed { return nil } if st.cn.bad { return driver.ErrBadConn } defer st.cn.errRecover(&err) w := st.cn.writeBuf('C') w.byte('S') w.string(st.name) st.cn.send(w) st.cn.send(st.cn.writeBuf('S')) t, _ := st.cn.recv1() if t != '3' { st.cn.bad = true errorf("unexpected close response: %q", t) } st.closed = true t, r := st.cn.recv1() if t != 'Z' { st.cn.bad = true errorf("expected ready for query, but got: %q", t) } st.cn.processReadyForQuery(r) return nil } func (st *stmt) Query(v []driver.Value) (r driver.Rows, err error) { if st.cn.bad { return nil, driver.ErrBadConn } defer st.cn.errRecover(&err) st.exec(v) return &rows{ cn: st.cn, colNames: st.colNames, colTyps: st.colTyps, colFmts: st.colFmts, }, nil } func (st *stmt) Exec(v []driver.Value) (res driver.Result, err error) { if st.cn.bad { return nil, driver.ErrBadConn } defer st.cn.errRecover(&err) st.exec(v) res, _, err = st.cn.readExecuteResponse("simple query") return res, err } func (st *stmt) exec(v []driver.Value) { if len(v) >= 65536 { errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(v)) } if len(v) != len(st.paramTyps) { errorf("got %d parameters but the statement requires %d", len(v), len(st.paramTyps)) } cn := st.cn w := cn.writeBuf('B') w.byte(0) // unnamed portal w.string(st.name) if cn.binaryParameters { cn.sendBinaryParameters(w, v) } else { w.int16(0) w.int16(len(v)) for i, x := range v { if x == nil { w.int32(-1) } else { b := encode(&cn.parameterStatus, x, st.paramTyps[i]) w.int32(len(b)) w.bytes(b) } } } w.bytes(st.colFmtData) w.next('E') w.byte(0) w.int32(0) w.next('S') cn.send(w) cn.readBindResponse() cn.postExecuteWorkaround() } func (st *stmt) NumInput() int { return len(st.paramTyps) } // parseComplete parses the "command tag" from a CommandComplete message, and // returns the number of rows affected (if applicable) and a string // identifying only the command that was executed, e.g. "ALTER TABLE". If the // command tag could not be parsed, parseComplete panics. func (cn *conn) parseComplete(commandTag string) (driver.Result, string) { commandsWithAffectedRows := []string{ "SELECT ", // INSERT is handled below "UPDATE ", "DELETE ", "FETCH ", "MOVE ", "COPY ", } var affectedRows *string for _, tag := range commandsWithAffectedRows { if strings.HasPrefix(commandTag, tag) { t := commandTag[len(tag):] affectedRows = &t commandTag = tag[:len(tag)-1] break } } // INSERT also includes the oid of the inserted row in its command tag. // Oids in user tables are deprecated, and the oid is only returned when // exactly one row is inserted, so it's unlikely to be of value to any // real-world application and we can ignore it. 
if affectedRows == nil && strings.HasPrefix(commandTag, "INSERT ") { parts := strings.Split(commandTag, " ") if len(parts) != 3 { cn.bad = true errorf("unexpected INSERT command tag %s", commandTag) } affectedRows = &parts[len(parts)-1] commandTag = "INSERT" } // There should be no affected rows attached to the tag, just return it if affectedRows == nil { return driver.RowsAffected(0), commandTag } n, err := strconv.ParseInt(*affectedRows, 10, 64) if err != nil { cn.bad = true errorf("could not parse commandTag: %s", err) } return driver.RowsAffected(n), commandTag } type rows struct { cn *conn colNames []string colTyps []oid.Oid colFmts []format done bool rb readBuf } func (rs *rows) Close() error { // no need to look at cn.bad as Next() will for { err := rs.Next(nil) switch err { case nil: case io.EOF: return nil default: return err } } } func (rs *rows) Columns() []string { return rs.colNames } func (rs *rows) Next(dest []driver.Value) (err error) { if rs.done { return io.EOF } conn := rs.cn if conn.bad { return driver.ErrBadConn } defer conn.errRecover(&err) for { t := conn.recv1Buf(&rs.rb) switch t { case 'E': err = parseError(&rs.rb) case 'C', 'I': continue case 'Z': conn.processReadyForQuery(&rs.rb) rs.done = true if err != nil { return err } return io.EOF case 'D': n := rs.rb.int16() if err != nil { conn.bad = true errorf("unexpected DataRow after error %s", err) } if n < len(dest) { dest = dest[:n] } for i := range dest { l := rs.rb.int32() if l == -1 { dest[i] = nil continue } dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i], rs.colFmts[i]) } return case 'T': rs.colNames, rs.colFmts, rs.colTyps = parsePortalRowDescribe(&rs.rb) return io.EOF default: errorf("unexpected message after execute: %q", t) } } } func (rs *rows) HasNextResultSet() bool { return !rs.done } func (rs *rows) NextResultSet() error { return nil } // QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be // used as part of an SQL statement. For example: // // tblname := "my_table" // data := "my_data" // err = db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", pq.QuoteIdentifier(tblname)), data) // // Any double quotes in name will be escaped. The quoted identifier will be // case sensitive when used in a query. If the input string contains a zero // byte, the result will be truncated immediately before it. func QuoteIdentifier(name string) string { end := strings.IndexRune(name, 0) if end > -1 { name = name[:end] } return `"` + strings.Replace(name, `"`, `""`, -1) + `"` } func md5s(s string) string { h := md5.New() h.Write([]byte(s)) return fmt.Sprintf("%x", h.Sum(nil)) } func (cn *conn) sendBinaryParameters(b *writeBuf, args []driver.Value) { // Do one pass over the parameters to see if we're going to send any of // them over in binary. If we are, create a paramFormats array at the // same time. 
var paramFormats []int for i, x := range args { _, ok := x.([]byte) if ok { if paramFormats == nil { paramFormats = make([]int, len(args)) } paramFormats[i] = 1 } } if paramFormats == nil { b.int16(0) } else { b.int16(len(paramFormats)) for _, x := range paramFormats { b.int16(x) } } b.int16(len(args)) for _, x := range args { if x == nil { b.int32(-1) } else { datum := binaryEncode(&cn.parameterStatus, x) b.int32(len(datum)) b.bytes(datum) } } } func (cn *conn) sendBinaryModeQuery(query string, args []driver.Value) { if len(args) >= 65536 { errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(args)) } b := cn.writeBuf('P') b.byte(0) // unnamed statement b.string(query) b.int16(0) b.next('B') b.int16(0) // unnamed portal and statement cn.sendBinaryParameters(b, args) b.bytes(colFmtDataAllText) b.next('D') b.byte('P') b.byte(0) // unnamed portal b.next('E') b.byte(0) b.int32(0) b.next('S') cn.send(b) } func (c *conn) processParameterStatus(r *readBuf) { var err error param := r.string() switch param { case "server_version": var major1 int var major2 int var minor int _, err = fmt.Sscanf(r.string(), "%d.%d.%d", &major1, &major2, &minor) if err == nil { c.parameterStatus.serverVersion = major1*10000 + major2*100 + minor } case "TimeZone": c.parameterStatus.currentLocation, err = time.LoadLocation(r.string()) if err != nil { c.parameterStatus.currentLocation = nil } default: // ignore } } func (c *conn) processReadyForQuery(r *readBuf) { c.txnStatus = transactionStatus(r.byte()) } func (cn *conn) readReadyForQuery() { t, r := cn.recv1() switch t { case 'Z': cn.processReadyForQuery(r) return default: cn.bad = true errorf("unexpected message %q; expected ReadyForQuery", t) } } func (cn *conn) readParseResponse() { t, r := cn.recv1() switch t { case '1': return case 'E': err := parseError(r) cn.readReadyForQuery() panic(err) default: cn.bad = true errorf("unexpected Parse response %q", t) } } func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames []string, colTyps []oid.Oid) { for { t, r := cn.recv1() switch t { case 't': nparams := r.int16() paramTyps = make([]oid.Oid, nparams) for i := range paramTyps { paramTyps[i] = r.oid() } case 'n': return paramTyps, nil, nil case 'T': colNames, colTyps = parseStatementRowDescribe(r) return paramTyps, colNames, colTyps case 'E': err := parseError(r) cn.readReadyForQuery() panic(err) default: cn.bad = true errorf("unexpected Describe statement response %q", t) } } } func (cn *conn) readPortalDescribeResponse() (colNames []string, colFmts []format, colTyps []oid.Oid) { t, r := cn.recv1() switch t { case 'T': return parsePortalRowDescribe(r) case 'n': return nil, nil, nil case 'E': err := parseError(r) cn.readReadyForQuery() panic(err) default: cn.bad = true errorf("unexpected Describe response %q", t) } panic("not reached") } func (cn *conn) readBindResponse() { t, r := cn.recv1() switch t { case '2': return case 'E': err := parseError(r) cn.readReadyForQuery() panic(err) default: cn.bad = true errorf("unexpected Bind response %q", t) } } func (cn *conn) postExecuteWorkaround() { // Work around a bug in sql.DB.QueryRow: in Go 1.2 and earlier it ignores // any errors from rows.Next, which masks errors that happened during the // execution of the query. To avoid the problem in common cases, we wait // here for one more message from the database. 
If it's not an error the // query will likely succeed (or perhaps has already, if it's a // CommandComplete), so we push the message into the conn struct; recv1 // will return it as the next message for rows.Next or rows.Close. // However, if it's an error, we wait until ReadyForQuery and then return // the error to our caller. for { t, r := cn.recv1() switch t { case 'E': err := parseError(r) cn.readReadyForQuery() panic(err) case 'C', 'D', 'I': // the query didn't fail, but we can't process this message cn.saveMessage(t, r) return default: cn.bad = true errorf("unexpected message during extended query execution: %q", t) } } } // Only for Exec(), since we ignore the returned data func (cn *conn) readExecuteResponse(protocolState string) (res driver.Result, commandTag string, err error) { for { t, r := cn.recv1() switch t { case 'C': if err != nil { cn.bad = true errorf("unexpected CommandComplete after error %s", err) } res, commandTag = cn.parseComplete(r.string()) case 'Z': cn.processReadyForQuery(r) if res == nil && err == nil { err = errUnexpectedReady } return res, commandTag, err case 'E': err = parseError(r) case 'T', 'D', 'I': if err != nil { cn.bad = true errorf("unexpected %q after error %s", t, err) } if t == 'I' { res = emptyRows } // ignore any results default: cn.bad = true errorf("unknown %s response: %q", protocolState, t) } } } func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []oid.Oid) { n := r.int16() colNames = make([]string, n) colTyps = make([]oid.Oid, n) for i := range colNames { colNames[i] = r.string() r.next(6) colTyps[i] = r.oid() r.next(6) // format code not known when describing a statement; always 0 r.next(2) } return } func parsePortalRowDescribe(r *readBuf) (colNames []string, colFmts []format, colTyps []oid.Oid) { n := r.int16() colNames = make([]string, n) colFmts = make([]format, n) colTyps = make([]oid.Oid, n) for i := range colNames { colNames[i] = r.string() r.next(6) colTyps[i] = r.oid() r.next(6) colFmts[i] = format(r.int16()) } return } // parseEnviron tries to mimic some of libpq's environment handling // // To ease testing, it does not directly reference os.Environ, but is // designed to accept its output. // // Environment-set connection information is intended to have a higher // precedence than a library default but lower than any explicitly // passed information (such as in the URL or connection string). func parseEnviron(env []string) (out map[string]string) { out = make(map[string]string) for _, v := range env { parts := strings.SplitN(v, "=", 2) accrue := func(keyname string) { out[keyname] = parts[1] } unsupported := func() { panic(fmt.Sprintf("setting %v not supported", parts[0])) } // The order of these is the same as is seen in the // PostgreSQL 9.1 manual. Unsupported but well-defined // keys cause a panic; these should be unset prior to // execution. Options which pq expects to be set to a // certain value are allowed, but must be set to that // value if present (they can, of course, be absent). 
switch parts[0] { case "PGHOST": accrue("host") case "PGHOSTADDR": unsupported() case "PGPORT": accrue("port") case "PGDATABASE": accrue("dbname") case "PGUSER": accrue("user") case "PGPASSWORD": accrue("password") case "PGSERVICE", "PGSERVICEFILE", "PGREALM": unsupported() case "PGOPTIONS": accrue("options") case "PGAPPNAME": accrue("application_name") case "PGSSLMODE": accrue("sslmode") case "PGSSLCERT": accrue("sslcert") case "PGSSLKEY": accrue("sslkey") case "PGSSLROOTCERT": accrue("sslrootcert") case "PGREQUIRESSL", "PGSSLCRL": unsupported() case "PGREQUIREPEER": unsupported() case "PGKRBSRVNAME", "PGGSSLIB": unsupported() case "PGCONNECT_TIMEOUT": accrue("connect_timeout") case "PGCLIENTENCODING": accrue("client_encoding") case "PGDATESTYLE": accrue("datestyle") case "PGTZ": accrue("timezone") case "PGGEQO": accrue("geqo") case "PGSYSCONFDIR", "PGLOCALEDIR": unsupported() } } return out } // isUTF8 returns whether name is a fuzzy variation of the string "UTF-8". func isUTF8(name string) bool { // Recognize all sorts of silly things as "UTF-8", like Postgres does s := strings.Map(alnumLowerASCII, name) return s == "utf8" || s == "unicode" } func alnumLowerASCII(ch rune) rune { if 'A' <= ch && ch <= 'Z' { return ch + ('a' - 'A') } if 'a' <= ch && ch <= 'z' || '0' <= ch && ch <= '9' { return ch } return -1 // discard }
vendor/github.com/lib/pq/conn.go
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.0038295392878353596, 0.00036723350058309734, 0.0001635095977690071, 0.0001729656069073826, 0.0005873761256225407 ]
{ "id": 10, "code_window": [ "\n", "\t\tif out.String() != expectedInlineScriptsOut[i] {\n", "\t\t\tt.Fatalf(\"bad: %v\", out.String())\n", "\t\t}\n", "\t}\n", "}\n", "\n", "func TestResourceProvider_CollectScripts_script(t *testing.T) {\n" ], "labels": [ "keep", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tif out.String() != expectedScriptOut {\n", "\t\tt.Fatalf(\"bad: %v\", out.String())\n" ], "file_path": "builtin/provisioners/remote-exec/resource_provisioner_test.go", "type": "replace", "edit_start_line_idx": 97 }
package vault import ( "fmt" "io/ioutil" "log" "strings" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" "github.com/hashicorp/vault/api" "github.com/mitchellh/go-homedir" ) func Provider() terraform.ResourceProvider { return &schema.Provider{ Schema: map[string]*schema.Schema{ "address": &schema.Schema{ Type: schema.TypeString, Required: true, DefaultFunc: schema.EnvDefaultFunc("VAULT_ADDR", nil), Description: "URL of the root of the target Vault server.", }, "token": &schema.Schema{ Type: schema.TypeString, Required: true, DefaultFunc: schema.EnvDefaultFunc("VAULT_TOKEN", ""), Description: "Token to use to authenticate to Vault.", }, "ca_cert_file": &schema.Schema{ Type: schema.TypeString, Optional: true, DefaultFunc: schema.EnvDefaultFunc("VAULT_CACERT", ""), Description: "Path to a CA certificate file to validate the server's certificate.", }, "ca_cert_dir": &schema.Schema{ Type: schema.TypeString, Optional: true, DefaultFunc: schema.EnvDefaultFunc("VAULT_CAPATH", ""), Description: "Path to directory containing CA certificate files to validate the server's certificate.", }, "client_auth": &schema.Schema{ Type: schema.TypeList, Optional: true, Description: "Client authentication credentials.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "cert_file": &schema.Schema{ Type: schema.TypeString, Required: true, DefaultFunc: schema.EnvDefaultFunc("VAULT_CLIENT_CERT", ""), Description: "Path to a file containing the client certificate.", }, "key_file": &schema.Schema{ Type: schema.TypeString, Required: true, DefaultFunc: schema.EnvDefaultFunc("VAULT_CLIENT_KEY", ""), Description: "Path to a file containing the private key that the certificate was issued for.", }, }, }, }, "skip_tls_verify": &schema.Schema{ Type: schema.TypeBool, Optional: true, DefaultFunc: schema.EnvDefaultFunc("VAULT_SKIP_VERIFY", ""), Description: "Set this to true only if the target Vault server is an insecure development instance.", }, "max_lease_ttl_seconds": &schema.Schema{ Type: schema.TypeInt, Optional: true, // Default is 20min, which is intended to be enough time for // a reasonable Terraform run can complete but not // significantly longer, so that any leases are revoked shortly // after Terraform has finished running. 
DefaultFunc: schema.EnvDefaultFunc("TERRAFORM_VAULT_MAX_TTL", 1200), Description: "Maximum TTL for secret leases requested by this provider", }, }, ConfigureFunc: providerConfigure, DataSourcesMap: map[string]*schema.Resource{ "vault_generic_secret": genericSecretDataSource(), }, ResourcesMap: map[string]*schema.Resource{ "vault_generic_secret": genericSecretResource(), }, } } func providerConfigure(d *schema.ResourceData) (interface{}, error) { config := &api.Config{ Address: d.Get("address").(string), } clientAuthI := d.Get("client_auth").([]interface{}) if len(clientAuthI) > 1 { return nil, fmt.Errorf("client_auth block may appear only once") } clientAuthCert := "" clientAuthKey := "" if len(clientAuthI) == 1 { clientAuth := clientAuthI[0].(map[string]interface{}) clientAuthCert = clientAuth["cert_file"].(string) clientAuthKey = clientAuth["key_file"].(string) } config.ConfigureTLS(&api.TLSConfig{ CACert: d.Get("ca_cert_file").(string), CAPath: d.Get("ca_cert_dir").(string), Insecure: d.Get("skip_tls_verify").(bool), ClientCert: clientAuthCert, ClientKey: clientAuthKey, }) client, err := api.NewClient(config) if err != nil { return nil, fmt.Errorf("failed to configure Vault API: %s", err) } token := d.Get("token").(string) if token == "" { // Use the vault CLI's token, if present. homePath, err := homedir.Dir() if err != nil { return nil, fmt.Errorf("Can't find home directory when looking for ~/.vault-token: %s", err) } tokenBytes, err := ioutil.ReadFile(homePath + "/.vault-token") if err != nil { return nil, fmt.Errorf("No vault token found: %s", err) } token = strings.TrimSpace(string(tokenBytes)) } // In order to enforce our relatively-short lease TTL, we derive a // temporary child token that inherits all of the policies of the // token we were given but expires after max_lease_ttl_seconds. // // The intent here is that Terraform will need to re-fetch any // secrets on each run and so we limit the exposure risk of secrets // that end up stored in the Terraform state, assuming that they are // credentials that Vault is able to revoke. // // Caution is still required with state files since not all secrets // can explicitly be revoked, and this limited scope won't apply to // any secrets that are *written* by Terraform to Vault. client.SetToken(token) renewable := false childTokenLease, err := client.Auth().Token().Create(&api.TokenCreateRequest{ DisplayName: "terraform", TTL: fmt.Sprintf("%ds", d.Get("max_lease_ttl_seconds").(int)), ExplicitMaxTTL: fmt.Sprintf("%ds", d.Get("max_lease_ttl_seconds").(int)), Renewable: &renewable, }) if err != nil { return nil, fmt.Errorf("failed to create limited child token: %s", err) } childToken := childTokenLease.Auth.ClientToken policies := childTokenLease.Auth.Policies log.Printf("[INFO] Using Vault token with the following policies: %s", strings.Join(policies, ", ")) client.SetToken(childToken) return client, nil }
builtin/providers/vault/provider.go
0
https://github.com/hashicorp/terraform/commit/640faf18c3cf4dbe2b4c1eb62895634ded294181
[ 0.0001971427263924852, 0.00017385500541422516, 0.00016710023919586092, 0.0001727797498460859, 0.000006158389169286238 ]
{ "id": 1, "code_window": [ "\t\t// If snap-sync is requested, this flag is also required\n", "\t\tif cfg.SyncMode == downloader.SnapSync {\n", "\t\t\tlog.Info(\"Snap sync requested, enabling --snapshot\")\n", "\t\t} else {\n", "\t\t\tcfg.TrieCleanCache += cfg.SnapshotCache\n", "\t\t\tcfg.SnapshotCache = 0 // Disabled\n", "\t\t}\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tif !ctx.Bool(SnapshotFlag.Name) {\n", "\t\t\t\tlog.Warn(\"Snap sync requested, enabling --snapshot\")\n", "\t\t\t}\n", "\t\t\tif cfg.SnapshotCache == 0 {\n", "\t\t\t\tlog.Warn(\"Snap sync requested, resetting --cache.snapshot\")\n", "\t\t\t\tcfg.SnapshotCache = ctx.Int(CacheFlag.Name) * CacheSnapshotFlag.Value / 100\n", "\t\t\t}\n" ], "file_path": "cmd/utils/flags.go", "type": "replace", "edit_start_line_idx": 1003 }
// Copyright 2015 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The go-ethereum library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. package eth import ( "errors" "math" "math/big" "sync" "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/beacon" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/forkid" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/fetcher" "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/eth/protocols/snap" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/trie/triedb/pathdb" ) const ( // txChanSize is the size of channel listening to NewTxsEvent. // The number is referenced from the size of tx pool. txChanSize = 4096 // txMaxBroadcastSize is the max size of a transaction that will be broadcasted. // All transactions with a higher size will be announced and need to be fetched // by the peer. txMaxBroadcastSize = 4096 ) var syncChallengeTimeout = 15 * time.Second // Time allowance for a node to reply to the sync progress challenge // txPool defines the methods needed from a transaction pool implementation to // support all the operations needed by the Ethereum chain protocols. type txPool interface { // Has returns an indicator whether txpool has a transaction // cached with the given hash. Has(hash common.Hash) bool // Get retrieves the transaction from local txpool with given // tx hash. Get(hash common.Hash) *types.Transaction // Add should add the given transactions to the pool. Add(txs []*types.Transaction, local bool, sync bool) []error // Pending should return pending transactions. // The slice should be modifiable by the caller. Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction // SubscribeTransactions subscribes to new transaction events. The subscriber // can decide whether to receive notifications only for newly seen transactions // or also for reorged out ones. SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription } // handlerConfig is the collection of initialization parameters to create a full // node network handler. 
type handlerConfig struct { Database ethdb.Database // Database for direct sync insertions Chain *core.BlockChain // Blockchain to serve data from TxPool txPool // Transaction pool to propagate from Merger *consensus.Merger // The manager for eth1/2 transition Network uint64 // Network identifier to advertise Sync downloader.SyncMode // Whether to snap or full sync BloomCache uint64 // Megabytes to alloc for snap sync bloom EventMux *event.TypeMux // Legacy event mux, deprecate for `feed` RequiredBlocks map[uint64]common.Hash // Hard coded map of required block hashes for sync challenges } type handler struct { networkID uint64 forkFilter forkid.Filter // Fork ID filter, constant across the lifetime of the node snapSync atomic.Bool // Flag whether snap sync is enabled (gets disabled if we already have blocks) synced atomic.Bool // Flag whether we're considered synchronised (enables transaction processing) database ethdb.Database txpool txPool chain *core.BlockChain maxPeers int downloader *downloader.Downloader blockFetcher *fetcher.BlockFetcher txFetcher *fetcher.TxFetcher peers *peerSet merger *consensus.Merger eventMux *event.TypeMux txsCh chan core.NewTxsEvent txsSub event.Subscription minedBlockSub *event.TypeMuxSubscription requiredBlocks map[uint64]common.Hash // channels for fetcher, syncer, txsyncLoop quitSync chan struct{} chainSync *chainSyncer wg sync.WaitGroup handlerStartCh chan struct{} handlerDoneCh chan struct{} } // newHandler returns a handler for all Ethereum chain management protocol. func newHandler(config *handlerConfig) (*handler, error) { // Create the protocol manager with the base fields if config.EventMux == nil { config.EventMux = new(event.TypeMux) // Nicety initialization for tests } h := &handler{ networkID: config.Network, forkFilter: forkid.NewFilter(config.Chain), eventMux: config.EventMux, database: config.Database, txpool: config.TxPool, chain: config.Chain, peers: newPeerSet(), merger: config.Merger, requiredBlocks: config.RequiredBlocks, quitSync: make(chan struct{}), handlerDoneCh: make(chan struct{}), handlerStartCh: make(chan struct{}), } if config.Sync == downloader.FullSync { // The database seems empty as the current block is the genesis. Yet the snap // block is ahead, so snap sync was enabled for this node at a certain point. // The scenarios where this can happen is // * if the user manually (or via a bad block) rolled back a snap sync node // below the sync point. // * the last snap sync is not finished while user specifies a full sync this // time. But we don't have any recent state for full sync. // In these cases however it's safe to reenable snap sync. fullBlock, snapBlock := h.chain.CurrentBlock(), h.chain.CurrentSnapBlock() if fullBlock.Number.Uint64() == 0 && snapBlock.Number.Uint64() > 0 { h.snapSync.Store(true) log.Warn("Switch sync mode from full sync to snap sync", "reason", "snap sync incomplete") } else if !h.chain.HasState(fullBlock.Root) { h.snapSync.Store(true) log.Warn("Switch sync mode from full sync to snap sync", "reason", "head state missing") } } else { head := h.chain.CurrentBlock() if head.Number.Uint64() > 0 && h.chain.HasState(head.Root) { // Print warning log if database is not empty to run snap sync. 
log.Warn("Switch sync mode from snap sync to full sync", "reason", "snap sync complete") } else { // If snap sync was requested and our database is empty, grant it h.snapSync.Store(true) log.Info("Enabled snap sync", "head", head.Number, "hash", head.Hash()) } } // Construct the downloader (long sync) h.downloader = downloader.New(config.Database, h.eventMux, h.chain, nil, h.removePeer, h.enableSyncedFeatures) if ttd := h.chain.Config().TerminalTotalDifficulty; ttd != nil { if h.chain.Config().TerminalTotalDifficultyPassed { log.Info("Chain post-merge, sync via beacon client") } else { head := h.chain.CurrentBlock() if td := h.chain.GetTd(head.Hash(), head.Number.Uint64()); td.Cmp(ttd) >= 0 { log.Info("Chain post-TTD, sync via beacon client") } else { log.Warn("Chain pre-merge, sync via PoW (ensure beacon client is ready)") } } } else if h.chain.Config().TerminalTotalDifficultyPassed { log.Error("Chain configured post-merge, but without TTD. Are you debugging sync?") } // Construct the fetcher (short sync) validator := func(header *types.Header) error { // All the block fetcher activities should be disabled // after the transition. Print the warning log. if h.merger.PoSFinalized() { log.Warn("Unexpected validation activity", "hash", header.Hash(), "number", header.Number) return errors.New("unexpected behavior after transition") } // Reject all the PoS style headers in the first place. No matter // the chain has finished the transition or not, the PoS headers // should only come from the trusted consensus layer instead of // p2p network. if beacon, ok := h.chain.Engine().(*beacon.Beacon); ok { if beacon.IsPoSHeader(header) { return errors.New("unexpected post-merge header") } } return h.chain.Engine().VerifyHeader(h.chain, header) } heighter := func() uint64 { return h.chain.CurrentBlock().Number.Uint64() } inserter := func(blocks types.Blocks) (int, error) { // All the block fetcher activities should be disabled // after the transition. Print the warning log. if h.merger.PoSFinalized() { var ctx []interface{} ctx = append(ctx, "blocks", len(blocks)) if len(blocks) > 0 { ctx = append(ctx, "firsthash", blocks[0].Hash()) ctx = append(ctx, "firstnumber", blocks[0].Number()) ctx = append(ctx, "lasthash", blocks[len(blocks)-1].Hash()) ctx = append(ctx, "lastnumber", blocks[len(blocks)-1].Number()) } log.Warn("Unexpected insertion activity", ctx...) return 0, errors.New("unexpected behavior after transition") } // If snap sync is running, deny importing weird blocks. This is a problematic // clause when starting up a new network, because snap-syncing miners might not // accept each others' blocks until a restart. Unfortunately we haven't figured // out a way yet where nodes can decide unilaterally whether the network is new // or not. This should be fixed if we figure out a solution. if !h.synced.Load() { log.Warn("Syncing, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash()) return 0, nil } if h.merger.TDDReached() { // The blocks from the p2p network is regarded as untrusted // after the transition. In theory block gossip should be disabled // entirely whenever the transition is started. But in order to // handle the transition boundary reorg in the consensus-layer, // the legacy blocks are still accepted, but only for the terminal // pow blocks. 
Spec: https://github.com/ethereum/EIPs/blob/master/EIPS/eip-3675.md#halt-the-importing-of-pow-blocks for i, block := range blocks { ptd := h.chain.GetTd(block.ParentHash(), block.NumberU64()-1) if ptd == nil { return 0, nil } td := new(big.Int).Add(ptd, block.Difficulty()) if !h.chain.Config().IsTerminalPoWBlock(ptd, td) { log.Info("Filtered out non-terminal pow block", "number", block.NumberU64(), "hash", block.Hash()) return 0, nil } if err := h.chain.InsertBlockWithoutSetHead(block); err != nil { return i, err } } return 0, nil } return h.chain.InsertChain(blocks) } h.blockFetcher = fetcher.NewBlockFetcher(false, nil, h.chain.GetBlockByHash, validator, h.BroadcastBlock, heighter, nil, inserter, h.removePeer) fetchTx := func(peer string, hashes []common.Hash) error { p := h.peers.peer(peer) if p == nil { return errors.New("unknown peer") } return p.RequestTxs(hashes) } addTxs := func(txs []*types.Transaction) []error { return h.txpool.Add(txs, false, false) } h.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, addTxs, fetchTx, h.removePeer) h.chainSync = newChainSyncer(h) return h, nil } // protoTracker tracks the number of active protocol handlers. func (h *handler) protoTracker() { defer h.wg.Done() var active int for { select { case <-h.handlerStartCh: active++ case <-h.handlerDoneCh: active-- case <-h.quitSync: // Wait for all active handlers to finish. for ; active > 0; active-- { <-h.handlerDoneCh } return } } } // incHandlers signals to increment the number of active handlers if not // quitting. func (h *handler) incHandlers() bool { select { case h.handlerStartCh <- struct{}{}: return true case <-h.quitSync: return false } } // decHandlers signals to decrement the number of active handlers. func (h *handler) decHandlers() { h.handlerDoneCh <- struct{}{} } // runEthPeer registers an eth peer into the joint eth/snap peerset, adds it to // various subsystems and starts handling messages. func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { if !h.incHandlers() { return p2p.DiscQuitting } defer h.decHandlers() // If the peer has a `snap` extension, wait for it to connect so we can have // a uniform initialization/teardown mechanism snap, err := h.peers.waitSnapExtension(peer) if err != nil { peer.Log().Error("Snapshot extension barrier failed", "err", err) return err } // Execute the Ethereum handshake var ( genesis = h.chain.Genesis() head = h.chain.CurrentHeader() hash = head.Hash() number = head.Number.Uint64() td = h.chain.GetTd(hash, number) ) forkID := forkid.NewID(h.chain.Config(), genesis, number, head.Time) if err := peer.Handshake(h.networkID, td, hash, genesis.Hash(), forkID, h.forkFilter); err != nil { peer.Log().Debug("Ethereum handshake failed", "err", err) return err } reject := false // reserved peer slots if h.snapSync.Load() { if snap == nil { // If we are running snap-sync, we want to reserve roughly half the peer // slots for peers supporting the snap protocol. // The logic here is; we only allow up to 5 more non-snap peers than snap-peers. 
if all, snp := h.peers.len(), h.peers.snapLen(); all-snp > snp+5 { reject = true } } } // Ignore maxPeers if this is a trusted peer if !peer.Peer.Info().Network.Trusted { if reject || h.peers.len() >= h.maxPeers { return p2p.DiscTooManyPeers } } peer.Log().Debug("Ethereum peer connected", "name", peer.Name()) // Register the peer locally if err := h.peers.registerPeer(peer, snap); err != nil { peer.Log().Error("Ethereum peer registration failed", "err", err) return err } defer h.unregisterPeer(peer.ID()) p := h.peers.peer(peer.ID()) if p == nil { return errors.New("peer dropped during handling") } // Register the peer in the downloader. If the downloader considers it banned, we disconnect if err := h.downloader.RegisterPeer(peer.ID(), peer.Version(), peer); err != nil { peer.Log().Error("Failed to register peer in eth syncer", "err", err) return err } if snap != nil { if err := h.downloader.SnapSyncer.Register(snap); err != nil { peer.Log().Error("Failed to register peer in snap syncer", "err", err) return err } } h.chainSync.handlePeerEvent() // Propagate existing transactions. new transactions appearing // after this will be sent via broadcasts. h.syncTransactions(peer) // Create a notification channel for pending requests if the peer goes down dead := make(chan struct{}) defer close(dead) // If we have any explicit peer required block hashes, request them for number, hash := range h.requiredBlocks { resCh := make(chan *eth.Response) req, err := peer.RequestHeadersByNumber(number, 1, 0, false, resCh) if err != nil { return err } go func(number uint64, hash common.Hash, req *eth.Request) { // Ensure the request gets cancelled in case of error/drop defer req.Close() timeout := time.NewTimer(syncChallengeTimeout) defer timeout.Stop() select { case res := <-resCh: headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersRequest)) if len(headers) == 0 { // Required blocks are allowed to be missing if the remote // node is not yet synced res.Done <- nil return } // Validate the header and either drop the peer or continue if len(headers) > 1 { res.Done <- errors.New("too many headers in required block response") return } if headers[0].Number.Uint64() != number || headers[0].Hash() != hash { peer.Log().Info("Required block mismatch, dropping peer", "number", number, "hash", headers[0].Hash(), "want", hash) res.Done <- errors.New("required block mismatch") return } peer.Log().Debug("Peer required block verified", "number", number, "hash", hash) res.Done <- nil case <-timeout.C: peer.Log().Warn("Required block challenge timed out, dropping", "addr", peer.RemoteAddr(), "type", peer.Name()) h.removePeer(peer.ID()) } }(number, hash, req) } // Handle incoming messages until the connection is torn down return handler(peer) } // runSnapExtension registers a `snap` peer into the joint eth/snap peerset and // starts handling inbound messages. As `snap` is only a satellite protocol to // `eth`, all subsystem registrations and lifecycle management will be done by // the main `eth` handler to prevent strange races. 
func (h *handler) runSnapExtension(peer *snap.Peer, handler snap.Handler) error { if !h.incHandlers() { return p2p.DiscQuitting } defer h.decHandlers() if err := h.peers.registerSnapExtension(peer); err != nil { if metrics.Enabled { if peer.Inbound() { snap.IngressRegistrationErrorMeter.Mark(1) } else { snap.EgressRegistrationErrorMeter.Mark(1) } } peer.Log().Debug("Snapshot extension registration failed", "err", err) return err } return handler(peer) } // removePeer requests disconnection of a peer. func (h *handler) removePeer(id string) { peer := h.peers.peer(id) if peer != nil { peer.Peer.Disconnect(p2p.DiscUselessPeer) } } // unregisterPeer removes a peer from the downloader, fetchers and main peer set. func (h *handler) unregisterPeer(id string) { // Create a custom logger to avoid printing the entire id var logger log.Logger if len(id) < 16 { // Tests use short IDs, don't choke on them logger = log.New("peer", id) } else { logger = log.New("peer", id[:8]) } // Abort if the peer does not exist peer := h.peers.peer(id) if peer == nil { logger.Error("Ethereum peer removal failed", "err", errPeerNotRegistered) return } // Remove the `eth` peer if it exists logger.Debug("Removing Ethereum peer", "snap", peer.snapExt != nil) // Remove the `snap` extension if it exists if peer.snapExt != nil { h.downloader.SnapSyncer.Unregister(id) } h.downloader.UnregisterPeer(id) h.txFetcher.Drop(id) if err := h.peers.unregisterPeer(id); err != nil { logger.Error("Ethereum peer removal failed", "err", err) } } func (h *handler) Start(maxPeers int) { h.maxPeers = maxPeers // broadcast and announce transactions (only new ones, not resurrected ones) h.wg.Add(1) h.txsCh = make(chan core.NewTxsEvent, txChanSize) h.txsSub = h.txpool.SubscribeTransactions(h.txsCh, false) go h.txBroadcastLoop() // broadcast mined blocks h.wg.Add(1) h.minedBlockSub = h.eventMux.Subscribe(core.NewMinedBlockEvent{}) go h.minedBroadcastLoop() // start sync handlers h.wg.Add(1) go h.chainSync.loop() // start peer handler tracker h.wg.Add(1) go h.protoTracker() } func (h *handler) Stop() { h.txsSub.Unsubscribe() // quits txBroadcastLoop h.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop // Quit chainSync and txsync64. // After this is done, no new peers will be accepted. close(h.quitSync) // Disconnect existing sessions. // This also closes the gate for any new registrations on the peer set. // sessions which are already established but not added to h.peers yet // will exit when they try to register. h.peers.close() h.wg.Wait() log.Info("Ethereum protocol stopped") } // BroadcastBlock will either propagate a block to a subset of its peers, or // will only announce its availability (depending what's requested). func (h *handler) BroadcastBlock(block *types.Block, propagate bool) { // Disable the block propagation if the chain has already entered the PoS // stage. The block propagation is delegated to the consensus layer. if h.merger.PoSFinalized() { return } // Disable the block propagation if it's the post-merge block. 
if beacon, ok := h.chain.Engine().(*beacon.Beacon); ok { if beacon.IsPoSHeader(block.Header()) { return } } hash := block.Hash() peers := h.peers.peersWithoutBlock(hash) // If propagation is requested, send to a subset of the peer if propagate { // Calculate the TD of the block (it's not imported yet, so block.Td is not valid) var td *big.Int if parent := h.chain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent != nil { td = new(big.Int).Add(block.Difficulty(), h.chain.GetTd(block.ParentHash(), block.NumberU64()-1)) } else { log.Error("Propagating dangling block", "number", block.Number(), "hash", hash) return } // Send the block to a subset of our peers transfer := peers[:int(math.Sqrt(float64(len(peers))))] for _, peer := range transfer { peer.AsyncSendNewBlock(block, td) } log.Trace("Propagated block", "hash", hash, "recipients", len(transfer), "duration", common.PrettyDuration(time.Since(block.ReceivedAt))) return } // Otherwise if the block is indeed in out own chain, announce it if h.chain.HasBlock(hash, block.NumberU64()) { for _, peer := range peers { peer.AsyncSendNewBlockHash(block) } log.Trace("Announced block", "hash", hash, "recipients", len(peers), "duration", common.PrettyDuration(time.Since(block.ReceivedAt))) } } // BroadcastTransactions will propagate a batch of transactions // - To a square root of all peers for non-blob transactions // - And, separately, as announcements to all peers which are not known to // already have the given transaction. func (h *handler) BroadcastTransactions(txs types.Transactions) { var ( blobTxs int // Number of blob transactions to announce only largeTxs int // Number of large transactions to announce only directCount int // Number of transactions sent directly to peers (duplicates included) directPeers int // Number of peers that were sent transactions directly annCount int // Number of transactions announced across all peers (duplicates included) annPeers int // Number of peers announced about transactions txset = make(map[*ethPeer][]common.Hash) // Set peer->hash to transfer directly annos = make(map[*ethPeer][]common.Hash) // Set peer->hash to announce ) // Broadcast transactions to a batch of peers not knowing about it for _, tx := range txs { peers := h.peers.peersWithoutTransaction(tx.Hash()) var numDirect int switch { case tx.Type() == types.BlobTxType: blobTxs++ case tx.Size() > txMaxBroadcastSize: largeTxs++ default: numDirect = int(math.Sqrt(float64(len(peers)))) } // Send the tx unconditionally to a subset of our peers for _, peer := range peers[:numDirect] { txset[peer] = append(txset[peer], tx.Hash()) } // For the remaining peers, send announcement only for _, peer := range peers[numDirect:] { annos[peer] = append(annos[peer], tx.Hash()) } } for peer, hashes := range txset { directPeers++ directCount += len(hashes) peer.AsyncSendTransactions(hashes) } for peer, hashes := range annos { annPeers++ annCount += len(hashes) peer.AsyncSendPooledTransactionHashes(hashes) } log.Debug("Distributed transactions", "plaintxs", len(txs)-blobTxs-largeTxs, "blobtxs", blobTxs, "largetxs", largeTxs, "bcastpeers", directPeers, "bcastcount", directCount, "annpeers", annPeers, "anncount", annCount) } // minedBroadcastLoop sends mined blocks to connected peers. 
func (h *handler) minedBroadcastLoop() { defer h.wg.Done() for obj := range h.minedBlockSub.Chan() { if ev, ok := obj.Data.(core.NewMinedBlockEvent); ok { h.BroadcastBlock(ev.Block, true) // First propagate block to peers h.BroadcastBlock(ev.Block, false) // Only then announce to the rest } } } // txBroadcastLoop announces new transactions to connected peers. func (h *handler) txBroadcastLoop() { defer h.wg.Done() for { select { case event := <-h.txsCh: h.BroadcastTransactions(event.Txs) case <-h.txsSub.Err(): return } } } // enableSyncedFeatures enables the post-sync functionalities when the initial // sync is finished. func (h *handler) enableSyncedFeatures() { // Mark the local node as synced. h.synced.Store(true) // If we were running snap sync and it finished, disable doing another // round on next sync cycle if h.snapSync.Load() { log.Info("Snap sync complete, auto disabling") h.snapSync.Store(false) } if h.chain.TrieDB().Scheme() == rawdb.PathScheme { h.chain.TrieDB().SetBufferSize(pathdb.DefaultBufferSize) } }
eth/handler.go
1
https://github.com/ethereum/go-ethereum/commit/d98d70f670297a4bfa86db1a67a9c024f7186f43
[ 0.003927571699023247, 0.0003479322185739875, 0.0001618677342776209, 0.0001709172793198377, 0.0005931009072810411 ]
{ "id": 1, "code_window": [ "\t\t// If snap-sync is requested, this flag is also required\n", "\t\tif cfg.SyncMode == downloader.SnapSync {\n", "\t\t\tlog.Info(\"Snap sync requested, enabling --snapshot\")\n", "\t\t} else {\n", "\t\t\tcfg.TrieCleanCache += cfg.SnapshotCache\n", "\t\t\tcfg.SnapshotCache = 0 // Disabled\n", "\t\t}\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tif !ctx.Bool(SnapshotFlag.Name) {\n", "\t\t\t\tlog.Warn(\"Snap sync requested, enabling --snapshot\")\n", "\t\t\t}\n", "\t\t\tif cfg.SnapshotCache == 0 {\n", "\t\t\t\tlog.Warn(\"Snap sync requested, resetting --cache.snapshot\")\n", "\t\t\t\tcfg.SnapshotCache = ctx.Int(CacheFlag.Name) * CacheSnapshotFlag.Value / 100\n", "\t\t\t}\n" ], "file_path": "cmd/utils/flags.go", "type": "replace", "edit_start_line_idx": 1003 }
// Copyright 2015 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The go-ethereum library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. package rpc import ( "bufio" "bytes" "io" "net" "os" "path/filepath" "strings" "testing" "time" ) func TestServerRegisterName(t *testing.T) { server := NewServer() service := new(testService) svcName := "test" if err := server.RegisterName(svcName, service); err != nil { t.Fatalf("%v", err) } if len(server.services.services) != 2 { t.Fatalf("Expected 2 service entries, got %d", len(server.services.services)) } svc, ok := server.services.services[svcName] if !ok { t.Fatalf("Expected service %s to be registered", svcName) } wantCallbacks := 14 if len(svc.callbacks) != wantCallbacks { t.Errorf("Expected %d callbacks for service 'service', got %d", wantCallbacks, len(svc.callbacks)) } } func TestServer(t *testing.T) { files, err := os.ReadDir("testdata") if err != nil { t.Fatal("where'd my testdata go?") } for _, f := range files { if f.IsDir() || strings.HasPrefix(f.Name(), ".") { continue } path := filepath.Join("testdata", f.Name()) name := strings.TrimSuffix(f.Name(), filepath.Ext(f.Name())) t.Run(name, func(t *testing.T) { runTestScript(t, path) }) } } func runTestScript(t *testing.T, file string) { server := newTestServer() server.SetBatchLimits(4, 100000) content, err := os.ReadFile(file) if err != nil { t.Fatal(err) } clientConn, serverConn := net.Pipe() defer clientConn.Close() go server.ServeCodec(NewCodec(serverConn), 0) readbuf := bufio.NewReader(clientConn) for _, line := range strings.Split(string(content), "\n") { line = strings.TrimSpace(line) switch { case len(line) == 0 || strings.HasPrefix(line, "//"): // skip comments, blank lines continue case strings.HasPrefix(line, "--> "): t.Log(line) // write to connection clientConn.SetWriteDeadline(time.Now().Add(5 * time.Second)) if _, err := io.WriteString(clientConn, line[4:]+"\n"); err != nil { t.Fatalf("write error: %v", err) } case strings.HasPrefix(line, "<-- "): t.Log(line) want := line[4:] // read line from connection and compare text clientConn.SetReadDeadline(time.Now().Add(5 * time.Second)) sent, err := readbuf.ReadString('\n') if err != nil { t.Fatalf("read error: %v", err) } sent = strings.TrimRight(sent, "\r\n") if sent != want { t.Errorf("wrong line from server\ngot: %s\nwant: %s", sent, want) } default: panic("invalid line in test script: " + line) } } } // This test checks that responses are delivered for very short-lived connections that // only carry a single request. 
func TestServerShortLivedConn(t *testing.T) { server := newTestServer() defer server.Stop() listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { t.Fatal("can't listen:", err) } defer listener.Close() go server.ServeListener(listener) var ( request = `{"jsonrpc":"2.0","id":1,"method":"rpc_modules"}` + "\n" wantResp = `{"jsonrpc":"2.0","id":1,"result":{"nftest":"1.0","rpc":"1.0","test":"1.0"}}` + "\n" deadline = time.Now().Add(10 * time.Second) ) for i := 0; i < 20; i++ { conn, err := net.Dial("tcp", listener.Addr().String()) if err != nil { t.Fatal("can't dial:", err) } conn.SetDeadline(deadline) // Write the request, then half-close the connection so the server stops reading. conn.Write([]byte(request)) conn.(*net.TCPConn).CloseWrite() // Now try to get the response. buf := make([]byte, 2000) n, err := conn.Read(buf) conn.Close() if err != nil { t.Fatal("read error:", err) } if !bytes.Equal(buf[:n], []byte(wantResp)) { t.Fatalf("wrong response: %s", buf[:n]) } } } func TestServerBatchResponseSizeLimit(t *testing.T) { server := newTestServer() defer server.Stop() server.SetBatchLimits(100, 60) var ( batch []BatchElem client = DialInProc(server) ) for i := 0; i < 5; i++ { batch = append(batch, BatchElem{ Method: "test_echo", Args: []any{"x", 1}, Result: new(echoResult), }) } if err := client.BatchCall(batch); err != nil { t.Fatal("error sending batch:", err) } for i := range batch { // We expect the first two queries to be ok, but after that the size limit takes effect. if i < 2 { if batch[i].Error != nil { t.Fatalf("batch elem %d has unexpected error: %v", i, batch[i].Error) } continue } // After two, we expect an error. re, ok := batch[i].Error.(Error) if !ok { t.Fatalf("batch elem %d has wrong error: %v", i, batch[i].Error) } wantedCode := errcodeResponseTooLarge if re.ErrorCode() != wantedCode { t.Errorf("batch elem %d wrong error code, have %d want %d", i, re.ErrorCode(), wantedCode) } } }
rpc/server_test.go
0
https://github.com/ethereum/go-ethereum/commit/d98d70f670297a4bfa86db1a67a9c024f7186f43
[ 0.0001785206695785746, 0.00017198505520354956, 0.0001651682541705668, 0.00017179307178594172, 0.000003205218263246934 ]
{ "id": 1, "code_window": [ "\t\t// If snap-sync is requested, this flag is also required\n", "\t\tif cfg.SyncMode == downloader.SnapSync {\n", "\t\t\tlog.Info(\"Snap sync requested, enabling --snapshot\")\n", "\t\t} else {\n", "\t\t\tcfg.TrieCleanCache += cfg.SnapshotCache\n", "\t\t\tcfg.SnapshotCache = 0 // Disabled\n", "\t\t}\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tif !ctx.Bool(SnapshotFlag.Name) {\n", "\t\t\t\tlog.Warn(\"Snap sync requested, enabling --snapshot\")\n", "\t\t\t}\n", "\t\t\tif cfg.SnapshotCache == 0 {\n", "\t\t\t\tlog.Warn(\"Snap sync requested, resetting --cache.snapshot\")\n", "\t\t\t\tcfg.SnapshotCache = ctx.Int(CacheFlag.Name) * CacheSnapshotFlag.Value / 100\n", "\t\t\t}\n" ], "file_path": "cmd/utils/flags.go", "type": "replace", "edit_start_line_idx": 1003 }
// Copyright 2019 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The go-ethereum library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. package build import ( "bufio" "crypto/sha256" "encoding/hex" "fmt" "io" "log" "net/http" "os" "path/filepath" "strings" ) // ChecksumDB keeps file checksums. type ChecksumDB struct { allChecksums []string } // MustLoadChecksums loads a file containing checksums. func MustLoadChecksums(file string) *ChecksumDB { content, err := os.ReadFile(file) if err != nil { log.Fatal("can't load checksum file: " + err.Error()) } return &ChecksumDB{strings.Split(string(content), "\n")} } // Verify checks whether the given file is valid according to the checksum database. func (db *ChecksumDB) Verify(path string) error { fd, err := os.Open(path) if err != nil { return err } defer fd.Close() h := sha256.New() if _, err := io.Copy(h, bufio.NewReader(fd)); err != nil { return err } fileHash := hex.EncodeToString(h.Sum(nil)) if !db.findHash(filepath.Base(path), fileHash) { return fmt.Errorf("invalid file hash %s for %s", fileHash, filepath.Base(path)) } return nil } func (db *ChecksumDB) findHash(basename, hash string) bool { want := hash + " " + basename for _, line := range db.allChecksums { if strings.TrimSpace(line) == want { return true } } return false } // DownloadFile downloads a file and verifies its checksum. func (db *ChecksumDB) DownloadFile(url, dstPath string) error { if err := db.Verify(dstPath); err == nil { fmt.Printf("%s is up-to-date\n", dstPath) return nil } fmt.Printf("%s is stale\n", dstPath) fmt.Printf("downloading from %s\n", url) resp, err := http.Get(url) if err != nil { return fmt.Errorf("download error: %v", err) } else if resp.StatusCode != http.StatusOK { return fmt.Errorf("download error: status %d", resp.StatusCode) } defer resp.Body.Close() if err := os.MkdirAll(filepath.Dir(dstPath), 0755); err != nil { return err } fd, err := os.OpenFile(dstPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644) if err != nil { return err } dst := newDownloadWriter(fd, resp.ContentLength) _, err = io.Copy(dst, resp.Body) dst.Close() if err != nil { return err } return db.Verify(dstPath) } type downloadWriter struct { file *os.File dstBuf *bufio.Writer size int64 written int64 lastpct int64 } func newDownloadWriter(dst *os.File, size int64) *downloadWriter { return &downloadWriter{ file: dst, dstBuf: bufio.NewWriter(dst), size: size, } } func (w *downloadWriter) Write(buf []byte) (int, error) { n, err := w.dstBuf.Write(buf) // Report progress. w.written += int64(n) pct := w.written * 10 / w.size * 10 if pct != w.lastpct { if w.lastpct != 0 { fmt.Print("...") } fmt.Print(pct, "%") w.lastpct = pct } return n, err } func (w *downloadWriter) Close() error { if w.lastpct > 0 { fmt.Println() // Finish the progress line. 
} flushErr := w.dstBuf.Flush() closeErr := w.file.Close() if flushErr != nil { return flushErr } return closeErr }
internal/build/download.go
0
https://github.com/ethereum/go-ethereum/commit/d98d70f670297a4bfa86db1a67a9c024f7186f43
[ 0.0021861358545720577, 0.00029960402753204107, 0.00016544581740163267, 0.0001717669510981068, 0.00048717675963416696 ]
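The row above embeds go-ethereum's checksum-verified downloader (internal/build/download.go). As a minimal sketch of how a build script might drive it, loading the database and calling DownloadFile gives verify-first, fetch-on-mismatch behaviour; the checksum file name, URL, and destination path below are placeholders rather than values from this commit, and internal/build is only importable from inside the go-ethereum module.

// Hypothetical driver for the ChecksumDB shown above; all literals are illustrative.
package main

import (
	"log"

	"github.com/ethereum/go-ethereum/internal/build"
)

func main() {
	csdb := build.MustLoadChecksums("build/checksums.txt") // placeholder checksum file

	url := "https://example.org/tools/tool-1.0.0.tar.gz" // placeholder URL
	dst := "build/cache/tool-1.0.0.tar.gz"               // placeholder destination

	// DownloadFile first runs Verify on any existing file at dst and only
	// re-downloads (and re-verifies) when the file's SHA-256 does not match
	// an entry for its base name in the checksum database.
	if err := csdb.DownloadFile(url, dst); err != nil {
		log.Fatal(err)
	}
}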
{ "id": 1, "code_window": [ "\t\t// If snap-sync is requested, this flag is also required\n", "\t\tif cfg.SyncMode == downloader.SnapSync {\n", "\t\t\tlog.Info(\"Snap sync requested, enabling --snapshot\")\n", "\t\t} else {\n", "\t\t\tcfg.TrieCleanCache += cfg.SnapshotCache\n", "\t\t\tcfg.SnapshotCache = 0 // Disabled\n", "\t\t}\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tif !ctx.Bool(SnapshotFlag.Name) {\n", "\t\t\t\tlog.Warn(\"Snap sync requested, enabling --snapshot\")\n", "\t\t\t}\n", "\t\t\tif cfg.SnapshotCache == 0 {\n", "\t\t\t\tlog.Warn(\"Snap sync requested, resetting --cache.snapshot\")\n", "\t\t\t\tcfg.SnapshotCache = ctx.Int(CacheFlag.Name) * CacheSnapshotFlag.Value / 100\n", "\t\t\t}\n" ], "file_path": "cmd/utils/flags.go", "type": "replace", "edit_start_line_idx": 1003 }
// Copyright 2018 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The go-ethereum library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. package simulations import ( "testing" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/rpc" ) // NoopService is the service that does not do anything // but implements node.Service interface. type NoopService struct { c map[enode.ID]chan struct{} } func NewNoopService(ackC map[enode.ID]chan struct{}) *NoopService { return &NoopService{ c: ackC, } } func (t *NoopService) Protocols() []p2p.Protocol { return []p2p.Protocol{ { Name: "noop", Version: 666, Length: 0, Run: func(peer *p2p.Peer, rw p2p.MsgReadWriter) error { if t.c != nil { t.c[peer.ID()] = make(chan struct{}) close(t.c[peer.ID()]) } rw.ReadMsg() return nil }, NodeInfo: func() interface{} { return struct{}{} }, PeerInfo: func(id enode.ID) interface{} { return struct{}{} }, Attributes: []enr.Entry{}, }, } } func (t *NoopService) APIs() []rpc.API { return []rpc.API{} } func (t *NoopService) Start() error { return nil } func (t *NoopService) Stop() error { return nil } func VerifyRing(t *testing.T, net *Network, ids []enode.ID) { t.Helper() n := len(ids) for i := 0; i < n; i++ { for j := i + 1; j < n; j++ { c := net.GetConn(ids[i], ids[j]) if i == j-1 || (i == 0 && j == n-1) { if c == nil { t.Errorf("nodes %v and %v are not connected, but they should be", i, j) } } else { if c != nil { t.Errorf("nodes %v and %v are connected, but they should not be", i, j) } } } } } func VerifyChain(t *testing.T, net *Network, ids []enode.ID) { t.Helper() n := len(ids) for i := 0; i < n; i++ { for j := i + 1; j < n; j++ { c := net.GetConn(ids[i], ids[j]) if i == j-1 { if c == nil { t.Errorf("nodes %v and %v are not connected, but they should be", i, j) } } else { if c != nil { t.Errorf("nodes %v and %v are connected, but they should not be", i, j) } } } } } func VerifyFull(t *testing.T, net *Network, ids []enode.ID) { t.Helper() n := len(ids) var connections int for i, lid := range ids { for _, rid := range ids[i+1:] { if net.GetConn(lid, rid) != nil { connections++ } } } want := n * (n - 1) / 2 if connections != want { t.Errorf("wrong number of connections, got: %v, want: %v", connections, want) } } func VerifyStar(t *testing.T, net *Network, ids []enode.ID, centerIndex int) { t.Helper() n := len(ids) for i := 0; i < n; i++ { for j := i + 1; j < n; j++ { c := net.GetConn(ids[i], ids[j]) if i == centerIndex || j == centerIndex { if c == nil { t.Errorf("nodes %v and %v are not connected, but they should be", i, j) } } else { if c != nil { t.Errorf("nodes %v and %v are connected, but they should not be", i, j) } } } } }
p2p/simulations/test.go
0
https://github.com/ethereum/go-ethereum/commit/d98d70f670297a4bfa86db1a67a9c024f7186f43
[ 0.00017858111823443323, 0.0001713029050733894, 0.00016222684644162655, 0.00017173434025608003, 0.000004785780220117886 ]
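The second row carries the p2p/simulations test helpers (NoopService plus the topology checks). A hedged sketch of how a topology assertion might look in that package follows; startRingNetwork is an assumed setup helper that is not part of the file above, and only VerifyRing, VerifyFull, and the edge-count reasoning come from it.

// Hypothetical test skeleton, written as if it lived in package simulations
// next to the helpers above (reusing that package's existing imports).
func TestRingTopology(t *testing.T) {
	// startRingNetwork is assumed: it would start n noop nodes and connect
	// node i to node i+1, closing the ring back to node 0.
	net, ids := startRingNetwork(t, 5)
	defer net.Shutdown()

	// VerifyRing (defined above) checks that exactly the ring edges exist:
	// node i to node i+1 for consecutive nodes, plus the closing edge
	// between node 0 and node n-1, and nothing else.
	VerifyRing(t, net, ids)

	// A full mesh would instead be asserted with VerifyFull, which expects
	// n*(n-1)/2 connections in total.
}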
{ "id": 0, "code_window": [ "\tcol, _ := s.Next(0)\n", "\treturn col\n", "}\n", "\n", "// TranslateColSet is used to translate a ColSet from one set of column IDs\n", "// to an equivalent set. This is relevant for set operations such as UNION,\n", "// INTERSECT and EXCEPT, and can be used to map a ColSet defined on the left\n", "// relation to an equivalent ColSet on the right relation (or between any two\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "// ToList converts the set to a ColList, in column ID order.\n", "func (s ColSet) ToList() ColList {\n", "\tres := make(ColList, 0, s.Len())\n", "\ts.ForEach(func(x ColumnID) {\n", "\t\tres = append(res, x)\n", "\t})\n", "\treturn res\n", "}\n", "\n" ], "file_path": "pkg/sql/opt/colset.go", "type": "add", "edit_start_line_idx": 101 }
// Copyright 2020 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package xform import ( "sort" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/opt/constraint" "github.com/cockroachdb/cockroach/pkg/sql/opt/invertedexpr" "github.com/cockroachdb/cockroach/pkg/sql/opt/invertedidx" "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/errors" ) // IsLocking returns true if the ScanPrivate is configured to use a row-level // locking mode. This can be the case either because the Scan is in the scope of // a SELECT .. FOR [KEY] UPDATE/SHARE clause or because the Scan was configured // as part of the row retrieval of a DELETE or UPDATE statement. func (c *CustomFuncs) IsLocking(scan *memo.ScanPrivate) bool { return scan.IsLocking() } // GeneratePartialIndexScans generates unconstrained index scans over all // non-inverted, partial indexes with predicates that are implied by the // filters. Partial indexes with predicates which cannot be proven to be implied // by the filters are disregarded. // // When a filter completely matches the predicate, the remaining filters are // simplified so that they do not include the filter. A redundant filter is // unnecessary to include in the remaining filters because a scan over the partial // index implicitly filters the results. // // For every partial index that is implied by the filters, a Scan will be // generated along with a combination of an IndexJoin and Selects. There are // three questions to consider which determine which operators are generated. // // 1. Does the index "cover" the columns needed? // 2. Are there any remaining filters to apply after the Scan? // 3. If there are remaining filters does the index cover the referenced // columns? // // If the index covers the columns needed, no IndexJoin is need. The two // possible generated expressions are either a lone Scan or a Scan wrapped in a // Select that applies any remaining filters. // // (Scan $scanDef) // // (Select (Scan $scanDef) $remainingFilters) // // If the index is not covering, then an IndexJoin is required to retrieve the // needed columns. Some or all of the remaining filters may be required to be // applied after the IndexJoin, because they reference columns not covered by // the index. Therefore, Selects can be constructed before, after, or both // before and after the IndexJoin depending on the columns referenced in the // remaining filters. // // If the index is not covering, then an IndexJoin is required to retrieve the // needed columns. Some of the remaining filters may be applied in a Select // before the IndexJoin, if all the columns referenced in the filter are covered // by the index. Some of the remaining filters may be applied in a Select after // the IndexJoin, if their columns are not covered. Therefore, Selects can be // constructed before, after, or both before and after the IndexJoin. 
// // (IndexJoin (Scan $scanDef) $indexJoinDef) // // (IndexJoin // (Select (Scan $scanDef) $remainingFilters) // $indexJoinDef // ) // // (Select // (IndexJoin (Scan $scanDef) $indexJoinDef) // $outerFilter // ) // // (Select // (IndexJoin // (Select (Scan $scanDef) $innerFilter) // $indexJoinDef // ) // $outerFilter // ) // func (c *CustomFuncs) GeneratePartialIndexScans( grp memo.RelExpr, scanPrivate *memo.ScanPrivate, filters memo.FiltersExpr, ) { // Iterate over all partial indexes. var iter scanIndexIter iter.Init(c.e.mem, &c.im, scanPrivate, filters, rejectNonPartialIndexes|rejectInvertedIndexes) iter.ForEach(func(index cat.Index, remainingFilters memo.FiltersExpr, indexCols opt.ColSet, isCovering bool) { var sb indexScanBuilder sb.init(c, scanPrivate.Table) newScanPrivate := *scanPrivate newScanPrivate.Index = index.Ordinal() // If index is covering, just add a Select with the remaining filters, // if there are any. if isCovering { sb.setScan(&newScanPrivate) sb.addSelect(remainingFilters) sb.build(grp) return } // If the index is not covering, scan the needed index columns plus // primary key columns. newScanPrivate.Cols = indexCols.Intersection(scanPrivate.Cols) newScanPrivate.Cols.UnionWith(sb.primaryKeyCols()) sb.setScan(&newScanPrivate) // Add a Select with any remaining filters that can be filtered before // the IndexJoin. If there are no remaining filters this is a no-op. If // all or parts of the remaining filters cannot be applied until after // the IndexJoin, the new value of remainingFilters will contain those // filters. remainingFilters = sb.addSelectAfterSplit(remainingFilters, newScanPrivate.Cols) // Add an IndexJoin to retrieve the columns not provided by the Scan. sb.addIndexJoin(scanPrivate.Cols) // Add a Select with any remaining filters. sb.addSelect(remainingFilters) sb.build(grp) }) } // GenerateConstrainedScans enumerates all non-inverted secondary indexes on the // Scan operator's table and tries to push the given Select filter into new // constrained Scan operators using those indexes. Since this only needs to be // done once per table, GenerateConstrainedScans should only be called on the // original unaltered primary index Scan operator (i.e. not constrained or // limited). 
// // For each secondary index that "covers" the columns needed by the scan, there // are three cases: // // - a filter that can be completely converted to a constraint over that index // generates a single constrained Scan operator (to be added to the same // group as the original Select operator): // // (Scan $scanDef) // // - a filter that can be partially converted to a constraint over that index // generates a constrained Scan operator in a new memo group, wrapped in a // Select operator having the remaining filter (to be added to the same group // as the original Select operator): // // (Select (Scan $scanDef) $filter) // // - a filter that cannot be converted to a constraint generates nothing // // And for a secondary index that does not cover the needed columns: // // - a filter that can be completely converted to a constraint over that index // generates a single constrained Scan operator in a new memo group, wrapped // in an IndexJoin operator that looks up the remaining needed columns (and // is added to the same group as the original Select operator) // // (IndexJoin (Scan $scanDef) $indexJoinDef) // // - a filter that can be partially converted to a constraint over that index // generates a constrained Scan operator in a new memo group, wrapped in an // IndexJoin operator that looks up the remaining needed columns; the // remaining filter is distributed above and/or below the IndexJoin, // depending on which columns it references: // // (IndexJoin // (Select (Scan $scanDef) $filter) // $indexJoinDef // ) // // (Select // (IndexJoin (Scan $scanDef) $indexJoinDef) // $filter // ) // // (Select // (IndexJoin // (Select (Scan $scanDef) $innerFilter) // $indexJoinDef // ) // $outerFilter // ) // // GenerateConstrainedScans will further constrain the enumerated index scans // by trying to use the check constraints and computed columns that apply to the // table being scanned, as well as the partitioning defined for the index. See // comments above checkColumnFilters, computedColFilters, and // partitionValuesFilters for more detail. func (c *CustomFuncs) GenerateConstrainedScans( grp memo.RelExpr, scanPrivate *memo.ScanPrivate, explicitFilters memo.FiltersExpr, ) { var sb indexScanBuilder sb.init(c, scanPrivate.Table) // Generate implicit filters from constraints and computed columns as // optional filters to help constrain an index scan. optionalFilters := c.checkConstraintFilters(scanPrivate.Table) computedColFilters := c.computedColFilters(scanPrivate.Table, explicitFilters, optionalFilters) optionalFilters = append(optionalFilters, computedColFilters...) filterColumns := c.FilterOuterCols(explicitFilters) filterColumns.UnionWith(c.FilterOuterCols(optionalFilters)) // Iterate over all non-inverted indexes. md := c.e.mem.Metadata() tabMeta := md.TableMeta(scanPrivate.Table) var iter scanIndexIter iter.Init(c.e.mem, &c.im, scanPrivate, explicitFilters, rejectInvertedIndexes) iter.ForEach(func(index cat.Index, filters memo.FiltersExpr, indexCols opt.ColSet, isCovering bool) { // We only consider the partition values when a particular index can otherwise // not be constrained. For indexes that are constrained, the partitioned values // add no benefit as they don't really constrain anything. // Furthermore, if the filters don't take advantage of the index (use any of the // index columns), using the partition values add no benefit. 
// // If the index is partitioned (by list), we generate two constraints and // union them: the "main" constraint and the "in-between" constraint.The // "main" constraint restricts the index to the known partition ranges. The // "in-between" constraint restricts the index to the rest of the ranges // (i.e. everything that falls in-between the main ranges); the in-between // constraint is necessary for correctness (there can be rows outside of the // partitioned ranges). // // For both constraints, the partition-related filters are passed as // "optional" which guarantees that they return no remaining filters. This // allows us to merge the remaining filters from both constraints. // // Consider the following index and its partition: // // CREATE INDEX orders_by_seq_num // ON orders (region, seq_num, id) // STORING (total) // PARTITION BY LIST (region) // ( // PARTITION us_east1 VALUES IN ('us-east1'), // PARTITION us_west1 VALUES IN ('us-west1'), // PARTITION europe_west2 VALUES IN ('europe-west2') // ) // // The constraint generated for the query: // SELECT sum(total) FROM orders WHERE seq_num >= 100 AND seq_num < 200 // is: // [/'europe-west2'/100 - /'europe-west2'/199] // [/'us-east1'/100 - /'us-east1'/199] // [/'us-west1'/100 - /'us-west1'/199] // // The spans before europe-west2, after us-west1 and in between the defined // partitions are missing. We must add these spans now, appropriately // constrained using the filters. // // It is important that we add these spans after the partition spans are generated // because otherwise these spans would merge with the partition spans and would // disallow the partition spans (and the in between ones) to be constrained further. // Using the partitioning example and the query above, if we added the in between // spans at the same time as the partitioned ones, we would end up with a span that // looked like: // [ - /'europe-west2'/99] // // Allowing the partition spans to be constrained further and then adding // the spans give us a more constrained index scan as shown below: // [ - /'europe-west2') // [/'europe-west2'/100 - /'europe-west2'/199] // [/e'europe-west2\x00'/100 - /'us-east1') // [/'us-east1'/100 - /'us-east1'/199] // [/e'us-east1\x00'/100 - /'us-west1') // [/'us-west1'/100 - /'us-west1'/199] // [/e'us-west1\x00'/100 - ] // // Notice how we 'skip' all the europe-west2 rows with seq_num < 100. // var partitionFilters, inBetweenFilters memo.FiltersExpr indexColumns := tabMeta.IndexKeyColumns(index.Ordinal()) firstIndexCol := scanPrivate.Table.IndexColumnID(index, 0) if !filterColumns.Contains(firstIndexCol) && indexColumns.Intersects(filterColumns) { // Calculate any partition filters if appropriate (see below). partitionFilters, inBetweenFilters = c.partitionValuesFilters(scanPrivate.Table, index) } // Check whether the filter (along with any partitioning filters) can constrain the index. 
constraint, remainingFilters, ok := c.tryConstrainIndex( filters, append(optionalFilters, partitionFilters...), scanPrivate.Table, index.Ordinal(), false, /* isInverted */ ) if !ok { return } if len(partitionFilters) > 0 { inBetweenConstraint, inBetweenRemainingFilters, ok := c.tryConstrainIndex( filters, append(optionalFilters, inBetweenFilters...), scanPrivate.Table, index.Ordinal(), false, /* isInverted */ ) if !ok { panic(errors.AssertionFailedf("in-between filters didn't yield a constraint")) } constraint.UnionWith(c.e.evalCtx, inBetweenConstraint) // Even though the partitioned constraints and the inBetween constraints // were consolidated, we must make sure their Union is as well. constraint.ConsolidateSpans(c.e.evalCtx) // Add all remaining filters that need to be present in the // inBetween spans. Some of the remaining filters are common // between them, so we must deduplicate them. remainingFilters = c.ConcatFilters(remainingFilters, inBetweenRemainingFilters) remainingFilters.Sort() remainingFilters.Deduplicate() } // Construct new constrained ScanPrivate. newScanPrivate := *scanPrivate newScanPrivate.Index = index.Ordinal() newScanPrivate.Constraint = constraint // Record whether we were able to use partitions to constrain the scan. newScanPrivate.PartitionConstrainedScan = (len(partitionFilters) > 0) // If the alternate index includes the set of needed columns, then construct // a new Scan operator using that index. if isCovering { sb.setScan(&newScanPrivate) // If there are remaining filters, then the constrained Scan operator // will be created in a new group, and a Select operator will be added // to the same group as the original operator. sb.addSelect(remainingFilters) sb.build(grp) return } // Otherwise, construct an IndexJoin operator that provides the columns // missing from the index. if scanPrivate.Flags.NoIndexJoin { return } // Scan whatever columns we need which are available from the index, plus // the PK columns. newScanPrivate.Cols = indexCols.Intersection(scanPrivate.Cols) newScanPrivate.Cols.UnionWith(sb.primaryKeyCols()) sb.setScan(&newScanPrivate) // If remaining filter exists, split it into one part that can be pushed // below the IndexJoin, and one part that needs to stay above. remainingFilters = sb.addSelectAfterSplit(remainingFilters, newScanPrivate.Cols) sb.addIndexJoin(scanPrivate.Cols) sb.addSelect(remainingFilters) sb.build(grp) }) } // findConstantFilterCols adds to constFilterCols mappings from table column ID // to the constant value of that column. It does this by iterating over the // given lists of filters and finding expressions that constrain columns to a // single constant value. For example: // // x = 5 AND y = 'foo' // // This would add a mapping from x => 5 and y => 'foo', which constants can // then be used to prove that dependent computed columns are also constant. func (c *CustomFuncs) findConstantFilterCols( constFilterCols map[opt.ColumnID]opt.ScalarExpr, tabID opt.TableID, filters memo.FiltersExpr, ) { tab := c.e.mem.Metadata().Table(tabID) for i := range filters { // If filter constraints are not tight, then no way to derive constant // values. props := filters[i].ScalarProps() if !props.TightConstraints { continue } // Iterate over constraint conjuncts with a single column and single // span having a single key. 
for i, n := 0, props.Constraints.Length(); i < n; i++ { cons := props.Constraints.Constraint(i) if cons.Columns.Count() != 1 || cons.Spans.Count() != 1 { continue } // Skip columns with a data type that uses a composite key encoding. // Each of these data types can have multiple distinct values that // compare equal. For example, 0 == -0 for the FLOAT data type. It's // not safe to treat these as constant inputs to computed columns, // since the computed expression may differentiate between the // different forms of the same value. colID := cons.Columns.Get(0).ID() colTyp := tab.Column(tabID.ColumnOrdinal(colID)).DatumType() if colinfo.HasCompositeKeyEncoding(colTyp) { continue } span := cons.Spans.Get(0) if !span.HasSingleKey(c.e.evalCtx) { continue } datum := span.StartKey().Value(0) if datum != tree.DNull { constFilterCols[colID] = c.e.f.ConstructConstVal(datum, colTyp) } } } } // tryFoldComputedCol tries to reduce the computed column with the given column // ID into a constant value, by evaluating it with respect to a set of other // columns that are constant. If the computed column is constant, enter it into // the constCols map and return false. Otherwise, return false. func (c *CustomFuncs) tryFoldComputedCol( tabMeta *opt.TableMeta, computedColID opt.ColumnID, constCols map[opt.ColumnID]opt.ScalarExpr, ) bool { // Check whether computed column has already been folded. if _, ok := constCols[computedColID]; ok { return true } var replace func(e opt.Expr) opt.Expr replace = func(e opt.Expr) opt.Expr { if variable, ok := e.(*memo.VariableExpr); ok { // Can variable be folded? if constVal, ok := constCols[variable.Col]; ok { // Yes, so replace it with its constant value. return constVal } // No, but that may be because the variable refers to a dependent // computed column. In that case, try to recursively fold that // computed column. There are no infinite loops possible because the // dependency graph is guaranteed to be acyclic. if _, ok := tabMeta.ComputedCols[variable.Col]; ok { if c.tryFoldComputedCol(tabMeta, variable.Col, constCols) { return constCols[variable.Col] } } return e } return c.e.f.Replace(e, replace) } computedCol := tabMeta.ComputedCols[computedColID] replaced := replace(computedCol).(opt.ScalarExpr) // If the computed column is constant, enter it into the constCols map. if opt.IsConstValueOp(replaced) { constCols[computedColID] = replaced return true } return false } // inBetweenFilters returns a set of filters that are required to cover all the // in-between spans given a set of partition values. This is required for // correctness reasons; although values are unlikely to exist between defined // partitions, they may exist and so the constraints of the scan must incorporate // these spans. func (c *CustomFuncs) inBetweenFilters( tabID opt.TableID, index cat.Index, partitionValues []tree.Datums, ) memo.FiltersExpr { var inBetween memo.ScalarListExpr if len(partitionValues) == 0 { return memo.EmptyFiltersExpr } // Sort the partitionValues lexicographically. sort.Slice(partitionValues, func(i, j int) bool { return partitionValues[i].Compare(c.e.evalCtx, partitionValues[j]) < 0 }) // Add the beginning span. beginExpr := c.columnComparison(tabID, index, partitionValues[0], -1) inBetween = append(inBetween, beginExpr) // Add the end span. endExpr := c.columnComparison(tabID, index, partitionValues[len(partitionValues)-1], 1) inBetween = append(inBetween, endExpr) // Add the in-between spans. 
for i := 1; i < len(partitionValues); i++ { lowerPartition := partitionValues[i-1] higherPartition := partitionValues[i] // The between spans will be greater than the lower partition but smaller // than the higher partition. var largerThanLower opt.ScalarExpr if c.isPrefixOf(lowerPartition, higherPartition) { // Since the lower partition is a prefix of the higher partition, the span // must begin with the values defined in the lower partition. Consider the // partitions ('us') and ('us', 'cali'). In this case the in-between span // should be [/'us - /'us'/'cali'). largerThanLower = c.columnComparison(tabID, index, lowerPartition, 0) } else { largerThanLower = c.columnComparison(tabID, index, lowerPartition, 1) } smallerThanHigher := c.columnComparison(tabID, index, higherPartition, -1) // Add the in-between span to the list of inBetween spans. betweenExpr := c.e.f.ConstructAnd(largerThanLower, smallerThanHigher) inBetween = append(inBetween, betweenExpr) } // Return an Or expression between all the expressions. return memo.FiltersExpr{c.e.f.ConstructFiltersItem(c.constructOr(inBetween))} } // constructOr constructs an expression that is an OR between all the // provided conditions func (c *CustomFuncs) constructOr(conditions memo.ScalarListExpr) opt.ScalarExpr { if len(conditions) == 0 { return c.e.f.ConstructFalse() } orExpr := conditions[0] for i := 1; i < len(conditions); i++ { orExpr = c.e.f.ConstructOr(conditions[i], orExpr) } return orExpr } // columnComparison returns a filter that compares the index columns to the // given values. The comp parameter can be -1, 0 or 1 to indicate whether the // comparison type of the filter should be a Lt, Eq or Gt. func (c *CustomFuncs) columnComparison( tabID opt.TableID, index cat.Index, values tree.Datums, comp int, ) opt.ScalarExpr { colTypes := make([]*types.T, len(values)) for i := range values { colTypes[i] = values[i].ResolvedType() } columnVariables := make(memo.ScalarListExpr, len(values)) scalarValues := make(memo.ScalarListExpr, len(values)) for i, val := range values { colID := tabID.IndexColumnID(index, i) columnVariables[i] = c.e.f.ConstructVariable(colID) scalarValues[i] = c.e.f.ConstructConstVal(val, val.ResolvedType()) } colsTuple := c.e.f.ConstructTuple(columnVariables, types.MakeTuple(colTypes)) valsTuple := c.e.f.ConstructTuple(scalarValues, types.MakeTuple(colTypes)) if comp == 0 { return c.e.f.ConstructEq(colsTuple, valsTuple) } else if comp > 0 { return c.e.f.ConstructGt(colsTuple, valsTuple) } return c.e.f.ConstructLt(colsTuple, valsTuple) } // inPartitionFilters returns a FiltersExpr that is required to cover // all the partition spans. For each partition defined, inPartitionFilters // will contain a FilterItem that restricts the index columns by // the partition values. Use inBetweenFilters to generate filters that // cover all the spans that the partitions don't cover. func (c *CustomFuncs) inPartitionFilters( tabID opt.TableID, index cat.Index, partitionValues []tree.Datums, ) memo.FiltersExpr { var partitions memo.ScalarListExpr // Sort the partition values so the most selective ones are first. sort.Slice(partitionValues, func(i, j int) bool { return len(partitionValues[i]) >= len(partitionValues[j]) }) // Construct all the partition filters. for i, partition := range partitionValues { // Only add this partition if a more selective partition hasn't // been defined on the same partition. 
partitionSeen := false for j, moreSelectivePartition := range partitionValues { if j >= i { break } // At this point we know whether the current partition was seen before. partitionSeen = c.isPrefixOf(partition, moreSelectivePartition) if partitionSeen { break } } // This partition is a prefix of a more selective partition and so, // will be taken care of by the in-between partitions. if partitionSeen { continue } // Get an expression that restricts the values of the index to the // partition values. inPartition := c.columnComparison(tabID, index, partition, 0) partitions = append(partitions, inPartition) } // Return an Or expression between all the expressions. return memo.FiltersExpr{c.e.f.ConstructFiltersItem(c.constructOr(partitions))} } // isPrefixOf returns whether pre is a prefix of other. func (c *CustomFuncs) isPrefixOf(pre []tree.Datum, other []tree.Datum) bool { if len(pre) > len(other) { // Pre can't be a prefix of other as it is larger. return false } for i := range pre { if pre[i].Compare(c.e.evalCtx, other[i]) != 0 { return false } } return true } // partitionValuesFilters constructs filters with the purpose of // constraining an index scan using the partition values similar to // the filters added from the check constraints (see // checkConstraintFilters). It returns two sets of filters, one to // create the partition spans, and one to create the spans for all // the in between ranges that are not part of any partitions. // // For example consider the following table and partitioned index: // // CREATE TABLE orders ( // region STRING NOT NULL, id INT8 NOT NULL, total DECIMAL NOT NULL, seq_num INT NOT NULL, // PRIMARY KEY (region, id) // ) // // CREATE INDEX orders_by_seq_num // ON orders (region, seq_num, id) // STORING (total) // PARTITION BY LIST (region) // ( // PARTITION us_east1 VALUES IN ('us-east1'), // PARTITION us_west1 VALUES IN ('us-west1'), // PARTITION europe_west2 VALUES IN ('europe-west2') // ) // // Now consider the following query: // SELECT sum(total) FROM orders WHERE seq_num >= 100 AND seq_num < 200 // // Normally, the index would not be utilized but because we know what the // partition values are for the prefix of the index, we can generate // filters that allow us to use the index (adding the appropriate in-between // filters to catch all the values that are not part of the partitions). // By doing so, we get the following plan: // scalar-group-by // ├── select // │ ├── scan orders@orders_by_seq_num // │ │ └── constraint: /1/4/2: [ - /'europe-west2') // │ │ [/'europe-west2'/100 - /'europe-west2'/199] // │ │ [/e'europe-west2\x00'/100 - /'us-east1') // │ │ [/'us-east1'/100 - /'us-east1'/199] // │ │ [/e'us-east1\x00'/100 - /'us-west1') // │ │ [/'us-west1'/100 - /'us-west1'/199] // │ │ [/e'us-west1\x00'/100 - ] // │ └── filters // │ └── (seq_num >= 100) AND (seq_num < 200) // └── aggregations // └── sum // └── variable: total // func (c *CustomFuncs) partitionValuesFilters( tabID opt.TableID, index cat.Index, ) (partitionFilter, inBetweenFilter memo.FiltersExpr) { // Find all the partition values partitionValues := index.PartitionByListPrefixes() if len(partitionValues) == 0 { return partitionFilter, inBetweenFilter } // Get the in partition expressions. inPartition := c.inPartitionFilters(tabID, index, partitionValues) // Get the in between expressions. 
inBetween := c.inBetweenFilters(tabID, index, partitionValues) return inPartition, inBetween } // GenerateInvertedIndexScans enumerates all inverted indexes on the Scan // operator's table and generates an alternate Scan operator for each inverted // index that can service the query. // // The resulting Scan operator is pre-constrained and requires an IndexJoin to // project columns other than the primary key columns. The reason it's pre- // constrained is that we cannot treat an inverted index in the same way as a // regular index, since it does not actually contain the indexed column. func (c *CustomFuncs) GenerateInvertedIndexScans( grp memo.RelExpr, scanPrivate *memo.ScanPrivate, filters memo.FiltersExpr, ) { var sb indexScanBuilder sb.init(c, scanPrivate.Table) // Generate implicit filters from constraints and computed columns as // optional filters to help constrain an index scan. optionalFilters := c.checkConstraintFilters(scanPrivate.Table) computedColFilters := c.computedColFilters(scanPrivate.Table, filters, optionalFilters) optionalFilters = append(optionalFilters, computedColFilters...) // Iterate over all inverted indexes. var iter scanIndexIter iter.Init(c.e.mem, &c.im, scanPrivate, filters, rejectNonInvertedIndexes) iter.ForEach(func(index cat.Index, filters memo.FiltersExpr, indexCols opt.ColSet, isCovering bool) { var spanExpr *invertedexpr.SpanExpression var pfState *invertedexpr.PreFiltererStateForInvertedFilterer var spansToRead invertedexpr.InvertedSpans var constraint *constraint.Constraint var filterOk, constraintOk bool // Check whether the filter can constrain the index. // TODO(rytaft): Unify these two cases so both return a spanExpr. spanExpr, constraint, remainingFilters, pfState, filterOk := invertedidx.TryFilterInvertedIndex( c.e.evalCtx, c.e.f, filters, optionalFilters, scanPrivate.Table, index, ) if filterOk { spansToRead = spanExpr.SpansToRead // Override the filters with remainingFilters. If the index is a // multi-column inverted index, the non-inverted prefix columns are // constrained by the constraint. It may be possible to reduce the // filters if the constraint fully describes some of // sub-expressions. The remainingFilters are the filters that are // not fully expressed by the constraint. // // Consider the example: // // CREATE TABLE t (a INT, b INT, g GEOMETRY, INVERTED INDEX (b, g)) // // SELECT * FROM t WHERE a = 1 AND b = 2 AND ST_Intersects(.., g) // // The constraint would constrain b to [/2 - /2], guaranteeing that // the inverted index scan would only produce rows where (b = 2). // Reapplying the (b = 2) filter after the scan would be // unnecessary, so the remainingFilters in this case would be // (a = 1 AND ST_Intersects(.., g)). filters = remainingFilters } else { constraint, filters, constraintOk = c.tryConstrainIndex( filters, nil, /* optionalFilters */ scanPrivate.Table, index.Ordinal(), true, /* isInverted */ ) if !constraintOk { return } } // Construct new ScanOpDef with the new index and constraint. newScanPrivate := *scanPrivate newScanPrivate.Index = index.Ordinal() newScanPrivate.Constraint = constraint newScanPrivate.InvertedConstraint = spansToRead // We will need an inverted filter above the scan if the spanExpr might // produce duplicate primary keys or requires at least one UNION or // INTERSECTION. In this case, we must scan both the primary key columns // and the inverted key column. 
needInvertedFilter := spanExpr != nil && (!spanExpr.Unique || spanExpr.Operator != invertedexpr.None) pkCols := sb.primaryKeyCols() newScanPrivate.Cols = pkCols.Copy() var invertedCol opt.ColumnID if needInvertedFilter { invertedCol = scanPrivate.Table.ColumnID(index.VirtualInvertedColumn().Ordinal()) newScanPrivate.Cols.Add(invertedCol) } // The Scan operator always goes in a new group, since it's always nested // underneath the IndexJoin. The IndexJoin may also go into its own group, // if there's a remaining filter above it. // TODO(mgartner): We don't always need to create an index join. The // index join will be removed by EliminateIndexJoinInsideProject, but // it'd be more efficient to not create the index join in the first // place. sb.setScan(&newScanPrivate) // Add an inverted filter if needed. if needInvertedFilter { sb.addInvertedFilter(spanExpr, pfState, invertedCol) } // If remaining filter exists, split it into one part that can be pushed // below the IndexJoin, and one part that needs to stay above. filters = sb.addSelectAfterSplit(filters, pkCols) sb.addIndexJoin(scanPrivate.Cols) sb.addSelect(filters) sb.build(grp) }) } // tryConstrainIndex tries to derive a constraint for the given index from the // specified filter. If a constraint is derived, it is returned along with any // filter remaining after extracting the constraint. If no constraint can be // derived, then tryConstrainIndex returns ok = false. func (c *CustomFuncs) tryConstrainIndex( requiredFilters, optionalFilters memo.FiltersExpr, tabID opt.TableID, indexOrd int, isInverted bool, ) (constraint *constraint.Constraint, remainingFilters memo.FiltersExpr, ok bool) { // Start with fast check to rule out indexes that cannot be constrained. if !isInverted && !c.canMaybeConstrainNonInvertedIndex(requiredFilters, tabID, indexOrd) && !c.canMaybeConstrainNonInvertedIndex(optionalFilters, tabID, indexOrd) { return nil, nil, false } ic := c.initIdxConstraintForIndex(requiredFilters, optionalFilters, tabID, indexOrd, isInverted) constraint = ic.Constraint() if constraint.IsUnconstrained() { return nil, nil, false } // Return 0 if no remaining filter. remaining := ic.RemainingFilters() // Make copy of constraint so that idxconstraint instance is not referenced. copy := *constraint return &copy, remaining, true } // allInvIndexConstraints tries to derive all constraints for the specified inverted // index that can be derived. If no constraint is derived, then it returns ok = false, // similar to tryConstrainIndex. func (c *CustomFuncs) allInvIndexConstraints( filters memo.FiltersExpr, tabID opt.TableID, indexOrd int, ) (constraints []*constraint.Constraint, ok bool) { ic := c.initIdxConstraintForIndex(filters, nil /* optionalFilters */, tabID, indexOrd, true /* isInverted */) constraints, err := ic.AllInvertedIndexConstraints() if err != nil { return nil, false } // As long as there was no error, AllInvertedIndexConstraints is guaranteed // to add at least one constraint to the slice. It will be set to // unconstrained if no constraints could be derived for this index. constraint := constraints[0] if constraint.IsUnconstrained() { return constraints, false } return constraints, true } // canMaybeConstrainNonInvertedIndex returns true if we should try to constrain // a given non-inverted index by the given filter. It returns false if it is // impossible for the filter can constrain the scan. // // If any of the three following statements are true, then it is // possible that the index can be constrained: // // 1. 
The filter references the first index column. // 2. The constraints are not tight (see props.Scalar.TightConstraints). // 3. Any of the filter's constraints start with the first index column. // func (c *CustomFuncs) canMaybeConstrainNonInvertedIndex( filters memo.FiltersExpr, tabID opt.TableID, indexOrd int, ) bool { md := c.e.mem.Metadata() index := md.Table(tabID).Index(indexOrd) for i := range filters { filterProps := filters[i].ScalarProps() // If the filter involves the first index column, then the index can // possibly be constrained. firstIndexCol := tabID.IndexColumnID(index, 0) if filterProps.OuterCols.Contains(firstIndexCol) { return true } // If the constraints are not tight, then the index can possibly be // constrained, because index constraint generation supports more // expressions than filter constraint generation. if !filterProps.TightConstraints { return true } // If any constraint involves the first index column, then the index can // possibly be constrained. cset := filterProps.Constraints for i := 0; i < cset.Length(); i++ { firstCol := cset.Constraint(i).Columns.Get(0).ID() if firstCol == firstIndexCol { return true } } } return false } // GenerateZigzagJoins generates zigzag joins for all pairs of indexes of the // Scan table which contain one of the constant columns in the FiltersExpr as // its prefix. // // Similar to the lookup join, if the selected index pair does not contain // all the columns in the output of the scan, we wrap the zigzag join // in another index join (implemented as a lookup join) on the primary index. // The index join is implemented with a lookup join since the index join does // not support arbitrary input sources that are not plain index scans. func (c *CustomFuncs) GenerateZigzagJoins( grp memo.RelExpr, scanPrivate *memo.ScanPrivate, filters memo.FiltersExpr, ) { tab := c.e.mem.Metadata().Table(scanPrivate.Table) // Short circuit unless zigzag joins are explicitly enabled. if !c.e.evalCtx.SessionData.ZigzagJoinEnabled { return } fixedCols := memo.ExtractConstColumns(filters, c.e.evalCtx) if fixedCols.Len() == 0 { // Zigzagging isn't helpful in the absence of fixed columns. return } // Zigzag joins aren't currently equipped to produce system columns, so // don't generate any if some system columns are requested. foundSystemCol := false scanPrivate.Cols.ForEach(func(colID opt.ColumnID) { if tab.Column(scanPrivate.Table.ColumnOrdinal(colID)).Kind() == cat.System { foundSystemCol = true } }) if foundSystemCol { return } // Iterate through indexes, looking for those prefixed with fixedEq cols. // Efficiently finding a set of indexes that make the most efficient zigzag // join, with no limit on the number of indexes selected, is an instance of // this NP-hard problem: // https://en.wikipedia.org/wiki/Maximum_coverage_problem // // A formal definition would be: Suppose we have a set of fixed columns F // (defined as fixedCols in the code above), and a set of indexes I. The // "fixed prefix" of every index, in this context, refers to the longest // prefix of each index's columns that is in F. In other words, we stop // adding to the prefix when we come across the first non-fixed column // in an index. // // We want to find at most k = 2 indexes from I (in the future k could be // >= 2 when the zigzag joiner supports 2+ index zigzag joins) that cover // the maximum number of columns in F. An index is defined to have covered // a column if that column is in the index's fixed prefix. 
// // Since only 2-way zigzag joins are currently supported, the naive // approach is bounded at n^2. For now, just do that - a quadratic // iteration through all indexes. // // TODO(itsbilal): Implement the greedy or weighted version of the // algorithm laid out here: // https://en.wikipedia.org/wiki/Maximum_coverage_problem // // TODO(mgartner): We should consider primary indexes when it has multiple // columns and only the first is being constrained. var iter scanIndexIter iter.Init(c.e.mem, &c.im, scanPrivate, filters, rejectPrimaryIndex|rejectInvertedIndexes) iter.ForEach(func(leftIndex cat.Index, outerFilters memo.FiltersExpr, leftCols opt.ColSet, _ bool) { leftFixed := c.indexConstrainedCols(leftIndex, scanPrivate.Table, fixedCols) // Short-circuit quickly if the first column in the index is not a fixed // column. if leftFixed.Len() == 0 { return } var iter2 scanIndexIter iter2.Init(c.e.mem, &c.im, scanPrivate, outerFilters, rejectPrimaryIndex|rejectInvertedIndexes) iter2.SetOriginalFilters(filters) iter2.ForEachStartingAfter(leftIndex.Ordinal(), func(rightIndex cat.Index, innerFilters memo.FiltersExpr, rightCols opt.ColSet, _ bool) { rightFixed := c.indexConstrainedCols(rightIndex, scanPrivate.Table, fixedCols) // If neither side contributes a fixed column not contributed by the // other, then there's no reason to zigzag on this pair of indexes. if leftFixed.SubsetOf(rightFixed) || rightFixed.SubsetOf(leftFixed) { return } // Columns that are in both indexes are, by definition, equal. eqCols := leftCols.Intersection(rightCols) eqCols.DifferenceWith(fixedCols) if eqCols.Len() == 0 { // A simple index join is more efficient in such cases. return } // If there are any equalities across the columns of the two indexes, // push them into the zigzag join spec. leftEq, rightEq := memo.ExtractJoinEqualityColumns( leftCols, rightCols, innerFilters, ) leftEqCols, rightEqCols := eqColsForZigzag( tab, scanPrivate.Table, leftIndex, rightIndex, fixedCols, leftEq, rightEq, ) if len(leftEqCols) == 0 || len(rightEqCols) == 0 { // One of the indexes is not sorted by any of the equality // columns, because the equality columns do not immediately // succeed the fixed columns. A zigzag join cannot be planned. return } // Confirm the primary key columns are in both leftEqCols and // rightEqCols. The conversion of a select with filters to a // zigzag join requires the primary key columns to be in the output // for output correctness; otherwise, we could be outputting more // results than there should be (due to an equality on a non-unique // non-required value). pkIndex := tab.Index(cat.PrimaryIndex) pkCols := make(opt.ColList, pkIndex.KeyColumnCount()) pkColsFound := true for i := range pkCols { pkCols[i] = scanPrivate.Table.IndexColumnID(pkIndex, i) if _, ok := leftEqCols.Find(pkCols[i]); !ok { pkColsFound = false break } if _, ok := rightEqCols.Find(pkCols[i]); !ok { pkColsFound = false break } } if !pkColsFound { return } leftFixedCols, leftVals, leftTypes := c.fixedColsForZigzag( leftIndex, scanPrivate.Table, innerFilters, ) rightFixedCols, rightVals, rightTypes := c.fixedColsForZigzag( rightIndex, scanPrivate.Table, innerFilters, ) // If the fixed cols have been reduced during partial index // implication, then a zigzag join cannot be planned. A single index // scan should be more efficient. 
if len(leftFixedCols) != leftFixed.Len() || len(rightFixedCols) != rightFixed.Len() { return } zigzagJoin := memo.ZigzagJoinExpr{ On: innerFilters, ZigzagJoinPrivate: memo.ZigzagJoinPrivate{ LeftTable: scanPrivate.Table, LeftIndex: leftIndex.Ordinal(), RightTable: scanPrivate.Table, RightIndex: rightIndex.Ordinal(), LeftEqCols: leftEqCols, RightEqCols: rightEqCols, LeftFixedCols: leftFixedCols, RightFixedCols: rightFixedCols, }, } leftTupleTyp := types.MakeTuple(leftTypes) rightTupleTyp := types.MakeTuple(rightTypes) zigzagJoin.FixedVals = memo.ScalarListExpr{ c.e.f.ConstructTuple(leftVals, leftTupleTyp), c.e.f.ConstructTuple(rightVals, rightTupleTyp), } zigzagJoin.On = memo.ExtractRemainingJoinFilters( innerFilters, zigzagJoin.LeftEqCols, zigzagJoin.RightEqCols, ) zigzagCols := leftCols.Copy() zigzagCols.UnionWith(rightCols) if scanPrivate.Cols.SubsetOf(zigzagCols) { // Case 1 (zigzagged indexes contain all requested columns). zigzagJoin.Cols = scanPrivate.Cols c.e.mem.AddZigzagJoinToGroup(&zigzagJoin, grp) return } if scanPrivate.Flags.NoIndexJoin { return } // Case 2 (wrap zigzag join in an index join). var indexJoin memo.LookupJoinExpr // Ensure the zigzag join returns pk columns. zigzagJoin.Cols = scanPrivate.Cols.Intersection(zigzagCols) for i := range pkCols { zigzagJoin.Cols.Add(pkCols[i]) } if c.FiltersBoundBy(zigzagJoin.On, zigzagCols) { // The ON condition refers only to the columns available in the zigzag // indices. indexJoin.On = memo.TrueFilter } else { // ON has some conditions that are bound by the columns in the index (at // the very least, the equality conditions we used for EqCols and FixedCols), // and some conditions that refer to other table columns. We can put // the former in the lower ZigzagJoin and the latter in the index join. conditions := zigzagJoin.On zigzagJoin.On = c.ExtractBoundConditions(conditions, zigzagCols) indexJoin.On = c.ExtractUnboundConditions(conditions, zigzagCols) } indexJoin.Input = c.e.f.ConstructZigzagJoin( zigzagJoin.On, &zigzagJoin.ZigzagJoinPrivate, ) indexJoin.JoinType = opt.InnerJoinOp indexJoin.Table = scanPrivate.Table indexJoin.Index = cat.PrimaryIndex indexJoin.KeyCols = pkCols indexJoin.Cols = scanPrivate.Cols indexJoin.LookupColsAreTableKey = true // Create the LookupJoin for the index join in the same group as the // original select. c.e.mem.AddLookupJoinToGroup(&indexJoin, grp) }) }) } // eqColsForZigzag is a helper function to generate eqCol lists for the zigzag // joiner. The zigzag joiner requires that the equality columns immediately // follow the fixed columns in the index. Fixed here refers to columns that // have been constrained to a constant value. // // There are two kinds of equality columns that this function takes care of: // columns that have the same ColumnID on both sides (i.e. the same column), // as well as columns that have been equated in some ON filter (i.e. they are // contained in leftEqCols and rightEqCols at the same index). // // This function iterates through all columns of the indexes in order, // skips past the fixed columns, and then generates however many eqCols // there are that meet the above criteria. // // Returns a list of column ordinals for each index. // // See the comment in pkg/sql/rowexec/zigzag_joiner.go for more details // on the role eqCols and fixed cols play in zigzag joins. 
func eqColsForZigzag( tab cat.Table, tabID opt.TableID, leftIndex cat.Index, rightIndex cat.Index, fixedCols opt.ColSet, leftEqCols opt.ColList, rightEqCols opt.ColList, ) (leftEqPrefix, rightEqPrefix opt.ColList) { leftEqPrefix = make(opt.ColList, 0, len(leftEqCols)) rightEqPrefix = make(opt.ColList, 0, len(rightEqCols)) // We can only zigzag on columns present in the key component of the index, // so use the LaxKeyColumnCount here because that's the longest prefix of the // columns in the index which is guaranteed to exist in the key component. // Using KeyColumnCount is invalid, because if we have a unique index with // nullable columns, the "key columns" include the primary key of the table, // which is only present in the key component if one of the other columns is // NULL. i, leftCnt := 0, leftIndex.LaxKeyColumnCount() j, rightCnt := 0, rightIndex.LaxKeyColumnCount() for ; i < leftCnt; i++ { colID := tabID.IndexColumnID(leftIndex, i) if !fixedCols.Contains(colID) { break } } for ; j < rightCnt; j++ { colID := tabID.IndexColumnID(rightIndex, j) if !fixedCols.Contains(colID) { break } } for i < leftCnt && j < rightCnt { leftColID := tabID.IndexColumnID(leftIndex, i) rightColID := tabID.IndexColumnID(rightIndex, j) i++ j++ if leftColID == rightColID { leftEqPrefix = append(leftEqPrefix, leftColID) rightEqPrefix = append(rightEqPrefix, rightColID) continue } leftIdx, leftOk := leftEqCols.Find(leftColID) rightIdx, rightOk := rightEqCols.Find(rightColID) // If both columns are at the same index in their respective // EqCols lists, they were equated in the filters. if leftOk && rightOk && leftIdx == rightIdx { leftEqPrefix = append(leftEqPrefix, leftColID) rightEqPrefix = append(rightEqPrefix, rightColID) continue } else { // We've reached the first non-equal column; the zigzag // joiner does not support non-contiguous/non-prefix equal // columns. break } } return leftEqPrefix, rightEqPrefix } // fixedColsForZigzag is a helper function to generate FixedCols lists for the // zigzag join expression. This function iterates through the columns of the // specified index in order until it comes across the first column ID that is // not constrained to a constant. func (c *CustomFuncs) fixedColsForZigzag( index cat.Index, tabID opt.TableID, filters memo.FiltersExpr, ) (fixedCols opt.ColList, vals memo.ScalarListExpr, typs []*types.T) { for i, cnt := 0, index.ColumnCount(); i < cnt; i++ { colID := tabID.IndexColumnID(index, i) val := memo.ExtractValueForConstColumn(filters, c.e.evalCtx, colID) if val == nil { break } if vals == nil { vals = make(memo.ScalarListExpr, 0, cnt-i) typs = make([]*types.T, 0, cnt-i) fixedCols = make(opt.ColList, 0, cnt-i) } dt := val.ResolvedType() vals = append(vals, c.e.f.ConstructConstVal(val, dt)) typs = append(typs, dt) fixedCols = append(fixedCols, colID) } return fixedCols, vals, typs } // indexConstrainedCols computes the set of columns in allFixedCols which form // a prefix of the key columns in idx. func (c *CustomFuncs) indexConstrainedCols( idx cat.Index, tab opt.TableID, allFixedCols opt.ColSet, ) opt.ColSet { var constrained opt.ColSet for i, n := 0, idx.ColumnCount(); i < n; i++ { col := tab.IndexColumnID(idx, i) if allFixedCols.Contains(col) { constrained.Add(col) } else { break } } return constrained } // GenerateInvertedIndexZigzagJoins generates zigzag joins for constraints on // inverted index. 
It looks for cases where one inverted index can satisfy // two constraints, and it produces zigzag joins with the same index on both // sides of the zigzag join for those cases, fixed on different constant values. func (c *CustomFuncs) GenerateInvertedIndexZigzagJoins( grp memo.RelExpr, scanPrivate *memo.ScanPrivate, filters memo.FiltersExpr, ) { // Short circuit unless zigzag joins are explicitly enabled. if !c.e.evalCtx.SessionData.ZigzagJoinEnabled { return } var sb indexScanBuilder sb.init(c, scanPrivate.Table) // Iterate over all inverted indexes. var iter scanIndexIter iter.Init(c.e.mem, &c.im, scanPrivate, filters, rejectNonInvertedIndexes) iter.ForEach(func(index cat.Index, filters memo.FiltersExpr, indexCols opt.ColSet, _ bool) { // See if there are two or more constraints that can be satisfied // by this inverted index. This is possible with inverted indexes as // opposed to secondary indexes, because one row in the primary index // can often correspond to multiple rows in an inverted index. This // function generates all constraints it can derive for this index; // not all of which might get used in this function. constraints, ok := c.allInvIndexConstraints( filters, scanPrivate.Table, index.Ordinal(), ) if !ok || len(constraints) < 2 { return } // In theory, we could explore zigzag joins on all constraint pairs. // However, in the absence of stats on inverted indexes, we will not // be able to distinguish more selective constraints from less // selective ones anyway, so just pick the first two constraints. // // TODO(itsbilal): Use the remaining constraints to build a remaining // filters expression, instead of just reusing filters from the scan. constraint := constraints[0] constraint2 := constraints[1] minPrefix := constraint.ExactPrefix(c.e.evalCtx) if otherPrefix := constraint2.ExactPrefix(c.e.evalCtx); otherPrefix < minPrefix { minPrefix = otherPrefix } if minPrefix == 0 { return } zigzagJoin := memo.ZigzagJoinExpr{ On: filters, ZigzagJoinPrivate: memo.ZigzagJoinPrivate{ LeftTable: scanPrivate.Table, LeftIndex: index.Ordinal(), RightTable: scanPrivate.Table, RightIndex: index.Ordinal(), }, } // Get constant values from each constraint. Add them to FixedVals as // tuples, with associated Column IDs in both {Left,Right}FixedCols. leftVals := make(memo.ScalarListExpr, minPrefix) leftTypes := make([]*types.T, minPrefix) rightVals := make(memo.ScalarListExpr, minPrefix) rightTypes := make([]*types.T, minPrefix) zigzagJoin.LeftFixedCols = make(opt.ColList, minPrefix) zigzagJoin.RightFixedCols = make(opt.ColList, minPrefix) for i := 0; i < minPrefix; i++ { leftVal := constraint.Spans.Get(0).StartKey().Value(i) rightVal := constraint2.Spans.Get(0).StartKey().Value(i) leftVals[i] = c.e.f.ConstructConstVal(leftVal, leftVal.ResolvedType()) leftTypes[i] = leftVal.ResolvedType() rightVals[i] = c.e.f.ConstructConstVal(rightVal, rightVal.ResolvedType()) rightTypes[i] = rightVal.ResolvedType() zigzagJoin.LeftFixedCols[i] = constraint.Columns.Get(i).ID() zigzagJoin.RightFixedCols[i] = constraint.Columns.Get(i).ID() } leftTupleTyp := types.MakeTuple(leftTypes) rightTupleTyp := types.MakeTuple(rightTypes) zigzagJoin.FixedVals = memo.ScalarListExpr{ c.e.f.ConstructTuple(leftVals, leftTupleTyp), c.e.f.ConstructTuple(rightVals, rightTupleTyp), } // Set equality columns - all remaining columns after the fixed prefix // need to be equal. 
eqColLen := index.ColumnCount() - minPrefix zigzagJoin.LeftEqCols = make(opt.ColList, eqColLen) zigzagJoin.RightEqCols = make(opt.ColList, eqColLen) for i := minPrefix; i < index.ColumnCount(); i++ { colID := scanPrivate.Table.IndexColumnID(index, i) zigzagJoin.LeftEqCols[i-minPrefix] = colID zigzagJoin.RightEqCols[i-minPrefix] = colID } zigzagJoin.On = filters // Don't output the first column (i.e. the inverted index's JSON key // col) from the zigzag join. It could contain partial values, so // presenting it in the output or checking ON conditions against // it makes little sense. zigzagCols := indexCols for i, cnt := 0, index.KeyColumnCount(); i < cnt; i++ { colID := scanPrivate.Table.IndexColumnID(index, i) zigzagCols.Remove(colID) } tab := c.e.mem.Metadata().Table(scanPrivate.Table) pkIndex := tab.Index(cat.PrimaryIndex) pkCols := make(opt.ColList, pkIndex.KeyColumnCount()) for i := range pkCols { pkCols[i] = scanPrivate.Table.IndexColumnID(pkIndex, i) // Ensure primary key columns are always retrieved from the zigzag // join. zigzagCols.Add(pkCols[i]) } // Case 1 (zigzagged indexes contain all requested columns). if scanPrivate.Cols.SubsetOf(zigzagCols) { zigzagJoin.Cols = scanPrivate.Cols c.e.mem.AddZigzagJoinToGroup(&zigzagJoin, grp) return } if scanPrivate.Flags.NoIndexJoin { return } // Case 2 (wrap zigzag join in an index join). var indexJoin memo.LookupJoinExpr // Ensure the zigzag join returns pk columns. zigzagJoin.Cols = scanPrivate.Cols.Intersection(zigzagCols) for i := range pkCols { zigzagJoin.Cols.Add(pkCols[i]) } if c.FiltersBoundBy(zigzagJoin.On, zigzagCols) { // The ON condition refers only to the columns available in the zigzag // indices. indexJoin.On = memo.TrueFilter } else { // ON has some conditions that are bound by the columns in the index (at // the very least, the equality conditions we used for EqCols and FixedCols), // and some conditions that refer to other table columns. We can put // the former in the lower ZigzagJoin and the latter in the index join. conditions := zigzagJoin.On zigzagJoin.On = c.ExtractBoundConditions(conditions, zigzagCols) indexJoin.On = c.ExtractUnboundConditions(conditions, zigzagCols) } indexJoin.Input = c.e.f.ConstructZigzagJoin( zigzagJoin.On, &zigzagJoin.ZigzagJoinPrivate, ) indexJoin.JoinType = opt.InnerJoinOp indexJoin.Table = scanPrivate.Table indexJoin.Index = cat.PrimaryIndex indexJoin.KeyCols = pkCols indexJoin.Cols = scanPrivate.Cols indexJoin.LookupColsAreTableKey = true // Create the LookupJoin for the index join in the same group as the // original select. c.e.mem.AddLookupJoinToGroup(&indexJoin, grp) }) } // ExprPair stores a left and right ScalarExpr. ExprPairForSplitDisjunction // returns ExprPair, which can be deconstructed later, to avoid extra // computation in determining the left and right expression groups. type ExprPair struct { left opt.ScalarExpr right opt.ScalarExpr itemToReplace *memo.FiltersItem } // ExprPairLeft returns the left ScalarExpr in an ExprPair. func (c *CustomFuncs) ExprPairLeft(ep ExprPair) opt.ScalarExpr { return ep.left } // ExprPairRight returns the right ScalarExpr in an ExprPair. func (c *CustomFuncs) ExprPairRight(ep ExprPair) opt.ScalarExpr { return ep.right } // ExprPairFiltersItemToReplace returns the original FiltersItem that the // ExprPair was generated from. This FiltersItem should be replaced by // ExprPairLeft and ExprPairRight in the newly generated filters in // SplitDisjunction(AddKey). 
func (c *CustomFuncs) ExprPairFiltersItemToReplace(ep ExprPair) *memo.FiltersItem { return ep.itemToReplace } // ExprPairSucceeded returns true if the ExprPair is not nil. func (c *CustomFuncs) ExprPairSucceeded(ep ExprPair) bool { return ep != ExprPair{} } // ExprPairForSplitDisjunction finds the first "interesting" ExprPair in the // filters and returns it. If an "interesting" ExprPair is not found, an empty // ExprPair is returned. // // For details on what makes an ExprPair "interesting", see // buildExprPairForSplitDisjunction. func (c *CustomFuncs) ExprPairForSplitDisjunction( sp *memo.ScanPrivate, filters memo.FiltersExpr, ) ExprPair { for i := range filters { if filters[i].Condition.Op() == opt.OrOp { ep := c.buildExprPairForSplitDisjunction(sp, &filters[i]) if (ep != ExprPair{}) { return ep } } } return ExprPair{} } // buildExprPairForSplitDisjunction groups disjuction sub-expressions into an // "interesting" ExprPair. // // An "interesting" ExprPair is one where: // // 1. The column sets of both expressions in the pair are not // equal. // 2. Two index scans can potentially be constrained by both expressions in // the pair. // // Consider the expression: // // u = 1 OR v = 2 // // If an index exists on u and another on v, an "interesting" ExprPair exists, // ("u = 1", "v = 1"). If both indexes do not exist, there is no "interesting" // ExprPair possible. // // Now consider the expression: // // u = 1 OR u = 2 // // There is no possible "interesting" ExprPair here because the left and right // sides of the disjunction share the same columns. // // buildExprPairForSplitDisjunction groups all sub-expressions adjacent to the // input's top-level OrExpr into left and right expression groups. These two // groups form the new filter expressions on the left and right side of the // generated UnionAll in SplitDisjunction(AddKey). // // All sub-expressions with the same columns as the left-most sub-expression // are grouped in the left group. All other sub-expressions are grouped in the // right group. // // buildExprPairForSplitDisjunction returns an empty ExprPair if all // sub-expressions have the same columns. It also returns an empty ExprPair if // either expression in the pair found is not likely to constrain an index // scan. See canMaybeConstrainIndexWithCols for details on how this is // determined. func (c *CustomFuncs) buildExprPairForSplitDisjunction( sp *memo.ScanPrivate, filter *memo.FiltersItem, ) ExprPair { var leftExprs memo.ScalarListExpr var rightExprs memo.ScalarListExpr var leftColSet opt.ColSet var rightColSet opt.ColSet // Traverse all adjacent OrExpr. var collect func(opt.ScalarExpr) collect = func(expr opt.ScalarExpr) { switch t := expr.(type) { case *memo.OrExpr: collect(t.Left) collect(t.Right) return } cols := c.OuterCols(expr) // Set the left-most non-Or expression as the left ColSet to match (or // not match) on. if leftColSet.Empty() { leftColSet = cols } // If the current expression ColSet matches leftColSet, add the expr to // the left group. Otherwise, add it to the right group. if leftColSet.Equals(cols) { leftExprs = append(leftExprs, expr) } else { rightColSet.UnionWith(cols) rightExprs = append(rightExprs, expr) } } collect(filter.Condition) // Return an empty pair if either of the groups is empty or if either the // left or right groups are unlikely to constrain an index scan. 
if len(leftExprs) == 0 || len(rightExprs) == 0 || !c.canMaybeConstrainIndexWithCols(sp, leftColSet) || !c.canMaybeConstrainIndexWithCols(sp, rightColSet) { return ExprPair{} } return ExprPair{ left: c.constructOr(leftExprs), right: c.constructOr(rightExprs), itemToReplace: filter, } } // canMaybeConstrainIndexWithCols returns true if any indexes on the // ScanPrivate's table could be constrained by cols. It is a fast check for // SplitDisjunction to avoid matching a large number of queries that won't // obviously be improved by the rule. // // canMaybeConstrainIndexWithCols checks for an intersection between the input // columns and an index's columns (both indexed columns and columns referenced // in a partial index predicate). An intersection between column sets implies // that cols could constrain a scan on that index. For example, the columns "a" // would constrain a scan on an index over columns "a, b", because the "a" is a // subset of the index columns. Likewise, the columns "a" and "b" would // constrain a scan on an index over column "a", because "a" and "b" are a // superset of the index columns. // // Notice that this function can return both false positives and false // negatives. As an example of a false negative, consider the following table // and query. // // CREATE TABLE t ( // k PRIMARY KEY, // a INT, // hash INT AS (a % 4) STORED, // INDEX hash (hash) // ) // // SELECT * FROM t WHERE a = 5 // // The expression "a = 5" can constrain a scan over the hash index: The columns // "hash" must be a constant value of 1 because it is dependent on column "a" // with a constant value of 5. However, canMaybeConstrainIndexWithCols will // return false in this case because "a" does not intersect with the index // column, "hash". func (c *CustomFuncs) canMaybeConstrainIndexWithCols( scanPrivate *memo.ScanPrivate, cols opt.ColSet, ) bool { md := c.e.mem.Metadata() tabMeta := md.TableMeta(scanPrivate.Table) // Iterate through all indexes of the table and return true if cols // intersect with the index's key columns. for i := 0; i < tabMeta.Table.IndexCount(); i++ { index := tabMeta.Table.Index(i) for j, n := 0, index.KeyColumnCount(); j < n; j++ { col := index.Column(j) ord := col.Ordinal() if col.Kind() == cat.VirtualInverted { ord = col.InvertedSourceColumnOrdinal() } if cols.Contains(tabMeta.MetaID.ColumnID(ord)) { return true } } // If a partial index's predicate references some of cols, it may be // possible to generate an unconstrained partial index scan, which may // lead to better query plans. if _, isPartialIndex := index.Predicate(); isPartialIndex { p, ok := tabMeta.PartialIndexPredicates[i] if !ok { // A partial index predicate expression was not built for the // partial index. See Builder.buildScan for details on when this // can occur. continue } pred := *p.(*memo.FiltersExpr) if pred.OuterCols().Intersects(cols) { return true } } } return false } // MapScanFilterCols returns a new FiltersExpr with all the src column IDs in // the input expression replaced with column IDs in dst. // // NOTE: Every ColumnID in src must map to the a ColumnID in dst with the same // relative position in the ColSets. 
For example, if src and dst are (1, 5, 6) // and (7, 12, 15), then the following mapping would be applied: // // 1 => 7 // 5 => 12 // 6 => 15 func (c *CustomFuncs) MapScanFilterCols( filters memo.FiltersExpr, src *memo.ScanPrivate, dst *memo.ScanPrivate, ) memo.FiltersExpr { return c.mapFilterCols(filters, src.Cols, dst.Cols) } // mapFilterCols returns a new FiltersExpr with all the src column IDs in // the input expression replaced with column IDs in dst. // // NOTE: Every ColumnID in src must map to a ColumnID in dst with the same // relative position in the ColSets. For example, if src and dst are (1, 5, 6) // and (7, 12, 15), then the following mapping would be applied: // // 1 => 7 // 5 => 12 // 6 => 15 func (c *CustomFuncs) mapFilterCols( filters memo.FiltersExpr, src, dst opt.ColSet, ) memo.FiltersExpr { if src.Len() != dst.Len() { panic(errors.AssertionFailedf( "src and dst must have the same number of columns, src: %v, dst: %v", src, dst, )) } // Map each column in src to a column in dst based on the relative position // of both the src and dst ColumnIDs in the ColSet. var colMap opt.ColMap dstCol, _ := dst.Next(0) for srcCol, ok := src.Next(0); ok; srcCol, ok = src.Next(srcCol + 1) { colMap.Set(int(srcCol), int(dstCol)) dstCol, _ = dst.Next(dstCol + 1) } newFilters := c.RemapCols(&filters, colMap).(*memo.FiltersExpr) return *newFilters } // MakeSetPrivateForSplitDisjunction constructs a new SetPrivate with column sets // from the left and right ScanPrivate. We use the same ColList for the // LeftCols and OutCols of the SetPrivate because we've used the original // ScanPrivate column IDs for the left ScanPrivate and those are safe to use as // output column IDs of the Union expression. func (c *CustomFuncs) MakeSetPrivateForSplitDisjunction( left, right *memo.ScanPrivate, ) *memo.SetPrivate { leftAndOutCols := opt.ColSetToList(left.Cols) return &memo.SetPrivate{ LeftCols: leftAndOutCols, RightCols: opt.ColSetToList(right.Cols), OutCols: leftAndOutCols, } } // AddPrimaryKeyColsToScanPrivate creates a new ScanPrivate that is the same as // the input ScanPrivate, but has primary keys added to the ColSet. func (c *CustomFuncs) AddPrimaryKeyColsToScanPrivate(sp *memo.ScanPrivate) *memo.ScanPrivate { keyCols := c.PrimaryKeyCols(sp.Table) return &memo.ScanPrivate{ Table: sp.Table, Cols: sp.Cols.Union(keyCols), Flags: sp.Flags, Locking: sp.Locking, } }
pkg/sql/opt/xform/select_funcs.go
1
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.1342889964580536, 0.002882967237383127, 0.0001624215510673821, 0.0002982948790304363, 0.01221084501594305 ]
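The mapFilterCols comment in the file above describes a purely positional remapping between two column ID sets (1 => 7, 5 => 12, 6 => 15 in its example). A minimal stand-alone sketch of that idea, using sorted int slices in place of opt.ColSet and a hypothetical remapByPosition helper (neither is part of the CockroachDB code):

package main

import "fmt"

// remapByPosition pairs the i-th ID in src with the i-th ID in dst, the same
// relative-position rule that mapFilterCols applies to opt.ColSet. Both
// slices are assumed to be sorted and of equal length.
func remapByPosition(src, dst []int) (map[int]int, error) {
	if len(src) != len(dst) {
		return nil, fmt.Errorf("src and dst must have the same number of columns")
	}
	m := make(map[int]int, len(src))
	for i := range src {
		m[src[i]] = dst[i]
	}
	return m, nil
}

func main() {
	m, err := remapByPosition([]int{1, 5, 6}, []int{7, 12, 15})
	if err != nil {
		panic(err)
	}
	fmt.Println(m) // map[1:7 5:12 6:15]
}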
{ "id": 0, "code_window": [ "\tcol, _ := s.Next(0)\n", "\treturn col\n", "}\n", "\n", "// TranslateColSet is used to translate a ColSet from one set of column IDs\n", "// to an equivalent set. This is relevant for set operations such as UNION,\n", "// INTERSECT and EXCEPT, and can be used to map a ColSet defined on the left\n", "// relation to an equivalent ColSet on the right relation (or between any two\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "// ToList converts the set to a ColList, in column ID order.\n", "func (s ColSet) ToList() ColList {\n", "\tres := make(ColList, 0, s.Len())\n", "\ts.ForEach(func(x ColumnID) {\n", "\t\tres = append(res, x)\n", "\t})\n", "\treturn res\n", "}\n", "\n" ], "file_path": "pkg/sql/opt/colset.go", "type": "add", "edit_start_line_idx": 101 }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package ts import ( "context" "fmt" "math" "unsafe" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/ts/tspb" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/mon" ) // Compute the size of various structures to use when tracking memory usage. var ( sizeOfTimeSeriesData = int64(unsafe.Sizeof(roachpb.InternalTimeSeriesData{})) sizeOfSample = int64(unsafe.Sizeof(roachpb.InternalTimeSeriesSample{})) sizeOfDataPoint = int64(unsafe.Sizeof(tspb.TimeSeriesDatapoint{})) sizeOfInt32 = int64(unsafe.Sizeof(int32(0))) sizeOfUint32 = int64(unsafe.Sizeof(uint32(0))) sizeOfFloat64 = int64(unsafe.Sizeof(float64(0))) sizeOfTimestamp = int64(unsafe.Sizeof(hlc.Timestamp{})) ) // QueryMemoryOptions represents the adjustable options of a QueryMemoryContext. type QueryMemoryOptions struct { // BudgetBytes is the maximum number of bytes that should be reserved by this // query at any one time. BudgetBytes int64 // EstimatedSources is an estimate of the number of distinct sources that this // query will encounter on disk. This is needed to better estimate how much // memory a query will actually consume. EstimatedSources int64 // InterpolationLimitNanos determines the maximum gap size for which missing // values will be interpolated. By making this limit explicit, we can put a // hard limit on the timespan that needs to be read from disk to satisfy // a query. InterpolationLimitNanos int64 // If true, memory will be computed assuming the columnar layout. Columnar bool } // QueryMemoryContext encapsulates the memory-related parameters of a time // series query. These same parameters are often repeated across numerous // queries. type QueryMemoryContext struct { workerMonitor *mon.BytesMonitor resultAccount *mon.BoundAccount QueryMemoryOptions } // MakeQueryMemoryContext constructs a new query memory context from the // given parameters. func MakeQueryMemoryContext( workerMonitor, resultMonitor *mon.BytesMonitor, opts QueryMemoryOptions, ) QueryMemoryContext { resultAccount := resultMonitor.MakeBoundAccount() return QueryMemoryContext{ workerMonitor: workerMonitor, resultAccount: &resultAccount, QueryMemoryOptions: opts, } } // Close closes any resources held by the queryMemoryContext. func (qmc QueryMemoryContext) Close(ctx context.Context) { if qmc.resultAccount != nil { qmc.resultAccount.Close(ctx) } } // overflowSafeMultiply64 is a check for signed integer multiplication taken // from https://github.com/JohnCGriffin/overflow/blob/master/overflow_impl.go func overflowSafeMultiply64(a, b int64) (int64, bool) { if a == 0 || b == 0 { return 0, true } c := a * b if (c < 0) == ((a < 0) != (b < 0)) { if c/b == a { return c, true } } return c, false } // GetMaxTimespan computes the longest timespan that can be safely queried while // remaining within the given memory budget. Inputs are the resolution of data // being queried, the budget, the estimated number of sources, and the // interpolation limit being used for the query. func (qmc QueryMemoryContext) GetMaxTimespan(r Resolution) (int64, error) { slabDuration := r.SlabDuration() // Compute the size of a slab. 
sizeOfSlab := qmc.computeSizeOfSlab(r) // InterpolationBuffer is the number of slabs outside of the query range // needed to satisfy the interpolation limit. Extra slabs may be queried // on both sides of the target range. interpolationBufferOneSide := int64(math.Ceil(float64(qmc.InterpolationLimitNanos) / float64(slabDuration))) interpolationBuffer := interpolationBufferOneSide * 2 // If the (interpolation buffer timespan - interpolation limit) is less than // half of a slab, then it is possible for one additional slab to be queried // that would not have otherwise been queried. This can occur when the queried // timespan does not start on an even slab boundary. if (interpolationBufferOneSide*slabDuration)-qmc.InterpolationLimitNanos < slabDuration/2 { interpolationBuffer++ } // The number of slabs that can be queried safely is perSeriesMem/sizeOfSlab, // less the interpolation buffer. perSourceMem := qmc.BudgetBytes / qmc.EstimatedSources numSlabs := perSourceMem/sizeOfSlab - interpolationBuffer if numSlabs <= 0 { return 0, fmt.Errorf("insufficient memory budget to attempt query") } maxDuration, valid := overflowSafeMultiply64(numSlabs, slabDuration) if valid { return maxDuration, nil } return math.MaxInt64, nil } // GetMaxRollupSlabs returns the maximum number of rows that should be processed // at one time when rolling up the given resolution. func (qmc QueryMemoryContext) GetMaxRollupSlabs(r Resolution) int64 { // Rollup computations only occur when columnar is true. return qmc.BudgetBytes / qmc.computeSizeOfSlab(r) } // computeSizeOfSlab returns the size of a completely full data slab for the supplied // data resolution. func (qmc QueryMemoryContext) computeSizeOfSlab(r Resolution) int64 { slabDuration := r.SlabDuration() var sizeOfSlab int64 if qmc.Columnar { // Contains an Offset (int32) and Last (float64) for each sample. sizeOfColumns := (sizeOfInt32 + sizeOfFloat64) if r.IsRollup() { // Five additional float64 (First, Min, Max, Sum, Variance) and one uint32 // (count) per sample sizeOfColumns += 5*sizeOfFloat64 + sizeOfUint32 } sizeOfSlab = sizeOfTimeSeriesData + (slabDuration/r.SampleDuration())*sizeOfColumns } else { // Contains a sample structure for each sample. sizeOfSlab = sizeOfTimeSeriesData + (slabDuration/r.SampleDuration())*sizeOfSample } return sizeOfSlab }
pkg/ts/memory.go
0
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.0005879339878447354, 0.00021383365674410015, 0.00016358851280529052, 0.00017322966596111655, 0.00009938827861333266 ]
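GetMaxTimespan in the file above only multiplies numSlabs by slabDuration through overflowSafeMultiply64, so an overflow degrades to math.MaxInt64 rather than a wrapped value. A quick stand-alone check of that helper (its body copied from the file above; the example inputs are arbitrary):

package main

import (
	"fmt"
	"math"
)

// overflowSafeMultiply64 reports whether a*b fits in an int64, alongside the
// (possibly wrapped) product. Copied from pkg/ts/memory.go above.
func overflowSafeMultiply64(a, b int64) (int64, bool) {
	if a == 0 || b == 0 {
		return 0, true
	}
	c := a * b
	if (c < 0) == ((a < 0) != (b < 0)) {
		if c/b == a {
			return c, true
		}
	}
	return c, false
}

func main() {
	fmt.Println(overflowSafeMultiply64(1_000, 3_600_000_000_000)) // 3600000000000000 true
	fmt.Println(overflowSafeMultiply64(math.MaxInt64, 2))         // wrapped product, false
}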
{ "id": 0, "code_window": [ "\tcol, _ := s.Next(0)\n", "\treturn col\n", "}\n", "\n", "// TranslateColSet is used to translate a ColSet from one set of column IDs\n", "// to an equivalent set. This is relevant for set operations such as UNION,\n", "// INTERSECT and EXCEPT, and can be used to map a ColSet defined on the left\n", "// relation to an equivalent ColSet on the right relation (or between any two\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "// ToList converts the set to a ColList, in column ID order.\n", "func (s ColSet) ToList() ColList {\n", "\tres := make(ColList, 0, s.Len())\n", "\ts.ForEach(func(x ColumnID) {\n", "\t\tres = append(res, x)\n", "\t})\n", "\treturn res\n", "}\n", "\n" ], "file_path": "pkg/sql/opt/colset.go", "type": "add", "edit_start_line_idx": 101 }
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: kv/kvserver/kvserverpb/lease_status.proto package kvserverpb import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" import livenesspb "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb" import roachpb "github.com/cockroachdb/cockroach/pkg/roachpb" import hlc "github.com/cockroachdb/cockroach/pkg/util/hlc" import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type LeaseState int32 const ( // ERROR indicates that the lease can't be used or acquired. LeaseState_ERROR LeaseState = 0 // VALID indicates that the lease can be used. LeaseState_VALID LeaseState = 1 // STASIS indicates that the lease has not expired, but can't be // used because it is close to expiration (a stasis period at the // end of each lease is one of the ways we handle clock // uncertainty). A lease in STASIS may become VALID for the same // leaseholder after a successful RequestLease (for expiration-based // leases) or Heartbeat (for epoch-based leases). A lease may not // change hands while it is in stasis; would-be acquirers must wait // for the stasis period to expire. // // The point of the stasis period is to prevent reads on the old leaseholder // (the one whose stasis we're talking about) from missing to see writes // performed under the next lease (held by someone else) when these writes // should fall in the uncertainty window. Even without the stasis, writes // performed by the new leaseholder are guaranteed to have higher timestamps // than any reads served by the old leaseholder. However, a read at timestamp // T needs to observe all writes at timestamps [T, T+maxOffset] and so, // without the stasis, only the new leaseholder might have some of these // writes. In other words, without the stasis, a new leaseholder with a fast // clock could start performing writes ordered in real time before the old // leaseholder considers its lease to have expired. LeaseState_STASIS LeaseState = 2 // EXPIRED indicates that the lease can't be used. An expired lease // may become VALID for the same leaseholder on RequestLease or // Heartbeat, or it may be replaced by a new leaseholder with a // RequestLease (for expiration-based leases) or // IncrementEpoch+RequestLease (for epoch-based leases). LeaseState_EXPIRED LeaseState = 3 // PROSCRIBED indicates that the lease's proposed timestamp is // earlier than allowed. This is used to detect node restarts: a // node that has restarted will see its former incarnation's leases // as PROSCRIBED so it will renew them before using them. Note that // the PROSCRIBED state is only visible to the leaseholder; other // nodes will see this as a VALID lease. 
LeaseState_PROSCRIBED LeaseState = 4 ) var LeaseState_name = map[int32]string{ 0: "ERROR", 1: "VALID", 2: "STASIS", 3: "EXPIRED", 4: "PROSCRIBED", } var LeaseState_value = map[string]int32{ "ERROR": 0, "VALID": 1, "STASIS": 2, "EXPIRED": 3, "PROSCRIBED": 4, } func (x LeaseState) String() string { return proto.EnumName(LeaseState_name, int32(x)) } func (LeaseState) EnumDescriptor() ([]byte, []int) { return fileDescriptor_lease_status_019eb0d13224e9d6, []int{0} } // LeaseStatus holds the lease state, the timestamp at which the state // is accurate, the lease and optionally the liveness if the lease is // epoch-based. type LeaseStatus struct { // Lease which this status describes. Lease roachpb.Lease `protobuf:"bytes,1,opt,name=lease,proto3" json:"lease"` // Timestamp that the lease was evaluated at. Timestamp hlc.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp"` // State of the lease at timestamp. State LeaseState `protobuf:"varint,3,opt,name=state,proto3,enum=cockroach.kv.kvserver.storagepb.LeaseState" json:"state,omitempty"` // Liveness if this is an epoch-based lease. Liveness livenesspb.Liveness `protobuf:"bytes,4,opt,name=liveness,proto3" json:"liveness"` } func (m *LeaseStatus) Reset() { *m = LeaseStatus{} } func (m *LeaseStatus) String() string { return proto.CompactTextString(m) } func (*LeaseStatus) ProtoMessage() {} func (*LeaseStatus) Descriptor() ([]byte, []int) { return fileDescriptor_lease_status_019eb0d13224e9d6, []int{0} } func (m *LeaseStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *LeaseStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } func (dst *LeaseStatus) XXX_Merge(src proto.Message) { xxx_messageInfo_LeaseStatus.Merge(dst, src) } func (m *LeaseStatus) XXX_Size() int { return m.Size() } func (m *LeaseStatus) XXX_DiscardUnknown() { xxx_messageInfo_LeaseStatus.DiscardUnknown(m) } var xxx_messageInfo_LeaseStatus proto.InternalMessageInfo func init() { proto.RegisterType((*LeaseStatus)(nil), "cockroach.kv.kvserver.storagepb.LeaseStatus") proto.RegisterEnum("cockroach.kv.kvserver.storagepb.LeaseState", LeaseState_name, LeaseState_value) } func (m *LeaseStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *LeaseStatus) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0xa i++ i = encodeVarintLeaseStatus(dAtA, i, uint64(m.Lease.Size())) n1, err := m.Lease.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n1 dAtA[i] = 0x12 i++ i = encodeVarintLeaseStatus(dAtA, i, uint64(m.Timestamp.Size())) n2, err := m.Timestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n2 if m.State != 0 { dAtA[i] = 0x18 i++ i = encodeVarintLeaseStatus(dAtA, i, uint64(m.State)) } dAtA[i] = 0x22 i++ i = encodeVarintLeaseStatus(dAtA, i, uint64(m.Liveness.Size())) n3, err := m.Liveness.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n3 return i, nil } func encodeVarintLeaseStatus(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return offset + 1 } func (m *LeaseStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l l = m.Lease.Size() n += 1 + l + sovLeaseStatus(uint64(l)) l = m.Timestamp.Size() n += 1 + l + sovLeaseStatus(uint64(l)) if m.State != 0 { n += 1 + 
sovLeaseStatus(uint64(m.State)) } l = m.Liveness.Size() n += 1 + l + sovLeaseStatus(uint64(l)) return n } func sovLeaseStatus(x uint64) (n int) { for { n++ x >>= 7 if x == 0 { break } } return n } func sozLeaseStatus(x uint64) (n int) { return sovLeaseStatus(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (m *LeaseStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowLeaseStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: LeaseStatus: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: LeaseStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowLeaseStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthLeaseStatus } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Lease.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowLeaseStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthLeaseStatus } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) } m.State = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowLeaseStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.State |= (LeaseState(b) & 0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Liveness", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowLeaseStatus } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthLeaseStatus } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Liveness.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipLeaseStatus(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthLeaseStatus } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipLeaseStatus(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowLeaseStatus } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 
0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowLeaseStatus } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 { break } } return iNdEx, nil case 1: iNdEx += 8 return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowLeaseStatus } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } iNdEx += length if length < 0 { return 0, ErrInvalidLengthLeaseStatus } return iNdEx, nil case 3: for { var innerWire uint64 var start int = iNdEx for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowLeaseStatus } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } innerWireType := int(innerWire & 0x7) if innerWireType == 4 { break } next, err := skipLeaseStatus(dAtA[start:]) if err != nil { return 0, err } iNdEx = start + next } return iNdEx, nil case 4: return iNdEx, nil case 5: iNdEx += 4 return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } } panic("unreachable") } var ( ErrInvalidLengthLeaseStatus = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowLeaseStatus = fmt.Errorf("proto: integer overflow") ) func init() { proto.RegisterFile("kv/kvserver/kvserverpb/lease_status.proto", fileDescriptor_lease_status_019eb0d13224e9d6) } var fileDescriptor_lease_status_019eb0d13224e9d6 = []byte{ // 379 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xcf, 0x6a, 0xea, 0x40, 0x18, 0xc5, 0x33, 0xfe, 0xbb, 0xd7, 0x11, 0x24, 0x0c, 0x77, 0x11, 0x84, 0x3b, 0xca, 0x5d, 0x79, 0xff, 0x30, 0x01, 0xbd, 0x2f, 0x10, 0x6b, 0x16, 0xa1, 0x82, 0x32, 0xb1, 0xa5, 0x74, 0x53, 0x92, 0x74, 0x50, 0x49, 0x6c, 0x42, 0x66, 0xcc, 0x73, 0xf4, 0x11, 0xfa, 0x38, 0x2e, 0x5d, 0xba, 0x2a, 0x6d, 0x7c, 0x91, 0x92, 0x49, 0x4c, 0xdc, 0x94, 0xee, 0x0e, 0xf9, 0xce, 0xf9, 0xbe, 0x1f, 0x27, 0x03, 0x7f, 0xfb, 0x89, 0xee, 0x27, 0x9c, 0xc5, 0x09, 0x8b, 0x4b, 0x11, 0xb9, 0x7a, 0xc0, 0x1c, 0xce, 0x1e, 0xb8, 0x70, 0xc4, 0x8e, 0x93, 0x28, 0x0e, 0x45, 0x88, 0xfa, 0x5e, 0xe8, 0xf9, 0x71, 0xe8, 0x78, 0x6b, 0xe2, 0x27, 0xe4, 0xec, 0x25, 0x5c, 0x84, 0xb1, 0xb3, 0x62, 0x91, 0xdb, 0x43, 0x72, 0x18, 0xb9, 0xfa, 0xa3, 0x23, 0x9c, 0x3c, 0xd4, 0x23, 0x97, 0xfb, 0x83, 0x4d, 0xc2, 0x9e, 0x18, 0xe7, 0xa5, 0xc8, 0x0e, 0x15, 0xb2, 0xf0, 0x6b, 0x3b, 0xb1, 0x09, 0xf4, 0x75, 0xe0, 0xe9, 0x62, 0xb3, 0x65, 0x5c, 0x38, 0xdb, 0xa8, 0x98, 0xfc, 0x58, 0x85, 0xab, 0x50, 0x4a, 0x3d, 0x53, 0xf9, 0xd7, 0x5f, 0x2f, 0x35, 0xd8, 0x99, 0x65, 0xac, 0xb6, 0x44, 0x45, 0xff, 0x61, 0x53, 0xa2, 0x6b, 0x60, 0x00, 0x86, 0x9d, 0x91, 0x46, 0x2a, 0xe8, 0x82, 0x8e, 0x48, 0xfb, 0xa4, 0xb1, 0x7f, 0xed, 0x2b, 0x34, 0x37, 0x23, 0x03, 0xb6, 0xcb, 0x73, 0x5a, 0x4d, 0x26, 0x7f, 0x5e, 0x24, 0x33, 0x26, 0xb2, 0x0e, 0x3c, 0xb2, 0x3c, 0x9b, 0x8a, 0x78, 0x95, 0x42, 0x06, 0x6c, 0x66, 0x6d, 0x31, 0xad, 0x3e, 0x00, 0xc3, 0xee, 0xe8, 0x2f, 0xf9, 0xa2, 0x2d, 0x52, 0x52, 0x33, 0x9a, 0x27, 0xd1, 0x0d, 0xfc, 0x7e, 0x6e, 0x43, 0x6b, 0x48, 0x88, 0xf1, 0x27, 0x5b, 0xca, 0xd2, 0xaa, 0x22, 0xc9, 0xac, 0x90, 0x05, 0x5a, 0xb9, 0xea, 0xcf, 0x35, 0x84, 0xd5, 0x2d, 0xd4, 0x86, 0x4d, 0x93, 0xd2, 0x39, 0x55, 0x95, 0x4c, 0xde, 0x1a, 0x33, 0x6b, 0xaa, 0x02, 0x04, 0x61, 0xcb, 0x5e, 0x1a, 0xb6, 0x65, 0xab, 0x35, 0xd4, 0x81, 
0xdf, 0xcc, 0xbb, 0x85, 0x45, 0xcd, 0xa9, 0x5a, 0x47, 0x5d, 0x08, 0x17, 0x74, 0x6e, 0x5f, 0x51, 0x6b, 0x62, 0x4e, 0xd5, 0xc6, 0xe4, 0xdf, 0xfe, 0x1d, 0x2b, 0xfb, 0x14, 0x83, 0x43, 0x8a, 0xc1, 0x31, 0xc5, 0xe0, 0x2d, 0xc5, 0xe0, 0xf9, 0x84, 0x95, 0xc3, 0x09, 0x2b, 0xc7, 0x13, 0x56, 0xee, 0x61, 0xf5, 0x8a, 0xdc, 0x96, 0xfc, 0x49, 0xe3, 0x8f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x40, 0xf7, 0xda, 0xa1, 0x66, 0x02, 0x00, 0x00, }
pkg/kv/kvserver/kvserverpb/lease_status.pb.go
0
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.0015853740042075515, 0.00020215341646689922, 0.00016280826821457595, 0.00016862392658367753, 0.00019501548376865685 ]
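The generated Marshal/Size/Unmarshal functions above all route lengths and enum values through protobuf's base-128 varint encoding (encodeVarintLeaseStatus / sovLeaseStatus). The sketch below repeats that encoding loop outside the generated file; the value 300 is just the usual illustration (it encodes as 0xac 0x02):

package main

import "fmt"

// encodeVarint writes v into dAtA starting at offset using base-128 varint
// encoding and returns the next free offset. Same loop as
// encodeVarintLeaseStatus in the generated file above.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return offset + 1
}

func main() {
	buf := make([]byte, 10)
	n := encodeVarint(buf, 0, 300)
	fmt.Printf("% x\n", buf[:n]) // ac 02
}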
{ "id": 0, "code_window": [ "\tcol, _ := s.Next(0)\n", "\treturn col\n", "}\n", "\n", "// TranslateColSet is used to translate a ColSet from one set of column IDs\n", "// to an equivalent set. This is relevant for set operations such as UNION,\n", "// INTERSECT and EXCEPT, and can be used to map a ColSet defined on the left\n", "// relation to an equivalent ColSet on the right relation (or between any two\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "// ToList converts the set to a ColList, in column ID order.\n", "func (s ColSet) ToList() ColList {\n", "\tres := make(ColList, 0, s.Len())\n", "\ts.ForEach(func(x ColumnID) {\n", "\t\tres = append(res, x)\n", "\t})\n", "\treturn res\n", "}\n", "\n" ], "file_path": "pkg/sql/opt/colset.go", "type": "add", "edit_start_line_idx": 101 }
// Copyright 2016 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package storage import ( "encoding/json" "fmt" "io/ioutil" "os" "path/filepath" "github.com/cockroachdb/errors/oserror" ) type storageVersion int const ( versionNoFile storageVersion = iota versionBeta20160331 versionFileRegistry ) const ( versionFilename = "COCKROACHDB_VERSION" versionFilenameTemp = "COCKROACHDB_VERSION_TEMP" versionMinimum = versionNoFile versionCurrent = versionFileRegistry ) // Version stores all the version information for all stores and is used as // the format for the version file. type Version struct { Version storageVersion } // getVersionFilename returns the filename for the version file stored in the // data directory. func getVersionFilename(dir string) string { return filepath.Join(dir, versionFilename) } // getVersion returns the current on disk cockroach version from the version // file in the passed in directory. If there is no version file yet, it // returns 0. func getVersion(dir string) (storageVersion, error) { filename := getVersionFilename(dir) b, err := ioutil.ReadFile(filename) if err != nil { if oserror.IsNotExist(err) { return versionNoFile, nil } return 0, err } var ver Version if err := json.Unmarshal(b, &ver); err != nil { return 0, fmt.Errorf("version file %s is not formatted correctly; %s", filename, err) } return ver.Version, nil } // writeVersionFile overwrites the version file to contain the specified version. func writeVersionFile(dir string, ver storageVersion) error { tempFilename := filepath.Join(dir, versionFilenameTemp) filename := getVersionFilename(dir) b, err := json.Marshal(Version{ver}) if err != nil { return err } // First write to a temp file. if err := ioutil.WriteFile(tempFilename, b, 0644); err != nil { return err } // Atomically rename the file to overwrite the version file on disk. return os.Rename(tempFilename, filename) }
pkg/storage/version.go
0
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.000307803216855973, 0.00018779658421408385, 0.0001658370456425473, 0.0001735750847728923, 0.00004269962664693594 ]
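writeVersionFile above never truncates COCKROACHDB_VERSION in place: it writes the JSON to a temporary file and then renames it over the target, so a crash mid-write leaves either the old or the new file, never a partial one. A stand-alone sketch of that pattern, assuming a hypothetical writeFileAtomic helper and an arbitrary payload (the rename is atomic only when source and target live on the same filesystem):

package main

import (
	"io/ioutil"
	"os"
	"path/filepath"
)

// writeFileAtomic mirrors the write-temp-then-rename pattern of
// writeVersionFile: readers of the target file never observe a partial write.
func writeFileAtomic(dir, name string, b []byte) error {
	tmp := filepath.Join(dir, name+"_TEMP")
	if err := ioutil.WriteFile(tmp, b, 0644); err != nil {
		return err
	}
	return os.Rename(tmp, filepath.Join(dir, name))
}

func main() {
	dir, err := ioutil.TempDir("", "version-example")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	if err := writeFileAtomic(dir, "COCKROACHDB_VERSION", []byte(`{"Version":2}`)); err != nil {
		panic(err)
	}
}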
{ "id": 1, "code_window": [ "\t}\n", "\treturn true\n", "}\n", "\n", "// ColSetToList converts a column id set to a list, in column id order.\n", "func ColSetToList(set ColSet) ColList {\n", "\tres := make(ColList, 0, set.Len())\n", "\tset.ForEach(func(x ColumnID) {\n", "\t\tres = append(res, x)\n", "\t})\n", "\treturn res\n", "}\n", "\n", "// ColMap provides a 1:1 mapping from one column id to another. It is used by\n", "// operators that need to match columns from its inputs.\n", "type ColMap = util.FastIntMap\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/sql/opt/column_meta.go", "type": "replace", "edit_start_line_idx": 89 }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package memo import ( "bytes" "context" "fmt" "sort" "strings" "unicode" "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/opt/props" "github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/treeprinter" "github.com/cockroachdb/errors" ) // ScalarFmtInterceptor is a callback that can be set to a custom formatting // function. If the function returns a non-empty string, the normal formatting // code is bypassed. var ScalarFmtInterceptor func(f *ExprFmtCtx, expr opt.ScalarExpr) string // ExprFmtFlags controls which properties of the expression are shown in // formatted output. type ExprFmtFlags int const ( // ExprFmtShowAll shows all properties of the expression. ExprFmtShowAll ExprFmtFlags = 0 // ExprFmtHideMiscProps does not show outer columns, row cardinality, provided // orderings, side effects, or error text in the output. ExprFmtHideMiscProps ExprFmtFlags = 1 << (iota - 1) // ExprFmtHideConstraints does not show inferred constraints in the output. ExprFmtHideConstraints // ExprFmtHideFuncDeps does not show functional dependencies in the output. ExprFmtHideFuncDeps // ExprFmtHideRuleProps does not show rule-specific properties in the output. ExprFmtHideRuleProps // ExprFmtHideStats does not show statistics in the output. ExprFmtHideStats // ExprFmtHideCost does not show expression cost in the output. ExprFmtHideCost // ExprFmtHideQualifications removes the qualification from column labels // (except when a shortened name would be ambiguous). ExprFmtHideQualifications // ExprFmtHideScalars removes subtrees that contain only scalars and replaces // them with the SQL expression (if possible). ExprFmtHideScalars // ExprFmtHidePhysProps hides all required physical properties, except for // Presentation (see ExprFmtHideColumns). ExprFmtHidePhysProps // ExprFmtHideTypes hides type information from columns and scalar // expressions. ExprFmtHideTypes // ExprFmtHideNotNull hides the !null specifier from columns. ExprFmtHideNotNull // ExprFmtHideColumns removes column information. ExprFmtHideColumns // ExprFmtHideAll shows only the basic structure of the expression. // Note: this flag should be used judiciously, as its meaning changes whenever // we add more flags. ExprFmtHideAll ExprFmtFlags = (1 << iota) - 1 ) // HasFlags tests whether the given flags are all set. func (f ExprFmtFlags) HasFlags(subset ExprFmtFlags) bool { return f&subset == subset } // FormatExpr returns a string representation of the given expression, formatted // according to the specified flags. func FormatExpr(e opt.Expr, flags ExprFmtFlags, mem *Memo, catalog cat.Catalog) string { if catalog == nil { // Automatically hide qualifications if we have no catalog. flags |= ExprFmtHideQualifications } f := MakeExprFmtCtx(flags, mem, catalog) f.FormatExpr(e) return f.Buffer.String() } // ExprFmtCtx is passed as context to expression formatting functions, which // need to know the formatting flags and memo in order to format. 
In addition, // a reusable bytes buffer avoids unnecessary allocations. type ExprFmtCtx struct { Buffer *bytes.Buffer // Flags controls how the expression is formatted. Flags ExprFmtFlags // Memo must contain any expression that is formatted. Memo *Memo // Catalog must be set unless the ExprFmtHideQualifications flag is set. Catalog cat.Catalog // nameGen is used to generate a unique name for each relational // subexpression when Memo.saveTablesPrefix is non-empty. These names // correspond to the tables that would be saved if the query were run // with the session variable `save_tables_prefix` set to the same value. nameGen *ExprNameGenerator } // MakeExprFmtCtx creates an expression formatting context from a new buffer. func MakeExprFmtCtx(flags ExprFmtFlags, mem *Memo, catalog cat.Catalog) ExprFmtCtx { return MakeExprFmtCtxBuffer(&bytes.Buffer{}, flags, mem, catalog) } // MakeExprFmtCtxBuffer creates an expression formatting context from an // existing buffer. func MakeExprFmtCtxBuffer( buf *bytes.Buffer, flags ExprFmtFlags, mem *Memo, catalog cat.Catalog, ) ExprFmtCtx { var nameGen *ExprNameGenerator if mem != nil && mem.saveTablesPrefix != "" { nameGen = NewExprNameGenerator(mem.saveTablesPrefix) } return ExprFmtCtx{Buffer: buf, Flags: flags, Memo: mem, Catalog: catalog, nameGen: nameGen} } // HasFlags tests whether the given flags are all set. func (f *ExprFmtCtx) HasFlags(subset ExprFmtFlags) bool { return f.Flags.HasFlags(subset) } // FormatExpr constructs a treeprinter view of the given expression for testing // and debugging, according to the flags in this context. func (f *ExprFmtCtx) FormatExpr(e opt.Expr) { tp := treeprinter.New() f.formatExpr(e, tp) f.Buffer.Reset() f.Buffer.WriteString(tp.String()) } func (f *ExprFmtCtx) space() { f.Buffer.WriteByte(' ') } func (f *ExprFmtCtx) formatExpr(e opt.Expr, tp treeprinter.Node) { scalar, ok := e.(opt.ScalarExpr) if ok { f.formatScalar(scalar, tp) } else { f.formatRelational(e.(RelExpr), tp) } } func (f *ExprFmtCtx) formatRelational(e RelExpr, tp treeprinter.Node) { md := f.Memo.Metadata() relational := e.Relational() required := e.RequiredPhysical() if required == nil { // required can be nil before optimization has taken place. required = physical.MinRequired } // Special cases for merge-join and lookup-join: we want the type of the join // to show up first. 
f.Buffer.Reset() switch t := e.(type) { case *MergeJoinExpr: fmt.Fprintf(f.Buffer, "%v (merge)", t.JoinType) case *LookupJoinExpr: fmt.Fprintf(f.Buffer, "%v (lookup", t.JoinType) FormatPrivate(f, e.Private(), required) f.Buffer.WriteByte(')') case *InvertedJoinExpr: fmt.Fprintf(f.Buffer, "%v (inverted", t.JoinType) FormatPrivate(f, e.Private(), required) f.Buffer.WriteByte(')') case *ZigzagJoinExpr: fmt.Fprintf(f.Buffer, "%v (zigzag", opt.InnerJoinOp) FormatPrivate(f, e.Private(), required) f.Buffer.WriteByte(')') case *ScanExpr, *IndexJoinExpr, *ShowTraceForSessionExpr, *InsertExpr, *UpdateExpr, *UpsertExpr, *DeleteExpr, *SequenceSelectExpr, *WindowExpr, *OpaqueRelExpr, *OpaqueMutationExpr, *OpaqueDDLExpr, *AlterTableSplitExpr, *AlterTableUnsplitExpr, *AlterTableUnsplitAllExpr, *AlterTableRelocateExpr, *ControlJobsExpr, *CancelQueriesExpr, *CancelSessionsExpr, *CreateViewExpr, *ExportExpr: fmt.Fprintf(f.Buffer, "%v", e.Op()) FormatPrivate(f, e.Private(), required) case *SortExpr: if t.InputOrdering.Any() { fmt.Fprintf(f.Buffer, "%v", e.Op()) } else { fmt.Fprintf(f.Buffer, "%v (segmented)", e.Op()) } case *WithExpr: fmt.Fprintf(f.Buffer, "%v &%d", e.Op(), t.ID) if t.Name != "" { fmt.Fprintf(f.Buffer, " (%s)", t.Name) } case *WithScanExpr: fmt.Fprintf(f.Buffer, "%v &%d", e.Op(), t.With) if t.Name != "" { fmt.Fprintf(f.Buffer, " (%s)", t.Name) } default: fmt.Fprintf(f.Buffer, "%v", e.Op()) if opt.IsJoinNonApplyOp(t) { // All join ops that weren't handled above execute as a hash join. if leftEqCols, _ := ExtractJoinEqualityColumns( e.Child(0).(RelExpr).Relational().OutputCols, e.Child(1).(RelExpr).Relational().OutputCols, *e.Child(2).(*FiltersExpr), ); len(leftEqCols) == 0 { // The case where there are no equality columns is executed as a // degenerate case of hash join; let's be explicit about that. f.Buffer.WriteString(" (cross)") } else { f.Buffer.WriteString(" (hash)") } } } tp = tp.Child(f.Buffer.String()) if f.nameGen != nil { name := f.nameGen.GenerateName(e.Op()) tp.Childf("save-table-name: %s", name) } var colList opt.ColList // Special handling to improve the columns display for certain ops. switch t := e.(type) { case *ProjectExpr: // We want the synthesized column IDs to map 1-to-1 to the projections, // and the pass-through columns at the end. // Get the list of columns from the ProjectionsOp, which has the natural // order. for i := range t.Projections { colList = append(colList, t.Projections[i].Col) } // Add pass-through columns. t.Passthrough.ForEach(func(i opt.ColumnID) { colList = append(colList, i) }) case *ValuesExpr: colList = t.Cols case *UnionExpr, *IntersectExpr, *ExceptExpr, *UnionAllExpr, *IntersectAllExpr, *ExceptAllExpr: colList = e.Private().(*SetPrivate).OutCols default: // Fall back to writing output columns in column id order. colList = opt.ColSetToList(e.Relational().OutputCols) } f.formatColumns(e, tp, colList, required.Presentation) switch t := e.(type) { // Special-case handling for GroupBy private; print grouping columns // and internal ordering in addition to full set of columns. 
case *GroupByExpr, *ScalarGroupByExpr, *DistinctOnExpr, *EnsureDistinctOnExpr, *UpsertDistinctOnExpr, *EnsureUpsertDistinctOnExpr: private := e.Private().(*GroupingPrivate) if !f.HasFlags(ExprFmtHideColumns) && !private.GroupingCols.Empty() { f.formatColList(e, tp, "grouping columns:", opt.ColSetToList(private.GroupingCols)) } if !f.HasFlags(ExprFmtHidePhysProps) && !private.Ordering.Any() { tp.Childf("internal-ordering: %s", private.Ordering) } if !f.HasFlags(ExprFmtHideMiscProps) && private.ErrorOnDup != "" { tp.Childf("error: \"%s\"", private.ErrorOnDup) } case *LimitExpr: if !f.HasFlags(ExprFmtHidePhysProps) && !t.Ordering.Any() { tp.Childf("internal-ordering: %s", t.Ordering) } case *OffsetExpr: if !f.HasFlags(ExprFmtHidePhysProps) && !t.Ordering.Any() { tp.Childf("internal-ordering: %s", t.Ordering) } case *Max1RowExpr: if !f.HasFlags(ExprFmtHideMiscProps) { tp.Childf("error: \"%s\"", t.ErrorText) } // Special-case handling for set operators to show the left and right // input columns that correspond to the output columns. case *UnionExpr, *IntersectExpr, *ExceptExpr, *UnionAllExpr, *IntersectAllExpr, *ExceptAllExpr: if !f.HasFlags(ExprFmtHideColumns) { private := e.Private().(*SetPrivate) f.formatColList(e, tp, "left columns:", private.LeftCols) f.formatColList(e, tp, "right columns:", private.RightCols) } case *ScanExpr: if t.IsCanonical() { // For the canonical scan, show the expressions attached to the TableMeta. tab := md.TableMeta(t.Table) if tab.Constraints != nil { c := tp.Childf("check constraint expressions") for i := 0; i < tab.Constraints.ChildCount(); i++ { f.formatExpr(tab.Constraints.Child(i), c) } } if len(tab.ComputedCols) > 0 { c := tp.Childf("computed column expressions") cols := make(opt.ColList, 0, len(tab.ComputedCols)) for col := range tab.ComputedCols { cols = append(cols, col) } sort.Slice(cols, func(i, j int) bool { return cols[i] < cols[j] }) for _, col := range cols { f.Buffer.Reset() f.formatExpr(tab.ComputedCols[col], c.Child(f.ColumnString(col))) } } if tab.PartialIndexPredicates != nil { c := tp.Child("partial index predicates") indexOrds := make([]cat.IndexOrdinal, 0, len(tab.PartialIndexPredicates)) for ord := range tab.PartialIndexPredicates { indexOrds = append(indexOrds, ord) } sort.Ints(indexOrds) for _, ord := range indexOrds { name := string(tab.Table.Index(ord).Name()) f.Buffer.Reset() f.formatScalarWithLabel(name, tab.PartialIndexPredicates[ord], c) } } } if c := t.Constraint; c != nil { if c.IsContradiction() { tp.Childf("constraint: contradiction") } else if c.Spans.Count() == 1 { tp.Childf("constraint: %s: %s", c.Columns.String(), c.Spans.Get(0).String()) } else { n := tp.Childf("constraint: %s", c.Columns.String()) for i := 0; i < c.Spans.Count(); i++ { n.Child(c.Spans.Get(i).String()) } } } if ic := t.InvertedConstraint; ic != nil { idx := md.Table(t.Table).Index(t.Index) var b strings.Builder for i := idx.NonInvertedPrefixColumnCount(); i < idx.KeyColumnCount(); i++ { b.WriteRune('/') b.WriteString(fmt.Sprintf("%d", t.Table.ColumnID(idx.Column(i).Ordinal()))) } n := tp.Childf("inverted constraint: %s", b.String()) ic.Format(n, "spans") } if t.HardLimit.IsSet() { tp.Childf("limit: %s", t.HardLimit) } if !t.Flags.Empty() { if t.Flags.NoIndexJoin { tp.Childf("flags: no-index-join") } else if t.Flags.ForceIndex { idx := md.Table(t.Table).Index(t.Flags.Index) dir := "" switch t.Flags.Direction { case tree.DefaultDirection: case tree.Ascending: dir = ",fwd" case tree.Descending: dir = ",rev" } tp.Childf("flags: force-index=%s%s", idx.Name(), 
dir) } } if t.Locking != nil { strength := "" switch t.Locking.Strength { case tree.ForNone: case tree.ForKeyShare: strength = "for-key-share" case tree.ForShare: strength = "for-share" case tree.ForNoKeyUpdate: strength = "for-no-key-update" case tree.ForUpdate: strength = "for-update" default: panic(errors.AssertionFailedf("unexpected strength")) } wait := "" switch t.Locking.WaitPolicy { case tree.LockWaitBlock: case tree.LockWaitSkip: wait = ",skip-locked" case tree.LockWaitError: wait = ",nowait" default: panic(errors.AssertionFailedf("unexpected wait policy")) } tp.Childf("locking: %s%s", strength, wait) } case *InvertedFilterExpr: var b strings.Builder b.WriteRune('/') b.WriteString(fmt.Sprintf("%d", t.InvertedColumn)) n := tp.Childf("inverted expression: %s", b.String()) t.InvertedExpression.Format(n, false /* includeSpansToRead */) if t.PreFiltererState != nil { n := tp.Childf("pre-filterer expression") f.formatExpr(t.PreFiltererState.Expr, n) } case *LookupJoinExpr: if !t.Flags.Empty() { tp.Childf("flags: %s", t.Flags.String()) } if !f.HasFlags(ExprFmtHideColumns) { idxCols := make(opt.ColList, len(t.KeyCols)) idx := md.Table(t.Table).Index(t.Index) for i := range idxCols { idxCols[i] = t.Table.ColumnID(idx.Column(i).Ordinal()) } tp.Childf("key columns: %v = %v", t.KeyCols, idxCols) } if t.LookupColsAreTableKey { tp.Childf("lookup columns are key") } case *InvertedJoinExpr: if !t.Flags.Empty() { tp.Childf("flags: %s", t.Flags.String()) } if !f.HasFlags(ExprFmtHideColumns) && len(t.PrefixKeyCols) > 0 { idxCols := make(opt.ColList, len(t.PrefixKeyCols)) idx := md.Table(t.Table).Index(t.Index) for i := range idxCols { idxCols[i] = t.Table.ColumnID(idx.Column(i).Ordinal()) } tp.Childf("prefix key columns: %v = %v", t.PrefixKeyCols, idxCols) } n := tp.Child("inverted-expr") f.formatExpr(t.InvertedExpr, n) case *ZigzagJoinExpr: if !f.HasFlags(ExprFmtHideColumns) { tp.Childf("eq columns: %v = %v", t.LeftEqCols, t.RightEqCols) leftVals := make([]tree.Datum, len(t.LeftFixedCols)) rightVals := make([]tree.Datum, len(t.RightFixedCols)) // FixedVals is always going to be a ScalarListExpr, containing tuples, // containing one ScalarListExpr, containing ConstExprs. 
for i := range t.LeftFixedCols { leftVals[i] = ExtractConstDatum(t.FixedVals[0].Child(0).Child(i)) } for i := range t.RightFixedCols { rightVals[i] = ExtractConstDatum(t.FixedVals[1].Child(0).Child(i)) } tp.Childf("left fixed columns: %v = %v", t.LeftFixedCols, leftVals) tp.Childf("right fixed columns: %v = %v", t.RightFixedCols, rightVals) } case *MergeJoinExpr: if !t.Flags.Empty() { tp.Childf("flags: %s", t.Flags.String()) } if !f.HasFlags(ExprFmtHidePhysProps) { tp.Childf("left ordering: %s", t.LeftEq) tp.Childf("right ordering: %s", t.RightEq) } case *InsertExpr: if !f.HasFlags(ExprFmtHideColumns) { if len(colList) == 0 { tp.Child("columns: <none>") } f.formatArbiters(tp, t.Arbiters, t.Table) f.formatMutationCols(e, tp, "insert-mapping:", t.InsertCols, t.Table) f.formatColList(e, tp, "check columns:", t.CheckCols) f.formatColList(e, tp, "partial index put columns:", t.PartialIndexPutCols) f.formatMutationCommon(tp, &t.MutationPrivate) } case *UpdateExpr: if !f.HasFlags(ExprFmtHideColumns) { if len(colList) == 0 { tp.Child("columns: <none>") } f.formatColList(e, tp, "fetch columns:", t.FetchCols) f.formatMutationCols(e, tp, "update-mapping:", t.UpdateCols, t.Table) f.formatColList(e, tp, "check columns:", t.CheckCols) f.formatColList(e, tp, "partial index put columns:", t.PartialIndexPutCols) f.formatColList(e, tp, "partial index del columns:", t.PartialIndexDelCols) f.formatMutationCommon(tp, &t.MutationPrivate) } case *UpsertExpr: if !f.HasFlags(ExprFmtHideColumns) { if len(colList) == 0 { tp.Child("columns: <none>") } if t.CanaryCol != 0 { f.formatArbiters(tp, t.Arbiters, t.Table) f.formatColList(e, tp, "canary column:", opt.ColList{t.CanaryCol}) f.formatColList(e, tp, "fetch columns:", t.FetchCols) f.formatMutationCols(e, tp, "insert-mapping:", t.InsertCols, t.Table) f.formatMutationCols(e, tp, "update-mapping:", t.UpdateCols, t.Table) f.formatMutationCols(e, tp, "return-mapping:", t.ReturnCols, t.Table) } else { f.formatMutationCols(e, tp, "upsert-mapping:", t.InsertCols, t.Table) } f.formatColList(e, tp, "check columns:", t.CheckCols) f.formatColList(e, tp, "partial index put columns:", t.PartialIndexPutCols) f.formatColList(e, tp, "partial index del columns:", t.PartialIndexDelCols) f.formatMutationCommon(tp, &t.MutationPrivate) } case *DeleteExpr: if !f.HasFlags(ExprFmtHideColumns) { if len(colList) == 0 { tp.Child("columns: <none>") } f.formatColList(e, tp, "fetch columns:", t.FetchCols) f.formatColList(e, tp, "partial index del columns:", t.PartialIndexDelCols) f.formatMutationCommon(tp, &t.MutationPrivate) } case *WithExpr: if t.Mtr.Set { if t.Mtr.Materialize { tp.Child("materialized") } else { tp.Child("not-materialized") } } case *WithScanExpr: if !f.HasFlags(ExprFmtHideColumns) { child := tp.Child("mapping:") for i := range t.InCols { f.Buffer.Reset() f.space() f.formatCol("" /* label */, t.InCols[i], opt.ColSet{} /* notNullCols */) f.Buffer.WriteString(" => ") f.formatCol("" /* label */, t.OutCols[i], opt.ColSet{} /* notNullCols */) child.Child(f.Buffer.String()) } } case *CreateTableExpr: tp.Child(t.Syntax.String()) case *CreateViewExpr: tp.Child(t.ViewQuery) f.Buffer.Reset() f.Buffer.WriteString("columns:") for _, col := range t.Columns { f.space() f.formatCol(col.Alias, col.ID, opt.ColSet{} /* notNullCols */) } tp.Child(f.Buffer.String()) n := tp.Child("dependencies") for _, dep := range t.Deps { f.Buffer.Reset() name := dep.DataSource.Name() f.Buffer.WriteString(name.String()) if dep.SpecificIndex { fmt.Fprintf(f.Buffer, "@%s", 
dep.DataSource.(cat.Table).Index(dep.Index).Name()) } colNames, isTable := dep.GetColumnNames() if len(colNames) > 0 { fmt.Fprintf(f.Buffer, " [columns:") for _, colName := range colNames { fmt.Fprintf(f.Buffer, " %s", colName) } fmt.Fprintf(f.Buffer, "]") } else if isTable { fmt.Fprintf(f.Buffer, " [no columns]") } n.Child(f.Buffer.String()) } case *CreateStatisticsExpr: tp.Child(t.Syntax.String()) case *ExportExpr: tp.Childf("format: %s", t.FileFormat) case *ExplainExpr: // ExplainPlan is the default, don't show it. m := "" if t.Options.Mode != tree.ExplainPlan { m = strings.ToLower(t.Options.Mode.String()) } if t.Options.Flags[tree.ExplainFlagVerbose] { if m != "" { m += ", " } m += "verbose" } if m != "" { tp.Childf("mode: %s", m) } case *RecursiveCTEExpr: if !f.HasFlags(ExprFmtHideColumns) { tp.Childf("working table binding: &%d", t.WithID) f.formatColList(e, tp, "initial columns:", t.InitialCols) f.formatColList(e, tp, "recursive columns:", t.RecursiveCols) } default: if opt.IsJoinOp(t) { p := t.Private().(*JoinPrivate) if !p.Flags.Empty() { tp.Childf("flags: %s", p.Flags.String()) } } } if !f.HasFlags(ExprFmtHideMiscProps) { if !relational.OuterCols.Empty() { tp.Childf("outer: %s", relational.OuterCols.String()) } if relational.Cardinality != props.AnyCardinality { // Suppress cardinality for Scan ops if it's redundant with Limit field. if scan, ok := e.(*ScanExpr); !ok || !scan.HardLimit.IsSet() { tp.Childf("cardinality: %s", relational.Cardinality) } } if join, ok := e.(joinWithMultiplicity); ok { mult := join.getMultiplicity() if s := mult.Format(e.Op()); s != "" { tp.Childf("multiplicity: %s", s) } } f.Buffer.Reset() writeFlag := func(name string) { if f.Buffer.Len() != 0 { f.Buffer.WriteString(", ") } f.Buffer.WriteString(name) } if !relational.VolatilitySet.IsLeakProof() { writeFlag(relational.VolatilitySet.String()) } if relational.CanMutate { writeFlag("mutations") } if relational.HasPlaceholder { writeFlag("has-placeholder") } if f.Buffer.Len() != 0 { tp.Child(f.Buffer.String()) } } if !f.HasFlags(ExprFmtHideStats) { tp.Childf("stats: %s", &relational.Stats) } if !f.HasFlags(ExprFmtHideCost) { cost := e.Cost() if cost != 0 { tp.Childf("cost: %.9g", cost) } } // Format functional dependencies. if !f.HasFlags(ExprFmtHideFuncDeps) { // Show the key separately from the rest of the FDs. if key, ok := relational.FuncDeps.StrictKey(); ok { tp.Childf("key: %s", key) } else if key, ok := relational.FuncDeps.LaxKey(); ok { tp.Childf("lax-key: %s", key) } if fdStr := relational.FuncDeps.StringOnlyFDs(); fdStr != "" { tp.Childf("fd: %s", fdStr) } } if !f.HasFlags(ExprFmtHidePhysProps) { if !required.Ordering.Any() { if f.HasFlags(ExprFmtHideMiscProps) { tp.Childf("ordering: %s", required.Ordering.String()) } else { // Show the provided ordering as well, unless it's exactly the same. 
provided := e.ProvidedPhysical().Ordering reqStr := required.Ordering.String() provStr := provided.String() if provStr == reqStr { tp.Childf("ordering: %s", required.Ordering.String()) } else { tp.Childf("ordering: %s [actual: %s]", required.Ordering.String(), provided.String()) } } } if required.LimitHint != 0 { tp.Childf("limit hint: %.2f", required.LimitHint) } } if !f.HasFlags(ExprFmtHideRuleProps) { r := &relational.Rule if !r.PruneCols.Empty() { tp.Childf("prune: %s", r.PruneCols.String()) } if !r.RejectNullCols.Empty() { tp.Childf("reject-nulls: %s", r.RejectNullCols.String()) } if len(r.InterestingOrderings) > 0 { tp.Childf("interesting orderings: %s", r.InterestingOrderings.String()) } if !r.UnfilteredCols.Empty() { tp.Childf("unfiltered-cols: %s", r.UnfilteredCols.String()) } if withUses := relational.Shared.Rule.WithUses; len(withUses) > 0 { n := tp.Childf("cte-uses") ids := make([]opt.WithID, 0, len(withUses)) for id := range withUses { ids = append(ids, id) } sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] }) for _, id := range ids { info := withUses[id] n.Childf("&%d: count=%d used-columns=%s", id, info.Count, info.UsedCols) } } } switch t := e.(type) { case *CreateTableExpr: // Do not print dummy input expression if there was no AS clause. if !t.Syntax.As() { return } } for i, n := 0, e.ChildCount(); i < n; i++ { f.formatExpr(e.Child(i), tp) } } func (f *ExprFmtCtx) formatScalar(scalar opt.ScalarExpr, tp treeprinter.Node) { f.formatScalarWithLabel("", scalar, tp) } func (f *ExprFmtCtx) formatScalarWithLabel( label string, scalar opt.ScalarExpr, tp treeprinter.Node, ) { f.Buffer.Reset() if label != "" { f.Buffer.WriteString(label) f.Buffer.WriteString(": ") } switch scalar.Op() { case opt.ProjectionsOp, opt.AggregationsOp, opt.UniqueChecksOp, opt.FKChecksOp, opt.KVOptionsOp: // Omit empty lists (except filters). if scalar.ChildCount() == 0 { return } case opt.FiltersOp: // Show empty Filters expression as "filters (true)". if scalar.ChildCount() == 0 { f.Buffer.WriteString("filters (true)") tp.Child(f.Buffer.String()) return } case opt.IfErrOp: fmt.Fprintf(f.Buffer, "%v", scalar.Op()) f.FormatScalarProps(scalar) tp = tp.Child(f.Buffer.String()) f.formatExpr(scalar.Child(0), tp) if scalar.Child(1).ChildCount() > 0 { f.formatExpr(scalar.Child(1), tp.Child("else")) } if scalar.Child(2).ChildCount() > 0 { f.formatExpr(scalar.Child(2), tp.Child("err-code")) } return case opt.AggFilterOp: fmt.Fprintf(f.Buffer, "%v", scalar.Op()) f.FormatScalarProps(scalar) tp = tp.Child(f.Buffer.String()) f.formatExpr(scalar.Child(0), tp) f.formatExpr(scalar.Child(1), tp.Child("filter")) return case opt.ScalarListOp: // Don't show scalar-list as a separate node, as it's redundant with its // parent. for i, n := 0, scalar.ChildCount(); i < n; i++ { f.formatExpr(scalar.Child(i), tp) } return } // Omit various list items from the output, but show some of their properties // along with the properties of their child. 
var scalarProps []string switch scalar.Op() { case opt.FiltersItemOp, opt.ProjectionsItemOp, opt.AggregationsItemOp, opt.ZipItemOp, opt.WindowsItemOp: emitProp := func(format string, args ...interface{}) { scalarProps = append(scalarProps, fmt.Sprintf(format, args...)) } switch item := scalar.(type) { case *ProjectionsItem: if !f.HasFlags(ExprFmtHideColumns) { emitProp("as=%s", f.ColumnString(item.Col)) } case *AggregationsItem: if !f.HasFlags(ExprFmtHideColumns) { emitProp("as=%s", f.ColumnString(item.Col)) } case *ZipItem: // TODO(radu): show the item.Cols case *WindowsItem: if !f.HasFlags(ExprFmtHideColumns) { emitProp("as=%s", f.ColumnString(item.Col)) } // Only show the frame if it differs from the default. def := WindowFrame{ Mode: tree.RANGE, StartBoundType: tree.UnboundedPreceding, EndBoundType: tree.CurrentRow, FrameExclusion: tree.NoExclusion, } if item.Frame != def { emitProp("frame=%q", item.Frame.String()) } } scalarProps = append(scalarProps, f.scalarPropsStrings(scalar)...) scalar = scalar.Child(0).(opt.ScalarExpr) default: scalarProps = f.scalarPropsStrings(scalar) } var intercepted bool if f.HasFlags(ExprFmtHideScalars) && ScalarFmtInterceptor != nil { if str := ScalarFmtInterceptor(f, scalar); str != "" { f.Buffer.WriteString(str) intercepted = true } } if !intercepted { fmt.Fprintf(f.Buffer, "%v", scalar.Op()) f.formatScalarPrivate(scalar) } if len(scalarProps) != 0 { f.Buffer.WriteString(" [") f.Buffer.WriteString(strings.Join(scalarProps, ", ")) f.Buffer.WriteByte(']') } tp = tp.Child(f.Buffer.String()) if !intercepted { for i, n := 0, scalar.ChildCount(); i < n; i++ { f.formatExpr(scalar.Child(i), tp) } } } // scalarPropsStrings returns a slice of strings, each describing a property; // for example: // {"type=bool", "outer=(1)", "constraints=(/1: [/1 - /1]; tight)"} func (f *ExprFmtCtx) scalarPropsStrings(scalar opt.ScalarExpr) []string { typ := scalar.DataType() if typ == nil { if scalar.Op() == opt.UniqueChecksItemOp || scalar.Op() == opt.FKChecksItemOp || scalar.Op() == opt.KVOptionsItemOp { // These are not true scalars and have no properties. return nil } // Don't panic if scalar properties don't yet exist when printing // expression. 
return []string{"type=undefined"} } var res []string emitProp := func(format string, args ...interface{}) { res = append(res, fmt.Sprintf(format, args...)) } if !f.HasFlags(ExprFmtHideTypes) && typ.Family() != types.AnyFamily { emitProp("type=%s", typ) } if propsExpr, ok := scalar.(ScalarPropsExpr); ok { scalarProps := propsExpr.ScalarProps() if !f.HasFlags(ExprFmtHideMiscProps) { if !scalarProps.OuterCols.Empty() { emitProp("outer=%s", scalarProps.OuterCols) } if !scalarProps.VolatilitySet.IsLeakProof() { emitProp(scalarProps.VolatilitySet.String()) } if scalarProps.HasCorrelatedSubquery { emitProp("correlated-subquery") } else if scalarProps.HasSubquery { emitProp("subquery") } } if !f.HasFlags(ExprFmtHideConstraints) { if scalarProps.Constraints != nil && !scalarProps.Constraints.IsUnconstrained() { var tight string if scalarProps.TightConstraints { tight = "; tight" } emitProp("constraints=(%s%s)", scalarProps.Constraints, tight) } } if !f.HasFlags(ExprFmtHideFuncDeps) && !scalarProps.FuncDeps.Empty() { emitProp("fd=%s", scalarProps.FuncDeps) } } return res } // FormatScalarProps writes out a string representation of the scalar // properties (with a preceding space); for example: // " [type=bool, outer=(1), constraints=(/1: [/1 - /1]; tight)]" func (f *ExprFmtCtx) FormatScalarProps(scalar opt.ScalarExpr) { props := f.scalarPropsStrings(scalar) if len(props) != 0 { f.Buffer.WriteString(" [") f.Buffer.WriteString(strings.Join(props, ", ")) f.Buffer.WriteByte(']') } } func (f *ExprFmtCtx) formatScalarPrivate(scalar opt.ScalarExpr) { var private interface{} switch t := scalar.(type) { case *NullExpr, *TupleExpr, *CollateExpr: // Private is redundant with logical type property. private = nil case *AnyExpr: // We don't want to show the OriginalExpr; just show Cmp. private = t.Cmp case *ArrayFlattenExpr: if t.Input.Relational().OutputCols.Len() != 1 { fmt.Fprintf(f.Buffer, " col=%v", t.RequestedCol) } case *SubqueryExpr, *ExistsExpr: // We don't want to show the OriginalExpr. private = nil case *CastExpr: private = t.Typ.SQLString() case *KVOptionsItem: fmt.Fprintf(f.Buffer, " %s", t.Key) case *UniqueChecksItem: tab := f.Memo.metadata.TableMeta(t.Table) constraint := tab.Table.Unique(t.CheckOrdinal) fmt.Fprintf(f.Buffer, ": %s(", tab.Alias.ObjectName) for i := 0; i < constraint.ColumnCount(); i++ { if i > 0 { f.Buffer.WriteByte(',') } col := tab.Table.Column(constraint.ColumnOrdinal(tab.Table, i)) f.Buffer.WriteString(string(col.ColName())) } f.Buffer.WriteByte(')') case *FKChecksItem: origin := f.Memo.metadata.TableMeta(t.OriginTable) referenced := f.Memo.metadata.TableMeta(t.ReferencedTable) var fk cat.ForeignKeyConstraint if t.FKOutbound { fk = origin.Table.OutboundForeignKey(t.FKOrdinal) } else { fk = referenced.Table.InboundForeignKey(t.FKOrdinal) } // Print the FK as: // child(a,b) -> parent(a,b) // // TODO(radu): maybe flip these if we are deleting from the parent (i.e. // FKOutbound=false)? 
fmt.Fprintf(f.Buffer, ": %s(", origin.Alias.ObjectName) for i := 0; i < fk.ColumnCount(); i++ { if i > 0 { f.Buffer.WriteByte(',') } col := origin.Table.Column(fk.OriginColumnOrdinal(origin.Table, i)) f.Buffer.WriteString(string(col.ColName())) } fmt.Fprintf(f.Buffer, ") -> %s(", referenced.Alias.ObjectName) for i := 0; i < fk.ColumnCount(); i++ { if i > 0 { f.Buffer.WriteByte(',') } col := referenced.Table.Column(fk.ReferencedColumnOrdinal(referenced.Table, i)) f.Buffer.WriteString(string(col.ColName())) } f.Buffer.WriteByte(')') default: private = scalar.Private() } if private != nil { f.Buffer.WriteRune(':') FormatPrivate(f, private, &physical.Required{}) } } // formatIndex outputs the specified index into the context's buffer with the // format: // // table_alias@index_name // // If reverse is true, ",rev" is appended. // // If the index is a partial index, ",partial" is appended. // // If the table is aliased, " [as=alias]" is appended. func (f *ExprFmtCtx) formatIndex(tabID opt.TableID, idxOrd cat.IndexOrdinal, reverse bool) { md := f.Memo.Metadata() tabMeta := md.TableMeta(tabID) index := tabMeta.Table.Index(idxOrd) if idxOrd == cat.PrimaryIndex { // Don't output the index name if it's the primary index. fmt.Fprintf(f.Buffer, " %s", tableName(f, tabID)) } else { fmt.Fprintf(f.Buffer, " %s@%s", tableName(f, tabID), index.Name()) } if reverse { f.Buffer.WriteString(",rev") } if _, isPartial := index.Predicate(); isPartial { f.Buffer.WriteString(",partial") } alias := md.TableMeta(tabID).Alias.Table() if alias != string(tabMeta.Table.Name()) { fmt.Fprintf(f.Buffer, " [as=%s]", alias) } } // formatArbiters constructs a new treeprinter child containing the // specified list of arbiter indexes. func (f *ExprFmtCtx) formatArbiters( tp treeprinter.Node, arbiters cat.IndexOrdinals, tabID opt.TableID, ) { md := f.Memo.Metadata() tab := md.Table(tabID) if len(arbiters) > 0 { f.Buffer.Reset() f.Buffer.WriteString("arbiter indexes:") for _, idx := range arbiters { name := string(tab.Index(idx).Name()) f.space() f.Buffer.WriteString(name) } tp.Child(f.Buffer.String()) } } func (f *ExprFmtCtx) formatColumns( nd RelExpr, tp treeprinter.Node, cols opt.ColList, presentation physical.Presentation, ) { if f.HasFlags(ExprFmtHideColumns) { return } if presentation.Any() { f.formatColList(nd, tp, "columns:", cols) return } // When a particular column presentation is required of the expression, then // print columns using that information. Include information about columns // that are hidden by the presentation separately. hidden := cols.ToSet() notNullCols := nd.Relational().NotNullCols f.Buffer.Reset() f.Buffer.WriteString("columns:") for _, col := range presentation { hidden.Remove(col.ID) f.space() f.formatCol(col.Alias, col.ID, notNullCols) } if !hidden.Empty() { f.Buffer.WriteString(" [hidden:") for _, col := range cols { if hidden.Contains(col) { f.space() f.formatCol("" /* label */, col, notNullCols) } } f.Buffer.WriteString("]") } tp.Child(f.Buffer.String()) } // formatColList constructs a new treeprinter child containing the specified // list of columns formatted using the formatCol method. 
func (f *ExprFmtCtx) formatColList( nd RelExpr, tp treeprinter.Node, heading string, colList opt.ColList, ) { if len(colList) > 0 { notNullCols := nd.Relational().NotNullCols f.Buffer.Reset() f.Buffer.WriteString(heading) for _, col := range colList { if col != 0 { f.space() f.formatCol("" /* label */, col, notNullCols) } } tp.Child(f.Buffer.String()) } } // formatMutationCols adds a new treeprinter child for each non-zero column in the // given list. Each child shows how the column will be mutated, with the id of // the "before" and "after" columns, similar to this: // // a:1 => x:4 // func (f *ExprFmtCtx) formatMutationCols( nd RelExpr, tp treeprinter.Node, heading string, colList opt.ColList, tabID opt.TableID, ) { if len(colList) == 0 { return } tpChild := tp.Child(heading) for i, col := range colList { if col != 0 { tpChild.Child(fmt.Sprintf("%s => %s", f.ColumnString(col), f.ColumnString(tabID.ColumnID(i)))) } } } // formatMutationCommon shows the MutationPrivate fields that format the same // for all types of mutations. func (f *ExprFmtCtx) formatMutationCommon(tp treeprinter.Node, p *MutationPrivate) { if p.WithID != 0 { tp.Childf("input binding: &%d", p.WithID) } if len(p.FKCascades) > 0 { c := tp.Childf("cascades") for i := range p.FKCascades { c.Child(p.FKCascades[i].FKName) } } } // ColumnString returns the column in the same format as formatColSimple. func (f *ExprFmtCtx) ColumnString(id opt.ColumnID) string { var buf bytes.Buffer f.formatColSimpleToBuffer(&buf, "" /* label */, id) return buf.String() } // formatColSimple outputs the specified column into the context's buffer using the // following format: // label:id // // The :id part is omitted if the formatting flags include ExprFmtHideColumns. // // If a label is given, then it is used. Otherwise, a "best effort" label is // used from query metadata. func (f *ExprFmtCtx) formatColSimple(label string, id opt.ColumnID) { f.formatColSimpleToBuffer(f.Buffer, label, id) } func (f *ExprFmtCtx) formatColSimpleToBuffer(buf *bytes.Buffer, label string, id opt.ColumnID) { if label == "" { if f.Memo != nil { md := f.Memo.metadata fullyQualify := !f.HasFlags(ExprFmtHideQualifications) label = md.QualifiedAlias(id, fullyQualify, f.Catalog) } else { label = fmt.Sprintf("unknown%d", id) } } if !isSimpleColumnName(label) { // Add quotations around the column name if it is not composed of simple // ASCII characters. label = "\"" + label + "\"" } buf.WriteString(label) if !f.HasFlags(ExprFmtHideColumns) { buf.WriteByte(':') fmt.Fprintf(buf, "%d", id) } } // formatCol outputs the specified column into the context's buffer using the // following format: // label:id(type) // // If the column is not nullable, then this is the format: // label:id(type!null) // // Some of the components can be omitted depending on formatting flags. // // If a label is given, then it is used. Otherwise, a "best effort" label is // used from query metadata. 
func (f *ExprFmtCtx) formatCol(label string, id opt.ColumnID, notNullCols opt.ColSet) { f.formatColSimple(label, id) parenOpen := false if !f.HasFlags(ExprFmtHideTypes) && f.Memo != nil { f.Buffer.WriteByte('(') parenOpen = true f.Buffer.WriteString(f.Memo.metadata.ColumnMeta(id).Type.String()) } if !f.HasFlags(ExprFmtHideNotNull) && notNullCols.Contains(id) { f.Buffer.WriteString("!null") } if parenOpen { f.Buffer.WriteByte(')') } } // ScanIsReverseFn is a callback that is used to figure out if a scan needs to // happen in reverse (the code lives in the ordering package, and depending on // that directly would be a dependency loop). var ScanIsReverseFn func(md *opt.Metadata, s *ScanPrivate, required *physical.OrderingChoice) bool // FormatPrivate outputs a description of the private to f.Buffer. func FormatPrivate(f *ExprFmtCtx, private interface{}, physProps *physical.Required) { if private == nil { return } switch t := private.(type) { case *opt.ColumnID: f.space() f.formatColSimple("" /* label */, *t) case *opt.ColList: for _, col := range *t { f.space() f.formatColSimple("" /* label */, col) } case *TupleOrdinal: fmt.Fprintf(f.Buffer, " %d", *t) case *ScanPrivate: f.formatIndex(t.Table, t.Index, ScanIsReverseFn(f.Memo.Metadata(), t, &physProps.Ordering)) case *SequenceSelectPrivate: seq := f.Memo.metadata.Sequence(t.Sequence) fmt.Fprintf(f.Buffer, " %s", seq.Name()) case *MutationPrivate: f.formatIndex(t.Table, cat.PrimaryIndex, false /* reverse */) case *OrdinalityPrivate: if !t.Ordering.Any() { fmt.Fprintf(f.Buffer, " ordering=%s", t.Ordering) } case *GroupingPrivate: fmt.Fprintf(f.Buffer, " cols=%s", t.GroupingCols.String()) if !t.Ordering.Any() { fmt.Fprintf(f.Buffer, ",ordering=%s", t.Ordering) } case *IndexJoinPrivate: tab := f.Memo.metadata.Table(t.Table) fmt.Fprintf(f.Buffer, " %s", tab.Name()) case *InvertedFilterPrivate: col := f.Memo.metadata.ColumnMeta(t.InvertedColumn) fmt.Fprintf(f.Buffer, " %s", col.Alias) case *LookupJoinPrivate: f.formatIndex(t.Table, t.Index, false /* reverse */) case *InvertedJoinPrivate: f.formatIndex(t.Table, t.Index, false /* reverse */) case *ValuesPrivate: fmt.Fprintf(f.Buffer, " id=v%d", t.ID) case *ZigzagJoinPrivate: f.formatIndex(t.LeftTable, t.LeftIndex, false /* reverse */) f.formatIndex(t.RightTable, t.RightIndex, false /* reverse */) case *MergeJoinPrivate: fmt.Fprintf(f.Buffer, " %s,%s,%s", t.JoinType, t.LeftEq, t.RightEq) case *FunctionPrivate: fmt.Fprintf(f.Buffer, " %s", t.Name) case *WindowsItemPrivate: fmt.Fprintf(f.Buffer, " frame=%q", &t.Frame) case *WindowPrivate: fmt.Fprintf(f.Buffer, " partition=%s", t.Partition) if !t.Ordering.Any() { fmt.Fprintf(f.Buffer, " ordering=%s", t.Ordering) } case *physical.OrderingChoice: if !t.Any() { fmt.Fprintf(f.Buffer, " ordering=%s", t) } case *OpaqueRelPrivate: f.space() f.Buffer.WriteString(t.Metadata.String()) case *AlterTableSplitPrivate: f.formatIndex(t.Table, t.Index, false /* reverse */) case *AlterTableRelocatePrivate: FormatPrivate(f, &t.AlterTableSplitPrivate, nil) if t.RelocateLease { f.Buffer.WriteString(" [lease]") } case *ControlJobsPrivate: fmt.Fprintf(f.Buffer, " (%s)", tree.JobCommandToStatement[t.Command]) case *CancelPrivate: if t.IfExists { f.Buffer.WriteString(" [if-exists]") } case *CreateViewPrivate: schema := f.Memo.Metadata().Schema(t.Schema) fmt.Fprintf(f.Buffer, " %s.%s", schema.Name(), t.ViewName) case *JoinPrivate: // Nothing to show; flags are shown separately. 
case *ExplainPrivate, *opt.ColSet, *SetPrivate, *types.T, *ExportPrivate: // Don't show anything, because it's mostly redundant. default: fmt.Fprintf(f.Buffer, " %v", private) } } // tableName returns the table name to be used for pretty-printing. If // ExprFmtHideQualifications is not set, the fully qualified table name is // returned. func tableName(f *ExprFmtCtx, tabID opt.TableID) string { tabMeta := f.Memo.metadata.TableMeta(tabID) if f.HasFlags(ExprFmtHideQualifications) { return string(tabMeta.Table.Name()) } tn, err := f.Catalog.FullyQualifiedName(context.TODO(), tabMeta.Table) if err != nil { panic(err) } return tn.FQString() } // isSimpleColumnName returns true if the given label consists of only ASCII // letters, numbers, underscores, quotation marks, and periods ("."). It is // used to determine whether to enclose a column name in quotation marks for // nicer display. func isSimpleColumnName(label string) bool { for i, r := range label { if r > unicode.MaxASCII { return false } if i == 0 { if r != '"' && !unicode.IsLetter(r) { // The first character must be a letter or quotation mark. return false } } else if r != '.' && r != '_' && r != '"' && !unicode.IsNumber(r) && !unicode.IsLetter(r) { return false } } return true }
pkg/sql/opt/memo/expr_format.go
1
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.9989699125289917, 0.02095780149102211, 0.000161991105414927, 0.00017336195742245764, 0.13610593974590302 ]
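The hunk dicts in this group of records remove the free function opt.ColSetToList and, per the after_edit snippets further down, replace its call sites with a ToList method call on ColSet. A minimal sketch of that method, assuming ColSet still exposes the Len and ForEach interface shown in the removed code window; the receiver form is inferred from the after_edit text, not from the full source:

// ToList converts the set to a ColList, in column id order.
// Sketch only: the body mirrors the removed ColSetToList helper.
func (s ColSet) ToList() ColList {
	res := make(ColList, 0, s.Len())
	s.ForEach(func(x ColumnID) {
		res = append(res, x)
	})
	return res
}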
{ "id": 1, "code_window": [ "\t}\n", "\treturn true\n", "}\n", "\n", "// ColSetToList converts a column id set to a list, in column id order.\n", "func ColSetToList(set ColSet) ColList {\n", "\tres := make(ColList, 0, set.Len())\n", "\tset.ForEach(func(x ColumnID) {\n", "\t\tres = append(res, x)\n", "\t})\n", "\treturn res\n", "}\n", "\n", "// ColMap provides a 1:1 mapping from one column id to another. It is used by\n", "// operators that need to match columns from its inputs.\n", "type ColMap = util.FastIntMap\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/sql/opt/column_meta.go", "type": "replace", "edit_start_line_idx": 89 }
# The following tests have results equivalent to Postgres (differences # in string representation and number of decimals returned, but otherwise # the same). These do not pass using the inf package. The inf package # (http://gopkg.in/inf.v0) is what we used to use, but it had various problems # (for example, all the test cases below), and was replaced with apd. # inf returns 0 query R SELECT (1.4238790346995263e-40::DECIMAL / 6.011482313728436e+41::DECIMAL) ---- 2.3685988919035999994E-82 # inf returns -108.4851126682386588 query R SELECT ln(7.682705743584112e-48::DECIMAL) ---- -108.48511266823882051 # inf returns 0 query R SELECT sqrt(9.789765531128956e-34::DECIMAL) ---- 3.1288601009199749773E-17 # inf returns 0.1547300000000000 query R SELECT pow(4.727998800941528e-14::DECIMAL, 0.06081860494226844::DECIMAL) ---- 0.15472926640705911955 # inf returns 0, 0 query RR SELECT pow(sqrt(1e-10::DECIMAL), 2), sqrt(pow(1e-5::DECIMAL, 2)) ---- 1E-10 0.00001 # inf returns 1e-16, 0, 2e-16 query RRR SELECT 1e-16::DECIMAL / 2, 1e-16::DECIMAL / 3, 1e-16::DECIMAL / 2 * 2 ---- 5E-17 3.3333333333333333333E-17 1.0E-16 # inf returns 1e-8, 0, 0, 0 query RRRR SELECT pow(1e-4::DECIMAL, 2), pow(1e-5::DECIMAL, 2), pow(1e-8::DECIMAL, 2), pow(1e-9::DECIMAL, 2) ---- 1E-8 1E-10 1E-16 1E-18 # inf returns argument too large query R SELECT pow(1e-10::DECIMAL, 2) ---- 1E-20 # inf panics (#13051) query RR SELECT 'NaN'::FLOAT::DECIMAL, 'NaN'::DECIMAL ---- NaN NaN # Ensure trailing zeros are kept for decimal types with no listed scale, # and enforced when the scale is listed. statement ok CREATE TABLE t (d decimal, v decimal(3, 1)) statement ok INSERT INTO t VALUES (0.000::decimal, 0.00::decimal), (1.00::decimal, 1.00::decimal), (2.0::decimal, 2.0::decimal), (3::decimal, 3::decimal) query RR SELECT * FROM t ORDER BY d ---- 0.000 0.0 1.00 1.0 2.0 2.0 3 3.0 # Ensure trailing zeros are kept in an index. statement ok CREATE TABLE t2 (d decimal, v decimal(3, 1), primary key (d, v)) statement ok INSERT INTO t2 VALUES (1.00::decimal, 1.00::decimal), (2.0::decimal, 2.0::decimal), (3::decimal, 3::decimal), ('NaN'::decimal, 'NaN'::decimal), ('Inf'::decimal, 'Inf'::decimal), ('-Inf'::decimal, '-Inf'::decimal), ('-0.0000'::decimal, '-0.0000'::decimal) query RR SELECT * FROM t2 ORDER BY d ---- NaN NaN -Infinity -Infinity 0.0000 0.0 1.00 1.0 2.0 2.0 3 3.0 Infinity Infinity # Ensure uniqueness in PK columns with +/- NaN and 0. statement error duplicate key value INSERT INTO t2 VALUES ('-NaN'::decimal, '-NaN'::decimal) statement error duplicate key value INSERT INTO t2 VALUES (0, 0) # Ensure NaN cannot be signaling or negative. query RRRR SELECT 'NaN'::decimal, '-NaN'::decimal, 'sNaN'::decimal, '-sNaN'::decimal ---- NaN NaN NaN NaN query RR SELECT * FROM t2 WHERE d IS NaN and v IS NaN ---- NaN NaN query RR SELECT * FROM t2 WHERE d = 'Infinity' and v = 'Infinity' ---- Infinity Infinity query RR SELECT * FROM t2 WHERE d = '-Infinity' and v = '-Infinity' ---- -Infinity -Infinity # Ensure special values are handled correctly. 
statement ok CREATE TABLE s (d decimal null, index (d)) statement ok INSERT INTO s VALUES (null), ('NaN'::decimal), ('-NaN'::decimal), ('Inf'::decimal), ('-Inf'::decimal), ('0'::decimal), (1), (-1) statement ok INSERT INTO s VALUES ('-0'::decimal), ('-0.0'::decimal), ('-0.00'::decimal), ('-0.00E-1'::decimal), ('-0.0E-3'::decimal) query R rowsort SELECT * FROM s WHERE d = 0 ---- 0 0 0.0 0.00 0.000 0.0000 query R SELECT * FROM s WHERE d IS NAN ---- NaN NaN query R SELECT * FROM s WHERE d = 'inf'::decimal ---- Infinity query R SELECT * FROM s WHERE d = 'NaN' ---- NaN NaN # In the following tests, the various zero values all compare equal to # each other so we must use two ORDER BY clauses to obtain a stable result. # Check the ordering of decimal values. query R SELECT d FROM s ORDER BY d, d::TEXT ---- NULL NaN NaN -Infinity -1 0 0 0.0 0.00 0.000 0.0000 1 Infinity # Just test the NaN-ness of the values. query RBBB SELECT d, d IS NaN, d = 'NaN', isnan(d) FROM s@{FORCE_INDEX=primary} ORDER BY d, d::TEXT ---- NULL NULL NULL NULL NaN true true true NaN true true true -Infinity false false false -1 false false false 0 false false false 0 false false false 0.0 false false false 0.00 false false false 0.000 false false false 0.0000 false false false 1 false false false Infinity false false false # Just test the NaN-ness of the values in secondary index query RBBB SELECT d, d IS NaN, d = 'NaN', isnan(d) FROM s@{FORCE_INDEX=s_d_idx} ORDER BY d, d::TEXT ---- NULL NULL NULL NULL NaN true true true NaN true true true -Infinity false false false -1 false false false 0 false false false 0 false false false 0.0 false false false 0.00 false false false 0.000 false false false 0.0000 false false false 1 false false false Infinity false false false query RB select d, d > 'NaN' from s@{FORCE_INDEX=primary} where d > 'NaN' ORDER BY d, d::TEXT ---- -Infinity true -1 true 0 true 0 true 0.0 true 0.00 true 0.000 true 0.0000 true 1 true Infinity true query RB select d, d > 'NaN' from s@{FORCE_INDEX=s_d_idx} where d > 'NaN' ORDER BY d, d::TEXT ---- -Infinity true -1 true 0 true 0 true 0.0 true 0.00 true 0.000 true 0.0000 true 1 true Infinity true # Verify that decimals don't lose trailing 0s even when used for an index. 
statement ok CREATE INDEX idx ON s (d) query R rowsort SELECT * FROM s@idx WHERE d = 0 ---- 0 0 0.0 0.00 0.000 0.0000 statement ok INSERT INTO s VALUES ('10'::decimal), ('10.0'::decimal), ('10.00'::decimal), ('10.000'::decimal), ('100000E-4'::decimal), ('1000000E-5'::decimal), ('1.0000000E+1'::decimal) query R rowsort SELECT * FROM s@primary WHERE d = 10 ---- 10 10.0 10.00 10.000 10.0000 10.00000 10.000000 query R rowsort SELECT * FROM s@idx WHERE d = 10 ---- 10 10.0 10.00 10.000 10.0000 10.00000 10.000000 query R SELECT 1.00::decimal(6,4) ---- 1.0000 statement error value with precision 6, scale 4 must round to an absolute value less than 10\^2 SELECT 101.00::decimal(6,4) statement error scale \(6\) must be between 0 and precision \(4\) SELECT 101.00::decimal(4,6) statement error value with precision 2, scale 2 must round to an absolute value less than 1 SELECT 1::decimal(2, 2) # Regression test for #16081 statement CREATE TABLE a (b DECIMAL) statement INSERT INTO a VALUES (142378208485490985369999605144727062141206925976498256305323716858805588894693616552055968571135475510700810219028167653516982373238641332965927953273383572708760984694356069974208844865675206339235758647159337463780100273189720943242182911961627806424621091859596571173867825568394327041453823674373002756096) query R SELECT * FROM a ---- 142378208485490985369999605144727062141206925976498256305323716858805588894693616552055968571135475510700810219028167653516982373238641332965927953273383572708760984694356069974208844865675206339235758647159337463780100273189720943242182911961627806424621091859596571173867825568394327041453823674373002756096 # Verify that NaNs are returned instead of invalid operation. query R SELECT 'inf'::decimal + '-inf'::decimal ---- NaN # Regression test for #40327 query R SELECT 1.0 / 'Infinity' + 2 FROM a; ---- 
2.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 query R SELECT 
2.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 ---- 
2.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
pkg/sql/logictest/testdata/logic_test/decimal
0
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.005195521283894777, 0.0003076522843912244, 0.00016158077050931752, 0.0001685780007392168, 0.0008262073970399797 ]
{ "id": 1, "code_window": [ "\t}\n", "\treturn true\n", "}\n", "\n", "// ColSetToList converts a column id set to a list, in column id order.\n", "func ColSetToList(set ColSet) ColList {\n", "\tres := make(ColList, 0, set.Len())\n", "\tset.ForEach(func(x ColumnID) {\n", "\t\tres = append(res, x)\n", "\t})\n", "\treturn res\n", "}\n", "\n", "// ColMap provides a 1:1 mapping from one column id to another. It is used by\n", "// operators that need to match columns from its inputs.\n", "type ColMap = util.FastIntMap\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/sql/opt/column_meta.go", "type": "replace", "edit_start_line_idx": 89 }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. import React from "react"; import "./styles.styl"; interface IChipProps { title: string; type?: "green" | "lightgreen" | "grey" | "blue" | "lightblue" | "yellow"; } // tslint:disable-next-line: variable-name export const Chip: React.SFC <IChipProps> = ({ title, type }) => <span className={`Chip Chip--${type}`}>{title}</span>;
pkg/ui/src/views/app/components/chip/index.tsx
0
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.00021207451936788857, 0.0001886114478111267, 0.00017472532636020333, 0.00017903445404954255, 0.000016683914509485476 ]
{ "id": 1, "code_window": [ "\t}\n", "\treturn true\n", "}\n", "\n", "// ColSetToList converts a column id set to a list, in column id order.\n", "func ColSetToList(set ColSet) ColList {\n", "\tres := make(ColList, 0, set.Len())\n", "\tset.ForEach(func(x ColumnID) {\n", "\t\tres = append(res, x)\n", "\t})\n", "\treturn res\n", "}\n", "\n", "// ColMap provides a 1:1 mapping from one column id to another. It is used by\n", "// operators that need to match columns from its inputs.\n", "type ColMap = util.FastIntMap\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [], "file_path": "pkg/sql/opt/column_meta.go", "type": "replace", "edit_start_line_idx": 89 }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package batcheval import ( "bytes" "context" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval/result" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/closedts/ctpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/spanset" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/errors" ) func init() { RegisterReadWriteCommand(roachpb.Subsume, declareKeysSubsume, Subsume) } func declareKeysSubsume( _ *roachpb.RangeDescriptor, header roachpb.Header, req roachpb.Request, latchSpans, _ *spanset.SpanSet, ) { // Subsume must not run concurrently with any other command. It declares a // non-MVCC write over every addressable key in the range; this guarantees // that it conflicts with any other command because every command must declare // at least one addressable key. It does not, in fact, write any keys. // // We use the key bounds from the range descriptor in the request instead // of the current range descriptor. Either would be fine because we verify // that these match during the evaluation of the Subsume request. args := req.(*roachpb.SubsumeRequest) desc := args.RightDesc latchSpans.AddNonMVCC(spanset.SpanReadWrite, roachpb.Span{ Key: desc.StartKey.AsRawKey(), EndKey: desc.EndKey.AsRawKey(), }) latchSpans.AddNonMVCC(spanset.SpanReadWrite, roachpb.Span{ Key: keys.MakeRangeKeyPrefix(desc.StartKey), EndKey: keys.MakeRangeKeyPrefix(desc.EndKey).PrefixEnd(), }) rangeIDPrefix := keys.MakeRangeIDReplicatedPrefix(desc.RangeID) latchSpans.AddNonMVCC(spanset.SpanReadWrite, roachpb.Span{ Key: rangeIDPrefix, EndKey: rangeIDPrefix.PrefixEnd(), }) } // Subsume freezes a range for merging with its left-hand neighbor. When called // correctly, it provides important guarantees that ensure there is no moment in // time where the ranges involved in the merge could both process commands for // the same keys. // // Specifically, the receiving replica guarantees that: // // 1. it is the leaseholder at the time the request executes, // 2. when it responds, there are no commands in flight with a timestamp // greater than the FreezeStart timestamp provided in the response, // 3. the MVCC statistics in the response reflect the latest writes, // 4. it, and all future leaseholders for the range, will not process another // command until they refresh their range descriptor with a consistent read // from meta2, and // 5. if it or any future leaseholder for the range finds that its range // descriptor has been deleted, it self destructs. // // To achieve guarantees four and five, when issuing a Subsume request, the // caller must have a merge transaction open that has already placed deletion // intents on both the local and meta2 copy of the right-hand range descriptor. // The intent on the meta2 allows the leaseholder to block until the merge // transaction completes by performing a consistent read for its meta2 // descriptor. The intent on the local descriptor allows future leaseholders to // efficiently check whether a merge is in progress by performing a read of its // local descriptor after acquiring the lease. 
// // The period of time after intents have been placed but before the merge // transaction is complete is called the merge's "critical phase". func Subsume( ctx context.Context, readWriter storage.ReadWriter, cArgs CommandArgs, resp roachpb.Response, ) (result.Result, error) { args := cArgs.Args.(*roachpb.SubsumeRequest) reply := resp.(*roachpb.SubsumeResponse) // Verify that the Subsume request was sent to the correct range and that // the range's bounds have not changed during the merge transaction. desc := cArgs.EvalCtx.Desc() if !bytes.Equal(desc.StartKey, args.RightDesc.StartKey) || !bytes.Equal(desc.EndKey, args.RightDesc.EndKey) { return result.Result{}, errors.Errorf("RHS range bounds do not match: %s != %s", args.RightDesc, desc) } // Sanity check that the requesting range is our left neighbor. The ordering // of operations in the AdminMerge transaction should make it impossible for // these ranges to be nonadjacent, but double check. if !bytes.Equal(args.LeftDesc.EndKey, desc.StartKey) { return result.Result{}, errors.Errorf("ranges are not adjacent: %s != %s", args.LeftDesc.EndKey, desc.StartKey) } // Sanity check the caller has initiated a merge transaction by checking for // a deletion intent on the local range descriptor. descKey := keys.RangeDescriptorKey(desc.StartKey) _, intent, err := storage.MVCCGet(ctx, readWriter, descKey, cArgs.Header.Timestamp, storage.MVCCGetOptions{Inconsistent: true}) if err != nil { return result.Result{}, errors.Errorf("fetching local range descriptor: %s", err) } else if intent == nil { return result.Result{}, errors.New("range missing intent on its local descriptor") } val, _, err := storage.MVCCGetAsTxn(ctx, readWriter, descKey, cArgs.Header.Timestamp, intent.Txn) if err != nil { return result.Result{}, errors.Errorf("fetching local range descriptor as txn: %s", err) } else if val != nil { return result.Result{}, errors.New("non-deletion intent on local range descriptor") } // We prevent followers of the RHS from being able to serve follower reads on // timestamps that fall in the timestamp window representing the range's // subsumed state (i.e. between the subsumption time (FreezeStart) and the // timestamp at which the merge transaction commits or aborts), by requiring // follower replicas to catch up to an MLAI that succeeds the range's current // LeaseAppliedIndex (note that we're tracking lai + 1 below instead of lai). // In case the merge successfully commits, this MLAI will never be caught up // to since the RHS will be destroyed. In case the merge aborts, this ensures // that the followers can only activate the newer closed timestamps once they // catch up to the LAI associated with the merge abort. We need to do this // because the closed timestamps that are broadcast by RHS in this subsumed // state are not going to be reflected in the timestamp cache of the LHS range // after the merge, which can cause a serializability violation. // // Note that we are essentially lying to the closed timestamp tracker here in // order to achieve the effect of unactionable closed timestamp updates until // the merge concludes. Tracking lai + 1 here ensures that the follower // replicas need to catch up to at least that index before they are able to // activate _any of the closed timestamps from this point onwards_. In other // words, we will never publish a closed timestamp update for this range below // this lai, regardless of whether a different proposal untracks a lower lai // at any point in the future. 
// // NB: The above statement relies on the invariant that the LAI that follows a // Subsume request will be applied only after the merge aborts. More // specifically, this means that no intervening request can bump the LAI of // range while it is subsumed. This invariant is upheld because the only Raft // proposals allowed after a range has been subsumed are lease requests, which // do not bump the LAI. In case there is lease transfer on this range while it // is subsumed, we ensure that the initial MLAI update broadcast by the new // leaseholder respects the invariant in question, in much the same way we do // here. Take a look at `EmitMLAI()` in replica_closedts.go for more details. _, untrack := cArgs.EvalCtx.GetTracker().Track(ctx) lease, _ := cArgs.EvalCtx.GetLease() lai := cArgs.EvalCtx.GetLeaseAppliedIndex() untrack(ctx, ctpb.Epoch(lease.Epoch), desc.RangeID, ctpb.LAI(lai+1)) // NOTE: the deletion intent on the range's meta2 descriptor is just as // important to correctness as the deletion intent on the local descriptor, // but the check is too expensive as it would involve a network roundtrip on // most nodes. reply.MVCCStats = cArgs.EvalCtx.GetMVCCStats() reply.LeaseAppliedIndex = lai reply.FreezeStart = cArgs.EvalCtx.Clock().Now() return result.Result{ Local: result.LocalResult{FreezeStart: reply.FreezeStart}, }, nil }
pkg/kv/kvserver/batcheval/cmd_subsume.go
0
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.00017957590171135962, 0.00017119626863859594, 0.00016106714610941708, 0.00017116722301580012, 0.0000036918609112035483 ]
{ "id": 3, "code_window": [ "\n", "\tdefault:\n", "\t\t// Fall back to writing output columns in column id order.\n", "\t\tcolList = opt.ColSetToList(e.Relational().OutputCols)\n", "\t}\n", "\n", "\tf.formatColumns(e, tp, colList, required.Presentation)\n", "\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tcolList = e.Relational().OutputCols.ToList()\n" ], "file_path": "pkg/sql/opt/memo/expr_format.go", "type": "replace", "edit_start_line_idx": 284 }
// Copyright 2020 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package xform import ( "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/opt/constraint" "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" "github.com/cockroachdb/cockroach/pkg/sql/opt/ordering" "github.com/cockroachdb/cockroach/pkg/sql/opt/props" "github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" ) // LimitScanPrivate constructs a new ScanPrivate value that is based on the // given ScanPrivate. The new private's HardLimit is set to the given limit, // which must be a constant int datum value. The other fields are inherited from // the existing private. func (c *CustomFuncs) LimitScanPrivate( scanPrivate *memo.ScanPrivate, limit tree.Datum, required physical.OrderingChoice, ) *memo.ScanPrivate { // Determine the scan direction necessary to provide the required ordering. _, reverse := ordering.ScanPrivateCanProvide(c.e.mem.Metadata(), scanPrivate, &required) newScanPrivate := *scanPrivate newScanPrivate.HardLimit = memo.MakeScanLimit(int64(*limit.(*tree.DInt)), reverse) return &newScanPrivate } // CanLimitFilteredScan returns true if the given scan has not already been // limited, and is constrained or scans a partial index. This is only possible // when the required ordering of the rows to be limited can be satisfied by the // Scan operator. // // NOTE: Limiting unconstrained, non-partial index scans is done by the // GenerateLimitedScans rule, since that can require IndexJoin operators // to be generated. func (c *CustomFuncs) CanLimitFilteredScan( scanPrivate *memo.ScanPrivate, required physical.OrderingChoice, ) bool { if scanPrivate.HardLimit != 0 { // Don't push limit into scan if scan is already limited. This would // usually only happen when normalizations haven't run, as otherwise // redundant Limit operators would be discarded. return false } md := c.e.mem.Metadata() if scanPrivate.Constraint == nil && scanPrivate.PartialIndexPredicate(md) == nil { // This is not a constrained scan nor a partial index scan, so skip it. // The GenerateLimitedScans rule is responsible for limited // unconstrained scans on non-partial indexes. return false } ok, _ := ordering.ScanPrivateCanProvide(c.e.mem.Metadata(), scanPrivate, &required) return ok } // GenerateLimitedScans enumerates all non-inverted and non-partial secondary // indexes on the Scan operator's table and tries to create new limited Scan // operators from them. Since this only needs to be done once per table, // GenerateLimitedScans should only be called on the original unaltered primary // index Scan operator (i.e. not constrained or limited). // // For a secondary index that "covers" the columns needed by the scan, a single // limited Scan operator is created. For a non-covering index, an IndexJoin is // constructed to add missing columns to the limited Scan. // // Inverted index scans are not guaranteed to produce a specific number // of result rows because they contain multiple entries for a single row // indexed. Therefore, they cannot be considered for limited scans. 
// // Partial indexes do not index every row in the table and they can only be used // in cases where a query filter implies the partial index predicate. // GenerateLimitedScans deals with limits, but no filters, so it cannot generate // limited partial index scans. Limiting partial indexes is done by the // PushLimitIntoFilteredScans rule. func (c *CustomFuncs) GenerateLimitedScans( grp memo.RelExpr, scanPrivate *memo.ScanPrivate, limit tree.Datum, required physical.OrderingChoice, ) { limitVal := int64(*limit.(*tree.DInt)) var sb indexScanBuilder sb.init(c, scanPrivate.Table) // Iterate over all non-inverted, non-partial indexes, looking for those // that can be limited. var iter scanIndexIter iter.Init(c.e.mem, &c.im, scanPrivate, nil /* filters */, rejectInvertedIndexes|rejectPartialIndexes) iter.ForEach(func(index cat.Index, filters memo.FiltersExpr, indexCols opt.ColSet, isCovering bool) { newScanPrivate := *scanPrivate newScanPrivate.Index = index.Ordinal() // If the alternate index does not conform to the ordering, then skip it. // If reverse=true, then the scan needs to be in reverse order to match // the required ordering. ok, reverse := ordering.ScanPrivateCanProvide( c.e.mem.Metadata(), &newScanPrivate, &required, ) if !ok { return } newScanPrivate.HardLimit = memo.MakeScanLimit(limitVal, reverse) // If the alternate index includes the set of needed columns, then construct // a new Scan operator using that index. if isCovering { sb.setScan(&newScanPrivate) sb.build(grp) return } // Otherwise, try to construct an IndexJoin operator that provides the // columns missing from the index. if scanPrivate.Flags.NoIndexJoin { return } // Scan whatever columns we need which are available from the index, plus // the PK columns. newScanPrivate.Cols = indexCols.Intersection(scanPrivate.Cols) newScanPrivate.Cols.UnionWith(sb.primaryKeyCols()) sb.setScan(&newScanPrivate) // The Scan operator will go into its own group (because it projects a // different set of columns), and the IndexJoin operator will be added to // the same group as the original Limit operator. sb.addIndexJoin(scanPrivate.Cols) sb.build(grp) }) } // ScanIsLimited returns true if the scan operator with the given ScanPrivate is // limited. func (c *CustomFuncs) ScanIsLimited(sp *memo.ScanPrivate) bool { return sp.HardLimit != 0 } // ScanIsInverted returns true if the index of the given ScanPrivate is an // inverted index. func (c *CustomFuncs) ScanIsInverted(sp *memo.ScanPrivate) bool { md := c.e.mem.Metadata() idx := md.Table(sp.Table).Index(sp.Index) return idx.IsInverted() } // SplitScanIntoUnionScans returns a Union of Scan operators with hard limits // that each scan over a single key from the original scan's constraints. This // is beneficial in cases where the original scan had to scan over many rows but // had relatively few keys to scan over. // TODO(drewk): handle inverted scans. func (c *CustomFuncs) SplitScanIntoUnionScans( limitOrdering physical.OrderingChoice, scan memo.RelExpr, sp *memo.ScanPrivate, limit tree.Datum, ) memo.RelExpr { const maxScanCount = 16 const threshold = 4 cons, ok := c.getKnownScanConstraint(sp) if !ok { // No valid constraint was found. return nil } keyCtx := constraint.MakeKeyContext(&cons.Columns, c.e.evalCtx) limitVal := int(*limit.(*tree.DInt)) spans := cons.Spans // Retrieve the number of keys in the spans. keyCount, ok := spans.KeyCount(&keyCtx) if !ok { return nil } if keyCount <= 1 { // We need more than one key in order to split the existing Scan into // multiple Scans. 
return nil } if int(keyCount) > maxScanCount { // The number of new Scans created would exceed maxScanCount. return nil } // Check that the number of rows scanned by the new plan will be smaller than // the number scanned by the old plan by at least a factor of "threshold". if float64(int(keyCount)*limitVal*threshold) >= scan.Relational().Stats.RowCount { // Splitting the scan may not be worth the overhead; creating a sequence of // scans unioned together is expensive, so we don't want to create the plan // only for the optimizer to use something else. We only want to create the // plan if it is likely to be used. return nil } // Retrieve the length of the keys. All keys are required to be the same // length (this will be checked later) so we can simply use the length of the // first key. keyLength := spans.Get(0).StartKey().Length() // If the index ordering has a prefix of columns of length keyLength followed // by the limitOrdering columns, the scan can be split. Otherwise, return nil. hasLimitOrderingSeq, reverse := indexHasOrderingSequence( c.e.mem.Metadata(), scan, sp, limitOrdering, keyLength) if !hasLimitOrderingSeq { return nil } // Construct a hard limit for the new scans using the result of // hasLimitOrderingSeq. newHardLimit := memo.MakeScanLimit(int64(limitVal), reverse) // Construct a new Spans object containing a new Span for each key in the // original Scan's spans. newSpans, ok := spans.ExtractSingleKeySpans(&keyCtx, maxScanCount) if !ok { // Single key spans could not be created. return nil } // Construct a new ScanExpr for each span and union them all together. We // output the old ColumnIDs from each union. oldColList := opt.ColSetToList(scan.Relational().OutputCols) last := c.makeNewScan(sp, cons.Columns, newHardLimit, newSpans.Get(0)) for i, cnt := 1, newSpans.Count(); i < cnt; i++ { newScan := c.makeNewScan(sp, cons.Columns, newHardLimit, newSpans.Get(i)) last = c.e.f.ConstructUnion(last, newScan, &memo.SetPrivate{ LeftCols: opt.ColSetToList(last.Relational().OutputCols), RightCols: opt.ColSetToList(newScan.Relational().OutputCols), OutCols: oldColList, }) } return last } // indexHasOrderingSequence returns whether the scan can provide a given // ordering under the assumption that we are scanning a single-key span with the // given keyLength (and if so, whether we need to scan it in reverse). // For example: // // index: +1/-2/+3, // limitOrdering: -2/+3, // keyLength: 1, // => // hasSequence: True, reverse: False // // index: +1/-2/+3, // limitOrdering: +2/-3, // keyLength: 1, // => // hasSequence: True, reverse: True // // index: +1/-2/+3/+4, // limitOrdering: +3/+4, // keyLength: 1, // => // hasSequence: False, reverse: False // func indexHasOrderingSequence( md *opt.Metadata, scan memo.RelExpr, sp *memo.ScanPrivate, limitOrdering physical.OrderingChoice, keyLength int, ) (hasSequence, reverse bool) { tableMeta := md.TableMeta(sp.Table) index := tableMeta.Table.Index(sp.Index) if keyLength > index.ColumnCount() { // The key contains more columns than the index. The limit ordering sequence // cannot be part of the index ordering. return false, false } // Create a copy of the Scan's FuncDepSet, and add the first 'keyCount' // columns from the index as constant columns. The columns are constant // because the span contains only a single key on those columns. 
var fds props.FuncDepSet fds.CopyFrom(&scan.Relational().FuncDeps) prefixCols := opt.ColSet{} for i := 0; i < keyLength; i++ { col := sp.Table.IndexColumnID(index, i) prefixCols.Add(col) } fds.AddConstants(prefixCols) // Use fds to simplify a copy of the limit ordering; the prefix columns will // become part of the optional ColSet. requiredOrdering := limitOrdering.Copy() requiredOrdering.Simplify(&fds) // If the ScanPrivate can satisfy requiredOrdering, it must return columns // ordered by a prefix of length keyLength, followed by the columns of // limitOrdering. return ordering.ScanPrivateCanProvide(md, sp, &requiredOrdering) } // makeNewScan constructs a new Scan operator with a new TableID and the given // limit and span. All ColumnIDs and references to those ColumnIDs are // replaced with new ones from the new TableID. All other fields are simply // copied from the old ScanPrivate. func (c *CustomFuncs) makeNewScan( sp *memo.ScanPrivate, columns constraint.Columns, newHardLimit memo.ScanLimit, span *constraint.Span, ) memo.RelExpr { newScanPrivate := c.DuplicateScanPrivate(sp) // duplicateScanPrivate does not initialize the Constraint or HardLimit // fields, so we do that now. newScanPrivate.HardLimit = newHardLimit // Construct the new Constraint field with the given span and remapped // ordering columns. var newSpans constraint.Spans newSpans.InitSingleSpan(span) newConstraint := &constraint.Constraint{ Columns: columns.RemapColumns(sp.Table, newScanPrivate.Table), Spans: newSpans, } newScanPrivate.Constraint = newConstraint return c.e.f.ConstructScan(newScanPrivate) } // getKnownScanConstraint returns a Constraint that is known to hold true for // the output of the Scan operator with the given ScanPrivate. If the // ScanPrivate has a Constraint, the scan Constraint is returned. Otherwise, an // effort is made to retrieve a Constraint from the underlying table's check // constraints. getKnownScanConstraint assumes that the scan is not inverted. func (c *CustomFuncs) getKnownScanConstraint( sp *memo.ScanPrivate, ) (cons *constraint.Constraint, found bool) { if sp.Constraint != nil { // The ScanPrivate has a constraint, so return it. cons = sp.Constraint } else { // Build a constraint set with the check constraints of the underlying // table. filters := c.checkConstraintFilters(sp.Table) instance := c.initIdxConstraintForIndex( nil, /* requiredFilters */ filters, sp.Table, sp.Index, false, /* isInverted */ ) cons = instance.Constraint() } return cons, !cons.IsUnconstrained() }
pkg/sql/opt/xform/limit_funcs.go
1
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.00090082420501858, 0.00022350714425556362, 0.00016053019498940557, 0.00016924944065976888, 0.00016420769679825753 ]
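Under the same assumption, the opt.ColSetToList call sites in SplitScanIntoUnionScans above would become method calls; a hedged sketch of the rewritten union construction, with the call form taken from the after_edit snippet "e.Relational().OutputCols.ToList()":

// Sketch only: same logic as the original loop, with the free-function
// calls replaced by the ToList method implied by the after_edit text.
oldColList := scan.Relational().OutputCols.ToList()
last := c.makeNewScan(sp, cons.Columns, newHardLimit, newSpans.Get(0))
for i, cnt := 1, newSpans.Count(); i < cnt; i++ {
	newScan := c.makeNewScan(sp, cons.Columns, newHardLimit, newSpans.Get(i))
	last = c.e.f.ConstructUnion(last, newScan, &memo.SetPrivate{
		LeftCols:  last.Relational().OutputCols.ToList(),
		RightCols: newScan.Relational().OutputCols.ToList(),
		OutCols:   oldColList,
	})
}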
{ "id": 3, "code_window": [ "\n", "\tdefault:\n", "\t\t// Fall back to writing output columns in column id order.\n", "\t\tcolList = opt.ColSetToList(e.Relational().OutputCols)\n", "\t}\n", "\n", "\tf.formatColumns(e, tp, colList, required.Presentation)\n", "\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tcolList = e.Relational().OutputCols.ToList()\n" ], "file_path": "pkg/sql/opt/memo/expr_format.go", "type": "replace", "edit_start_line_idx": 284 }
// Copyright 2019 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package txnwait import ( "time" "github.com/cockroachdb/cockroach/pkg/util/metric" ) // Metrics contains all the txnqueue related metrics. type Metrics struct { PusheeWaiting *metric.Gauge PusherWaiting *metric.Gauge QueryWaiting *metric.Gauge PusherSlow *metric.Gauge PusherWaitTime *metric.Histogram QueryWaitTime *metric.Histogram DeadlocksTotal *metric.Counter } // NewMetrics creates a new Metrics instance with all related metric fields. func NewMetrics(histogramWindowInterval time.Duration) *Metrics { return &Metrics{ PusheeWaiting: metric.NewGauge( metric.Metadata{ Name: "txnwaitqueue.pushee.waiting", Help: "Number of pushees on the txn wait queue", Measurement: "Waiting Pushees", Unit: metric.Unit_COUNT, }, ), PusherWaiting: metric.NewGauge( metric.Metadata{ Name: "txnwaitqueue.pusher.waiting", Help: "Number of pushers on the txn wait queue", Measurement: "Waiting Pushers", Unit: metric.Unit_COUNT, }, ), QueryWaiting: metric.NewGauge( metric.Metadata{ Name: "txnwaitqueue.query.waiting", Help: "Number of transaction status queries waiting for an updated transaction record", Measurement: "Waiting Queries", Unit: metric.Unit_COUNT, }, ), PusherSlow: metric.NewGauge( metric.Metadata{ Name: "txnwaitqueue.pusher.slow", Help: "The total number of cases where a pusher waited more than the excessive wait threshold", Measurement: "Slow Pushers", Unit: metric.Unit_COUNT, }, ), PusherWaitTime: metric.NewHistogram( metric.Metadata{ Name: "txnwaitqueue.pusher.wait_time", Help: "Histogram of durations spent in queue by pushers", Measurement: "Pusher wait time", Unit: metric.Unit_NANOSECONDS, }, histogramWindowInterval, time.Hour.Nanoseconds(), 1, ), QueryWaitTime: metric.NewHistogram( metric.Metadata{ Name: "txnwaitqueue.query.wait_time", Help: "Histogram of durations spent in queue by queries", Measurement: "Query wait time", Unit: metric.Unit_NANOSECONDS, }, histogramWindowInterval, time.Hour.Nanoseconds(), 1, ), DeadlocksTotal: metric.NewCounter( metric.Metadata{ Name: "txnwaitqueue.deadlocks_total", Help: "Number of deadlocks detected by the txn wait queue", Measurement: "Deadlocks", Unit: metric.Unit_COUNT, }, ), } }
pkg/kv/kvserver/txnwait/metrics.go
0
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.00018138937593903393, 0.0001736736303428188, 0.0001673679507803172, 0.00017363335064146668, 0.000004090904894837877 ]
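The hunk attached to these records swaps the free helper opt.ColSetToList(set) for a ToList method on the set itself. A rough illustration of that API shape with a hypothetical map-based ColSet (the real opt.ColSet is a bitmap type; this is not its implementation):

package main

import (
	"fmt"
	"sort"
)

// ColSet is a toy stand-in for a set of column IDs, used only to show the
// method-based API that the hunk migrates call sites to.
type ColSet map[int]struct{}

// ToList returns the set's members in ascending order, playing the role the
// free helper ColSetToList played before the refactor.
func (s ColSet) ToList() []int {
	list := make([]int, 0, len(s))
	for col := range s {
		list = append(list, col)
	}
	sort.Ints(list)
	return list
}

func main() {
	outputCols := ColSet{3: {}, 1: {}, 2: {}}
	// Call sites change from ColSetToList(outputCols) to outputCols.ToList().
	fmt.Println(outputCols.ToList()) // [1 2 3]
}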
{ "id": 3, "code_window": [ "\n", "\tdefault:\n", "\t\t// Fall back to writing output columns in column id order.\n", "\t\tcolList = opt.ColSetToList(e.Relational().OutputCols)\n", "\t}\n", "\n", "\tf.formatColumns(e, tp, colList, required.Presentation)\n", "\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tcolList = e.Relational().OutputCols.ToList()\n" ], "file_path": "pkg/sql/opt/memo/expr_format.go", "type": "replace", "edit_start_line_idx": 284 }
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: util/tracing/tracingpb/recorded_span.proto package tracingpb import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" import types "github.com/gogo/protobuf/types" import time "time" import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf var _ = time.Kitchen // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package // LogRecord is a log message recorded in a traced span. type LogRecord struct { // Time of the log record. Time time.Time `protobuf:"bytes,1,opt,name=time,proto3,stdtime" json:"time"` // Fields with values converted to strings. Fields []LogRecord_Field `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields"` } func (m *LogRecord) Reset() { *m = LogRecord{} } func (m *LogRecord) String() string { return proto.CompactTextString(m) } func (*LogRecord) ProtoMessage() {} func (*LogRecord) Descriptor() ([]byte, []int) { return fileDescriptor_recorded_span_ac7a781f9ecf6725, []int{0} } func (m *LogRecord) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *LogRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } func (dst *LogRecord) XXX_Merge(src proto.Message) { xxx_messageInfo_LogRecord.Merge(dst, src) } func (m *LogRecord) XXX_Size() int { return m.Size() } func (m *LogRecord) XXX_DiscardUnknown() { xxx_messageInfo_LogRecord.DiscardUnknown(m) } var xxx_messageInfo_LogRecord proto.InternalMessageInfo type LogRecord_Field struct { Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } func (m *LogRecord_Field) Reset() { *m = LogRecord_Field{} } func (m *LogRecord_Field) String() string { return proto.CompactTextString(m) } func (*LogRecord_Field) ProtoMessage() {} func (*LogRecord_Field) Descriptor() ([]byte, []int) { return fileDescriptor_recorded_span_ac7a781f9ecf6725, []int{0, 0} } func (m *LogRecord_Field) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *LogRecord_Field) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } func (dst *LogRecord_Field) XXX_Merge(src proto.Message) { xxx_messageInfo_LogRecord_Field.Merge(dst, src) } func (m *LogRecord_Field) XXX_Size() int { return m.Size() } func (m *LogRecord_Field) XXX_DiscardUnknown() { xxx_messageInfo_LogRecord_Field.DiscardUnknown(m) } var xxx_messageInfo_LogRecord_Field proto.InternalMessageInfo // RecordedSpan is the data recorded by a trace span. It // needs to be able to cross RPC boundaries so that the // complete recording of the trace can be constructed. type RecordedSpan struct { // ID of the trace; spans that are part of the same hierarchy share // the same trace ID. TraceID uint64 `protobuf:"varint,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` // ID of the span. 
SpanID uint64 `protobuf:"varint,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` // Span ID of the parent span. ParentSpanID uint64 `protobuf:"varint,3,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"` // Operation name. Operation string `protobuf:"bytes,4,opt,name=operation,proto3" json:"operation,omitempty"` // Baggage items get passed from parent to child spans (even through gRPC). // Notably, verbose tracing uses a special baggage item. Baggage map[string]string `protobuf:"bytes,5,rep,name=baggage,proto3" json:"baggage,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Tags associated with the span. Tags map[string]string `protobuf:"bytes,6,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Time when the span was started. StartTime time.Time `protobuf:"bytes,7,opt,name=start_time,json=startTime,proto3,stdtime" json:"start_time"` // Duration is the span's duration, measured from start to Finish(). // // A spans whose recording is collected before it's finished will have the // duration set as the time of collection - start_time. Such a span will have // an "unfinished" tag. Duration time.Duration `protobuf:"bytes,8,opt,name=duration,proto3,stdduration" json:"duration"` // Events logged in the span. Logs []LogRecord `protobuf:"bytes,9,rep,name=logs,proto3" json:"logs"` // Stats collected in this span. Stats *types.Any `protobuf:"bytes,10,opt,name=stats,proto3" json:"stats,omitempty"` } func (m *RecordedSpan) Reset() { *m = RecordedSpan{} } func (*RecordedSpan) ProtoMessage() {} func (*RecordedSpan) Descriptor() ([]byte, []int) { return fileDescriptor_recorded_span_ac7a781f9ecf6725, []int{1} } func (m *RecordedSpan) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RecordedSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } func (dst *RecordedSpan) XXX_Merge(src proto.Message) { xxx_messageInfo_RecordedSpan.Merge(dst, src) } func (m *RecordedSpan) XXX_Size() int { return m.Size() } func (m *RecordedSpan) XXX_DiscardUnknown() { xxx_messageInfo_RecordedSpan.DiscardUnknown(m) } var xxx_messageInfo_RecordedSpan proto.InternalMessageInfo // NormalizedSpan is a representation of a RecordedSpan from a trace with all // its children embedded, recursively. This JSON serialization of this proto is // used in the system.statement_diagnostics.trace column. // // See RecordedSpan for the description of the fields. 
type NormalizedSpan struct { Operation string `protobuf:"bytes,1,opt,name=operation,proto3" json:"operation,omitempty"` Tags map[string]string `protobuf:"bytes,2,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` StartTime time.Time `protobuf:"bytes,3,opt,name=start_time,json=startTime,proto3,stdtime" json:"start_time"` Duration time.Duration `protobuf:"bytes,4,opt,name=duration,proto3,stdduration" json:"duration"` Logs []LogRecord `protobuf:"bytes,5,rep,name=logs,proto3" json:"logs"` Children []NormalizedSpan `protobuf:"bytes,6,rep,name=children,proto3" json:"children"` } func (m *NormalizedSpan) Reset() { *m = NormalizedSpan{} } func (m *NormalizedSpan) String() string { return proto.CompactTextString(m) } func (*NormalizedSpan) ProtoMessage() {} func (*NormalizedSpan) Descriptor() ([]byte, []int) { return fileDescriptor_recorded_span_ac7a781f9ecf6725, []int{2} } func (m *NormalizedSpan) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *NormalizedSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } func (dst *NormalizedSpan) XXX_Merge(src proto.Message) { xxx_messageInfo_NormalizedSpan.Merge(dst, src) } func (m *NormalizedSpan) XXX_Size() int { return m.Size() } func (m *NormalizedSpan) XXX_DiscardUnknown() { xxx_messageInfo_NormalizedSpan.DiscardUnknown(m) } var xxx_messageInfo_NormalizedSpan proto.InternalMessageInfo func init() { proto.RegisterType((*LogRecord)(nil), "cockroach.util.tracing.tracingpb.LogRecord") proto.RegisterType((*LogRecord_Field)(nil), "cockroach.util.tracing.tracingpb.LogRecord.Field") proto.RegisterType((*RecordedSpan)(nil), "cockroach.util.tracing.tracingpb.RecordedSpan") proto.RegisterMapType((map[string]string)(nil), "cockroach.util.tracing.tracingpb.RecordedSpan.BaggageEntry") proto.RegisterMapType((map[string]string)(nil), "cockroach.util.tracing.tracingpb.RecordedSpan.TagsEntry") proto.RegisterType((*NormalizedSpan)(nil), "cockroach.util.tracing.tracingpb.NormalizedSpan") proto.RegisterMapType((map[string]string)(nil), "cockroach.util.tracing.tracingpb.NormalizedSpan.TagsEntry") } func (m *LogRecord) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *LogRecord) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0xa i++ i = encodeVarintRecordedSpan(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Time))) n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) if err != nil { return 0, err } i += n1 if len(m.Fields) > 0 { for _, msg := range m.Fields { dAtA[i] = 0x12 i++ i = encodeVarintRecordedSpan(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } return i, nil } func (m *LogRecord_Field) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *LogRecord_Field) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Key) > 0 { dAtA[i] = 0xa i++ i = encodeVarintRecordedSpan(dAtA, i, uint64(len(m.Key))) i += copy(dAtA[i:], m.Key) } if len(m.Value) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintRecordedSpan(dAtA, i, uint64(len(m.Value))) i += copy(dAtA[i:], m.Value) } return i, nil } func (m 
*RecordedSpan) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RecordedSpan) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.TraceID != 0 { dAtA[i] = 0x8 i++ i = encodeVarintRecordedSpan(dAtA, i, uint64(m.TraceID)) } if m.SpanID != 0 { dAtA[i] = 0x10 i++ i = encodeVarintRecordedSpan(dAtA, i, uint64(m.SpanID)) } if m.ParentSpanID != 0 { dAtA[i] = 0x18 i++ i = encodeVarintRecordedSpan(dAtA, i, uint64(m.ParentSpanID)) } if len(m.Operation) > 0 { dAtA[i] = 0x22 i++ i = encodeVarintRecordedSpan(dAtA, i, uint64(len(m.Operation))) i += copy(dAtA[i:], m.Operation) } if len(m.Baggage) > 0 { keysForBaggage := make([]string, 0, len(m.Baggage)) for k := range m.Baggage { keysForBaggage = append(keysForBaggage, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForBaggage) for _, k := range keysForBaggage { dAtA[i] = 0x2a i++ v := m.Baggage[string(k)] mapSize := 1 + len(k) + sovRecordedSpan(uint64(len(k))) + 1 + len(v) + sovRecordedSpan(uint64(len(v))) i = encodeVarintRecordedSpan(dAtA, i, uint64(mapSize)) dAtA[i] = 0xa i++ i = encodeVarintRecordedSpan(dAtA, i, uint64(len(k))) i += copy(dAtA[i:], k) dAtA[i] = 0x12 i++ i = encodeVarintRecordedSpan(dAtA, i, uint64(len(v))) i += copy(dAtA[i:], v) } } if len(m.Tags) > 0 { keysForTags := make([]string, 0, len(m.Tags)) for k := range m.Tags { keysForTags = append(keysForTags, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForTags) for _, k := range keysForTags { dAtA[i] = 0x32 i++ v := m.Tags[string(k)] mapSize := 1 + len(k) + sovRecordedSpan(uint64(len(k))) + 1 + len(v) + sovRecordedSpan(uint64(len(v))) i = encodeVarintRecordedSpan(dAtA, i, uint64(mapSize)) dAtA[i] = 0xa i++ i = encodeVarintRecordedSpan(dAtA, i, uint64(len(k))) i += copy(dAtA[i:], k) dAtA[i] = 0x12 i++ i = encodeVarintRecordedSpan(dAtA, i, uint64(len(v))) i += copy(dAtA[i:], v) } } dAtA[i] = 0x3a i++ i = encodeVarintRecordedSpan(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTime))) n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTime, dAtA[i:]) if err != nil { return 0, err } i += n2 dAtA[i] = 0x42 i++ i = encodeVarintRecordedSpan(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdDuration(m.Duration))) n3, err := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.Duration, dAtA[i:]) if err != nil { return 0, err } i += n3 if len(m.Logs) > 0 { for _, msg := range m.Logs { dAtA[i] = 0x4a i++ i = encodeVarintRecordedSpan(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if m.Stats != nil { dAtA[i] = 0x52 i++ i = encodeVarintRecordedSpan(dAtA, i, uint64(m.Stats.Size())) n4, err := m.Stats.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n4 } return i, nil } func (m *NormalizedSpan) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *NormalizedSpan) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Operation) > 0 { dAtA[i] = 0xa i++ i = encodeVarintRecordedSpan(dAtA, i, uint64(len(m.Operation))) i += copy(dAtA[i:], m.Operation) } if len(m.Tags) > 0 { keysForTags := make([]string, 0, len(m.Tags)) for k := range m.Tags { keysForTags = append(keysForTags, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForTags) for _, k := range keysForTags { dAtA[i] = 0x12 
i++ v := m.Tags[string(k)] mapSize := 1 + len(k) + sovRecordedSpan(uint64(len(k))) + 1 + len(v) + sovRecordedSpan(uint64(len(v))) i = encodeVarintRecordedSpan(dAtA, i, uint64(mapSize)) dAtA[i] = 0xa i++ i = encodeVarintRecordedSpan(dAtA, i, uint64(len(k))) i += copy(dAtA[i:], k) dAtA[i] = 0x12 i++ i = encodeVarintRecordedSpan(dAtA, i, uint64(len(v))) i += copy(dAtA[i:], v) } } dAtA[i] = 0x1a i++ i = encodeVarintRecordedSpan(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTime))) n5, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTime, dAtA[i:]) if err != nil { return 0, err } i += n5 dAtA[i] = 0x22 i++ i = encodeVarintRecordedSpan(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdDuration(m.Duration))) n6, err := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.Duration, dAtA[i:]) if err != nil { return 0, err } i += n6 if len(m.Logs) > 0 { for _, msg := range m.Logs { dAtA[i] = 0x2a i++ i = encodeVarintRecordedSpan(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if len(m.Children) > 0 { for _, msg := range m.Children { dAtA[i] = 0x32 i++ i = encodeVarintRecordedSpan(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } return i, nil } func encodeVarintRecordedSpan(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return offset + 1 } func (m *LogRecord) Size() (n int) { if m == nil { return 0 } var l int _ = l l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time) n += 1 + l + sovRecordedSpan(uint64(l)) if len(m.Fields) > 0 { for _, e := range m.Fields { l = e.Size() n += 1 + l + sovRecordedSpan(uint64(l)) } } return n } func (m *LogRecord_Field) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Key) if l > 0 { n += 1 + l + sovRecordedSpan(uint64(l)) } l = len(m.Value) if l > 0 { n += 1 + l + sovRecordedSpan(uint64(l)) } return n } func (m *RecordedSpan) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.TraceID != 0 { n += 1 + sovRecordedSpan(uint64(m.TraceID)) } if m.SpanID != 0 { n += 1 + sovRecordedSpan(uint64(m.SpanID)) } if m.ParentSpanID != 0 { n += 1 + sovRecordedSpan(uint64(m.ParentSpanID)) } l = len(m.Operation) if l > 0 { n += 1 + l + sovRecordedSpan(uint64(l)) } if len(m.Baggage) > 0 { for k, v := range m.Baggage { _ = k _ = v mapEntrySize := 1 + len(k) + sovRecordedSpan(uint64(len(k))) + 1 + len(v) + sovRecordedSpan(uint64(len(v))) n += mapEntrySize + 1 + sovRecordedSpan(uint64(mapEntrySize)) } } if len(m.Tags) > 0 { for k, v := range m.Tags { _ = k _ = v mapEntrySize := 1 + len(k) + sovRecordedSpan(uint64(len(k))) + 1 + len(v) + sovRecordedSpan(uint64(len(v))) n += mapEntrySize + 1 + sovRecordedSpan(uint64(mapEntrySize)) } } l = github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTime) n += 1 + l + sovRecordedSpan(uint64(l)) l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.Duration) n += 1 + l + sovRecordedSpan(uint64(l)) if len(m.Logs) > 0 { for _, e := range m.Logs { l = e.Size() n += 1 + l + sovRecordedSpan(uint64(l)) } } if m.Stats != nil { l = m.Stats.Size() n += 1 + l + sovRecordedSpan(uint64(l)) } return n } func (m *NormalizedSpan) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Operation) if l > 0 { n += 1 + l + sovRecordedSpan(uint64(l)) } if len(m.Tags) > 0 { for k, v := range m.Tags { _ = k _ = v mapEntrySize := 1 + len(k) + sovRecordedSpan(uint64(len(k))) + 1 + len(v) 
+ sovRecordedSpan(uint64(len(v))) n += mapEntrySize + 1 + sovRecordedSpan(uint64(mapEntrySize)) } } l = github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTime) n += 1 + l + sovRecordedSpan(uint64(l)) l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.Duration) n += 1 + l + sovRecordedSpan(uint64(l)) if len(m.Logs) > 0 { for _, e := range m.Logs { l = e.Size() n += 1 + l + sovRecordedSpan(uint64(l)) } } if len(m.Children) > 0 { for _, e := range m.Children { l = e.Size() n += 1 + l + sovRecordedSpan(uint64(l)) } } return n } func sovRecordedSpan(x uint64) (n int) { for { n++ x >>= 7 if x == 0 { break } } return n } func sozRecordedSpan(x uint64) (n int) { return sovRecordedSpan(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (m *LogRecord) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: LogRecord: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: LogRecord: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthRecordedSpan } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthRecordedSpan } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Fields = append(m.Fields, LogRecord_Field{}) if err := m.Fields[len(m.Fields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRecordedSpan(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthRecordedSpan } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *LogRecord_Field) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: Field: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: Field: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong 
wireType = %d for field Key", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthRecordedSpan } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Key = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthRecordedSpan } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Value = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRecordedSpan(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthRecordedSpan } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RecordedSpan) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RecordedSpan: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RecordedSpan: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field TraceID", wireType) } m.TraceID = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.TraceID |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field SpanID", wireType) } m.SpanID = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.SpanID |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ParentSpanID", wireType) } m.ParentSpanID = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.ParentSpanID |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthRecordedSpan } postIndex := iNdEx + intStringLen if postIndex > l { 
return io.ErrUnexpectedEOF } m.Operation = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Baggage", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthRecordedSpan } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Baggage == nil { m.Baggage = make(map[string]string) } var mapkey string var mapvalue string for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) if fieldNum == 1 { var stringLenmapkey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLenmapkey := int(stringLenmapkey) if intStringLenmapkey < 0 { return ErrInvalidLengthRecordedSpan } postStringIndexmapkey := iNdEx + intStringLenmapkey if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey } else if fieldNum == 2 { var stringLenmapvalue uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLenmapvalue |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLenmapvalue := int(stringLenmapvalue) if intStringLenmapvalue < 0 { return ErrInvalidLengthRecordedSpan } postStringIndexmapvalue := iNdEx + intStringLenmapvalue if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) iNdEx = postStringIndexmapvalue } else { iNdEx = entryPreIndex skippy, err := skipRecordedSpan(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthRecordedSpan } if (iNdEx + skippy) > postIndex { return io.ErrUnexpectedEOF } iNdEx += skippy } } m.Baggage[mapkey] = mapvalue iNdEx = postIndex case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthRecordedSpan } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Tags == nil { m.Tags = make(map[string]string) } var mapkey string var mapvalue string for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) if fieldNum == 1 { var stringLenmapkey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 
0x7F) << shift if b < 0x80 { break } } intStringLenmapkey := int(stringLenmapkey) if intStringLenmapkey < 0 { return ErrInvalidLengthRecordedSpan } postStringIndexmapkey := iNdEx + intStringLenmapkey if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey } else if fieldNum == 2 { var stringLenmapvalue uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLenmapvalue |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLenmapvalue := int(stringLenmapvalue) if intStringLenmapvalue < 0 { return ErrInvalidLengthRecordedSpan } postStringIndexmapvalue := iNdEx + intStringLenmapvalue if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) iNdEx = postStringIndexmapvalue } else { iNdEx = entryPreIndex skippy, err := skipRecordedSpan(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthRecordedSpan } if (iNdEx + skippy) > postIndex { return io.ErrUnexpectedEOF } iNdEx += skippy } } m.Tags[mapkey] = mapvalue iNdEx = postIndex case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthRecordedSpan } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.StartTime, dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthRecordedSpan } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.Duration, dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 9: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Logs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthRecordedSpan } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Logs = append(m.Logs, LogRecord{}) if err := m.Logs[len(m.Logs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 10: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthRecordedSpan } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Stats == 
nil { m.Stats = &types.Any{} } if err := m.Stats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRecordedSpan(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthRecordedSpan } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *NormalizedSpan) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: NormalizedSpan: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: NormalizedSpan: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthRecordedSpan } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Operation = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthRecordedSpan } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Tags == nil { m.Tags = make(map[string]string) } var mapkey string var mapvalue string for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) if fieldNum == 1 { var stringLenmapkey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLenmapkey := int(stringLenmapkey) if intStringLenmapkey < 0 { return ErrInvalidLengthRecordedSpan } postStringIndexmapkey := iNdEx + intStringLenmapkey if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey } else if fieldNum == 2 { var stringLenmapvalue uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLenmapvalue |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLenmapvalue := int(stringLenmapvalue) if intStringLenmapvalue < 0 { return ErrInvalidLengthRecordedSpan } postStringIndexmapvalue := iNdEx + intStringLenmapvalue if 
postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) iNdEx = postStringIndexmapvalue } else { iNdEx = entryPreIndex skippy, err := skipRecordedSpan(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthRecordedSpan } if (iNdEx + skippy) > postIndex { return io.ErrUnexpectedEOF } iNdEx += skippy } } m.Tags[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthRecordedSpan } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.StartTime, dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthRecordedSpan } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.Duration, dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Logs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthRecordedSpan } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Logs = append(m.Logs, LogRecord{}) if err := m.Logs[len(m.Logs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Children", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRecordedSpan } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthRecordedSpan } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Children = append(m.Children, NormalizedSpan{}) if err := m.Children[len(m.Children)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRecordedSpan(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthRecordedSpan } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipRecordedSpan(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowRecordedSpan } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := 
int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowRecordedSpan } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 { break } } return iNdEx, nil case 1: iNdEx += 8 return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowRecordedSpan } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } iNdEx += length if length < 0 { return 0, ErrInvalidLengthRecordedSpan } return iNdEx, nil case 3: for { var innerWire uint64 var start int = iNdEx for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowRecordedSpan } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } innerWireType := int(innerWire & 0x7) if innerWireType == 4 { break } next, err := skipRecordedSpan(dAtA[start:]) if err != nil { return 0, err } iNdEx = start + next } return iNdEx, nil case 4: return iNdEx, nil case 5: iNdEx += 4 return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } } panic("unreachable") } var ( ErrInvalidLengthRecordedSpan = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowRecordedSpan = fmt.Errorf("proto: integer overflow") ) func init() { proto.RegisterFile("util/tracing/tracingpb/recorded_span.proto", fileDescriptor_recorded_span_ac7a781f9ecf6725) } var fileDescriptor_recorded_span_ac7a781f9ecf6725 = []byte{ // 626 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0xcd, 0x6a, 0xdb, 0x4c, 0x14, 0xb5, 0x6c, 0xf9, 0x47, 0x37, 0x26, 0x84, 0x21, 0x0b, 0xc5, 0x7c, 0x48, 0x21, 0x1f, 0x94, 0x90, 0x80, 0xdc, 0xa6, 0xd0, 0x06, 0x77, 0x51, 0xea, 0x26, 0x05, 0x43, 0x48, 0x8b, 0x9a, 0x6e, 0xba, 0x31, 0x63, 0x69, 0x32, 0x11, 0x51, 0x34, 0x42, 0x1a, 0x17, 0xdc, 0xa7, 0xc8, 0x32, 0xcb, 0xbe, 0x4d, 0x43, 0x57, 0x59, 0x66, 0xe5, 0xb6, 0xca, 0x8b, 0x94, 0x19, 0x8d, 0x94, 0xbf, 0x85, 0xeb, 0x7a, 0x25, 0xcd, 0xbd, 0xe7, 0x1c, 0xdd, 0x39, 0xf7, 0x08, 0xb6, 0xc6, 0x3c, 0x08, 0xbb, 0x3c, 0xc1, 0x5e, 0x10, 0xd1, 0xe2, 0x19, 0x8f, 0xba, 0x09, 0xf1, 0x58, 0xe2, 0x13, 0x7f, 0x98, 0xc6, 0x38, 0x72, 0xe2, 0x84, 0x71, 0x86, 0xd6, 0x3d, 0xe6, 0x9d, 0x26, 0x0c, 0x7b, 0x27, 0x8e, 0x60, 0x39, 0x0a, 0xed, 0x94, 0xac, 0xce, 0x2a, 0x65, 0x94, 0x49, 0x70, 0x57, 0xbc, 0xe5, 0xbc, 0xce, 0x1a, 0x65, 0x8c, 0x86, 0xa4, 0x2b, 0x4f, 0xa3, 0xf1, 0x71, 0x17, 0x47, 0x13, 0xd5, 0xb2, 0x1f, 0xb6, 0x78, 0x70, 0x46, 0x52, 0x8e, 0xcf, 0x62, 0x05, 0xb0, 0x1e, 0x02, 0xfc, 0x71, 0x82, 0x79, 0xc0, 0xd4, 0x4c, 0x1b, 0xdf, 0x35, 0x30, 0x0e, 0x18, 0x75, 0xe5, 0xb8, 0x68, 0x17, 0x74, 0x21, 0x60, 0x6a, 0xeb, 0xda, 0xe6, 0xd2, 0x4e, 0xc7, 0xc9, 0xc9, 0x4e, 0x41, 0x76, 0x8e, 0x0a, 0xf5, 0x7e, 0xeb, 0x72, 0x6a, 0x57, 0xce, 0x7f, 0xda, 0x9a, 0x2b, 0x19, 0xe8, 0x3d, 0x34, 0x8e, 0x03, 0x12, 0xfa, 0xa9, 0x59, 0x5d, 0xaf, 0x6d, 0x2e, 0xed, 0x3c, 0x73, 0x66, 0x5d, 0xd6, 0x29, 0x3f, 0xeb, 0xbc, 0x13, 0xcc, 0xbe, 0x2e, 0x24, 0x5d, 0x25, 0xd3, 0xe9, 0x42, 0x5d, 0x96, 0xd1, 0x0a, 0xd4, 0x4e, 0xc9, 0x44, 0x8e, 0x64, 0xb8, 0xe2, 0x15, 0xad, 0x42, 0xfd, 0x0b, 0x0e, 0xc7, 0xc4, 0xac, 0xca, 0x5a, 0x7e, 0xd8, 0xf8, 0x51, 0x87, 0xb6, 0xab, 0x5c, 0xff, 0x18, 0xe3, 0x08, 0x3d, 0x81, 0x96, 0xf8, 0x18, 0x19, 0x06, 0xbe, 0x64, 0xeb, 0xfd, 0xa5, 0x6c, 0x6a, 0x37, 0x8f, 0x44, 0x6d, 0xb0, 0xe7, 0x36, 0x65, 0x73, 0xe0, 0xa3, 0xff, 0xa1, 0x29, 
0x96, 0x24, 0x60, 0x55, 0x09, 0x83, 0x6c, 0x6a, 0x37, 0x84, 0xc4, 0x60, 0xcf, 0x6d, 0x88, 0xd6, 0xc0, 0x47, 0x2f, 0x60, 0x39, 0xc6, 0x09, 0x89, 0xf8, 0xb0, 0xc0, 0xd6, 0x24, 0x76, 0x25, 0x9b, 0xda, 0xed, 0x0f, 0xb2, 0xa3, 0x18, 0xed, 0xf8, 0xf6, 0xe4, 0xa3, 0xff, 0xc0, 0x60, 0x31, 0xc9, 0x2d, 0x37, 0x75, 0x39, 0xef, 0x6d, 0x01, 0x7d, 0x82, 0xe6, 0x08, 0x53, 0x8a, 0x29, 0x31, 0xeb, 0xd2, 0xb6, 0x57, 0xb3, 0x6d, 0xbb, 0x7b, 0x47, 0xa7, 0x9f, 0xb3, 0xf7, 0x23, 0x9e, 0x4c, 0xdc, 0x42, 0x0b, 0x1d, 0x80, 0xce, 0x31, 0x4d, 0xcd, 0x86, 0xd4, 0xdc, 0x9d, 0x53, 0xf3, 0x08, 0xd3, 0x34, 0x17, 0x94, 0x2a, 0xe8, 0x2d, 0x40, 0xca, 0x71, 0xc2, 0x87, 0x32, 0x1a, 0xcd, 0x39, 0xa2, 0x61, 0x48, 0x9e, 0xe8, 0xa0, 0xd7, 0xd0, 0x2a, 0x92, 0x67, 0xb6, 0xa4, 0xc4, 0xda, 0x23, 0x89, 0x3d, 0x05, 0xc8, 0x15, 0x2e, 0x84, 0x42, 0x49, 0x42, 0xfb, 0xa0, 0x87, 0x8c, 0xa6, 0xa6, 0x21, 0xef, 0xb4, 0x3d, 0x47, 0xbc, 0x54, 0xb0, 0x24, 0x1d, 0x6d, 0x41, 0x3d, 0xe5, 0x98, 0xa7, 0x26, 0xc8, 0x21, 0x56, 0x1f, 0x0d, 0xf1, 0x26, 0x9a, 0xb8, 0x39, 0xa4, 0xd3, 0x83, 0xf6, 0x5d, 0x7f, 0xff, 0x36, 0x89, 0xbd, 0xea, 0xae, 0xd6, 0x79, 0x09, 0x46, 0xe9, 0xe3, 0x3c, 0xc4, 0x9e, 0x7e, 0xf1, 0xcd, 0xae, 0x6c, 0x5c, 0xd7, 0x60, 0xf9, 0x90, 0x25, 0x67, 0x38, 0x0c, 0xbe, 0xaa, 0x38, 0xdf, 0x4b, 0x92, 0xf6, 0x30, 0x49, 0x87, 0x6a, 0xe5, 0xf9, 0xdf, 0xd7, 0x9b, 0x6d, 0xcf, 0x7d, 0xf5, 0x19, 0x4b, 0xaf, 0x2d, 0xbe, 0x74, 0x7d, 0x91, 0xa5, 0xd7, 0x17, 0x5b, 0xba, 0x0b, 0x2d, 0xef, 0x24, 0x08, 0xfd, 0x84, 0x44, 0xea, 0x9f, 0x78, 0x3a, 0xaf, 0x41, 0x4a, 0xaf, 0xd4, 0xf9, 0xe7, 0x05, 0xf7, 0xb7, 0x2f, 0x7f, 0x5b, 0x95, 0xcb, 0xcc, 0xd2, 0xae, 0x32, 0x4b, 0xbb, 0xce, 0x2c, 0xed, 0x57, 0x66, 0x69, 0xe7, 0x37, 0x56, 0xe5, 0xea, 0xc6, 0xaa, 0x5c, 0xdf, 0x58, 0x95, 0xcf, 0x46, 0x39, 0xc4, 0xa8, 0x21, 0x7d, 0x7a, 0xfe, 0x27, 0x00, 0x00, 0xff, 0xff, 0x96, 0x4b, 0x70, 0x33, 0x67, 0x06, 0x00, 0x00, }
pkg/util/tracing/tracingpb/recorded_span.pb.go
0
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.0015482412418350577, 0.00019114853057544678, 0.00016028240497689694, 0.00017305833171121776, 0.0001401867193635553 ]
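The generated recorded_span.pb.go above hand-rolls base-128 varint encoding in encodeVarintRecordedSpan and sovRecordedSpan. For reference, the same loop can be checked against the standard library's encoding/binary varint helpers; this sketch is a standalone comparison, not part of the generated file:

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeVarint is the same base-128 varint loop the generated
// encodeVarintRecordedSpan uses: low 7 bits per byte, high bit set while
// more bytes follow.
func encodeVarint(dst []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dst[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dst[offset] = uint8(v)
	return offset + 1
}

func main() {
	buf := make([]byte, binary.MaxVarintLen64)
	n := encodeVarint(buf, 0, 300)

	// The standard library produces the identical byte sequence.
	std := make([]byte, binary.MaxVarintLen64)
	m := binary.PutUvarint(std, 300)
	fmt.Println(buf[:n], std[:m]) // [172 2] [172 2]

	v, _ := binary.Uvarint(buf[:n])
	fmt.Println(v) // 300
}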
{ "id": 3, "code_window": [ "\n", "\tdefault:\n", "\t\t// Fall back to writing output columns in column id order.\n", "\t\tcolList = opt.ColSetToList(e.Relational().OutputCols)\n", "\t}\n", "\n", "\tf.formatColumns(e, tp, colList, required.Presentation)\n", "\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tcolList = e.Relational().OutputCols.ToList()\n" ], "file_path": "pkg/sql/opt/memo/expr_format.go", "type": "replace", "edit_start_line_idx": 284 }
// Copyright 2017 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package sql import ( "context" "strings" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/roleoption" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" "github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented" "github.com/cockroachdb/cockroach/pkg/util/log/eventpb" ) type createDatabaseNode struct { n *tree.CreateDatabase } // CreateDatabase creates a database. // Privileges: superuser or CREATEDB func (p *planner) CreateDatabase(ctx context.Context, n *tree.CreateDatabase) (planNode, error) { if err := checkSchemaChangeEnabled( ctx, p.ExecCfg(), "CREATE DATABASE", ); err != nil { return nil, err } if n.Name == "" { return nil, errEmptyDatabaseName } if tmpl := n.Template; tmpl != "" { // See https://www.postgresql.org/docs/current/static/manage-ag-templatedbs.html if !strings.EqualFold(tmpl, "template0") { return nil, unimplemented.NewWithIssuef(10151, "unsupported template: %s", tmpl) } } if enc := n.Encoding; enc != "" { // We only support UTF8 (and aliases for UTF8). if !(strings.EqualFold(enc, "UTF8") || strings.EqualFold(enc, "UTF-8") || strings.EqualFold(enc, "UNICODE")) { return nil, unimplemented.NewWithIssueDetailf(35882, "create.db.encoding", "unsupported encoding: %s", enc) } } if col := n.Collate; col != "" { // We only support C and C.UTF-8. if col != "C" && col != "C.UTF-8" { return nil, unimplemented.NewWithIssueDetailf(16618, "create.db.collation", "unsupported collation: %s", col) } } if ctype := n.CType; ctype != "" { // We only support C and C.UTF-8. if ctype != "C" && ctype != "C.UTF-8" { return nil, unimplemented.NewWithIssueDetailf(35882, "create.db.classification", "unsupported character classification: %s", ctype) } } if n.ConnectionLimit != -1 { return nil, unimplemented.NewWithIssueDetailf( 54241, "create.db.connection_limit", "only connection limit -1 is supported, got: %d", n.ConnectionLimit, ) } hasCreateDB, err := p.HasRoleOption(ctx, roleoption.CREATEDB) if err != nil { return nil, err } if !hasCreateDB { return nil, pgerror.New(pgcode.InsufficientPrivilege, "permission denied to create database") } return &createDatabaseNode{n: n}, nil } func (n *createDatabaseNode) startExec(params runParams) error { telemetry.Inc(sqltelemetry.SchemaChangeCreateCounter("database")) desc, created, err := params.p.createDatabase( params.ctx, n.n, tree.AsStringWithFQNames(n.n, params.Ann())) if err != nil { return err } if created { // Log Create Database event. This is an auditable log event and is // recorded in the same transaction as the table descriptor update. 
if err := params.p.logEvent(params.ctx, desc.GetID(), &eventpb.CreateDatabase{ DatabaseName: n.n.Name.String(), }); err != nil { return err } } return nil } func (*createDatabaseNode) Next(runParams) (bool, error) { return false, nil } func (*createDatabaseNode) Values() tree.Datums { return tree.Datums{} } func (*createDatabaseNode) Close(context.Context) {} // ReadingOwnWrites implements the planNodeReadingOwnWrites Interface. This is // required because we create a type descriptor for multi-region databases, // which must be read during validation. We also call CONFIGURE ZONE which // perms multiple KV operations on descriptors and expects to see its own writes. func (*createDatabaseNode) ReadingOwnWrites() {}
pkg/sql/create_database.go
0
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.00019404137856326997, 0.0001722177112242207, 0.00016101048095151782, 0.00017253568512387574, 0.000008223766599257942 ]
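The create_database.go record above accepts only UTF8 spellings for the ENCODING option, compared case-insensitively with strings.EqualFold. A standalone restatement of that check (the supportedEncoding helper is hypothetical, not the planner's actual function):

package main

import (
	"fmt"
	"strings"
)

// supportedEncoding reports whether enc is one of the UTF8 spellings the
// CREATE DATABASE logic shown above accepts (UTF8, UTF-8, UNICODE), matched
// case-insensitively.
func supportedEncoding(enc string) bool {
	return strings.EqualFold(enc, "UTF8") ||
		strings.EqualFold(enc, "UTF-8") ||
		strings.EqualFold(enc, "UNICODE")
}

func main() {
	for _, enc := range []string{"utf8", "Unicode", "LATIN1"} {
		fmt.Printf("%s supported: %v\n", enc, supportedEncoding(enc))
	}
}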
{ "id": 4, "code_window": [ "\tcase *GroupByExpr, *ScalarGroupByExpr, *DistinctOnExpr, *EnsureDistinctOnExpr,\n", "\t\t*UpsertDistinctOnExpr, *EnsureUpsertDistinctOnExpr:\n", "\t\tprivate := e.Private().(*GroupingPrivate)\n", "\t\tif !f.HasFlags(ExprFmtHideColumns) && !private.GroupingCols.Empty() {\n", "\t\t\tf.formatColList(e, tp, \"grouping columns:\", opt.ColSetToList(private.GroupingCols))\n", "\t\t}\n", "\t\tif !f.HasFlags(ExprFmtHidePhysProps) && !private.Ordering.Any() {\n", "\t\t\ttp.Childf(\"internal-ordering: %s\", private.Ordering)\n", "\t\t}\n", "\t\tif !f.HasFlags(ExprFmtHideMiscProps) && private.ErrorOnDup != \"\" {\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tf.formatColList(e, tp, \"grouping columns:\", private.GroupingCols.ToList())\n" ], "file_path": "pkg/sql/opt/memo/expr_format.go", "type": "replace", "edit_start_line_idx": 296 }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package memo import ( "bytes" "context" "fmt" "sort" "strings" "unicode" "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/opt/props" "github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/treeprinter" "github.com/cockroachdb/errors" ) // ScalarFmtInterceptor is a callback that can be set to a custom formatting // function. If the function returns a non-empty string, the normal formatting // code is bypassed. var ScalarFmtInterceptor func(f *ExprFmtCtx, expr opt.ScalarExpr) string // ExprFmtFlags controls which properties of the expression are shown in // formatted output. type ExprFmtFlags int const ( // ExprFmtShowAll shows all properties of the expression. ExprFmtShowAll ExprFmtFlags = 0 // ExprFmtHideMiscProps does not show outer columns, row cardinality, provided // orderings, side effects, or error text in the output. ExprFmtHideMiscProps ExprFmtFlags = 1 << (iota - 1) // ExprFmtHideConstraints does not show inferred constraints in the output. ExprFmtHideConstraints // ExprFmtHideFuncDeps does not show functional dependencies in the output. ExprFmtHideFuncDeps // ExprFmtHideRuleProps does not show rule-specific properties in the output. ExprFmtHideRuleProps // ExprFmtHideStats does not show statistics in the output. ExprFmtHideStats // ExprFmtHideCost does not show expression cost in the output. ExprFmtHideCost // ExprFmtHideQualifications removes the qualification from column labels // (except when a shortened name would be ambiguous). ExprFmtHideQualifications // ExprFmtHideScalars removes subtrees that contain only scalars and replaces // them with the SQL expression (if possible). ExprFmtHideScalars // ExprFmtHidePhysProps hides all required physical properties, except for // Presentation (see ExprFmtHideColumns). ExprFmtHidePhysProps // ExprFmtHideTypes hides type information from columns and scalar // expressions. ExprFmtHideTypes // ExprFmtHideNotNull hides the !null specifier from columns. ExprFmtHideNotNull // ExprFmtHideColumns removes column information. ExprFmtHideColumns // ExprFmtHideAll shows only the basic structure of the expression. // Note: this flag should be used judiciously, as its meaning changes whenever // we add more flags. ExprFmtHideAll ExprFmtFlags = (1 << iota) - 1 ) // HasFlags tests whether the given flags are all set. func (f ExprFmtFlags) HasFlags(subset ExprFmtFlags) bool { return f&subset == subset } // FormatExpr returns a string representation of the given expression, formatted // according to the specified flags. func FormatExpr(e opt.Expr, flags ExprFmtFlags, mem *Memo, catalog cat.Catalog) string { if catalog == nil { // Automatically hide qualifications if we have no catalog. flags |= ExprFmtHideQualifications } f := MakeExprFmtCtx(flags, mem, catalog) f.FormatExpr(e) return f.Buffer.String() } // ExprFmtCtx is passed as context to expression formatting functions, which // need to know the formatting flags and memo in order to format. 
In addition, // a reusable bytes buffer avoids unnecessary allocations. type ExprFmtCtx struct { Buffer *bytes.Buffer // Flags controls how the expression is formatted. Flags ExprFmtFlags // Memo must contain any expression that is formatted. Memo *Memo // Catalog must be set unless the ExprFmtHideQualifications flag is set. Catalog cat.Catalog // nameGen is used to generate a unique name for each relational // subexpression when Memo.saveTablesPrefix is non-empty. These names // correspond to the tables that would be saved if the query were run // with the session variable `save_tables_prefix` set to the same value. nameGen *ExprNameGenerator } // MakeExprFmtCtx creates an expression formatting context from a new buffer. func MakeExprFmtCtx(flags ExprFmtFlags, mem *Memo, catalog cat.Catalog) ExprFmtCtx { return MakeExprFmtCtxBuffer(&bytes.Buffer{}, flags, mem, catalog) } // MakeExprFmtCtxBuffer creates an expression formatting context from an // existing buffer. func MakeExprFmtCtxBuffer( buf *bytes.Buffer, flags ExprFmtFlags, mem *Memo, catalog cat.Catalog, ) ExprFmtCtx { var nameGen *ExprNameGenerator if mem != nil && mem.saveTablesPrefix != "" { nameGen = NewExprNameGenerator(mem.saveTablesPrefix) } return ExprFmtCtx{Buffer: buf, Flags: flags, Memo: mem, Catalog: catalog, nameGen: nameGen} } // HasFlags tests whether the given flags are all set. func (f *ExprFmtCtx) HasFlags(subset ExprFmtFlags) bool { return f.Flags.HasFlags(subset) } // FormatExpr constructs a treeprinter view of the given expression for testing // and debugging, according to the flags in this context. func (f *ExprFmtCtx) FormatExpr(e opt.Expr) { tp := treeprinter.New() f.formatExpr(e, tp) f.Buffer.Reset() f.Buffer.WriteString(tp.String()) } func (f *ExprFmtCtx) space() { f.Buffer.WriteByte(' ') } func (f *ExprFmtCtx) formatExpr(e opt.Expr, tp treeprinter.Node) { scalar, ok := e.(opt.ScalarExpr) if ok { f.formatScalar(scalar, tp) } else { f.formatRelational(e.(RelExpr), tp) } } func (f *ExprFmtCtx) formatRelational(e RelExpr, tp treeprinter.Node) { md := f.Memo.Metadata() relational := e.Relational() required := e.RequiredPhysical() if required == nil { // required can be nil before optimization has taken place. required = physical.MinRequired } // Special cases for merge-join and lookup-join: we want the type of the join // to show up first. 
f.Buffer.Reset() switch t := e.(type) { case *MergeJoinExpr: fmt.Fprintf(f.Buffer, "%v (merge)", t.JoinType) case *LookupJoinExpr: fmt.Fprintf(f.Buffer, "%v (lookup", t.JoinType) FormatPrivate(f, e.Private(), required) f.Buffer.WriteByte(')') case *InvertedJoinExpr: fmt.Fprintf(f.Buffer, "%v (inverted", t.JoinType) FormatPrivate(f, e.Private(), required) f.Buffer.WriteByte(')') case *ZigzagJoinExpr: fmt.Fprintf(f.Buffer, "%v (zigzag", opt.InnerJoinOp) FormatPrivate(f, e.Private(), required) f.Buffer.WriteByte(')') case *ScanExpr, *IndexJoinExpr, *ShowTraceForSessionExpr, *InsertExpr, *UpdateExpr, *UpsertExpr, *DeleteExpr, *SequenceSelectExpr, *WindowExpr, *OpaqueRelExpr, *OpaqueMutationExpr, *OpaqueDDLExpr, *AlterTableSplitExpr, *AlterTableUnsplitExpr, *AlterTableUnsplitAllExpr, *AlterTableRelocateExpr, *ControlJobsExpr, *CancelQueriesExpr, *CancelSessionsExpr, *CreateViewExpr, *ExportExpr: fmt.Fprintf(f.Buffer, "%v", e.Op()) FormatPrivate(f, e.Private(), required) case *SortExpr: if t.InputOrdering.Any() { fmt.Fprintf(f.Buffer, "%v", e.Op()) } else { fmt.Fprintf(f.Buffer, "%v (segmented)", e.Op()) } case *WithExpr: fmt.Fprintf(f.Buffer, "%v &%d", e.Op(), t.ID) if t.Name != "" { fmt.Fprintf(f.Buffer, " (%s)", t.Name) } case *WithScanExpr: fmt.Fprintf(f.Buffer, "%v &%d", e.Op(), t.With) if t.Name != "" { fmt.Fprintf(f.Buffer, " (%s)", t.Name) } default: fmt.Fprintf(f.Buffer, "%v", e.Op()) if opt.IsJoinNonApplyOp(t) { // All join ops that weren't handled above execute as a hash join. if leftEqCols, _ := ExtractJoinEqualityColumns( e.Child(0).(RelExpr).Relational().OutputCols, e.Child(1).(RelExpr).Relational().OutputCols, *e.Child(2).(*FiltersExpr), ); len(leftEqCols) == 0 { // The case where there are no equality columns is executed as a // degenerate case of hash join; let's be explicit about that. f.Buffer.WriteString(" (cross)") } else { f.Buffer.WriteString(" (hash)") } } } tp = tp.Child(f.Buffer.String()) if f.nameGen != nil { name := f.nameGen.GenerateName(e.Op()) tp.Childf("save-table-name: %s", name) } var colList opt.ColList // Special handling to improve the columns display for certain ops. switch t := e.(type) { case *ProjectExpr: // We want the synthesized column IDs to map 1-to-1 to the projections, // and the pass-through columns at the end. // Get the list of columns from the ProjectionsOp, which has the natural // order. for i := range t.Projections { colList = append(colList, t.Projections[i].Col) } // Add pass-through columns. t.Passthrough.ForEach(func(i opt.ColumnID) { colList = append(colList, i) }) case *ValuesExpr: colList = t.Cols case *UnionExpr, *IntersectExpr, *ExceptExpr, *UnionAllExpr, *IntersectAllExpr, *ExceptAllExpr: colList = e.Private().(*SetPrivate).OutCols default: // Fall back to writing output columns in column id order. colList = opt.ColSetToList(e.Relational().OutputCols) } f.formatColumns(e, tp, colList, required.Presentation) switch t := e.(type) { // Special-case handling for GroupBy private; print grouping columns // and internal ordering in addition to full set of columns. 
case *GroupByExpr, *ScalarGroupByExpr, *DistinctOnExpr, *EnsureDistinctOnExpr, *UpsertDistinctOnExpr, *EnsureUpsertDistinctOnExpr: private := e.Private().(*GroupingPrivate) if !f.HasFlags(ExprFmtHideColumns) && !private.GroupingCols.Empty() { f.formatColList(e, tp, "grouping columns:", opt.ColSetToList(private.GroupingCols)) } if !f.HasFlags(ExprFmtHidePhysProps) && !private.Ordering.Any() { tp.Childf("internal-ordering: %s", private.Ordering) } if !f.HasFlags(ExprFmtHideMiscProps) && private.ErrorOnDup != "" { tp.Childf("error: \"%s\"", private.ErrorOnDup) } case *LimitExpr: if !f.HasFlags(ExprFmtHidePhysProps) && !t.Ordering.Any() { tp.Childf("internal-ordering: %s", t.Ordering) } case *OffsetExpr: if !f.HasFlags(ExprFmtHidePhysProps) && !t.Ordering.Any() { tp.Childf("internal-ordering: %s", t.Ordering) } case *Max1RowExpr: if !f.HasFlags(ExprFmtHideMiscProps) { tp.Childf("error: \"%s\"", t.ErrorText) } // Special-case handling for set operators to show the left and right // input columns that correspond to the output columns. case *UnionExpr, *IntersectExpr, *ExceptExpr, *UnionAllExpr, *IntersectAllExpr, *ExceptAllExpr: if !f.HasFlags(ExprFmtHideColumns) { private := e.Private().(*SetPrivate) f.formatColList(e, tp, "left columns:", private.LeftCols) f.formatColList(e, tp, "right columns:", private.RightCols) } case *ScanExpr: if t.IsCanonical() { // For the canonical scan, show the expressions attached to the TableMeta. tab := md.TableMeta(t.Table) if tab.Constraints != nil { c := tp.Childf("check constraint expressions") for i := 0; i < tab.Constraints.ChildCount(); i++ { f.formatExpr(tab.Constraints.Child(i), c) } } if len(tab.ComputedCols) > 0 { c := tp.Childf("computed column expressions") cols := make(opt.ColList, 0, len(tab.ComputedCols)) for col := range tab.ComputedCols { cols = append(cols, col) } sort.Slice(cols, func(i, j int) bool { return cols[i] < cols[j] }) for _, col := range cols { f.Buffer.Reset() f.formatExpr(tab.ComputedCols[col], c.Child(f.ColumnString(col))) } } if tab.PartialIndexPredicates != nil { c := tp.Child("partial index predicates") indexOrds := make([]cat.IndexOrdinal, 0, len(tab.PartialIndexPredicates)) for ord := range tab.PartialIndexPredicates { indexOrds = append(indexOrds, ord) } sort.Ints(indexOrds) for _, ord := range indexOrds { name := string(tab.Table.Index(ord).Name()) f.Buffer.Reset() f.formatScalarWithLabel(name, tab.PartialIndexPredicates[ord], c) } } } if c := t.Constraint; c != nil { if c.IsContradiction() { tp.Childf("constraint: contradiction") } else if c.Spans.Count() == 1 { tp.Childf("constraint: %s: %s", c.Columns.String(), c.Spans.Get(0).String()) } else { n := tp.Childf("constraint: %s", c.Columns.String()) for i := 0; i < c.Spans.Count(); i++ { n.Child(c.Spans.Get(i).String()) } } } if ic := t.InvertedConstraint; ic != nil { idx := md.Table(t.Table).Index(t.Index) var b strings.Builder for i := idx.NonInvertedPrefixColumnCount(); i < idx.KeyColumnCount(); i++ { b.WriteRune('/') b.WriteString(fmt.Sprintf("%d", t.Table.ColumnID(idx.Column(i).Ordinal()))) } n := tp.Childf("inverted constraint: %s", b.String()) ic.Format(n, "spans") } if t.HardLimit.IsSet() { tp.Childf("limit: %s", t.HardLimit) } if !t.Flags.Empty() { if t.Flags.NoIndexJoin { tp.Childf("flags: no-index-join") } else if t.Flags.ForceIndex { idx := md.Table(t.Table).Index(t.Flags.Index) dir := "" switch t.Flags.Direction { case tree.DefaultDirection: case tree.Ascending: dir = ",fwd" case tree.Descending: dir = ",rev" } tp.Childf("flags: force-index=%s%s", idx.Name(), 
dir) } } if t.Locking != nil { strength := "" switch t.Locking.Strength { case tree.ForNone: case tree.ForKeyShare: strength = "for-key-share" case tree.ForShare: strength = "for-share" case tree.ForNoKeyUpdate: strength = "for-no-key-update" case tree.ForUpdate: strength = "for-update" default: panic(errors.AssertionFailedf("unexpected strength")) } wait := "" switch t.Locking.WaitPolicy { case tree.LockWaitBlock: case tree.LockWaitSkip: wait = ",skip-locked" case tree.LockWaitError: wait = ",nowait" default: panic(errors.AssertionFailedf("unexpected wait policy")) } tp.Childf("locking: %s%s", strength, wait) } case *InvertedFilterExpr: var b strings.Builder b.WriteRune('/') b.WriteString(fmt.Sprintf("%d", t.InvertedColumn)) n := tp.Childf("inverted expression: %s", b.String()) t.InvertedExpression.Format(n, false /* includeSpansToRead */) if t.PreFiltererState != nil { n := tp.Childf("pre-filterer expression") f.formatExpr(t.PreFiltererState.Expr, n) } case *LookupJoinExpr: if !t.Flags.Empty() { tp.Childf("flags: %s", t.Flags.String()) } if !f.HasFlags(ExprFmtHideColumns) { idxCols := make(opt.ColList, len(t.KeyCols)) idx := md.Table(t.Table).Index(t.Index) for i := range idxCols { idxCols[i] = t.Table.ColumnID(idx.Column(i).Ordinal()) } tp.Childf("key columns: %v = %v", t.KeyCols, idxCols) } if t.LookupColsAreTableKey { tp.Childf("lookup columns are key") } case *InvertedJoinExpr: if !t.Flags.Empty() { tp.Childf("flags: %s", t.Flags.String()) } if !f.HasFlags(ExprFmtHideColumns) && len(t.PrefixKeyCols) > 0 { idxCols := make(opt.ColList, len(t.PrefixKeyCols)) idx := md.Table(t.Table).Index(t.Index) for i := range idxCols { idxCols[i] = t.Table.ColumnID(idx.Column(i).Ordinal()) } tp.Childf("prefix key columns: %v = %v", t.PrefixKeyCols, idxCols) } n := tp.Child("inverted-expr") f.formatExpr(t.InvertedExpr, n) case *ZigzagJoinExpr: if !f.HasFlags(ExprFmtHideColumns) { tp.Childf("eq columns: %v = %v", t.LeftEqCols, t.RightEqCols) leftVals := make([]tree.Datum, len(t.LeftFixedCols)) rightVals := make([]tree.Datum, len(t.RightFixedCols)) // FixedVals is always going to be a ScalarListExpr, containing tuples, // containing one ScalarListExpr, containing ConstExprs. 
for i := range t.LeftFixedCols { leftVals[i] = ExtractConstDatum(t.FixedVals[0].Child(0).Child(i)) } for i := range t.RightFixedCols { rightVals[i] = ExtractConstDatum(t.FixedVals[1].Child(0).Child(i)) } tp.Childf("left fixed columns: %v = %v", t.LeftFixedCols, leftVals) tp.Childf("right fixed columns: %v = %v", t.RightFixedCols, rightVals) } case *MergeJoinExpr: if !t.Flags.Empty() { tp.Childf("flags: %s", t.Flags.String()) } if !f.HasFlags(ExprFmtHidePhysProps) { tp.Childf("left ordering: %s", t.LeftEq) tp.Childf("right ordering: %s", t.RightEq) } case *InsertExpr: if !f.HasFlags(ExprFmtHideColumns) { if len(colList) == 0 { tp.Child("columns: <none>") } f.formatArbiters(tp, t.Arbiters, t.Table) f.formatMutationCols(e, tp, "insert-mapping:", t.InsertCols, t.Table) f.formatColList(e, tp, "check columns:", t.CheckCols) f.formatColList(e, tp, "partial index put columns:", t.PartialIndexPutCols) f.formatMutationCommon(tp, &t.MutationPrivate) } case *UpdateExpr: if !f.HasFlags(ExprFmtHideColumns) { if len(colList) == 0 { tp.Child("columns: <none>") } f.formatColList(e, tp, "fetch columns:", t.FetchCols) f.formatMutationCols(e, tp, "update-mapping:", t.UpdateCols, t.Table) f.formatColList(e, tp, "check columns:", t.CheckCols) f.formatColList(e, tp, "partial index put columns:", t.PartialIndexPutCols) f.formatColList(e, tp, "partial index del columns:", t.PartialIndexDelCols) f.formatMutationCommon(tp, &t.MutationPrivate) } case *UpsertExpr: if !f.HasFlags(ExprFmtHideColumns) { if len(colList) == 0 { tp.Child("columns: <none>") } if t.CanaryCol != 0 { f.formatArbiters(tp, t.Arbiters, t.Table) f.formatColList(e, tp, "canary column:", opt.ColList{t.CanaryCol}) f.formatColList(e, tp, "fetch columns:", t.FetchCols) f.formatMutationCols(e, tp, "insert-mapping:", t.InsertCols, t.Table) f.formatMutationCols(e, tp, "update-mapping:", t.UpdateCols, t.Table) f.formatMutationCols(e, tp, "return-mapping:", t.ReturnCols, t.Table) } else { f.formatMutationCols(e, tp, "upsert-mapping:", t.InsertCols, t.Table) } f.formatColList(e, tp, "check columns:", t.CheckCols) f.formatColList(e, tp, "partial index put columns:", t.PartialIndexPutCols) f.formatColList(e, tp, "partial index del columns:", t.PartialIndexDelCols) f.formatMutationCommon(tp, &t.MutationPrivate) } case *DeleteExpr: if !f.HasFlags(ExprFmtHideColumns) { if len(colList) == 0 { tp.Child("columns: <none>") } f.formatColList(e, tp, "fetch columns:", t.FetchCols) f.formatColList(e, tp, "partial index del columns:", t.PartialIndexDelCols) f.formatMutationCommon(tp, &t.MutationPrivate) } case *WithExpr: if t.Mtr.Set { if t.Mtr.Materialize { tp.Child("materialized") } else { tp.Child("not-materialized") } } case *WithScanExpr: if !f.HasFlags(ExprFmtHideColumns) { child := tp.Child("mapping:") for i := range t.InCols { f.Buffer.Reset() f.space() f.formatCol("" /* label */, t.InCols[i], opt.ColSet{} /* notNullCols */) f.Buffer.WriteString(" => ") f.formatCol("" /* label */, t.OutCols[i], opt.ColSet{} /* notNullCols */) child.Child(f.Buffer.String()) } } case *CreateTableExpr: tp.Child(t.Syntax.String()) case *CreateViewExpr: tp.Child(t.ViewQuery) f.Buffer.Reset() f.Buffer.WriteString("columns:") for _, col := range t.Columns { f.space() f.formatCol(col.Alias, col.ID, opt.ColSet{} /* notNullCols */) } tp.Child(f.Buffer.String()) n := tp.Child("dependencies") for _, dep := range t.Deps { f.Buffer.Reset() name := dep.DataSource.Name() f.Buffer.WriteString(name.String()) if dep.SpecificIndex { fmt.Fprintf(f.Buffer, "@%s", 
dep.DataSource.(cat.Table).Index(dep.Index).Name()) } colNames, isTable := dep.GetColumnNames() if len(colNames) > 0 { fmt.Fprintf(f.Buffer, " [columns:") for _, colName := range colNames { fmt.Fprintf(f.Buffer, " %s", colName) } fmt.Fprintf(f.Buffer, "]") } else if isTable { fmt.Fprintf(f.Buffer, " [no columns]") } n.Child(f.Buffer.String()) } case *CreateStatisticsExpr: tp.Child(t.Syntax.String()) case *ExportExpr: tp.Childf("format: %s", t.FileFormat) case *ExplainExpr: // ExplainPlan is the default, don't show it. m := "" if t.Options.Mode != tree.ExplainPlan { m = strings.ToLower(t.Options.Mode.String()) } if t.Options.Flags[tree.ExplainFlagVerbose] { if m != "" { m += ", " } m += "verbose" } if m != "" { tp.Childf("mode: %s", m) } case *RecursiveCTEExpr: if !f.HasFlags(ExprFmtHideColumns) { tp.Childf("working table binding: &%d", t.WithID) f.formatColList(e, tp, "initial columns:", t.InitialCols) f.formatColList(e, tp, "recursive columns:", t.RecursiveCols) } default: if opt.IsJoinOp(t) { p := t.Private().(*JoinPrivate) if !p.Flags.Empty() { tp.Childf("flags: %s", p.Flags.String()) } } } if !f.HasFlags(ExprFmtHideMiscProps) { if !relational.OuterCols.Empty() { tp.Childf("outer: %s", relational.OuterCols.String()) } if relational.Cardinality != props.AnyCardinality { // Suppress cardinality for Scan ops if it's redundant with Limit field. if scan, ok := e.(*ScanExpr); !ok || !scan.HardLimit.IsSet() { tp.Childf("cardinality: %s", relational.Cardinality) } } if join, ok := e.(joinWithMultiplicity); ok { mult := join.getMultiplicity() if s := mult.Format(e.Op()); s != "" { tp.Childf("multiplicity: %s", s) } } f.Buffer.Reset() writeFlag := func(name string) { if f.Buffer.Len() != 0 { f.Buffer.WriteString(", ") } f.Buffer.WriteString(name) } if !relational.VolatilitySet.IsLeakProof() { writeFlag(relational.VolatilitySet.String()) } if relational.CanMutate { writeFlag("mutations") } if relational.HasPlaceholder { writeFlag("has-placeholder") } if f.Buffer.Len() != 0 { tp.Child(f.Buffer.String()) } } if !f.HasFlags(ExprFmtHideStats) { tp.Childf("stats: %s", &relational.Stats) } if !f.HasFlags(ExprFmtHideCost) { cost := e.Cost() if cost != 0 { tp.Childf("cost: %.9g", cost) } } // Format functional dependencies. if !f.HasFlags(ExprFmtHideFuncDeps) { // Show the key separately from the rest of the FDs. if key, ok := relational.FuncDeps.StrictKey(); ok { tp.Childf("key: %s", key) } else if key, ok := relational.FuncDeps.LaxKey(); ok { tp.Childf("lax-key: %s", key) } if fdStr := relational.FuncDeps.StringOnlyFDs(); fdStr != "" { tp.Childf("fd: %s", fdStr) } } if !f.HasFlags(ExprFmtHidePhysProps) { if !required.Ordering.Any() { if f.HasFlags(ExprFmtHideMiscProps) { tp.Childf("ordering: %s", required.Ordering.String()) } else { // Show the provided ordering as well, unless it's exactly the same. 
provided := e.ProvidedPhysical().Ordering reqStr := required.Ordering.String() provStr := provided.String() if provStr == reqStr { tp.Childf("ordering: %s", required.Ordering.String()) } else { tp.Childf("ordering: %s [actual: %s]", required.Ordering.String(), provided.String()) } } } if required.LimitHint != 0 { tp.Childf("limit hint: %.2f", required.LimitHint) } } if !f.HasFlags(ExprFmtHideRuleProps) { r := &relational.Rule if !r.PruneCols.Empty() { tp.Childf("prune: %s", r.PruneCols.String()) } if !r.RejectNullCols.Empty() { tp.Childf("reject-nulls: %s", r.RejectNullCols.String()) } if len(r.InterestingOrderings) > 0 { tp.Childf("interesting orderings: %s", r.InterestingOrderings.String()) } if !r.UnfilteredCols.Empty() { tp.Childf("unfiltered-cols: %s", r.UnfilteredCols.String()) } if withUses := relational.Shared.Rule.WithUses; len(withUses) > 0 { n := tp.Childf("cte-uses") ids := make([]opt.WithID, 0, len(withUses)) for id := range withUses { ids = append(ids, id) } sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] }) for _, id := range ids { info := withUses[id] n.Childf("&%d: count=%d used-columns=%s", id, info.Count, info.UsedCols) } } } switch t := e.(type) { case *CreateTableExpr: // Do not print dummy input expression if there was no AS clause. if !t.Syntax.As() { return } } for i, n := 0, e.ChildCount(); i < n; i++ { f.formatExpr(e.Child(i), tp) } } func (f *ExprFmtCtx) formatScalar(scalar opt.ScalarExpr, tp treeprinter.Node) { f.formatScalarWithLabel("", scalar, tp) } func (f *ExprFmtCtx) formatScalarWithLabel( label string, scalar opt.ScalarExpr, tp treeprinter.Node, ) { f.Buffer.Reset() if label != "" { f.Buffer.WriteString(label) f.Buffer.WriteString(": ") } switch scalar.Op() { case opt.ProjectionsOp, opt.AggregationsOp, opt.UniqueChecksOp, opt.FKChecksOp, opt.KVOptionsOp: // Omit empty lists (except filters). if scalar.ChildCount() == 0 { return } case opt.FiltersOp: // Show empty Filters expression as "filters (true)". if scalar.ChildCount() == 0 { f.Buffer.WriteString("filters (true)") tp.Child(f.Buffer.String()) return } case opt.IfErrOp: fmt.Fprintf(f.Buffer, "%v", scalar.Op()) f.FormatScalarProps(scalar) tp = tp.Child(f.Buffer.String()) f.formatExpr(scalar.Child(0), tp) if scalar.Child(1).ChildCount() > 0 { f.formatExpr(scalar.Child(1), tp.Child("else")) } if scalar.Child(2).ChildCount() > 0 { f.formatExpr(scalar.Child(2), tp.Child("err-code")) } return case opt.AggFilterOp: fmt.Fprintf(f.Buffer, "%v", scalar.Op()) f.FormatScalarProps(scalar) tp = tp.Child(f.Buffer.String()) f.formatExpr(scalar.Child(0), tp) f.formatExpr(scalar.Child(1), tp.Child("filter")) return case opt.ScalarListOp: // Don't show scalar-list as a separate node, as it's redundant with its // parent. for i, n := 0, scalar.ChildCount(); i < n; i++ { f.formatExpr(scalar.Child(i), tp) } return } // Omit various list items from the output, but show some of their properties // along with the properties of their child. 
var scalarProps []string switch scalar.Op() { case opt.FiltersItemOp, opt.ProjectionsItemOp, opt.AggregationsItemOp, opt.ZipItemOp, opt.WindowsItemOp: emitProp := func(format string, args ...interface{}) { scalarProps = append(scalarProps, fmt.Sprintf(format, args...)) } switch item := scalar.(type) { case *ProjectionsItem: if !f.HasFlags(ExprFmtHideColumns) { emitProp("as=%s", f.ColumnString(item.Col)) } case *AggregationsItem: if !f.HasFlags(ExprFmtHideColumns) { emitProp("as=%s", f.ColumnString(item.Col)) } case *ZipItem: // TODO(radu): show the item.Cols case *WindowsItem: if !f.HasFlags(ExprFmtHideColumns) { emitProp("as=%s", f.ColumnString(item.Col)) } // Only show the frame if it differs from the default. def := WindowFrame{ Mode: tree.RANGE, StartBoundType: tree.UnboundedPreceding, EndBoundType: tree.CurrentRow, FrameExclusion: tree.NoExclusion, } if item.Frame != def { emitProp("frame=%q", item.Frame.String()) } } scalarProps = append(scalarProps, f.scalarPropsStrings(scalar)...) scalar = scalar.Child(0).(opt.ScalarExpr) default: scalarProps = f.scalarPropsStrings(scalar) } var intercepted bool if f.HasFlags(ExprFmtHideScalars) && ScalarFmtInterceptor != nil { if str := ScalarFmtInterceptor(f, scalar); str != "" { f.Buffer.WriteString(str) intercepted = true } } if !intercepted { fmt.Fprintf(f.Buffer, "%v", scalar.Op()) f.formatScalarPrivate(scalar) } if len(scalarProps) != 0 { f.Buffer.WriteString(" [") f.Buffer.WriteString(strings.Join(scalarProps, ", ")) f.Buffer.WriteByte(']') } tp = tp.Child(f.Buffer.String()) if !intercepted { for i, n := 0, scalar.ChildCount(); i < n; i++ { f.formatExpr(scalar.Child(i), tp) } } } // scalarPropsStrings returns a slice of strings, each describing a property; // for example: // {"type=bool", "outer=(1)", "constraints=(/1: [/1 - /1]; tight)"} func (f *ExprFmtCtx) scalarPropsStrings(scalar opt.ScalarExpr) []string { typ := scalar.DataType() if typ == nil { if scalar.Op() == opt.UniqueChecksItemOp || scalar.Op() == opt.FKChecksItemOp || scalar.Op() == opt.KVOptionsItemOp { // These are not true scalars and have no properties. return nil } // Don't panic if scalar properties don't yet exist when printing // expression. 
return []string{"type=undefined"} } var res []string emitProp := func(format string, args ...interface{}) { res = append(res, fmt.Sprintf(format, args...)) } if !f.HasFlags(ExprFmtHideTypes) && typ.Family() != types.AnyFamily { emitProp("type=%s", typ) } if propsExpr, ok := scalar.(ScalarPropsExpr); ok { scalarProps := propsExpr.ScalarProps() if !f.HasFlags(ExprFmtHideMiscProps) { if !scalarProps.OuterCols.Empty() { emitProp("outer=%s", scalarProps.OuterCols) } if !scalarProps.VolatilitySet.IsLeakProof() { emitProp(scalarProps.VolatilitySet.String()) } if scalarProps.HasCorrelatedSubquery { emitProp("correlated-subquery") } else if scalarProps.HasSubquery { emitProp("subquery") } } if !f.HasFlags(ExprFmtHideConstraints) { if scalarProps.Constraints != nil && !scalarProps.Constraints.IsUnconstrained() { var tight string if scalarProps.TightConstraints { tight = "; tight" } emitProp("constraints=(%s%s)", scalarProps.Constraints, tight) } } if !f.HasFlags(ExprFmtHideFuncDeps) && !scalarProps.FuncDeps.Empty() { emitProp("fd=%s", scalarProps.FuncDeps) } } return res } // FormatScalarProps writes out a string representation of the scalar // properties (with a preceding space); for example: // " [type=bool, outer=(1), constraints=(/1: [/1 - /1]; tight)]" func (f *ExprFmtCtx) FormatScalarProps(scalar opt.ScalarExpr) { props := f.scalarPropsStrings(scalar) if len(props) != 0 { f.Buffer.WriteString(" [") f.Buffer.WriteString(strings.Join(props, ", ")) f.Buffer.WriteByte(']') } } func (f *ExprFmtCtx) formatScalarPrivate(scalar opt.ScalarExpr) { var private interface{} switch t := scalar.(type) { case *NullExpr, *TupleExpr, *CollateExpr: // Private is redundant with logical type property. private = nil case *AnyExpr: // We don't want to show the OriginalExpr; just show Cmp. private = t.Cmp case *ArrayFlattenExpr: if t.Input.Relational().OutputCols.Len() != 1 { fmt.Fprintf(f.Buffer, " col=%v", t.RequestedCol) } case *SubqueryExpr, *ExistsExpr: // We don't want to show the OriginalExpr. private = nil case *CastExpr: private = t.Typ.SQLString() case *KVOptionsItem: fmt.Fprintf(f.Buffer, " %s", t.Key) case *UniqueChecksItem: tab := f.Memo.metadata.TableMeta(t.Table) constraint := tab.Table.Unique(t.CheckOrdinal) fmt.Fprintf(f.Buffer, ": %s(", tab.Alias.ObjectName) for i := 0; i < constraint.ColumnCount(); i++ { if i > 0 { f.Buffer.WriteByte(',') } col := tab.Table.Column(constraint.ColumnOrdinal(tab.Table, i)) f.Buffer.WriteString(string(col.ColName())) } f.Buffer.WriteByte(')') case *FKChecksItem: origin := f.Memo.metadata.TableMeta(t.OriginTable) referenced := f.Memo.metadata.TableMeta(t.ReferencedTable) var fk cat.ForeignKeyConstraint if t.FKOutbound { fk = origin.Table.OutboundForeignKey(t.FKOrdinal) } else { fk = referenced.Table.InboundForeignKey(t.FKOrdinal) } // Print the FK as: // child(a,b) -> parent(a,b) // // TODO(radu): maybe flip these if we are deleting from the parent (i.e. // FKOutbound=false)? 
fmt.Fprintf(f.Buffer, ": %s(", origin.Alias.ObjectName) for i := 0; i < fk.ColumnCount(); i++ { if i > 0 { f.Buffer.WriteByte(',') } col := origin.Table.Column(fk.OriginColumnOrdinal(origin.Table, i)) f.Buffer.WriteString(string(col.ColName())) } fmt.Fprintf(f.Buffer, ") -> %s(", referenced.Alias.ObjectName) for i := 0; i < fk.ColumnCount(); i++ { if i > 0 { f.Buffer.WriteByte(',') } col := referenced.Table.Column(fk.ReferencedColumnOrdinal(referenced.Table, i)) f.Buffer.WriteString(string(col.ColName())) } f.Buffer.WriteByte(')') default: private = scalar.Private() } if private != nil { f.Buffer.WriteRune(':') FormatPrivate(f, private, &physical.Required{}) } } // formatIndex outputs the specified index into the context's buffer with the // format: // // table_alias@index_name // // If reverse is true, ",rev" is appended. // // If the index is a partial index, ",partial" is appended. // // If the table is aliased, " [as=alias]" is appended. func (f *ExprFmtCtx) formatIndex(tabID opt.TableID, idxOrd cat.IndexOrdinal, reverse bool) { md := f.Memo.Metadata() tabMeta := md.TableMeta(tabID) index := tabMeta.Table.Index(idxOrd) if idxOrd == cat.PrimaryIndex { // Don't output the index name if it's the primary index. fmt.Fprintf(f.Buffer, " %s", tableName(f, tabID)) } else { fmt.Fprintf(f.Buffer, " %s@%s", tableName(f, tabID), index.Name()) } if reverse { f.Buffer.WriteString(",rev") } if _, isPartial := index.Predicate(); isPartial { f.Buffer.WriteString(",partial") } alias := md.TableMeta(tabID).Alias.Table() if alias != string(tabMeta.Table.Name()) { fmt.Fprintf(f.Buffer, " [as=%s]", alias) } } // formatArbiters constructs a new treeprinter child containing the // specified list of arbiter indexes. func (f *ExprFmtCtx) formatArbiters( tp treeprinter.Node, arbiters cat.IndexOrdinals, tabID opt.TableID, ) { md := f.Memo.Metadata() tab := md.Table(tabID) if len(arbiters) > 0 { f.Buffer.Reset() f.Buffer.WriteString("arbiter indexes:") for _, idx := range arbiters { name := string(tab.Index(idx).Name()) f.space() f.Buffer.WriteString(name) } tp.Child(f.Buffer.String()) } } func (f *ExprFmtCtx) formatColumns( nd RelExpr, tp treeprinter.Node, cols opt.ColList, presentation physical.Presentation, ) { if f.HasFlags(ExprFmtHideColumns) { return } if presentation.Any() { f.formatColList(nd, tp, "columns:", cols) return } // When a particular column presentation is required of the expression, then // print columns using that information. Include information about columns // that are hidden by the presentation separately. hidden := cols.ToSet() notNullCols := nd.Relational().NotNullCols f.Buffer.Reset() f.Buffer.WriteString("columns:") for _, col := range presentation { hidden.Remove(col.ID) f.space() f.formatCol(col.Alias, col.ID, notNullCols) } if !hidden.Empty() { f.Buffer.WriteString(" [hidden:") for _, col := range cols { if hidden.Contains(col) { f.space() f.formatCol("" /* label */, col, notNullCols) } } f.Buffer.WriteString("]") } tp.Child(f.Buffer.String()) } // formatColList constructs a new treeprinter child containing the specified // list of columns formatted using the formatCol method. 
func (f *ExprFmtCtx) formatColList( nd RelExpr, tp treeprinter.Node, heading string, colList opt.ColList, ) { if len(colList) > 0 { notNullCols := nd.Relational().NotNullCols f.Buffer.Reset() f.Buffer.WriteString(heading) for _, col := range colList { if col != 0 { f.space() f.formatCol("" /* label */, col, notNullCols) } } tp.Child(f.Buffer.String()) } } // formatMutationCols adds a new treeprinter child for each non-zero column in the // given list. Each child shows how the column will be mutated, with the id of // the "before" and "after" columns, similar to this: // // a:1 => x:4 // func (f *ExprFmtCtx) formatMutationCols( nd RelExpr, tp treeprinter.Node, heading string, colList opt.ColList, tabID opt.TableID, ) { if len(colList) == 0 { return } tpChild := tp.Child(heading) for i, col := range colList { if col != 0 { tpChild.Child(fmt.Sprintf("%s => %s", f.ColumnString(col), f.ColumnString(tabID.ColumnID(i)))) } } } // formatMutationCommon shows the MutationPrivate fields that format the same // for all types of mutations. func (f *ExprFmtCtx) formatMutationCommon(tp treeprinter.Node, p *MutationPrivate) { if p.WithID != 0 { tp.Childf("input binding: &%d", p.WithID) } if len(p.FKCascades) > 0 { c := tp.Childf("cascades") for i := range p.FKCascades { c.Child(p.FKCascades[i].FKName) } } } // ColumnString returns the column in the same format as formatColSimple. func (f *ExprFmtCtx) ColumnString(id opt.ColumnID) string { var buf bytes.Buffer f.formatColSimpleToBuffer(&buf, "" /* label */, id) return buf.String() } // formatColSimple outputs the specified column into the context's buffer using the // following format: // label:id // // The :id part is omitted if the formatting flags include ExprFmtHideColumns. // // If a label is given, then it is used. Otherwise, a "best effort" label is // used from query metadata. func (f *ExprFmtCtx) formatColSimple(label string, id opt.ColumnID) { f.formatColSimpleToBuffer(f.Buffer, label, id) } func (f *ExprFmtCtx) formatColSimpleToBuffer(buf *bytes.Buffer, label string, id opt.ColumnID) { if label == "" { if f.Memo != nil { md := f.Memo.metadata fullyQualify := !f.HasFlags(ExprFmtHideQualifications) label = md.QualifiedAlias(id, fullyQualify, f.Catalog) } else { label = fmt.Sprintf("unknown%d", id) } } if !isSimpleColumnName(label) { // Add quotations around the column name if it is not composed of simple // ASCII characters. label = "\"" + label + "\"" } buf.WriteString(label) if !f.HasFlags(ExprFmtHideColumns) { buf.WriteByte(':') fmt.Fprintf(buf, "%d", id) } } // formatCol outputs the specified column into the context's buffer using the // following format: // label:id(type) // // If the column is not nullable, then this is the format: // label:id(type!null) // // Some of the components can be omitted depending on formatting flags. // // If a label is given, then it is used. Otherwise, a "best effort" label is // used from query metadata. 
func (f *ExprFmtCtx) formatCol(label string, id opt.ColumnID, notNullCols opt.ColSet) { f.formatColSimple(label, id) parenOpen := false if !f.HasFlags(ExprFmtHideTypes) && f.Memo != nil { f.Buffer.WriteByte('(') parenOpen = true f.Buffer.WriteString(f.Memo.metadata.ColumnMeta(id).Type.String()) } if !f.HasFlags(ExprFmtHideNotNull) && notNullCols.Contains(id) { f.Buffer.WriteString("!null") } if parenOpen { f.Buffer.WriteByte(')') } } // ScanIsReverseFn is a callback that is used to figure out if a scan needs to // happen in reverse (the code lives in the ordering package, and depending on // that directly would be a dependency loop). var ScanIsReverseFn func(md *opt.Metadata, s *ScanPrivate, required *physical.OrderingChoice) bool // FormatPrivate outputs a description of the private to f.Buffer. func FormatPrivate(f *ExprFmtCtx, private interface{}, physProps *physical.Required) { if private == nil { return } switch t := private.(type) { case *opt.ColumnID: f.space() f.formatColSimple("" /* label */, *t) case *opt.ColList: for _, col := range *t { f.space() f.formatColSimple("" /* label */, col) } case *TupleOrdinal: fmt.Fprintf(f.Buffer, " %d", *t) case *ScanPrivate: f.formatIndex(t.Table, t.Index, ScanIsReverseFn(f.Memo.Metadata(), t, &physProps.Ordering)) case *SequenceSelectPrivate: seq := f.Memo.metadata.Sequence(t.Sequence) fmt.Fprintf(f.Buffer, " %s", seq.Name()) case *MutationPrivate: f.formatIndex(t.Table, cat.PrimaryIndex, false /* reverse */) case *OrdinalityPrivate: if !t.Ordering.Any() { fmt.Fprintf(f.Buffer, " ordering=%s", t.Ordering) } case *GroupingPrivate: fmt.Fprintf(f.Buffer, " cols=%s", t.GroupingCols.String()) if !t.Ordering.Any() { fmt.Fprintf(f.Buffer, ",ordering=%s", t.Ordering) } case *IndexJoinPrivate: tab := f.Memo.metadata.Table(t.Table) fmt.Fprintf(f.Buffer, " %s", tab.Name()) case *InvertedFilterPrivate: col := f.Memo.metadata.ColumnMeta(t.InvertedColumn) fmt.Fprintf(f.Buffer, " %s", col.Alias) case *LookupJoinPrivate: f.formatIndex(t.Table, t.Index, false /* reverse */) case *InvertedJoinPrivate: f.formatIndex(t.Table, t.Index, false /* reverse */) case *ValuesPrivate: fmt.Fprintf(f.Buffer, " id=v%d", t.ID) case *ZigzagJoinPrivate: f.formatIndex(t.LeftTable, t.LeftIndex, false /* reverse */) f.formatIndex(t.RightTable, t.RightIndex, false /* reverse */) case *MergeJoinPrivate: fmt.Fprintf(f.Buffer, " %s,%s,%s", t.JoinType, t.LeftEq, t.RightEq) case *FunctionPrivate: fmt.Fprintf(f.Buffer, " %s", t.Name) case *WindowsItemPrivate: fmt.Fprintf(f.Buffer, " frame=%q", &t.Frame) case *WindowPrivate: fmt.Fprintf(f.Buffer, " partition=%s", t.Partition) if !t.Ordering.Any() { fmt.Fprintf(f.Buffer, " ordering=%s", t.Ordering) } case *physical.OrderingChoice: if !t.Any() { fmt.Fprintf(f.Buffer, " ordering=%s", t) } case *OpaqueRelPrivate: f.space() f.Buffer.WriteString(t.Metadata.String()) case *AlterTableSplitPrivate: f.formatIndex(t.Table, t.Index, false /* reverse */) case *AlterTableRelocatePrivate: FormatPrivate(f, &t.AlterTableSplitPrivate, nil) if t.RelocateLease { f.Buffer.WriteString(" [lease]") } case *ControlJobsPrivate: fmt.Fprintf(f.Buffer, " (%s)", tree.JobCommandToStatement[t.Command]) case *CancelPrivate: if t.IfExists { f.Buffer.WriteString(" [if-exists]") } case *CreateViewPrivate: schema := f.Memo.Metadata().Schema(t.Schema) fmt.Fprintf(f.Buffer, " %s.%s", schema.Name(), t.ViewName) case *JoinPrivate: // Nothing to show; flags are shown separately. 
	case *ExplainPrivate, *opt.ColSet, *SetPrivate, *types.T, *ExportPrivate:
		// Don't show anything, because it's mostly redundant.

	default:
		fmt.Fprintf(f.Buffer, " %v", private)
	}
}

// tableName returns the table name to be used for pretty-printing. If
// ExprFmtHideQualifications is not set, the fully qualified table name is
// returned.
func tableName(f *ExprFmtCtx, tabID opt.TableID) string {
	tabMeta := f.Memo.metadata.TableMeta(tabID)
	if f.HasFlags(ExprFmtHideQualifications) {
		return string(tabMeta.Table.Name())
	}
	tn, err := f.Catalog.FullyQualifiedName(context.TODO(), tabMeta.Table)
	if err != nil {
		panic(err)
	}
	return tn.FQString()
}

// isSimpleColumnName returns true if the given label consists of only ASCII
// letters, numbers, underscores, quotation marks, and periods ("."). It is
// used to determine whether to enclose a column name in quotation marks for
// nicer display.
func isSimpleColumnName(label string) bool {
	for i, r := range label {
		if r > unicode.MaxASCII {
			return false
		}
		if i == 0 {
			if r != '"' && !unicode.IsLetter(r) {
				// The first character must be a letter or quotation mark.
				return false
			}
		} else if r != '.' && r != '_' && r != '"' && !unicode.IsNumber(r) && !unicode.IsLetter(r) {
			return false
		}
	}
	return true
}
pkg/sql/opt/memo/expr_format.go
1
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.9978001713752747, 0.019089961424469948, 0.00016108756244648248, 0.0010328416246920824, 0.11672013252973557 ]
{ "id": 4, "code_window": [ "\tcase *GroupByExpr, *ScalarGroupByExpr, *DistinctOnExpr, *EnsureDistinctOnExpr,\n", "\t\t*UpsertDistinctOnExpr, *EnsureUpsertDistinctOnExpr:\n", "\t\tprivate := e.Private().(*GroupingPrivate)\n", "\t\tif !f.HasFlags(ExprFmtHideColumns) && !private.GroupingCols.Empty() {\n", "\t\t\tf.formatColList(e, tp, \"grouping columns:\", opt.ColSetToList(private.GroupingCols))\n", "\t\t}\n", "\t\tif !f.HasFlags(ExprFmtHidePhysProps) && !private.Ordering.Any() {\n", "\t\t\ttp.Childf(\"internal-ordering: %s\", private.Ordering)\n", "\t\t}\n", "\t\tif !f.HasFlags(ExprFmtHideMiscProps) && private.ErrorOnDup != \"\" {\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tf.formatColList(e, tp, \"grouping columns:\", private.GroupingCols.ToList())\n" ], "file_path": "pkg/sql/opt/memo/expr_format.go", "type": "replace", "edit_start_line_idx": 296 }
// Copyright 2017 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package server_test import ( "context" "fmt" "testing" "time" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" ) const strKey = "testing.str" const intKey = "testing.int" const durationKey = "testing.duration" const byteSizeKey = "testing.bytesize" const enumKey = "testing.enum" var strA = settings.RegisterValidatedStringSetting(strKey, "desc", "<default>", func(sv *settings.Values, v string) error { if len(v) > 15 { return errors.Errorf("can't set %s to string longer than 15: %s", strKey, v) } return nil }) var intA = settings.RegisterIntSetting(intKey, "desc", 1, func(v int64) error { if v < 0 { return errors.Errorf("can't set %s to a negative value: %d", intKey, v) } return nil }) var durationA = settings.RegisterDurationSetting(durationKey, "desc", time.Minute, func(v time.Duration) error { if v < 0 { return errors.Errorf("can't set %s to a negative duration: %s", durationKey, v) } return nil }) var byteSizeA = settings.RegisterByteSizeSetting(byteSizeKey, "desc", 1024*1024, func(v int64) error { if v < 0 { return errors.Errorf("can't set %s to a negative value: %d", byteSizeKey, v) } return nil }) var enumA = settings.RegisterEnumSetting(enumKey, "desc", "foo", map[int64]string{1: "foo", 2: "bar"}) func TestSettingsRefresh(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) // Set up some additional cluster settings to play around with. Note that we // need to do this before starting the server, or there will be data races. st := cluster.MakeTestingClusterSettings() s, rawDB, _ := serverutils.StartServer(t, base.TestServerArgs{Settings: st}) defer s.Stopper().Stop(context.Background()) db := sqlutils.MakeSQLRunner(rawDB) insertQ := `UPSERT INTO system.settings (name, value, "lastUpdated", "valueType") VALUES ($1, $2, now(), $3)` deleteQ := "DELETE FROM system.settings WHERE name = $1" if expected, actual := "<default>", strA.Get(&st.SV); expected != actual { t.Fatalf("expected %v, got %v", expected, actual) } if expected, actual := int64(1), intA.Get(&st.SV); expected != actual { t.Fatalf("expected %v, got %v", expected, actual) } // Inserting a new setting is reflected in cache. db.Exec(t, insertQ, strKey, "foo", "s") db.Exec(t, insertQ, intKey, settings.EncodeInt(2), "i") // Wait until we observe the gossip-driven update propagating to cache. testutils.SucceedsSoon(t, func() error { if expected, actual := "foo", strA.Get(&st.SV); expected != actual { return errors.Errorf("expected %v, got %v", expected, actual) } if expected, actual := int64(2), intA.Get(&st.SV); expected != actual { return errors.Errorf("expected %v, got %v", expected, actual) } return nil }) // Setting to empty also works. 
db.Exec(t, insertQ, strKey, "", "s") testutils.SucceedsSoon(t, func() error { if expected, actual := "", strA.Get(&st.SV); expected != actual { return errors.Errorf("expected %v, got %v", expected, actual) } return nil }) // An unknown value doesn't block updates to a known one. db.Exec(t, insertQ, "dne", "???", "s") db.Exec(t, insertQ, strKey, "qux", "s") testutils.SucceedsSoon(t, func() error { if expected, actual := "qux", strA.Get(&st.SV); expected != actual { return errors.Errorf("expected %v, got %v", expected, actual) } if expected, actual := int64(2), intA.Get(&st.SV); expected != actual { return errors.Errorf("expected %v, got %v", expected, actual) } return nil }) // A malformed value doesn't revert previous set or block other changes. db.Exec(t, deleteQ, "dne") db.Exec(t, insertQ, intKey, "invalid", "i") db.Exec(t, insertQ, strKey, "after-invalid", "s") testutils.SucceedsSoon(t, func() error { if expected, actual := int64(2), intA.Get(&st.SV); expected != actual { return errors.Errorf("expected %v, got %v", expected, actual) } if expected, actual := "after-invalid", strA.Get(&st.SV); expected != actual { return errors.Errorf("expected %v, got %v", expected, actual) } return nil }) // A mis-typed value doesn't revert a previous set or block other changes. db.Exec(t, insertQ, intKey, settings.EncodeInt(7), "b") db.Exec(t, insertQ, strKey, "after-mistype", "s") testutils.SucceedsSoon(t, func() error { if expected, actual := int64(2), intA.Get(&st.SV); expected != actual { return errors.Errorf("expected %v, got %v", expected, actual) } if expected, actual := "after-mistype", strA.Get(&st.SV); expected != actual { return errors.Errorf("expected %v, got %v", expected, actual) } return nil }) // An invalid value doesn't revert a previous set or block other changes. prevStrA := strA.Get(&st.SV) prevIntA := intA.Get(&st.SV) prevDurationA := durationA.Get(&st.SV) prevByteSizeA := byteSizeA.Get(&st.SV) db.Exec(t, insertQ, strKey, "this is too big for this setting", "s") db.Exec(t, insertQ, intKey, settings.EncodeInt(-1), "i") db.Exec(t, insertQ, durationKey, settings.EncodeDuration(-time.Minute), "d") db.Exec(t, insertQ, byteSizeKey, settings.EncodeInt(-1), "z") testutils.SucceedsSoon(t, func() error { if expected, actual := prevStrA, strA.Get(&st.SV); expected != actual { return errors.Errorf("expected %v, got %v", expected, actual) } if expected, actual := prevIntA, intA.Get(&st.SV); expected != actual { return errors.Errorf("expected %v, got %v", expected, actual) } if expected, actual := prevDurationA, durationA.Get(&st.SV); expected != actual { return errors.Errorf("expected %v, got %v", expected, actual) } if expected, actual := prevByteSizeA, byteSizeA.Get(&st.SV); expected != actual { return errors.Errorf("expected %v, got %v", expected, actual) } return nil }) // Deleting a value reverts to default. db.Exec(t, deleteQ, strKey) testutils.SucceedsSoon(t, func() error { if expected, actual := "<default>", strA.Get(&st.SV); expected != actual { return errors.Errorf("expected %v, got %v", expected, actual) } return nil }) } func TestSettingsSetAndShow(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) // Set up some additional cluster settings to play around with. Note that we // need to do this before starting the server, or there will be data races. 
st := cluster.MakeTestingClusterSettings() s, rawDB, _ := serverutils.StartServer(t, base.TestServerArgs{Settings: st}) defer s.Stopper().Stop(context.Background()) db := sqlutils.MakeSQLRunner(rawDB) // TODO(dt): add placeholder support to SET and SHOW. setQ := `SET CLUSTER SETTING "%s" = %s` showQ := `SHOW CLUSTER SETTING "%s"` db.Exec(t, fmt.Sprintf(setQ, strKey, "'via-set'")) if expected, actual := "via-set", db.QueryStr(t, fmt.Sprintf(showQ, strKey))[0][0]; expected != actual { t.Fatalf("expected %v, got %v", expected, actual) } db.Exec(t, fmt.Sprintf(setQ, intKey, "5")) if expected, actual := "5", db.QueryStr(t, fmt.Sprintf(showQ, intKey))[0][0]; expected != actual { t.Fatalf("expected %v, got %v", expected, actual) } db.Exec(t, fmt.Sprintf(setQ, durationKey, "'2h'")) if expected, actual := time.Hour*2, durationA.Get(&st.SV); expected != actual { t.Fatalf("expected %v, got %v", expected, actual) } if expected, actual := "02:00:00", db.QueryStr(t, fmt.Sprintf(showQ, durationKey))[0][0]; expected != actual { t.Fatalf("expected %v, got %v", expected, actual) } db.Exec(t, fmt.Sprintf(setQ, byteSizeKey, "'1500MB'")) if expected, actual := int64(1500000000), byteSizeA.Get(&st.SV); expected != actual { t.Fatalf("expected %v, got %v", expected, actual) } if expected, actual := "1.4 GiB", db.QueryStr(t, fmt.Sprintf(showQ, byteSizeKey))[0][0]; expected != actual { t.Fatalf("expected %v, got %v", expected, actual) } db.Exec(t, fmt.Sprintf(setQ, byteSizeKey, "'1450MB'")) if expected, actual := "1.4 GiB", db.QueryStr(t, fmt.Sprintf(showQ, byteSizeKey))[0][0]; expected != actual { t.Fatalf("expected %v, got %v", expected, actual) } db.ExpectErr(t, `could not parse "a-str" as type int`, fmt.Sprintf(setQ, intKey, "'a-str'")) db.Exec(t, fmt.Sprintf(setQ, enumKey, "2")) if expected, actual := int64(2), enumA.Get(&st.SV); expected != actual { t.Fatalf("expected %v, got %v", expected, actual) } if expected, actual := "bar", db.QueryStr(t, fmt.Sprintf(showQ, enumKey))[0][0]; expected != actual { t.Fatalf("expected %v, got %v", expected, actual) } db.Exec(t, fmt.Sprintf(setQ, enumKey, "'foo'")) if expected, actual := int64(1), enumA.Get(&st.SV); expected != actual { t.Fatalf("expected %v, got %v", expected, actual) } if expected, actual := "foo", db.QueryStr(t, fmt.Sprintf(showQ, enumKey))[0][0]; expected != actual { t.Fatalf("expected %v, got %v", expected, actual) } db.ExpectErr( t, `invalid string value 'unknown' for enum setting`, fmt.Sprintf(setQ, enumKey, "'unknown'"), ) db.ExpectErr(t, `invalid integer value '7' for enum setting`, fmt.Sprintf(setQ, enumKey, "7")) } func TestSettingsShowAll(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) // Set up some additional cluster settings to play around with. Note that we // need to do this before starting the server, or there will be data races. 
	st := cluster.MakeTestingClusterSettings()
	s, rawDB, _ := serverutils.StartServer(t, base.TestServerArgs{Settings: st})
	defer s.Stopper().Stop(context.Background())
	db := sqlutils.MakeSQLRunner(rawDB)

	rows := db.QueryStr(t, "SHOW ALL CLUSTER SETTINGS")
	if len(rows) < 2 {
		t.Fatalf("show all returned too few rows (%d)", len(rows))
	}
	const expColumns = 5
	if len(rows[0]) != expColumns {
		t.Fatalf("show all must return %d columns, found %d", expColumns, len(rows[0]))
	}

	hasIntKey := false
	hasStrKey := false
	for _, row := range rows {
		switch row[0] {
		case strKey:
			hasStrKey = true
		case intKey:
			hasIntKey = true
		}
	}
	if !hasIntKey || !hasStrKey {
		t.Fatalf("show all did not find the test keys: %q", rows)
	}
}
pkg/server/settingsworker_test.go
0
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.0007210932672023773, 0.00019110858556814492, 0.00016099881031550467, 0.00017302446940448135, 0.00009847462206380442 ]
{ "id": 4, "code_window": [ "\tcase *GroupByExpr, *ScalarGroupByExpr, *DistinctOnExpr, *EnsureDistinctOnExpr,\n", "\t\t*UpsertDistinctOnExpr, *EnsureUpsertDistinctOnExpr:\n", "\t\tprivate := e.Private().(*GroupingPrivate)\n", "\t\tif !f.HasFlags(ExprFmtHideColumns) && !private.GroupingCols.Empty() {\n", "\t\t\tf.formatColList(e, tp, \"grouping columns:\", opt.ColSetToList(private.GroupingCols))\n", "\t\t}\n", "\t\tif !f.HasFlags(ExprFmtHidePhysProps) && !private.Ordering.Any() {\n", "\t\t\ttp.Childf(\"internal-ordering: %s\", private.Ordering)\n", "\t\t}\n", "\t\tif !f.HasFlags(ExprFmtHideMiscProps) && private.ErrorOnDup != \"\" {\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tf.formatColList(e, tp, \"grouping columns:\", private.GroupingCols.ToList())\n" ], "file_path": "pkg/sql/opt/memo/expr_format.go", "type": "replace", "edit_start_line_idx": 296 }
// Copyright 2017 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package sql import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/opt/exec" "github.com/cockroachdb/cockroach/pkg/sql/physicalplan" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/errors" ) // joinPlanningInfo is a utility struct that contains the information needed to // perform the physical planning of hash and merge joins. type joinPlanningInfo struct { leftPlan, rightPlan *PhysicalPlan joinType descpb.JoinType joinResultTypes []*types.T onExpr execinfrapb.Expression post execinfrapb.PostProcessSpec joinToStreamColMap []int // leftEqCols and rightEqCols are the indices of equality columns. These // are only used when planning a hash join. leftEqCols, rightEqCols []uint32 leftEqColsAreKey, rightEqColsAreKey bool // leftMergeOrd and rightMergeOrd are the orderings on both inputs to a // merge join. They must be of the same length, and if the length is 0, // then a hash join is planned. leftMergeOrd, rightMergeOrd execinfrapb.Ordering leftPlanDistribution, rightPlanDistribution physicalplan.PlanDistribution } // makeCoreSpec creates a processor core for hash and merge joins based on the // join planning information. Merge ordering fields of info determine which // kind of join is being planned. func (info *joinPlanningInfo) makeCoreSpec() execinfrapb.ProcessorCoreUnion { var core execinfrapb.ProcessorCoreUnion if len(info.leftMergeOrd.Columns) != len(info.rightMergeOrd.Columns) { panic(errors.AssertionFailedf( "unexpectedly different merge join ordering lengths: left %d, right %d", len(info.leftMergeOrd.Columns), len(info.rightMergeOrd.Columns), )) } if len(info.leftMergeOrd.Columns) == 0 { // There is no required ordering on the columns, so we plan a hash join. core.HashJoiner = &execinfrapb.HashJoinerSpec{ LeftEqColumns: info.leftEqCols, RightEqColumns: info.rightEqCols, OnExpr: info.onExpr, Type: info.joinType, LeftEqColumnsAreKey: info.leftEqColsAreKey, RightEqColumnsAreKey: info.rightEqColsAreKey, } } else { core.MergeJoiner = &execinfrapb.MergeJoinerSpec{ LeftOrdering: info.leftMergeOrd, RightOrdering: info.rightMergeOrd, OnExpr: info.onExpr, Type: info.joinType, LeftEqColumnsAreKey: info.leftEqColsAreKey, RightEqColumnsAreKey: info.rightEqColsAreKey, } } return core } // joinPlanningHelper is a utility struct that helps with the physical planning // of joins. type joinPlanningHelper struct { // numLeftOutCols and numRightOutCols store the number of columns that need // to be included in the output of the join from each of the sides. numLeftOutCols, numRightOutCols int // numAllLeftCols stores the width of the rows coming from the left side. // Note that it includes all of the left "out" columns and might include // other "internal" columns that are needed to merge the streams for the // left input. 
numAllLeftCols int leftPlanToStreamColMap, rightPlanToStreamColMap []int } func (h *joinPlanningHelper) joinOutColumns( joinType descpb.JoinType, columns colinfo.ResultColumns, ) (post execinfrapb.PostProcessSpec, joinToStreamColMap []int) { joinToStreamColMap = makePlanToStreamColMap(len(columns)) post.Projection = true // addOutCol appends to post.OutputColumns and returns the index // in the slice of the added column. addOutCol := func(col uint32) int { idx := len(post.OutputColumns) post.OutputColumns = append(post.OutputColumns, col) return idx } // The join columns are in two groups: // - the columns on the left side (numLeftOutCols) // - the columns on the right side (numRightOutCols) var numLeftOutCols int var numAllLeftCols int if joinType.ShouldIncludeLeftColsInOutput() { numLeftOutCols = h.numLeftOutCols numAllLeftCols = h.numAllLeftCols for i := 0; i < h.numLeftOutCols; i++ { joinToStreamColMap[i] = addOutCol(uint32(h.leftPlanToStreamColMap[i])) } } if joinType.ShouldIncludeRightColsInOutput() { for i := 0; i < h.numRightOutCols; i++ { joinToStreamColMap[numLeftOutCols+i] = addOutCol( uint32(numAllLeftCols + h.rightPlanToStreamColMap[i]), ) } } return post, joinToStreamColMap } // remapOnExpr remaps ordinal references in the ON condition (which refer to the // join columns as described above) to values that make sense in the joiner (0 // to N-1 for the left input columns, N to N+M-1 for the right input columns). func (h *joinPlanningHelper) remapOnExpr( planCtx *PlanningCtx, onCond tree.TypedExpr, ) (execinfrapb.Expression, error) { if onCond == nil { return execinfrapb.Expression{}, nil } joinColMap := make([]int, h.numLeftOutCols+h.numRightOutCols) idx := 0 leftCols := 0 for i := 0; i < h.numLeftOutCols; i++ { joinColMap[idx] = h.leftPlanToStreamColMap[i] if h.leftPlanToStreamColMap[i] != -1 { leftCols++ } idx++ } for i := 0; i < h.numRightOutCols; i++ { joinColMap[idx] = leftCols + h.rightPlanToStreamColMap[i] idx++ } return physicalplan.MakeExpression(onCond, planCtx, joinColMap) } // eqCols produces a slice of ordinal references for the plan columns specified // in eqIndices using planToColMap. // That is: eqIndices contains a slice of plan column indexes and planToColMap // maps the plan column indexes to the ordinal references (index of the // intermediate row produced). func eqCols(eqIndices []exec.NodeColumnOrdinal, planToColMap []int) []uint32 { eqCols := make([]uint32, len(eqIndices)) for i, planCol := range eqIndices { eqCols[i] = uint32(planToColMap[planCol]) } return eqCols } // distsqlOrdering converts the ordering specified by mergeJoinOrdering in // terms of the index of eqCols to the ordinal references provided by eqCols. func distsqlOrdering( mergeJoinOrdering colinfo.ColumnOrdering, eqCols []uint32, ) execinfrapb.Ordering { var ord execinfrapb.Ordering ord.Columns = make([]execinfrapb.Ordering_Column, len(mergeJoinOrdering)) for i, c := range mergeJoinOrdering { ord.Columns[i].ColIdx = eqCols[c.ColIdx] dir := execinfrapb.Ordering_Column_ASC if c.Direction == encoding.Descending { dir = execinfrapb.Ordering_Column_DESC } ord.Columns[i].Direction = dir } return ord } func distsqlSetOpJoinType(setOpType tree.UnionType) descpb.JoinType { switch setOpType { case tree.ExceptOp: return descpb.ExceptAllJoin case tree.IntersectOp: return descpb.IntersectAllJoin default: panic(errors.AssertionFailedf("set op type %v unsupported by joins", setOpType)) } } // getNodesOfRouters returns all nodes that routers are put on. 
func getNodesOfRouters(
	routers []physicalplan.ProcessorIdx, processors []physicalplan.Processor,
) (nodes []roachpb.NodeID) {
	seen := make(map[roachpb.NodeID]struct{})
	for _, pIdx := range routers {
		n := processors[pIdx].Node
		if _, ok := seen[n]; !ok {
			seen[n] = struct{}{}
			nodes = append(nodes, n)
		}
	}
	return nodes
}

func findJoinProcessorNodes(
	leftRouters, rightRouters []physicalplan.ProcessorIdx, processors []physicalplan.Processor,
) (nodes []roachpb.NodeID) {
	// TODO(radu): for now we run a join processor on every node that produces
	// data for either source. In the future we should be smarter here.
	return getNodesOfRouters(append(leftRouters, rightRouters...), processors)
}
pkg/sql/distsql_plan_join.go
0
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.0033427176531404257, 0.0003530922986101359, 0.00016218771634157747, 0.0001695802784524858, 0.0006528009544126689 ]
{ "id": 4, "code_window": [ "\tcase *GroupByExpr, *ScalarGroupByExpr, *DistinctOnExpr, *EnsureDistinctOnExpr,\n", "\t\t*UpsertDistinctOnExpr, *EnsureUpsertDistinctOnExpr:\n", "\t\tprivate := e.Private().(*GroupingPrivate)\n", "\t\tif !f.HasFlags(ExprFmtHideColumns) && !private.GroupingCols.Empty() {\n", "\t\t\tf.formatColList(e, tp, \"grouping columns:\", opt.ColSetToList(private.GroupingCols))\n", "\t\t}\n", "\t\tif !f.HasFlags(ExprFmtHidePhysProps) && !private.Ordering.Any() {\n", "\t\t\ttp.Childf(\"internal-ordering: %s\", private.Ordering)\n", "\t\t}\n", "\t\tif !f.HasFlags(ExprFmtHideMiscProps) && private.ErrorOnDup != \"\" {\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tf.formatColList(e, tp, \"grouping columns:\", private.GroupingCols.ToList())\n" ], "file_path": "pkg/sql/opt/memo/expr_format.go", "type": "replace", "edit_start_line_idx": 296 }
// Copyright 2016 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package rowcontainer import ( "context" "math/bits" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/errors" ) // RowContainer is a container for rows of Datums which tracks the // approximate amount of memory allocated for row data. // Rows must be added using AddRow(); once the work is done // the Close() method must be called to release the allocated memory. // // TODO(knz): this does not currently track the amount of memory used // for the outer array of Datums references. type RowContainer struct { // We should not copy this structure around; each copy would have a // different memAcc (among other things like aliasing chunks). _ util.NoCopy numCols int // rowsPerChunk is the number of rows in a chunk; we pack multiple rows in a // single []Datum to reduce the overhead of the slice if we have few // columns. Must be a power of 2 as determination of the chunk given a row // index is performed using shifting. rowsPerChunk int rowsPerChunkShift uint chunks [][]tree.Datum firstChunk [1][]tree.Datum // avoids allocation numRows int // chunkMemSize is the memory used by a chunk. chunkMemSize int64 // fixedColsSize is the sum of widths of fixed-width columns in a // single row. fixedColsSize int64 // varSizedColumns indicates for which columns the datum size // is variable. varSizedColumns []int // deletedRows is the number of rows that have been deleted from the front // of the container. When this number reaches rowsPerChunk we delete that chunk // and reset this back to zero. deletedRows int // memAcc tracks the current memory consumption of this // RowContainer. memAcc mon.BoundAccount } // NewRowContainer allocates a new row container. // // The acc argument indicates where to register memory allocations by // this row container. Should probably be created by // Session.makeBoundAccount() or Session.TxnState.makeBoundAccount(). // // Note that we could, but do not (yet), report the size of the row // container itself to the monitor in this constructor. This is // because the various planNodes are not (yet) equipped to call // Close() upon encountering errors in their constructor (all nodes // initializing a RowContainer there) and SetLimitHint() (for sortNode // which initializes a RowContainer there). This would be rather // error-prone to implement consistently and hellishly difficult to // test properly. The trade-off is that very large table schemas or // column selections could cause unchecked and potentially dangerous // memory growth. func NewRowContainer(acc mon.BoundAccount, ti colinfo.ColTypeInfo) *RowContainer { return NewRowContainerWithCapacity(acc, ti, 0) } // NewRowContainerWithCapacity is like NewRowContainer, but it accepts a // rowCapacity argument. // // If provided, rowCapacity indicates how many rows are to be expected. 
// The value is used to configure the size of chunks that are allocated // within the container such that if no more than the specific number of // rows is added to the container, only a single chunk will be allocated // and wasted space will be kept to a minimum. func NewRowContainerWithCapacity( acc mon.BoundAccount, ti colinfo.ColTypeInfo, rowCapacity int, ) *RowContainer { c := &RowContainer{} c.Init(acc, ti, rowCapacity) return c } var rowsPerChunkShift = uint(util.ConstantWithMetamorphicTestValue( 6, /* defaultValue */ 1, /* metamorphicValue */ )) // Init can be used instead of NewRowContainer if we have a RowContainer that is // already part of an on-heap structure. func (c *RowContainer) Init(acc mon.BoundAccount, ti colinfo.ColTypeInfo, rowCapacity int) { nCols := ti.NumColumns() c.numCols = nCols c.memAcc = acc if rowCapacity != 0 { // If there is a row capacity provided, we use a single chunk with // sufficient capacity. The following is equivalent to: // // c.rowsPerChunkShift = ceil(log2(rowCapacity)) // c.rowsPerChunkShift = 64 - uint(bits.LeadingZeros64(uint64(rowCapacity-1))) } else if nCols != 0 { // If the rows have columns, we use 64 rows per chunk. c.rowsPerChunkShift = rowsPerChunkShift } else { // If there are no columns, every row gets mapped to the first chunk, // which ends up being a zero-length slice because each row contains no // columns. c.rowsPerChunkShift = 32 } c.rowsPerChunk = 1 << c.rowsPerChunkShift for i := 0; i < nCols; i++ { sz, variable := tree.DatumTypeSize(ti.Type(i)) if variable { if c.varSizedColumns == nil { // Only allocate varSizedColumns if necessary. c.varSizedColumns = make([]int, 0, nCols) } c.varSizedColumns = append(c.varSizedColumns, i) } else { c.fixedColsSize += int64(sz) } } // Precalculate the memory used for a chunk, specifically by the Datums in the // chunk and the slice pointing at the chunk. c.chunkMemSize = tree.SizeOfDatum * int64(c.rowsPerChunk*c.numCols) c.chunkMemSize += tree.SizeOfDatums } // Clear resets the container and releases the associated memory. This allows // the RowContainer to be reused. func (c *RowContainer) Clear(ctx context.Context) { c.chunks = nil c.numRows = 0 c.deletedRows = 0 c.memAcc.Clear(ctx) } // UnsafeReset resets the container without releasing the associated memory. This // allows the RowContainer to be reused, but keeps the previously-allocated // buffers around for reuse. This is desirable if this RowContainer will be used // and reset many times in the course of a computation before eventually being // discarded. It's unsafe because it immediately renders all previously // allocated rows unsafe - they might be overwritten without notice. This is // only safe to use if it's guaranteed that all previous rows retrieved by At // have been copied or otherwise not retained. func (c *RowContainer) UnsafeReset(ctx context.Context) error { c.numRows = 0 c.deletedRows = 0 return c.memAcc.ResizeTo(ctx, int64(len(c.chunks))*c.chunkMemSize) } // Close releases the memory associated with the RowContainer. func (c *RowContainer) Close(ctx context.Context) { if c == nil { // Allow Close on an uninitialized container. 
return } c.chunks = nil c.varSizedColumns = nil c.memAcc.Close(ctx) } func (c *RowContainer) allocChunks(ctx context.Context, numChunks int) error { datumsPerChunk := c.rowsPerChunk * c.numCols if err := c.memAcc.Grow(ctx, c.chunkMemSize*int64(numChunks)); err != nil { return err } if c.chunks == nil { if numChunks == 1 { c.chunks = c.firstChunk[:0:1] } else { c.chunks = make([][]tree.Datum, 0, numChunks) } } datums := make([]tree.Datum, numChunks*datumsPerChunk) for i, pos := 0, 0; i < numChunks; i++ { c.chunks = append(c.chunks, datums[pos:pos+datumsPerChunk]) pos += datumsPerChunk } return nil } // rowSize computes the size of a single row. func (c *RowContainer) rowSize(row tree.Datums) int64 { rsz := c.fixedColsSize for _, i := range c.varSizedColumns { rsz += int64(row[i].Size()) } return rsz } // getChunkAndPos returns the chunk index and the position inside the chunk for // a given row index. func (c *RowContainer) getChunkAndPos(rowIdx int) (chunk int, pos int) { // This is a hot path; use shifting to avoid division. row := rowIdx + c.deletedRows chunk = row >> c.rowsPerChunkShift return chunk, (row - (chunk << c.rowsPerChunkShift)) * (c.numCols) } // AddRow attempts to insert a new row in the RowContainer. The row slice is not // used directly: the Datum values inside the Datums are copied to internal storage. // Returns an error if the allocation was denied by the MemoryMonitor. func (c *RowContainer) AddRow(ctx context.Context, row tree.Datums) (tree.Datums, error) { if len(row) != c.numCols { panic(errors.AssertionFailedf("invalid row length %d, expected %d", len(row), c.numCols)) } if c.numCols == 0 { if c.chunks == nil { c.chunks = [][]tree.Datum{{}} } c.numRows++ return nil, nil } // Note that it is important that we perform the memory accounting before // actually adding the row. if err := c.memAcc.Grow(ctx, c.rowSize(row)); err != nil { return nil, err } chunk, pos := c.getChunkAndPos(c.numRows) if chunk == len(c.chunks) { // Grow the number of chunks by a fraction. numChunks := 1 + len(c.chunks)/8 if err := c.allocChunks(ctx, numChunks); err != nil { return nil, err } } copy(c.chunks[chunk][pos:pos+c.numCols], row) c.numRows++ return c.chunks[chunk][pos : pos+c.numCols : pos+c.numCols], nil } // Len reports the number of rows currently held in this RowContainer. func (c *RowContainer) Len() int { return c.numRows } // NumCols reports the number of columns for each row in the container. func (c *RowContainer) NumCols() int { return c.numCols } // At accesses a row at a specific index. Note that it does *not* copy the row: // callers must copy the row if they wish to mutate it. func (c *RowContainer) At(i int) tree.Datums { // This is a hot-path: do not add additional checks here. chunk, pos := c.getChunkAndPos(i) return c.chunks[chunk][pos : pos+c.numCols : pos+c.numCols] } // Swap exchanges two rows. Used for sorting. func (c *RowContainer) Swap(i, j int) { r1 := c.At(i) r2 := c.At(j) for idx := 0; idx < c.numCols; idx++ { r1[idx], r2[idx] = r2[idx], r1[idx] } } // PopFirst discards the first row in the RowContainer. func (c *RowContainer) PopFirst(ctx context.Context) { if c.numRows == 0 { panic("no rows added to container, nothing to pop") } c.numRows-- if c.numCols != 0 { c.deletedRows++ if c.deletedRows == c.rowsPerChunk { // We release the memory for rows in chunks. This includes the // chunk slice (allocated by allocChunks) and the Datums. 
size := c.chunkMemSize for i, pos := 0, 0; i < c.rowsPerChunk; i, pos = i+1, pos+c.numCols { size += c.rowSize(c.chunks[0][pos : pos+c.numCols]) } // Reset the pointer so the slice can be garbage collected. c.chunks[0] = nil c.deletedRows = 0 c.chunks = c.chunks[1:] c.memAcc.Shrink(ctx, size) } } } // Replace substitutes one row for another. This does query the // MemoryMonitor to determine whether the new row fits the // allowance. func (c *RowContainer) Replace(ctx context.Context, i int, newRow tree.Datums) error { newSz := c.rowSize(newRow) row := c.At(i) oldSz := c.rowSize(row) if newSz != oldSz { if err := c.memAcc.Resize(ctx, oldSz, newSz); err != nil { return err } } copy(row, newRow) return nil }
pkg/sql/rowcontainer/datum_row_container.go
0
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.0026701500173658133, 0.00024814321659505367, 0.00016586661513429135, 0.0001715605758363381, 0.00042818213114514947 ]
{ "id": 6, "code_window": [ "\tlast := c.makeNewScan(sp, cons.Columns, newHardLimit, newSpans.Get(0))\n", "\tfor i, cnt := 1, newSpans.Count(); i < cnt; i++ {\n", "\t\tnewScan := c.makeNewScan(sp, cons.Columns, newHardLimit, newSpans.Get(i))\n", "\t\tlast = c.e.f.ConstructUnion(last, newScan, &memo.SetPrivate{\n", "\t\t\tLeftCols: opt.ColSetToList(last.Relational().OutputCols),\n", "\t\t\tRightCols: opt.ColSetToList(newScan.Relational().OutputCols),\n", "\t\t\tOutCols: oldColList,\n", "\t\t})\n", "\t}\n", "\treturn last\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tLeftCols: last.Relational().OutputCols.ToList(),\n", "\t\t\tRightCols: newScan.Relational().OutputCols.ToList(),\n" ], "file_path": "pkg/sql/opt/xform/limit_funcs.go", "type": "replace", "edit_start_line_idx": 238 }
// Copyright 2020 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package xform import ( "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/opt/constraint" "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" "github.com/cockroachdb/cockroach/pkg/sql/opt/ordering" "github.com/cockroachdb/cockroach/pkg/sql/opt/props" "github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" ) // LimitScanPrivate constructs a new ScanPrivate value that is based on the // given ScanPrivate. The new private's HardLimit is set to the given limit, // which must be a constant int datum value. The other fields are inherited from // the existing private. func (c *CustomFuncs) LimitScanPrivate( scanPrivate *memo.ScanPrivate, limit tree.Datum, required physical.OrderingChoice, ) *memo.ScanPrivate { // Determine the scan direction necessary to provide the required ordering. _, reverse := ordering.ScanPrivateCanProvide(c.e.mem.Metadata(), scanPrivate, &required) newScanPrivate := *scanPrivate newScanPrivate.HardLimit = memo.MakeScanLimit(int64(*limit.(*tree.DInt)), reverse) return &newScanPrivate } // CanLimitFilteredScan returns true if the given scan has not already been // limited, and is constrained or scans a partial index. This is only possible // when the required ordering of the rows to be limited can be satisfied by the // Scan operator. // // NOTE: Limiting unconstrained, non-partial index scans is done by the // GenerateLimitedScans rule, since that can require IndexJoin operators // to be generated. func (c *CustomFuncs) CanLimitFilteredScan( scanPrivate *memo.ScanPrivate, required physical.OrderingChoice, ) bool { if scanPrivate.HardLimit != 0 { // Don't push limit into scan if scan is already limited. This would // usually only happen when normalizations haven't run, as otherwise // redundant Limit operators would be discarded. return false } md := c.e.mem.Metadata() if scanPrivate.Constraint == nil && scanPrivate.PartialIndexPredicate(md) == nil { // This is not a constrained scan nor a partial index scan, so skip it. // The GenerateLimitedScans rule is responsible for limited // unconstrained scans on non-partial indexes. return false } ok, _ := ordering.ScanPrivateCanProvide(c.e.mem.Metadata(), scanPrivate, &required) return ok } // GenerateLimitedScans enumerates all non-inverted and non-partial secondary // indexes on the Scan operator's table and tries to create new limited Scan // operators from them. Since this only needs to be done once per table, // GenerateLimitedScans should only be called on the original unaltered primary // index Scan operator (i.e. not constrained or limited). // // For a secondary index that "covers" the columns needed by the scan, a single // limited Scan operator is created. For a non-covering index, an IndexJoin is // constructed to add missing columns to the limited Scan. // // Inverted index scans are not guaranteed to produce a specific number // of result rows because they contain multiple entries for a single row // indexed. Therefore, they cannot be considered for limited scans. 
// // Partial indexes do not index every row in the table and they can only be used // in cases where a query filter implies the partial index predicate. // GenerateLimitedScans deals with limits, but no filters, so it cannot generate // limited partial index scans. Limiting partial indexes is done by the // PushLimitIntoFilteredScans rule. func (c *CustomFuncs) GenerateLimitedScans( grp memo.RelExpr, scanPrivate *memo.ScanPrivate, limit tree.Datum, required physical.OrderingChoice, ) { limitVal := int64(*limit.(*tree.DInt)) var sb indexScanBuilder sb.init(c, scanPrivate.Table) // Iterate over all non-inverted, non-partial indexes, looking for those // that can be limited. var iter scanIndexIter iter.Init(c.e.mem, &c.im, scanPrivate, nil /* filters */, rejectInvertedIndexes|rejectPartialIndexes) iter.ForEach(func(index cat.Index, filters memo.FiltersExpr, indexCols opt.ColSet, isCovering bool) { newScanPrivate := *scanPrivate newScanPrivate.Index = index.Ordinal() // If the alternate index does not conform to the ordering, then skip it. // If reverse=true, then the scan needs to be in reverse order to match // the required ordering. ok, reverse := ordering.ScanPrivateCanProvide( c.e.mem.Metadata(), &newScanPrivate, &required, ) if !ok { return } newScanPrivate.HardLimit = memo.MakeScanLimit(limitVal, reverse) // If the alternate index includes the set of needed columns, then construct // a new Scan operator using that index. if isCovering { sb.setScan(&newScanPrivate) sb.build(grp) return } // Otherwise, try to construct an IndexJoin operator that provides the // columns missing from the index. if scanPrivate.Flags.NoIndexJoin { return } // Scan whatever columns we need which are available from the index, plus // the PK columns. newScanPrivate.Cols = indexCols.Intersection(scanPrivate.Cols) newScanPrivate.Cols.UnionWith(sb.primaryKeyCols()) sb.setScan(&newScanPrivate) // The Scan operator will go into its own group (because it projects a // different set of columns), and the IndexJoin operator will be added to // the same group as the original Limit operator. sb.addIndexJoin(scanPrivate.Cols) sb.build(grp) }) } // ScanIsLimited returns true if the scan operator with the given ScanPrivate is // limited. func (c *CustomFuncs) ScanIsLimited(sp *memo.ScanPrivate) bool { return sp.HardLimit != 0 } // ScanIsInverted returns true if the index of the given ScanPrivate is an // inverted index. func (c *CustomFuncs) ScanIsInverted(sp *memo.ScanPrivate) bool { md := c.e.mem.Metadata() idx := md.Table(sp.Table).Index(sp.Index) return idx.IsInverted() } // SplitScanIntoUnionScans returns a Union of Scan operators with hard limits // that each scan over a single key from the original scan's constraints. This // is beneficial in cases where the original scan had to scan over many rows but // had relatively few keys to scan over. // TODO(drewk): handle inverted scans. func (c *CustomFuncs) SplitScanIntoUnionScans( limitOrdering physical.OrderingChoice, scan memo.RelExpr, sp *memo.ScanPrivate, limit tree.Datum, ) memo.RelExpr { const maxScanCount = 16 const threshold = 4 cons, ok := c.getKnownScanConstraint(sp) if !ok { // No valid constraint was found. return nil } keyCtx := constraint.MakeKeyContext(&cons.Columns, c.e.evalCtx) limitVal := int(*limit.(*tree.DInt)) spans := cons.Spans // Retrieve the number of keys in the spans. keyCount, ok := spans.KeyCount(&keyCtx) if !ok { return nil } if keyCount <= 1 { // We need more than one key in order to split the existing Scan into // multiple Scans. 
return nil } if int(keyCount) > maxScanCount { // The number of new Scans created would exceed maxScanCount. return nil } // Check that the number of rows scanned by the new plan will be smaller than // the number scanned by the old plan by at least a factor of "threshold". if float64(int(keyCount)*limitVal*threshold) >= scan.Relational().Stats.RowCount { // Splitting the scan may not be worth the overhead; creating a sequence of // scans unioned together is expensive, so we don't want to create the plan // only for the optimizer to use something else. We only want to create the // plan if it is likely to be used. return nil } // Retrieve the length of the keys. All keys are required to be the same // length (this will be checked later) so we can simply use the length of the // first key. keyLength := spans.Get(0).StartKey().Length() // If the index ordering has a prefix of columns of length keyLength followed // by the limitOrdering columns, the scan can be split. Otherwise, return nil. hasLimitOrderingSeq, reverse := indexHasOrderingSequence( c.e.mem.Metadata(), scan, sp, limitOrdering, keyLength) if !hasLimitOrderingSeq { return nil } // Construct a hard limit for the new scans using the result of // hasLimitOrderingSeq. newHardLimit := memo.MakeScanLimit(int64(limitVal), reverse) // Construct a new Spans object containing a new Span for each key in the // original Scan's spans. newSpans, ok := spans.ExtractSingleKeySpans(&keyCtx, maxScanCount) if !ok { // Single key spans could not be created. return nil } // Construct a new ScanExpr for each span and union them all together. We // output the old ColumnIDs from each union. oldColList := opt.ColSetToList(scan.Relational().OutputCols) last := c.makeNewScan(sp, cons.Columns, newHardLimit, newSpans.Get(0)) for i, cnt := 1, newSpans.Count(); i < cnt; i++ { newScan := c.makeNewScan(sp, cons.Columns, newHardLimit, newSpans.Get(i)) last = c.e.f.ConstructUnion(last, newScan, &memo.SetPrivate{ LeftCols: opt.ColSetToList(last.Relational().OutputCols), RightCols: opt.ColSetToList(newScan.Relational().OutputCols), OutCols: oldColList, }) } return last } // indexHasOrderingSequence returns whether the scan can provide a given // ordering under the assumption that we are scanning a single-key span with the // given keyLength (and if so, whether we need to scan it in reverse). // For example: // // index: +1/-2/+3, // limitOrdering: -2/+3, // keyLength: 1, // => // hasSequence: True, reverse: False // // index: +1/-2/+3, // limitOrdering: +2/-3, // keyLength: 1, // => // hasSequence: True, reverse: True // // index: +1/-2/+3/+4, // limitOrdering: +3/+4, // keyLength: 1, // => // hasSequence: False, reverse: False // func indexHasOrderingSequence( md *opt.Metadata, scan memo.RelExpr, sp *memo.ScanPrivate, limitOrdering physical.OrderingChoice, keyLength int, ) (hasSequence, reverse bool) { tableMeta := md.TableMeta(sp.Table) index := tableMeta.Table.Index(sp.Index) if keyLength > index.ColumnCount() { // The key contains more columns than the index. The limit ordering sequence // cannot be part of the index ordering. return false, false } // Create a copy of the Scan's FuncDepSet, and add the first 'keyCount' // columns from the index as constant columns. The columns are constant // because the span contains only a single key on those columns. 
var fds props.FuncDepSet fds.CopyFrom(&scan.Relational().FuncDeps) prefixCols := opt.ColSet{} for i := 0; i < keyLength; i++ { col := sp.Table.IndexColumnID(index, i) prefixCols.Add(col) } fds.AddConstants(prefixCols) // Use fds to simplify a copy of the limit ordering; the prefix columns will // become part of the optional ColSet. requiredOrdering := limitOrdering.Copy() requiredOrdering.Simplify(&fds) // If the ScanPrivate can satisfy requiredOrdering, it must return columns // ordered by a prefix of length keyLength, followed by the columns of // limitOrdering. return ordering.ScanPrivateCanProvide(md, sp, &requiredOrdering) } // makeNewScan constructs a new Scan operator with a new TableID and the given // limit and span. All ColumnIDs and references to those ColumnIDs are // replaced with new ones from the new TableID. All other fields are simply // copied from the old ScanPrivate. func (c *CustomFuncs) makeNewScan( sp *memo.ScanPrivate, columns constraint.Columns, newHardLimit memo.ScanLimit, span *constraint.Span, ) memo.RelExpr { newScanPrivate := c.DuplicateScanPrivate(sp) // duplicateScanPrivate does not initialize the Constraint or HardLimit // fields, so we do that now. newScanPrivate.HardLimit = newHardLimit // Construct the new Constraint field with the given span and remapped // ordering columns. var newSpans constraint.Spans newSpans.InitSingleSpan(span) newConstraint := &constraint.Constraint{ Columns: columns.RemapColumns(sp.Table, newScanPrivate.Table), Spans: newSpans, } newScanPrivate.Constraint = newConstraint return c.e.f.ConstructScan(newScanPrivate) } // getKnownScanConstraint returns a Constraint that is known to hold true for // the output of the Scan operator with the given ScanPrivate. If the // ScanPrivate has a Constraint, the scan Constraint is returned. Otherwise, an // effort is made to retrieve a Constraint from the underlying table's check // constraints. getKnownScanConstraint assumes that the scan is not inverted. func (c *CustomFuncs) getKnownScanConstraint( sp *memo.ScanPrivate, ) (cons *constraint.Constraint, found bool) { if sp.Constraint != nil { // The ScanPrivate has a constraint, so return it. cons = sp.Constraint } else { // Build a constraint set with the check constraints of the underlying // table. filters := c.checkConstraintFilters(sp.Table) instance := c.initIdxConstraintForIndex( nil, /* requiredFilters */ filters, sp.Table, sp.Index, false, /* isInverted */ ) cons = instance.Constraint() } return cons, !cons.IsUnconstrained() }
pkg/sql/opt/xform/limit_funcs.go
1
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.9978321194648743, 0.029165267944335938, 0.00016066787065938115, 0.00034712915658019483, 0.1615799516439438 ]
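Note on the rows above and below: the `code_window` hunks in this dump all capture one refactoring pattern from the linked commit, namely replacing the package-level helper `opt.ColSetToList(colSet)` with the method call `colSet.ToList()`, and the rows whose label is 1 are the files those hunks actually edit. The following is a minimal, self-contained sketch of that pattern only; the `ColumnID` and `ColSet` definitions are illustrative stand-ins and not the actual cockroachdb/cockroach types.

// refactor_sketch.go: toy reproduction of the helper-to-method refactor.
package main

import (
	"fmt"
	"sort"
)

// ColumnID is a stand-in for an optimizer column identifier.
type ColumnID int

// ColSet is a toy stand-in for opt.ColSet.
type ColSet map[ColumnID]struct{}

// ToList is the method form that the commit switches to.
func (s ColSet) ToList() []ColumnID {
	list := make([]ColumnID, 0, len(s))
	for c := range s {
		list = append(list, c)
	}
	sort.Slice(list, func(i, j int) bool { return list[i] < list[j] })
	return list
}

// ColSetToList mimics the older package-level helper being replaced.
func ColSetToList(s ColSet) []ColumnID {
	return s.ToList()
}

func main() {
	cols := ColSet{3: {}, 1: {}, 2: {}}
	fmt.Println(ColSetToList(cols)) // old call style: [1 2 3]
	fmt.Println(cols.ToList())      // new call style: [1 2 3]
}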
{ "id": 6, "code_window": [ "\tlast := c.makeNewScan(sp, cons.Columns, newHardLimit, newSpans.Get(0))\n", "\tfor i, cnt := 1, newSpans.Count(); i < cnt; i++ {\n", "\t\tnewScan := c.makeNewScan(sp, cons.Columns, newHardLimit, newSpans.Get(i))\n", "\t\tlast = c.e.f.ConstructUnion(last, newScan, &memo.SetPrivate{\n", "\t\t\tLeftCols: opt.ColSetToList(last.Relational().OutputCols),\n", "\t\t\tRightCols: opt.ColSetToList(newScan.Relational().OutputCols),\n", "\t\t\tOutCols: oldColList,\n", "\t\t})\n", "\t}\n", "\treturn last\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tLeftCols: last.Relational().OutputCols.ToList(),\n", "\t\t\tRightCols: newScan.Relational().OutputCols.ToList(),\n" ], "file_path": "pkg/sql/opt/xform/limit_funcs.go", "type": "replace", "edit_start_line_idx": 238 }
# ------------------------------------------------------------- # Read-only request runs into replicated intent. It informs the # lock-table and waits for the intent to be resolved. # ------------------------------------------------------------- new-txn name=txn1 ts=10,1 epoch=0 ---- new-txn name=txn2 ts=12,1 epoch=0 ---- new-request name=req1 txn=txn2 ts=12,1 get key=k ---- sequence req=req1 ---- [1] sequence req1: sequencing request [1] sequence req1: acquiring latches [1] sequence req1: scanning lock table for conflicting locks [1] sequence req1: sequencing complete, returned guard handle-write-intent-error req=req1 lease-seq=1 intent txn=txn1 key=k ---- [2] handle write intent error req1: handled conflicting intents on "k", released latches debug-lock-table ---- global: num=1 lock: "k" holder: txn: 00000001-0000-0000-0000-000000000000, ts: 0.000000010,1, info: repl epoch: 0, seqs: [0] local: num=0 sequence req=req1 ---- [3] sequence req1: re-sequencing request [3] sequence req1: acquiring latches [3] sequence req1: scanning lock table for conflicting locks [3] sequence req1: waiting in lock wait-queues [3] sequence req1: pushing timestamp of txn 00000001 above 0.000000012,1 [3] sequence req1: blocked on select in concurrency_test.(*cluster).PushTransaction on-txn-updated txn=txn1 status=aborted ---- [-] update txn: aborting txn1 [3] sequence req1: resolving intent "k" for txn 00000001 with ABORTED status [3] sequence req1: acquiring latches [3] sequence req1: scanning lock table for conflicting locks [3] sequence req1: sequencing complete, returned guard finish req=req1 ---- [-] finish req1: finishing request reset namespace ---- # ------------------------------------------------------------- # Read-only request runs into replicated intent while the # lock-table is disabled. The lock-table cannot store the lock, # so the request is forced to push (PUSH_TIMESTAMP) immediately. # ------------------------------------------------------------- new-txn name=txn1 ts=10,1 epoch=0 ---- new-txn name=txn2 ts=12,1 epoch=0 ---- new-request name=req1 txn=txn2 ts=12,1 get key=k ---- on-lease-updated leaseholder=false lease-seq=2 ---- [-] transfer lease: released sequence req=req1 ---- [1] sequence req1: sequencing request [1] sequence req1: acquiring latches [1] sequence req1: scanning lock table for conflicting locks [1] sequence req1: sequencing complete, returned guard handle-write-intent-error req=req1 lease-seq=2 intent txn=txn1 key=k ---- [2] handle write intent error req1: pushing timestamp of txn 00000001 above 0.000000012,1 [2] handle write intent error req1: blocked on select in concurrency_test.(*cluster).PushTransaction on-txn-updated txn=txn1 status=aborted ---- [-] update txn: aborting txn1 [2] handle write intent error req1: resolving intent "k" for txn 00000001 with ABORTED status [2] handle write intent error req1: handled conflicting intents on "k", released latches debug-lock-table ---- global: num=0 local: num=0 sequence req=req1 ---- [3] sequence req1: re-sequencing request [3] sequence req1: acquiring latches [3] sequence req1: scanning lock table for conflicting locks [3] sequence req1: sequencing complete, returned guard finish req=req1 ---- [-] finish req1: finishing request reset namespace ---- # ------------------------------------------------------------- # Read-write request runs into replicated intent while the # lock-table is disabled. The lock-table cannot store the lock, # so the request is forced to push (PUSH_ABORT) immediately. 
# ------------------------------------------------------------- new-txn name=txn1 ts=10,1 epoch=0 ---- new-txn name=txn2 ts=12,1 epoch=0 ---- new-request name=req1 txn=txn2 ts=12,1 put key=k value=v ---- on-lease-updated leaseholder=false lease-seq=2 ---- [-] transfer lease: released sequence req=req1 ---- [1] sequence req1: sequencing request [1] sequence req1: acquiring latches [1] sequence req1: scanning lock table for conflicting locks [1] sequence req1: sequencing complete, returned guard handle-write-intent-error req=req1 lease-seq=2 intent txn=txn1 key=k ---- [2] handle write intent error req1: pushing txn 00000001 to abort [2] handle write intent error req1: blocked on select in concurrency_test.(*cluster).PushTransaction on-txn-updated txn=txn1 status=aborted ---- [-] update txn: aborting txn1 [2] handle write intent error req1: resolving intent "k" for txn 00000001 with ABORTED status [2] handle write intent error req1: handled conflicting intents on "k", released latches debug-lock-table ---- global: num=0 local: num=0 sequence req=req1 ---- [3] sequence req1: re-sequencing request [3] sequence req1: acquiring latches [3] sequence req1: scanning lock table for conflicting locks [3] sequence req1: sequencing complete, returned guard finish req=req1 ---- [-] finish req1: finishing request reset namespace ---- # ------------------------------------------------------------- # Read-write request runs into replicated intent while the # lock-table is disabled. The lock-table cannot store the lock, # so the request is forced to push (PUSH_ABORT) immediately. # The request's own transaction is aborted while pushing. # ------------------------------------------------------------- new-txn name=txn1 ts=10,1 epoch=0 ---- new-txn name=txn2 ts=12,1 epoch=0 ---- new-request name=req1 txn=txn2 ts=12,1 get key=k ---- on-lease-updated leaseholder=false lease-seq=2 ---- [-] transfer lease: released sequence req=req1 ---- [1] sequence req1: sequencing request [1] sequence req1: acquiring latches [1] sequence req1: scanning lock table for conflicting locks [1] sequence req1: sequencing complete, returned guard handle-write-intent-error req=req1 lease-seq=2 intent txn=txn1 key=k ---- [2] handle write intent error req1: pushing timestamp of txn 00000001 above 0.000000012,1 [2] handle write intent error req1: blocked on select in concurrency_test.(*cluster).PushTransaction on-txn-updated txn=txn2 status=aborted ---- [-] update txn: aborting txn2 [2] handle write intent error req1: detected pusher aborted [2] handle write intent error req1: handled conflicting intents on "k", returned error: TransactionAbortedError(ABORT_REASON_PUSHER_ABORTED): <nil> debug-lock-table ---- global: num=0 local: num=0 reset namespace ----
pkg/kv/kvserver/concurrency/testdata/concurrency_manager/discovered_lock
0
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.00017503948765806854, 0.00017254753038287163, 0.0001687450858298689, 0.00017267624207306653, 0.0000015644351378796273 ]
{ "id": 6, "code_window": [ "\tlast := c.makeNewScan(sp, cons.Columns, newHardLimit, newSpans.Get(0))\n", "\tfor i, cnt := 1, newSpans.Count(); i < cnt; i++ {\n", "\t\tnewScan := c.makeNewScan(sp, cons.Columns, newHardLimit, newSpans.Get(i))\n", "\t\tlast = c.e.f.ConstructUnion(last, newScan, &memo.SetPrivate{\n", "\t\t\tLeftCols: opt.ColSetToList(last.Relational().OutputCols),\n", "\t\t\tRightCols: opt.ColSetToList(newScan.Relational().OutputCols),\n", "\t\t\tOutCols: oldColList,\n", "\t\t})\n", "\t}\n", "\treturn last\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tLeftCols: last.Relational().OutputCols.ToList(),\n", "\t\t\tRightCols: newScan.Relational().OutputCols.ToList(),\n" ], "file_path": "pkg/sql/opt/xform/limit_funcs.go", "type": "replace", "edit_start_line_idx": 238 }
#! /usr/bin/env expect -f source [file join [file dirname $argv0] common.tcl] spawn /bin/bash send "PS1=':''/# '\r" eexpect ":/# " start_test "Check that --max-disk-temp-storage works." send "$argv start-single-node --insecure --store=path=logs/mystore --max-disk-temp-storage=10GiB\r" eexpect "node starting" interrupt eexpect ":/# " end_test start_test "Check that --max-disk-temp-storage can be expressed as a percentage." send "$argv start-single-node --insecure --store=path=logs/mystore --max-disk-temp-storage=10%\r" eexpect "node starting" interrupt eexpect ":/# " end_test start_test "Check that --max-disk-temp-storage percentage works when the store is in-memory." send "$argv start-single-node --insecure --store=type=mem,size=1GB --max-disk-temp-storage=10%\r" eexpect "node starting" interrupt eexpect ":/# " end_test start_test "Check that memory max flags do not exceed available RAM." send "$argv start-single-node --insecure --cache=.40 --max-sql-memory=.40\r" eexpect "WARNING: the sum of --max-sql-memory" eexpect "is larger than" eexpect "of total RAM" eexpect "increased risk" eexpect "node starting" interrupt eexpect ":/# " end_test start_test "Check that not using --host nor --advertise causes a user warning." send "$argv start-single-node --insecure\r" eexpect "WARNING: neither --listen-addr nor --advertise-addr was specified" eexpect "node starting" interrupt eexpect ":/# " end_test start_test "Check that --listening-url-file gets created with the right data" send "$argv start-single-node --insecure --listening-url-file=foourl\r" eexpect "node starting" system "grep -q 'postgresql://.*@.*:\[0-9\]\[0-9\]*' foourl" interrupt eexpect ":/# " end_test start_test {Check that the "failed running SUBCOMMAND" message does not consider a flag the subcommand} send "$argv --vmodule=*=2 start --garbage\r" eexpect {Failed running "start"} eexpect ":/# " end_test start_test {Check that the "failed running SUBCOMMAND" message handles nested subcommands} send "$argv --vmodule=*=2 debug zip --garbage\r" eexpect {Failed running "debug zip"} eexpect ":/# " end_test start_test {Check that the "failed running SUBCOMMAND" message handles missing subcommands} send "$argv --vmodule=*=2 --garbage\r" eexpect {Failed running "cockroach"} eexpect ":/# " end_test start_test "Check that start without --join errors out" send "$argv start --insecure\r" eexpect "ERROR: no --join flags provided to 'cockroach start'" eexpect "HINT: Consider using 'cockroach init' or 'cockroach start-single-node' instead" eexpect {Failed running "start"} end_test start_test "Check that demo start-up flags are reported to telemetry" send "$argv demo --empty --echo-sql --logtostderr=WARNING\r" eexpect "defaultdb>" send "SELECT * FROM crdb_internal.feature_usage WHERE feature_name LIKE 'cli.demo.%' ORDER BY 1;\r" eexpect feature_name eexpect "cli.demo.explicitflags.echo-sql" eexpect "cli.demo.explicitflags.empty" eexpect "cli.demo.explicitflags.logtostderr" eexpect "cli.demo.runs" eexpect "defaultdb>" interrupt eexpect ":/# " end_test start_test "Check that locality flags without a region tier warn" send "$argv start-single-node --insecure --locality=data-center=us-east,zone=a\r" eexpect "WARNING: The --locality flag does not contain a" interrupt eexpect ":/# " end_test start_server $argv start_test "Check that server start-up flags are reported to telemetry" send "$argv sql --insecure\r" eexpect "defaultdb>" send "SELECT * FROM crdb_internal.feature_usage WHERE feature_name LIKE 'cli.start-single-node.%' ORDER BY 1;\r" eexpect 
feature_name eexpect "cli.start-single-node.explicitflags.insecure" eexpect "cli.start-single-node.explicitflags.listening-url-file" eexpect "cli.start-single-node.explicitflags.max-sql-memory" eexpect "cli.start-single-node.runs" eexpect "defaultdb>" interrupt eexpect ":/# " end_test start_test "Check that a client can connect using the URL env var" send "export COCKROACH_URL=`cat server_url`;\r" eexpect ":/# " send "$argv sql\r" eexpect "defaultdb>" interrupt eexpect ":/# " end_test stop_server $argv send "exit 0\r" eexpect eof
pkg/cli/interactive_tests/test_flags.tcl
0
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.0001748256472637877, 0.00017093306814786047, 0.00016652776685077697, 0.00017141982971224934, 0.000002608014256111346 ]
{ "id": 6, "code_window": [ "\tlast := c.makeNewScan(sp, cons.Columns, newHardLimit, newSpans.Get(0))\n", "\tfor i, cnt := 1, newSpans.Count(); i < cnt; i++ {\n", "\t\tnewScan := c.makeNewScan(sp, cons.Columns, newHardLimit, newSpans.Get(i))\n", "\t\tlast = c.e.f.ConstructUnion(last, newScan, &memo.SetPrivate{\n", "\t\t\tLeftCols: opt.ColSetToList(last.Relational().OutputCols),\n", "\t\t\tRightCols: opt.ColSetToList(newScan.Relational().OutputCols),\n", "\t\t\tOutCols: oldColList,\n", "\t\t})\n", "\t}\n", "\treturn last\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\tLeftCols: last.Relational().OutputCols.ToList(),\n", "\t\t\tRightCols: newScan.Relational().OutputCols.ToList(),\n" ], "file_path": "pkg/sql/opt/xform/limit_funcs.go", "type": "replace", "edit_start_line_idx": 238 }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. import { Col, Row, Tabs } from "antd"; import { SummaryCard } from "src/views/shared/components/summaryCard"; import React from "react"; import { Helmet } from "react-helmet"; import { connect } from "react-redux"; import { RouteComponentProps, withRouter } from "react-router-dom"; import * as protos from "src/js/protos"; import { generateTableID, refreshTableDetails, refreshTableStats, refreshDatabaseDetails } from "src/redux/apiReducers"; import { LocalSetting } from "src/redux/localsettings"; import { AdminUIState } from "src/redux/state"; import { databaseNameAttr, tableNameAttr } from "src/util/constants"; import { Bytes } from "src/util/format"; import { TableInfo } from "src/views/databases/data/tableInfo"; import { SortSetting } from "src/views/shared/components/sortabletable"; import { SortedTable } from "src/views/shared/components/sortedtable"; const { TabPane } = Tabs; import { getMatchParamByName } from "src/util/query"; import { databaseDetails } from "../databaseSummary"; import { Button } from "@cockroachlabs/admin-ui-components"; import { ArrowLeft } from "@cockroachlabs/icons"; import SqlBox from "src/views/shared/components/sql/box"; class GrantsSortedTable extends SortedTable<protos.cockroach.server.serverpb.TableDetailsResponse.IGrant> {} const databaseTableGrantsSortSetting = new LocalSetting<AdminUIState, SortSetting>( "tableDetails/sort_setting/grants", (s) => s.localSettings, ); /** * TableMainData are the data properties which should be passed to the TableMain * container. */ interface TableMainData { tableInfo: TableInfo; grantsSortSetting: SortSetting; } /** * TableMainActions are the action dispatchers which should be passed to the * TableMain container. */ interface TableMainActions { // Refresh the table data refreshTableDetails: typeof refreshTableDetails; refreshTableStats: typeof refreshTableStats; refreshDatabaseDetails: typeof refreshDatabaseDetails; setSort: typeof databaseTableGrantsSortSetting.set; dbResponse: protos.cockroach.server.serverpb.DatabaseDetailsResponse; } /** * TableMainProps is the type of the props object that must be passed to * TableMain component. */ type TableMainProps = TableMainData & TableMainActions & RouteComponentProps; /** * TableMain renders the main content of the databases page, which is primarily a * data table of all databases. 
*/ export class TableMain extends React.Component<TableMainProps, {}> { componentDidMount() { const database = getMatchParamByName(this.props.match, databaseNameAttr); const table = getMatchParamByName(this.props.match, tableNameAttr); this.props.refreshDatabaseDetails(new protos.cockroach.server.serverpb.DatabaseDetailsRequest({ database: getMatchParamByName(this.props.match, databaseNameAttr) })); this.props.refreshTableDetails(new protos.cockroach.server.serverpb.TableDetailsRequest({ database, table, })); this.props.refreshTableStats(new protos.cockroach.server.serverpb.TableStatsRequest({ database, table, })); } prevPage = () => this.props.history.goBack(); render() { const { tableInfo, grantsSortSetting, match, dbResponse } = this.props; const database = getMatchParamByName(match, databaseNameAttr); const table = getMatchParamByName(match, tableNameAttr); const title = `${database}.${table}`; if (tableInfo) { return ( <div> <Helmet title={`${title} Table | Databases`} /> <div className="page--header"> <Button onClick={this.prevPage} type="unstyled-link" size="small" icon={<ArrowLeft fontSize={"10px"} />} iconPosition="left" > Databases </Button> <div className="database-summary-title"> <h2 className="base-heading">{ title }</h2> </div> </div> <section className="section section--container table-details"> <Tabs defaultActiveKey="1" className="cockroach--tabs"> <TabPane tab="Overview" key="1"> <Row gutter={16}> <Col className="gutter-row" span={16}> <SqlBox value={ tableInfo.createStatement || "" } zone={dbResponse} /> </Col> <Col className="gutter-row" span={8}> <SummaryCard> <Row> <Col span={12}> <div className="summary--card__counting"> <h3 className="summary--card__counting--value">{Bytes(tableInfo.physicalSize)}</h3> <p className="summary--card__counting--label">Size</p> </div> </Col> <Col span={12}> <div className="summary--card__counting"> <h3 className="summary--card__counting--value">{tableInfo.numReplicas}</h3> <p className="summary--card__counting--label">Replicas</p> </div> </Col> <Col span={24}> <div className="summary--card__counting"> <h3 className="summary--card__counting--value">{tableInfo.rangeCount}</h3> <p className="summary--card__counting--label">Ranges</p> </div> </Col> </Row> </SummaryCard> </Col> </Row> </TabPane> <TabPane tab="Grants" key="2"> <SummaryCard> <GrantsSortedTable data={tableInfo.grants} sortSetting={grantsSortSetting} onChangeSortSetting={(setting) => this.props.setSort(setting) } columns={[ { title: "User", cell: (grants) => grants.user, sort: (grants) => grants.user, }, { title: "Grants", cell: (grants) => grants.privileges.join(", "), sort: (grants) => grants.privileges.join(", "), }, ]}/> </SummaryCard> </TabPane> </Tabs> </section> </div> ); } return <div>No results.</div>; } } /****************************** * SELECTORS */ export function selectTableInfo(state: AdminUIState, props: RouteComponentProps): TableInfo { const db = getMatchParamByName(props.match, databaseNameAttr); const table = getMatchParamByName(props.match, tableNameAttr); const details = state.cachedData.tableDetails[generateTableID(db, table)]; const stats = state.cachedData.tableStats[generateTableID(db, table)]; return new TableInfo(table, details && details.data, stats && stats.data); } const mapStateToProps = (state: AdminUIState, ownProps: RouteComponentProps) => ({ tableInfo: selectTableInfo(state, ownProps), grantsSortSetting: databaseTableGrantsSortSetting.selector(state), dbResponse: databaseDetails(state)[getMatchParamByName(ownProps.match, databaseNameAttr)] && 
databaseDetails(state)[getMatchParamByName(ownProps.match, databaseNameAttr)].data, }); const mapDispatchToProps = { setSort: databaseTableGrantsSortSetting.set, refreshTableDetails, refreshTableStats, refreshDatabaseDetails, }; // Connect the TableMain class with our redux store. const tableMainConnected = withRouter(connect( mapStateToProps, mapDispatchToProps, )(TableMain)); export default tableMainConnected;
pkg/ui/src/views/databases/containers/tableDetails/index.tsx
0
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.0001806118816602975, 0.00017249502707272768, 0.00016393155965488404, 0.00017280290194321424, 0.00000397396024709451 ]
{ "id": 7, "code_window": [ "// output column IDs of the Union expression.\n", "func (c *CustomFuncs) MakeSetPrivateForSplitDisjunction(\n", "\tleft, right *memo.ScanPrivate,\n", ") *memo.SetPrivate {\n", "\tleftAndOutCols := opt.ColSetToList(left.Cols)\n", "\treturn &memo.SetPrivate{\n", "\t\tLeftCols: leftAndOutCols,\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep" ], "after_edit": [ "\tleftAndOutCols := left.Cols.ToList()\n" ], "file_path": "pkg/sql/opt/xform/select_funcs.go", "type": "replace", "edit_start_line_idx": 1000 }
// Copyright 2020 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package xform import ( "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/opt/constraint" "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" "github.com/cockroachdb/cockroach/pkg/sql/opt/ordering" "github.com/cockroachdb/cockroach/pkg/sql/opt/props" "github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" ) // LimitScanPrivate constructs a new ScanPrivate value that is based on the // given ScanPrivate. The new private's HardLimit is set to the given limit, // which must be a constant int datum value. The other fields are inherited from // the existing private. func (c *CustomFuncs) LimitScanPrivate( scanPrivate *memo.ScanPrivate, limit tree.Datum, required physical.OrderingChoice, ) *memo.ScanPrivate { // Determine the scan direction necessary to provide the required ordering. _, reverse := ordering.ScanPrivateCanProvide(c.e.mem.Metadata(), scanPrivate, &required) newScanPrivate := *scanPrivate newScanPrivate.HardLimit = memo.MakeScanLimit(int64(*limit.(*tree.DInt)), reverse) return &newScanPrivate } // CanLimitFilteredScan returns true if the given scan has not already been // limited, and is constrained or scans a partial index. This is only possible // when the required ordering of the rows to be limited can be satisfied by the // Scan operator. // // NOTE: Limiting unconstrained, non-partial index scans is done by the // GenerateLimitedScans rule, since that can require IndexJoin operators // to be generated. func (c *CustomFuncs) CanLimitFilteredScan( scanPrivate *memo.ScanPrivate, required physical.OrderingChoice, ) bool { if scanPrivate.HardLimit != 0 { // Don't push limit into scan if scan is already limited. This would // usually only happen when normalizations haven't run, as otherwise // redundant Limit operators would be discarded. return false } md := c.e.mem.Metadata() if scanPrivate.Constraint == nil && scanPrivate.PartialIndexPredicate(md) == nil { // This is not a constrained scan nor a partial index scan, so skip it. // The GenerateLimitedScans rule is responsible for limited // unconstrained scans on non-partial indexes. return false } ok, _ := ordering.ScanPrivateCanProvide(c.e.mem.Metadata(), scanPrivate, &required) return ok } // GenerateLimitedScans enumerates all non-inverted and non-partial secondary // indexes on the Scan operator's table and tries to create new limited Scan // operators from them. Since this only needs to be done once per table, // GenerateLimitedScans should only be called on the original unaltered primary // index Scan operator (i.e. not constrained or limited). // // For a secondary index that "covers" the columns needed by the scan, a single // limited Scan operator is created. For a non-covering index, an IndexJoin is // constructed to add missing columns to the limited Scan. // // Inverted index scans are not guaranteed to produce a specific number // of result rows because they contain multiple entries for a single row // indexed. Therefore, they cannot be considered for limited scans. 
// // Partial indexes do not index every row in the table and they can only be used // in cases where a query filter implies the partial index predicate. // GenerateLimitedScans deals with limits, but no filters, so it cannot generate // limited partial index scans. Limiting partial indexes is done by the // PushLimitIntoFilteredScans rule. func (c *CustomFuncs) GenerateLimitedScans( grp memo.RelExpr, scanPrivate *memo.ScanPrivate, limit tree.Datum, required physical.OrderingChoice, ) { limitVal := int64(*limit.(*tree.DInt)) var sb indexScanBuilder sb.init(c, scanPrivate.Table) // Iterate over all non-inverted, non-partial indexes, looking for those // that can be limited. var iter scanIndexIter iter.Init(c.e.mem, &c.im, scanPrivate, nil /* filters */, rejectInvertedIndexes|rejectPartialIndexes) iter.ForEach(func(index cat.Index, filters memo.FiltersExpr, indexCols opt.ColSet, isCovering bool) { newScanPrivate := *scanPrivate newScanPrivate.Index = index.Ordinal() // If the alternate index does not conform to the ordering, then skip it. // If reverse=true, then the scan needs to be in reverse order to match // the required ordering. ok, reverse := ordering.ScanPrivateCanProvide( c.e.mem.Metadata(), &newScanPrivate, &required, ) if !ok { return } newScanPrivate.HardLimit = memo.MakeScanLimit(limitVal, reverse) // If the alternate index includes the set of needed columns, then construct // a new Scan operator using that index. if isCovering { sb.setScan(&newScanPrivate) sb.build(grp) return } // Otherwise, try to construct an IndexJoin operator that provides the // columns missing from the index. if scanPrivate.Flags.NoIndexJoin { return } // Scan whatever columns we need which are available from the index, plus // the PK columns. newScanPrivate.Cols = indexCols.Intersection(scanPrivate.Cols) newScanPrivate.Cols.UnionWith(sb.primaryKeyCols()) sb.setScan(&newScanPrivate) // The Scan operator will go into its own group (because it projects a // different set of columns), and the IndexJoin operator will be added to // the same group as the original Limit operator. sb.addIndexJoin(scanPrivate.Cols) sb.build(grp) }) } // ScanIsLimited returns true if the scan operator with the given ScanPrivate is // limited. func (c *CustomFuncs) ScanIsLimited(sp *memo.ScanPrivate) bool { return sp.HardLimit != 0 } // ScanIsInverted returns true if the index of the given ScanPrivate is an // inverted index. func (c *CustomFuncs) ScanIsInverted(sp *memo.ScanPrivate) bool { md := c.e.mem.Metadata() idx := md.Table(sp.Table).Index(sp.Index) return idx.IsInverted() } // SplitScanIntoUnionScans returns a Union of Scan operators with hard limits // that each scan over a single key from the original scan's constraints. This // is beneficial in cases where the original scan had to scan over many rows but // had relatively few keys to scan over. // TODO(drewk): handle inverted scans. func (c *CustomFuncs) SplitScanIntoUnionScans( limitOrdering physical.OrderingChoice, scan memo.RelExpr, sp *memo.ScanPrivate, limit tree.Datum, ) memo.RelExpr { const maxScanCount = 16 const threshold = 4 cons, ok := c.getKnownScanConstraint(sp) if !ok { // No valid constraint was found. return nil } keyCtx := constraint.MakeKeyContext(&cons.Columns, c.e.evalCtx) limitVal := int(*limit.(*tree.DInt)) spans := cons.Spans // Retrieve the number of keys in the spans. keyCount, ok := spans.KeyCount(&keyCtx) if !ok { return nil } if keyCount <= 1 { // We need more than one key in order to split the existing Scan into // multiple Scans. 
return nil } if int(keyCount) > maxScanCount { // The number of new Scans created would exceed maxScanCount. return nil } // Check that the number of rows scanned by the new plan will be smaller than // the number scanned by the old plan by at least a factor of "threshold". if float64(int(keyCount)*limitVal*threshold) >= scan.Relational().Stats.RowCount { // Splitting the scan may not be worth the overhead; creating a sequence of // scans unioned together is expensive, so we don't want to create the plan // only for the optimizer to use something else. We only want to create the // plan if it is likely to be used. return nil } // Retrieve the length of the keys. All keys are required to be the same // length (this will be checked later) so we can simply use the length of the // first key. keyLength := spans.Get(0).StartKey().Length() // If the index ordering has a prefix of columns of length keyLength followed // by the limitOrdering columns, the scan can be split. Otherwise, return nil. hasLimitOrderingSeq, reverse := indexHasOrderingSequence( c.e.mem.Metadata(), scan, sp, limitOrdering, keyLength) if !hasLimitOrderingSeq { return nil } // Construct a hard limit for the new scans using the result of // hasLimitOrderingSeq. newHardLimit := memo.MakeScanLimit(int64(limitVal), reverse) // Construct a new Spans object containing a new Span for each key in the // original Scan's spans. newSpans, ok := spans.ExtractSingleKeySpans(&keyCtx, maxScanCount) if !ok { // Single key spans could not be created. return nil } // Construct a new ScanExpr for each span and union them all together. We // output the old ColumnIDs from each union. oldColList := opt.ColSetToList(scan.Relational().OutputCols) last := c.makeNewScan(sp, cons.Columns, newHardLimit, newSpans.Get(0)) for i, cnt := 1, newSpans.Count(); i < cnt; i++ { newScan := c.makeNewScan(sp, cons.Columns, newHardLimit, newSpans.Get(i)) last = c.e.f.ConstructUnion(last, newScan, &memo.SetPrivate{ LeftCols: opt.ColSetToList(last.Relational().OutputCols), RightCols: opt.ColSetToList(newScan.Relational().OutputCols), OutCols: oldColList, }) } return last } // indexHasOrderingSequence returns whether the scan can provide a given // ordering under the assumption that we are scanning a single-key span with the // given keyLength (and if so, whether we need to scan it in reverse). // For example: // // index: +1/-2/+3, // limitOrdering: -2/+3, // keyLength: 1, // => // hasSequence: True, reverse: False // // index: +1/-2/+3, // limitOrdering: +2/-3, // keyLength: 1, // => // hasSequence: True, reverse: True // // index: +1/-2/+3/+4, // limitOrdering: +3/+4, // keyLength: 1, // => // hasSequence: False, reverse: False // func indexHasOrderingSequence( md *opt.Metadata, scan memo.RelExpr, sp *memo.ScanPrivate, limitOrdering physical.OrderingChoice, keyLength int, ) (hasSequence, reverse bool) { tableMeta := md.TableMeta(sp.Table) index := tableMeta.Table.Index(sp.Index) if keyLength > index.ColumnCount() { // The key contains more columns than the index. The limit ordering sequence // cannot be part of the index ordering. return false, false } // Create a copy of the Scan's FuncDepSet, and add the first 'keyCount' // columns from the index as constant columns. The columns are constant // because the span contains only a single key on those columns. 
var fds props.FuncDepSet fds.CopyFrom(&scan.Relational().FuncDeps) prefixCols := opt.ColSet{} for i := 0; i < keyLength; i++ { col := sp.Table.IndexColumnID(index, i) prefixCols.Add(col) } fds.AddConstants(prefixCols) // Use fds to simplify a copy of the limit ordering; the prefix columns will // become part of the optional ColSet. requiredOrdering := limitOrdering.Copy() requiredOrdering.Simplify(&fds) // If the ScanPrivate can satisfy requiredOrdering, it must return columns // ordered by a prefix of length keyLength, followed by the columns of // limitOrdering. return ordering.ScanPrivateCanProvide(md, sp, &requiredOrdering) } // makeNewScan constructs a new Scan operator with a new TableID and the given // limit and span. All ColumnIDs and references to those ColumnIDs are // replaced with new ones from the new TableID. All other fields are simply // copied from the old ScanPrivate. func (c *CustomFuncs) makeNewScan( sp *memo.ScanPrivate, columns constraint.Columns, newHardLimit memo.ScanLimit, span *constraint.Span, ) memo.RelExpr { newScanPrivate := c.DuplicateScanPrivate(sp) // duplicateScanPrivate does not initialize the Constraint or HardLimit // fields, so we do that now. newScanPrivate.HardLimit = newHardLimit // Construct the new Constraint field with the given span and remapped // ordering columns. var newSpans constraint.Spans newSpans.InitSingleSpan(span) newConstraint := &constraint.Constraint{ Columns: columns.RemapColumns(sp.Table, newScanPrivate.Table), Spans: newSpans, } newScanPrivate.Constraint = newConstraint return c.e.f.ConstructScan(newScanPrivate) } // getKnownScanConstraint returns a Constraint that is known to hold true for // the output of the Scan operator with the given ScanPrivate. If the // ScanPrivate has a Constraint, the scan Constraint is returned. Otherwise, an // effort is made to retrieve a Constraint from the underlying table's check // constraints. getKnownScanConstraint assumes that the scan is not inverted. func (c *CustomFuncs) getKnownScanConstraint( sp *memo.ScanPrivate, ) (cons *constraint.Constraint, found bool) { if sp.Constraint != nil { // The ScanPrivate has a constraint, so return it. cons = sp.Constraint } else { // Build a constraint set with the check constraints of the underlying // table. filters := c.checkConstraintFilters(sp.Table) instance := c.initIdxConstraintForIndex( nil, /* requiredFilters */ filters, sp.Table, sp.Index, false, /* isInverted */ ) cons = instance.Constraint() } return cons, !cons.IsUnconstrained() }
pkg/sql/opt/xform/limit_funcs.go
1
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.9621905088424683, 0.06362707167863846, 0.000160481984494254, 0.0009258412756025791, 0.1842711865901947 ]
{ "id": 7, "code_window": [ "// output column IDs of the Union expression.\n", "func (c *CustomFuncs) MakeSetPrivateForSplitDisjunction(\n", "\tleft, right *memo.ScanPrivate,\n", ") *memo.SetPrivate {\n", "\tleftAndOutCols := opt.ColSetToList(left.Cols)\n", "\treturn &memo.SetPrivate{\n", "\t\tLeftCols: leftAndOutCols,\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep" ], "after_edit": [ "\tleftAndOutCols := left.Cols.ToList()\n" ], "file_path": "pkg/sql/opt/xform/select_funcs.go", "type": "replace", "edit_start_line_idx": 1000 }
#!/usr/bin/env bash set -euxo pipefail write_teamcity_config() { sudo -u agent tee /home/agent/conf/buildAgent.properties <<EOF serverUrl=https://teamcity.cockroachdb.com name= workDir=../work tempDir=../temp systemDir=../system EOF } # Avoid saving any Bash history. HISTSIZE=0 # Add third-party APT repositories. apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 0EBFCD88 cat > /etc/apt/sources.list.d/docker.list <<EOF deb https://download.docker.com/linux/ubuntu bionic stable EOF # Per https://github.com/golang/go/wiki/Ubuntu add-apt-repository ppa:longsleep/golang-backports # Git 2.7, which ships with Xenial, has a bug where submodule metadata sometimes # uses absolute paths instead of relative paths, which means the affected # submodules cannot be mounted in Docker containers. Use the latest version of # Git until we upgrade to a newer Ubuntu distribution. add-apt-repository ppa:git-core/ppa apt-get update --yes # Install the necessary dependencies. Keep this list small! apt-get install --yes \ docker-ce \ docker-compose \ gnome-keyring \ gnupg2 \ git \ golang-go \ openjdk-11-jre-headless \ pass \ unzip # Installing gnome-keyring prevents the error described in # https://github.com/moby/moby/issues/34048 # Add a user for the TeamCity agent if it doesn't exist already. id -u agent &>/dev/null 2>&1 || adduser agent --disabled-password # Give the user for the TeamCity agent Docker rights. usermod -a -G docker agent # Download the TeamCity agent code and install its configuration. # N.B.: This must be done as the agent user. su - agent <<'EOF' set -euxo pipefail echo 'export GOPATH="$HOME"/work/.go' >> .profile && source .profile wget https://teamcity.cockroachdb.com/update/buildAgent.zip unzip buildAgent.zip rm buildAgent.zip # Cache the current version of the main Cockroach repository on the agent to # speed up the first build. As of 2017-10-13, the main repository is 450MB (!). # The other repositories we run CI on are small enough not to be worth caching, # but feel free to add them if it becomes necessary. # # WARNING: This uses undocumented implementation details of TeamCity's Git # alternate system. git clone --bare https://github.com/cockroachdb/cockroach system/git/cockroach.git cat > system/git/map <<EOS https://github.com/cockroachdb/cockroach = cockroach.git EOS # For master and the last two release, download the builder and acceptance # containers. repo="$GOPATH"/src/github.com/cockroachdb/cockroach git clone --shared system/git/cockroach.git "$repo" cd "$repo" # Work around a bug in the builder's git version (at the time of writing) # which would corrupt the submodule defs. Probably good to remove once the # builder uses Ubuntu 18.04 or higher. git submodule update --init --recursive for branch in $(git branch --all --list --sort=-committerdate 'origin/release-*' | head -n1) master do # Clean out all non-checked-in files. This is because of the check-in of # the generated execgen files. Once we are no longer building 20.1 builds, # the `git clean -dxf` line can be removed. git clean -dxf git checkout "$branch" COCKROACH_BUILDER_CCACHE=1 build/builder.sh make test testrace TESTTIMEOUT=45m TESTS=- # TODO(benesch): store the acceptanceversion somewhere more accessible. docker pull $(git grep cockroachdb/acceptance -- '*.go' | sed -E 's/.*"([^"]*).*"/\1/') || true done cd - EOF write_teamcity_config # Configure the Teamcity agent to start when the server starts. # # systemd will nuke the auto-upgrade process unless we mark the service as # "oneshot". 
This has the unfortunate side-effect of making `systemctl start # teamcity-agent` hang forever when run manually, but it at least works when the # system starts the service at bootup. # # TODO(benesch): see if we can fix this with Type=forking, KillMode=process. cat > /etc/systemd/system/teamcity-agent.service <<EOF [Unit] Description=TeamCity Build Agent After=network.target Requires=network.target [Service] Type=oneshot RemainAfterExit=yes User=agent PIDFile=/home/agent/logs/buildAgent.pid ExecStart=/home/agent/bin/agent.sh start ExecStop=/home/agent/bin/agent.sh stop SuccessExitStatus=0 143 [Install] WantedBy=multi-user.target EOF systemctl enable teamcity-agent.service # Enable LRU pruning of Docker images. # https://github.com/stepchowfun/docuum#running-docuum-in-a-docker-container DOCUUM_VERSION=0.9.4 cat > /etc/systemd/system/docuum.service <<EOF [Unit] Description=Remove Stale Docker Images After=docker.service Requires=docker.service [Service] ExecStart=/usr/bin/docker run \ --init \ --rm \ --tty \ --name docuum \ --volume /var/run/docker.sock:/var/run/docker.sock \ --volume docuum:/root stephanmisc/docuum:$DOCUUM_VERSION \ --threshold '128 GB' Restart=always [Install] WantedBy=multi-user.target EOF systemctl enable docuum.service # Prefetch the image docker pull stephanmisc/docuum:$DOCUUM_VERSION # Boot the TeamCity agent so it can be upgraded by the server (i.e., download # and install whatever plugins the server has installed) before we bake the # image. # # WARNING: There seems to be no clean way to check when the upgrade is complete. # As a hack, the string below seems to appear in the logs iff the upgrade is # successful. systemctl start teamcity-agent.service until grep -q 'Updating agent parameters on the server' /home/agent/logs/teamcity-agent.log do echo . sleep 5 done # Re-write the TeamCity config to discard the name and authorization token # assigned by the TeamCity server; otherwise, agents created from this image # might look like unauthorized duplicates to the TeamCity server. systemctl stop teamcity-agent.service write_teamcity_config # Prepare for imaging by removing unnecessary files. rm -rf /home/agent/logs apt-get clean sync
build/packer/teamcity-agent.sh
0
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.00017735642904881388, 0.0001732678065309301, 0.00017023496911861002, 0.00017241088789887726, 0.0000021038847535237437 ]
{ "id": 7, "code_window": [ "// output column IDs of the Union expression.\n", "func (c *CustomFuncs) MakeSetPrivateForSplitDisjunction(\n", "\tleft, right *memo.ScanPrivate,\n", ") *memo.SetPrivate {\n", "\tleftAndOutCols := opt.ColSetToList(left.Cols)\n", "\treturn &memo.SetPrivate{\n", "\t\tLeftCols: leftAndOutCols,\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep" ], "after_edit": [ "\tleftAndOutCols := left.Cols.ToList()\n" ], "file_path": "pkg/sql/opt/xform/select_funcs.go", "type": "replace", "edit_start_line_idx": 1000 }
// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package testutils

import (
	"bufio"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
)

// ReadAllFiles reads all of the files matching pattern, thus ensuring they are
// in the OS buffer cache.
func ReadAllFiles(pattern string) {
	matches, err := filepath.Glob(pattern)
	if err != nil {
		return
	}
	for _, m := range matches {
		f, err := os.Open(m)
		if err != nil {
			continue
		}
		_, _ = io.Copy(ioutil.Discard, bufio.NewReader(f))
		f.Close()
	}
}
pkg/testutils/files.go
0
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.00017909164307639003, 0.00017276013386435807, 0.00016605714336037636, 0.0001729459036141634, 0.0000050220965022163 ]
{ "id": 7, "code_window": [ "// output column IDs of the Union expression.\n", "func (c *CustomFuncs) MakeSetPrivateForSplitDisjunction(\n", "\tleft, right *memo.ScanPrivate,\n", ") *memo.SetPrivate {\n", "\tleftAndOutCols := opt.ColSetToList(left.Cols)\n", "\treturn &memo.SetPrivate{\n", "\t\tLeftCols: leftAndOutCols,\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "keep", "keep" ], "after_edit": [ "\tleftAndOutCols := left.Cols.ToList()\n" ], "file_path": "pkg/sql/opt/xform/select_funcs.go", "type": "replace", "edit_start_line_idx": 1000 }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package tree_test import ( "bytes" "context" "flag" "fmt" "io/ioutil" "path/filepath" "runtime" "strings" "testing" "github.com/cockroachdb/cockroach/pkg/sql/parser" _ "github.com/cockroachdb/cockroach/pkg/sql/sem/builtins" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/pretty" "golang.org/x/sync/errgroup" ) var ( flagWritePretty = flag.Bool("rewrite-pretty", false, "rewrite pretty test outputs") testPrettyCfg = func() tree.PrettyCfg { cfg := tree.DefaultPrettyCfg() cfg.JSONFmt = true return cfg }() ) // TestPrettyData reads in a single SQL statement from a file, formats // it at 40 characters width, and compares that output to a known-good // output file. It is most useful when changing or implementing the // doc interface for a node, and should be used to compare and verify // the changed output. func TestPrettyDataShort(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) matches, err := filepath.Glob(filepath.Join("testdata", "pretty", "*.sql")) if err != nil { t.Fatal(err) } if *flagWritePretty { t.Log("WARNING: do not forget to run TestPrettyData with build flag 'nightly' and the -rewrite-pretty flag too!") } cfg := testPrettyCfg cfg.Align = tree.PrettyNoAlign t.Run("ref", func(t *testing.T) { runTestPrettyData(t, "ref", cfg, matches, true /*short*/) }) cfg.Align = tree.PrettyAlignAndDeindent t.Run("align-deindent", func(t *testing.T) { runTestPrettyData(t, "align-deindent", cfg, matches, true /*short*/) }) cfg.Align = tree.PrettyAlignOnly t.Run("align-only", func(t *testing.T) { runTestPrettyData(t, "align-only", cfg, matches, true /*short*/) }) } func runTestPrettyData( t *testing.T, prefix string, cfg tree.PrettyCfg, matches []string, short bool, ) { for _, m := range matches { m := m t.Run(filepath.Base(m), func(t *testing.T) { sql, err := ioutil.ReadFile(m) if err != nil { t.Fatal(err) } stmt, err := parser.ParseOne(string(sql)) if err != nil { t.Fatal(err) } // We have a statement, now we need to format it at all possible line // lengths. We use the length of the string + 10 as the upper bound to try to // find what happens at the longest line length. Preallocate a result slice and // work chan, then fire off a bunch of workers to compute all of the variants. 
var res []string if short { res = []string{""} } else { res = make([]string, len(sql)+10) } type param struct{ idx, numCols int } work := make(chan param, len(res)) if short { work <- param{0, 40} } else { for i := range res { work <- param{i, i + 1} } } close(work) g, _ := errgroup.WithContext(context.Background()) worker := func() error { for p := range work { thisCfg := cfg thisCfg.LineWidth = p.numCols res[p.idx] = thisCfg.Pretty(stmt.AST) } return nil } for i := 0; i < runtime.NumCPU(); i++ { g.Go(worker) } if err := g.Wait(); err != nil { t.Fatal(err) } var sb strings.Builder for i, s := range res { // Only write each new result to the output, along with a small header // indicating the line length. if i == 0 || s != res[i-1] { fmt.Fprintf(&sb, "%d:\n%s\n%s\n\n", i+1, strings.Repeat("-", i+1), s) } } var gotB bytes.Buffer gotB.WriteString("// Code generated by TestPretty. DO NOT EDIT.\n") gotB.WriteString("// GENERATED FILE DO NOT EDIT\n") gotB.WriteString(sb.String()) gotB.WriteByte('\n') got := gotB.String() ext := filepath.Ext(m) outfile := m[:len(m)-len(ext)] + "." + prefix + ".golden" if short { outfile = outfile + ".short" } if *flagWritePretty { if err := ioutil.WriteFile(outfile, []byte(got), 0666); err != nil { t.Fatal(err) } return } expect, err := ioutil.ReadFile(outfile) if err != nil { t.Fatal(err) } if string(expect) != got { t.Fatalf("expected:\n%s\ngot:\n%s", expect, got) } sqlutils.VerifyStatementPrettyRoundtrip(t, string(sql)) }) } } func TestPrettyVerify(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) tests := map[string]string{ // Verify that INTERVAL is maintained. `SELECT interval '-2µs'`: `SELECT '-00:00:00.000002':::INTERVAL`, } for orig, pretty := range tests { t.Run(orig, func(t *testing.T) { sqlutils.VerifyStatementPrettyRoundtrip(t, orig) stmt, err := parser.ParseOne(orig) if err != nil { t.Fatal(err) } got := tree.Pretty(stmt.AST) if pretty != got { t.Fatalf("got: %s\nexpected: %s", got, pretty) } }) } } func BenchmarkPrettyData(b *testing.B) { matches, err := filepath.Glob(filepath.Join("testdata", "pretty", "*.sql")) if err != nil { b.Fatal(err) } var docs []pretty.Doc cfg := tree.DefaultPrettyCfg() for _, m := range matches { sql, err := ioutil.ReadFile(m) if err != nil { b.Fatal(err) } stmt, err := parser.ParseOne(string(sql)) if err != nil { b.Fatal(err) } docs = append(docs, cfg.Doc(stmt.AST)) } b.ResetTimer() for i := 0; i < b.N; i++ { for _, doc := range docs { for _, w := range []int{1, 30, 80} { pretty.Pretty(doc, w, true /*useTabs*/, 4 /*tabWidth*/, nil /* keywordTransform */) } } } } func TestPrettyExprs(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) tests := map[tree.Expr]string{ &tree.CastExpr{ Expr: tree.NewDString("foo"), Type: types.MakeCollatedString(types.String, "en"), }: `CAST('foo':::STRING AS STRING) COLLATE en`, } for expr, pretty := range tests { got := tree.Pretty(expr) if pretty != got { t.Fatalf("got: %s\nexpected: %s", got, pretty) } } }
pkg/sql/sem/tree/pretty_test.go
0
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.0001792693219613284, 0.00017128129547927529, 0.00016462487110402435, 0.00017169013153761625, 0.0000037461584270204185 ]
{ "id": 8, "code_window": [ "\treturn &memo.SetPrivate{\n", "\t\tLeftCols: leftAndOutCols,\n", "\t\tRightCols: opt.ColSetToList(right.Cols),\n", "\t\tOutCols: leftAndOutCols,\n", "\t}\n", "}\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\t\tRightCols: right.Cols.ToList(),\n" ], "file_path": "pkg/sql/opt/xform/select_funcs.go", "type": "replace", "edit_start_line_idx": 1003 }
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package memo import ( "bytes" "context" "fmt" "sort" "strings" "unicode" "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/opt/props" "github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/treeprinter" "github.com/cockroachdb/errors" ) // ScalarFmtInterceptor is a callback that can be set to a custom formatting // function. If the function returns a non-empty string, the normal formatting // code is bypassed. var ScalarFmtInterceptor func(f *ExprFmtCtx, expr opt.ScalarExpr) string // ExprFmtFlags controls which properties of the expression are shown in // formatted output. type ExprFmtFlags int const ( // ExprFmtShowAll shows all properties of the expression. ExprFmtShowAll ExprFmtFlags = 0 // ExprFmtHideMiscProps does not show outer columns, row cardinality, provided // orderings, side effects, or error text in the output. ExprFmtHideMiscProps ExprFmtFlags = 1 << (iota - 1) // ExprFmtHideConstraints does not show inferred constraints in the output. ExprFmtHideConstraints // ExprFmtHideFuncDeps does not show functional dependencies in the output. ExprFmtHideFuncDeps // ExprFmtHideRuleProps does not show rule-specific properties in the output. ExprFmtHideRuleProps // ExprFmtHideStats does not show statistics in the output. ExprFmtHideStats // ExprFmtHideCost does not show expression cost in the output. ExprFmtHideCost // ExprFmtHideQualifications removes the qualification from column labels // (except when a shortened name would be ambiguous). ExprFmtHideQualifications // ExprFmtHideScalars removes subtrees that contain only scalars and replaces // them with the SQL expression (if possible). ExprFmtHideScalars // ExprFmtHidePhysProps hides all required physical properties, except for // Presentation (see ExprFmtHideColumns). ExprFmtHidePhysProps // ExprFmtHideTypes hides type information from columns and scalar // expressions. ExprFmtHideTypes // ExprFmtHideNotNull hides the !null specifier from columns. ExprFmtHideNotNull // ExprFmtHideColumns removes column information. ExprFmtHideColumns // ExprFmtHideAll shows only the basic structure of the expression. // Note: this flag should be used judiciously, as its meaning changes whenever // we add more flags. ExprFmtHideAll ExprFmtFlags = (1 << iota) - 1 ) // HasFlags tests whether the given flags are all set. func (f ExprFmtFlags) HasFlags(subset ExprFmtFlags) bool { return f&subset == subset } // FormatExpr returns a string representation of the given expression, formatted // according to the specified flags. func FormatExpr(e opt.Expr, flags ExprFmtFlags, mem *Memo, catalog cat.Catalog) string { if catalog == nil { // Automatically hide qualifications if we have no catalog. flags |= ExprFmtHideQualifications } f := MakeExprFmtCtx(flags, mem, catalog) f.FormatExpr(e) return f.Buffer.String() } // ExprFmtCtx is passed as context to expression formatting functions, which // need to know the formatting flags and memo in order to format. 
In addition, // a reusable bytes buffer avoids unnecessary allocations. type ExprFmtCtx struct { Buffer *bytes.Buffer // Flags controls how the expression is formatted. Flags ExprFmtFlags // Memo must contain any expression that is formatted. Memo *Memo // Catalog must be set unless the ExprFmtHideQualifications flag is set. Catalog cat.Catalog // nameGen is used to generate a unique name for each relational // subexpression when Memo.saveTablesPrefix is non-empty. These names // correspond to the tables that would be saved if the query were run // with the session variable `save_tables_prefix` set to the same value. nameGen *ExprNameGenerator } // MakeExprFmtCtx creates an expression formatting context from a new buffer. func MakeExprFmtCtx(flags ExprFmtFlags, mem *Memo, catalog cat.Catalog) ExprFmtCtx { return MakeExprFmtCtxBuffer(&bytes.Buffer{}, flags, mem, catalog) } // MakeExprFmtCtxBuffer creates an expression formatting context from an // existing buffer. func MakeExprFmtCtxBuffer( buf *bytes.Buffer, flags ExprFmtFlags, mem *Memo, catalog cat.Catalog, ) ExprFmtCtx { var nameGen *ExprNameGenerator if mem != nil && mem.saveTablesPrefix != "" { nameGen = NewExprNameGenerator(mem.saveTablesPrefix) } return ExprFmtCtx{Buffer: buf, Flags: flags, Memo: mem, Catalog: catalog, nameGen: nameGen} } // HasFlags tests whether the given flags are all set. func (f *ExprFmtCtx) HasFlags(subset ExprFmtFlags) bool { return f.Flags.HasFlags(subset) } // FormatExpr constructs a treeprinter view of the given expression for testing // and debugging, according to the flags in this context. func (f *ExprFmtCtx) FormatExpr(e opt.Expr) { tp := treeprinter.New() f.formatExpr(e, tp) f.Buffer.Reset() f.Buffer.WriteString(tp.String()) } func (f *ExprFmtCtx) space() { f.Buffer.WriteByte(' ') } func (f *ExprFmtCtx) formatExpr(e opt.Expr, tp treeprinter.Node) { scalar, ok := e.(opt.ScalarExpr) if ok { f.formatScalar(scalar, tp) } else { f.formatRelational(e.(RelExpr), tp) } } func (f *ExprFmtCtx) formatRelational(e RelExpr, tp treeprinter.Node) { md := f.Memo.Metadata() relational := e.Relational() required := e.RequiredPhysical() if required == nil { // required can be nil before optimization has taken place. required = physical.MinRequired } // Special cases for merge-join and lookup-join: we want the type of the join // to show up first. 
f.Buffer.Reset() switch t := e.(type) { case *MergeJoinExpr: fmt.Fprintf(f.Buffer, "%v (merge)", t.JoinType) case *LookupJoinExpr: fmt.Fprintf(f.Buffer, "%v (lookup", t.JoinType) FormatPrivate(f, e.Private(), required) f.Buffer.WriteByte(')') case *InvertedJoinExpr: fmt.Fprintf(f.Buffer, "%v (inverted", t.JoinType) FormatPrivate(f, e.Private(), required) f.Buffer.WriteByte(')') case *ZigzagJoinExpr: fmt.Fprintf(f.Buffer, "%v (zigzag", opt.InnerJoinOp) FormatPrivate(f, e.Private(), required) f.Buffer.WriteByte(')') case *ScanExpr, *IndexJoinExpr, *ShowTraceForSessionExpr, *InsertExpr, *UpdateExpr, *UpsertExpr, *DeleteExpr, *SequenceSelectExpr, *WindowExpr, *OpaqueRelExpr, *OpaqueMutationExpr, *OpaqueDDLExpr, *AlterTableSplitExpr, *AlterTableUnsplitExpr, *AlterTableUnsplitAllExpr, *AlterTableRelocateExpr, *ControlJobsExpr, *CancelQueriesExpr, *CancelSessionsExpr, *CreateViewExpr, *ExportExpr: fmt.Fprintf(f.Buffer, "%v", e.Op()) FormatPrivate(f, e.Private(), required) case *SortExpr: if t.InputOrdering.Any() { fmt.Fprintf(f.Buffer, "%v", e.Op()) } else { fmt.Fprintf(f.Buffer, "%v (segmented)", e.Op()) } case *WithExpr: fmt.Fprintf(f.Buffer, "%v &%d", e.Op(), t.ID) if t.Name != "" { fmt.Fprintf(f.Buffer, " (%s)", t.Name) } case *WithScanExpr: fmt.Fprintf(f.Buffer, "%v &%d", e.Op(), t.With) if t.Name != "" { fmt.Fprintf(f.Buffer, " (%s)", t.Name) } default: fmt.Fprintf(f.Buffer, "%v", e.Op()) if opt.IsJoinNonApplyOp(t) { // All join ops that weren't handled above execute as a hash join. if leftEqCols, _ := ExtractJoinEqualityColumns( e.Child(0).(RelExpr).Relational().OutputCols, e.Child(1).(RelExpr).Relational().OutputCols, *e.Child(2).(*FiltersExpr), ); len(leftEqCols) == 0 { // The case where there are no equality columns is executed as a // degenerate case of hash join; let's be explicit about that. f.Buffer.WriteString(" (cross)") } else { f.Buffer.WriteString(" (hash)") } } } tp = tp.Child(f.Buffer.String()) if f.nameGen != nil { name := f.nameGen.GenerateName(e.Op()) tp.Childf("save-table-name: %s", name) } var colList opt.ColList // Special handling to improve the columns display for certain ops. switch t := e.(type) { case *ProjectExpr: // We want the synthesized column IDs to map 1-to-1 to the projections, // and the pass-through columns at the end. // Get the list of columns from the ProjectionsOp, which has the natural // order. for i := range t.Projections { colList = append(colList, t.Projections[i].Col) } // Add pass-through columns. t.Passthrough.ForEach(func(i opt.ColumnID) { colList = append(colList, i) }) case *ValuesExpr: colList = t.Cols case *UnionExpr, *IntersectExpr, *ExceptExpr, *UnionAllExpr, *IntersectAllExpr, *ExceptAllExpr: colList = e.Private().(*SetPrivate).OutCols default: // Fall back to writing output columns in column id order. colList = opt.ColSetToList(e.Relational().OutputCols) } f.formatColumns(e, tp, colList, required.Presentation) switch t := e.(type) { // Special-case handling for GroupBy private; print grouping columns // and internal ordering in addition to full set of columns. 
case *GroupByExpr, *ScalarGroupByExpr, *DistinctOnExpr, *EnsureDistinctOnExpr, *UpsertDistinctOnExpr, *EnsureUpsertDistinctOnExpr: private := e.Private().(*GroupingPrivate) if !f.HasFlags(ExprFmtHideColumns) && !private.GroupingCols.Empty() { f.formatColList(e, tp, "grouping columns:", opt.ColSetToList(private.GroupingCols)) } if !f.HasFlags(ExprFmtHidePhysProps) && !private.Ordering.Any() { tp.Childf("internal-ordering: %s", private.Ordering) } if !f.HasFlags(ExprFmtHideMiscProps) && private.ErrorOnDup != "" { tp.Childf("error: \"%s\"", private.ErrorOnDup) } case *LimitExpr: if !f.HasFlags(ExprFmtHidePhysProps) && !t.Ordering.Any() { tp.Childf("internal-ordering: %s", t.Ordering) } case *OffsetExpr: if !f.HasFlags(ExprFmtHidePhysProps) && !t.Ordering.Any() { tp.Childf("internal-ordering: %s", t.Ordering) } case *Max1RowExpr: if !f.HasFlags(ExprFmtHideMiscProps) { tp.Childf("error: \"%s\"", t.ErrorText) } // Special-case handling for set operators to show the left and right // input columns that correspond to the output columns. case *UnionExpr, *IntersectExpr, *ExceptExpr, *UnionAllExpr, *IntersectAllExpr, *ExceptAllExpr: if !f.HasFlags(ExprFmtHideColumns) { private := e.Private().(*SetPrivate) f.formatColList(e, tp, "left columns:", private.LeftCols) f.formatColList(e, tp, "right columns:", private.RightCols) } case *ScanExpr: if t.IsCanonical() { // For the canonical scan, show the expressions attached to the TableMeta. tab := md.TableMeta(t.Table) if tab.Constraints != nil { c := tp.Childf("check constraint expressions") for i := 0; i < tab.Constraints.ChildCount(); i++ { f.formatExpr(tab.Constraints.Child(i), c) } } if len(tab.ComputedCols) > 0 { c := tp.Childf("computed column expressions") cols := make(opt.ColList, 0, len(tab.ComputedCols)) for col := range tab.ComputedCols { cols = append(cols, col) } sort.Slice(cols, func(i, j int) bool { return cols[i] < cols[j] }) for _, col := range cols { f.Buffer.Reset() f.formatExpr(tab.ComputedCols[col], c.Child(f.ColumnString(col))) } } if tab.PartialIndexPredicates != nil { c := tp.Child("partial index predicates") indexOrds := make([]cat.IndexOrdinal, 0, len(tab.PartialIndexPredicates)) for ord := range tab.PartialIndexPredicates { indexOrds = append(indexOrds, ord) } sort.Ints(indexOrds) for _, ord := range indexOrds { name := string(tab.Table.Index(ord).Name()) f.Buffer.Reset() f.formatScalarWithLabel(name, tab.PartialIndexPredicates[ord], c) } } } if c := t.Constraint; c != nil { if c.IsContradiction() { tp.Childf("constraint: contradiction") } else if c.Spans.Count() == 1 { tp.Childf("constraint: %s: %s", c.Columns.String(), c.Spans.Get(0).String()) } else { n := tp.Childf("constraint: %s", c.Columns.String()) for i := 0; i < c.Spans.Count(); i++ { n.Child(c.Spans.Get(i).String()) } } } if ic := t.InvertedConstraint; ic != nil { idx := md.Table(t.Table).Index(t.Index) var b strings.Builder for i := idx.NonInvertedPrefixColumnCount(); i < idx.KeyColumnCount(); i++ { b.WriteRune('/') b.WriteString(fmt.Sprintf("%d", t.Table.ColumnID(idx.Column(i).Ordinal()))) } n := tp.Childf("inverted constraint: %s", b.String()) ic.Format(n, "spans") } if t.HardLimit.IsSet() { tp.Childf("limit: %s", t.HardLimit) } if !t.Flags.Empty() { if t.Flags.NoIndexJoin { tp.Childf("flags: no-index-join") } else if t.Flags.ForceIndex { idx := md.Table(t.Table).Index(t.Flags.Index) dir := "" switch t.Flags.Direction { case tree.DefaultDirection: case tree.Ascending: dir = ",fwd" case tree.Descending: dir = ",rev" } tp.Childf("flags: force-index=%s%s", idx.Name(), 
dir) } } if t.Locking != nil { strength := "" switch t.Locking.Strength { case tree.ForNone: case tree.ForKeyShare: strength = "for-key-share" case tree.ForShare: strength = "for-share" case tree.ForNoKeyUpdate: strength = "for-no-key-update" case tree.ForUpdate: strength = "for-update" default: panic(errors.AssertionFailedf("unexpected strength")) } wait := "" switch t.Locking.WaitPolicy { case tree.LockWaitBlock: case tree.LockWaitSkip: wait = ",skip-locked" case tree.LockWaitError: wait = ",nowait" default: panic(errors.AssertionFailedf("unexpected wait policy")) } tp.Childf("locking: %s%s", strength, wait) } case *InvertedFilterExpr: var b strings.Builder b.WriteRune('/') b.WriteString(fmt.Sprintf("%d", t.InvertedColumn)) n := tp.Childf("inverted expression: %s", b.String()) t.InvertedExpression.Format(n, false /* includeSpansToRead */) if t.PreFiltererState != nil { n := tp.Childf("pre-filterer expression") f.formatExpr(t.PreFiltererState.Expr, n) } case *LookupJoinExpr: if !t.Flags.Empty() { tp.Childf("flags: %s", t.Flags.String()) } if !f.HasFlags(ExprFmtHideColumns) { idxCols := make(opt.ColList, len(t.KeyCols)) idx := md.Table(t.Table).Index(t.Index) for i := range idxCols { idxCols[i] = t.Table.ColumnID(idx.Column(i).Ordinal()) } tp.Childf("key columns: %v = %v", t.KeyCols, idxCols) } if t.LookupColsAreTableKey { tp.Childf("lookup columns are key") } case *InvertedJoinExpr: if !t.Flags.Empty() { tp.Childf("flags: %s", t.Flags.String()) } if !f.HasFlags(ExprFmtHideColumns) && len(t.PrefixKeyCols) > 0 { idxCols := make(opt.ColList, len(t.PrefixKeyCols)) idx := md.Table(t.Table).Index(t.Index) for i := range idxCols { idxCols[i] = t.Table.ColumnID(idx.Column(i).Ordinal()) } tp.Childf("prefix key columns: %v = %v", t.PrefixKeyCols, idxCols) } n := tp.Child("inverted-expr") f.formatExpr(t.InvertedExpr, n) case *ZigzagJoinExpr: if !f.HasFlags(ExprFmtHideColumns) { tp.Childf("eq columns: %v = %v", t.LeftEqCols, t.RightEqCols) leftVals := make([]tree.Datum, len(t.LeftFixedCols)) rightVals := make([]tree.Datum, len(t.RightFixedCols)) // FixedVals is always going to be a ScalarListExpr, containing tuples, // containing one ScalarListExpr, containing ConstExprs. 
for i := range t.LeftFixedCols { leftVals[i] = ExtractConstDatum(t.FixedVals[0].Child(0).Child(i)) } for i := range t.RightFixedCols { rightVals[i] = ExtractConstDatum(t.FixedVals[1].Child(0).Child(i)) } tp.Childf("left fixed columns: %v = %v", t.LeftFixedCols, leftVals) tp.Childf("right fixed columns: %v = %v", t.RightFixedCols, rightVals) } case *MergeJoinExpr: if !t.Flags.Empty() { tp.Childf("flags: %s", t.Flags.String()) } if !f.HasFlags(ExprFmtHidePhysProps) { tp.Childf("left ordering: %s", t.LeftEq) tp.Childf("right ordering: %s", t.RightEq) } case *InsertExpr: if !f.HasFlags(ExprFmtHideColumns) { if len(colList) == 0 { tp.Child("columns: <none>") } f.formatArbiters(tp, t.Arbiters, t.Table) f.formatMutationCols(e, tp, "insert-mapping:", t.InsertCols, t.Table) f.formatColList(e, tp, "check columns:", t.CheckCols) f.formatColList(e, tp, "partial index put columns:", t.PartialIndexPutCols) f.formatMutationCommon(tp, &t.MutationPrivate) } case *UpdateExpr: if !f.HasFlags(ExprFmtHideColumns) { if len(colList) == 0 { tp.Child("columns: <none>") } f.formatColList(e, tp, "fetch columns:", t.FetchCols) f.formatMutationCols(e, tp, "update-mapping:", t.UpdateCols, t.Table) f.formatColList(e, tp, "check columns:", t.CheckCols) f.formatColList(e, tp, "partial index put columns:", t.PartialIndexPutCols) f.formatColList(e, tp, "partial index del columns:", t.PartialIndexDelCols) f.formatMutationCommon(tp, &t.MutationPrivate) } case *UpsertExpr: if !f.HasFlags(ExprFmtHideColumns) { if len(colList) == 0 { tp.Child("columns: <none>") } if t.CanaryCol != 0 { f.formatArbiters(tp, t.Arbiters, t.Table) f.formatColList(e, tp, "canary column:", opt.ColList{t.CanaryCol}) f.formatColList(e, tp, "fetch columns:", t.FetchCols) f.formatMutationCols(e, tp, "insert-mapping:", t.InsertCols, t.Table) f.formatMutationCols(e, tp, "update-mapping:", t.UpdateCols, t.Table) f.formatMutationCols(e, tp, "return-mapping:", t.ReturnCols, t.Table) } else { f.formatMutationCols(e, tp, "upsert-mapping:", t.InsertCols, t.Table) } f.formatColList(e, tp, "check columns:", t.CheckCols) f.formatColList(e, tp, "partial index put columns:", t.PartialIndexPutCols) f.formatColList(e, tp, "partial index del columns:", t.PartialIndexDelCols) f.formatMutationCommon(tp, &t.MutationPrivate) } case *DeleteExpr: if !f.HasFlags(ExprFmtHideColumns) { if len(colList) == 0 { tp.Child("columns: <none>") } f.formatColList(e, tp, "fetch columns:", t.FetchCols) f.formatColList(e, tp, "partial index del columns:", t.PartialIndexDelCols) f.formatMutationCommon(tp, &t.MutationPrivate) } case *WithExpr: if t.Mtr.Set { if t.Mtr.Materialize { tp.Child("materialized") } else { tp.Child("not-materialized") } } case *WithScanExpr: if !f.HasFlags(ExprFmtHideColumns) { child := tp.Child("mapping:") for i := range t.InCols { f.Buffer.Reset() f.space() f.formatCol("" /* label */, t.InCols[i], opt.ColSet{} /* notNullCols */) f.Buffer.WriteString(" => ") f.formatCol("" /* label */, t.OutCols[i], opt.ColSet{} /* notNullCols */) child.Child(f.Buffer.String()) } } case *CreateTableExpr: tp.Child(t.Syntax.String()) case *CreateViewExpr: tp.Child(t.ViewQuery) f.Buffer.Reset() f.Buffer.WriteString("columns:") for _, col := range t.Columns { f.space() f.formatCol(col.Alias, col.ID, opt.ColSet{} /* notNullCols */) } tp.Child(f.Buffer.String()) n := tp.Child("dependencies") for _, dep := range t.Deps { f.Buffer.Reset() name := dep.DataSource.Name() f.Buffer.WriteString(name.String()) if dep.SpecificIndex { fmt.Fprintf(f.Buffer, "@%s", 
dep.DataSource.(cat.Table).Index(dep.Index).Name()) } colNames, isTable := dep.GetColumnNames() if len(colNames) > 0 { fmt.Fprintf(f.Buffer, " [columns:") for _, colName := range colNames { fmt.Fprintf(f.Buffer, " %s", colName) } fmt.Fprintf(f.Buffer, "]") } else if isTable { fmt.Fprintf(f.Buffer, " [no columns]") } n.Child(f.Buffer.String()) } case *CreateStatisticsExpr: tp.Child(t.Syntax.String()) case *ExportExpr: tp.Childf("format: %s", t.FileFormat) case *ExplainExpr: // ExplainPlan is the default, don't show it. m := "" if t.Options.Mode != tree.ExplainPlan { m = strings.ToLower(t.Options.Mode.String()) } if t.Options.Flags[tree.ExplainFlagVerbose] { if m != "" { m += ", " } m += "verbose" } if m != "" { tp.Childf("mode: %s", m) } case *RecursiveCTEExpr: if !f.HasFlags(ExprFmtHideColumns) { tp.Childf("working table binding: &%d", t.WithID) f.formatColList(e, tp, "initial columns:", t.InitialCols) f.formatColList(e, tp, "recursive columns:", t.RecursiveCols) } default: if opt.IsJoinOp(t) { p := t.Private().(*JoinPrivate) if !p.Flags.Empty() { tp.Childf("flags: %s", p.Flags.String()) } } } if !f.HasFlags(ExprFmtHideMiscProps) { if !relational.OuterCols.Empty() { tp.Childf("outer: %s", relational.OuterCols.String()) } if relational.Cardinality != props.AnyCardinality { // Suppress cardinality for Scan ops if it's redundant with Limit field. if scan, ok := e.(*ScanExpr); !ok || !scan.HardLimit.IsSet() { tp.Childf("cardinality: %s", relational.Cardinality) } } if join, ok := e.(joinWithMultiplicity); ok { mult := join.getMultiplicity() if s := mult.Format(e.Op()); s != "" { tp.Childf("multiplicity: %s", s) } } f.Buffer.Reset() writeFlag := func(name string) { if f.Buffer.Len() != 0 { f.Buffer.WriteString(", ") } f.Buffer.WriteString(name) } if !relational.VolatilitySet.IsLeakProof() { writeFlag(relational.VolatilitySet.String()) } if relational.CanMutate { writeFlag("mutations") } if relational.HasPlaceholder { writeFlag("has-placeholder") } if f.Buffer.Len() != 0 { tp.Child(f.Buffer.String()) } } if !f.HasFlags(ExprFmtHideStats) { tp.Childf("stats: %s", &relational.Stats) } if !f.HasFlags(ExprFmtHideCost) { cost := e.Cost() if cost != 0 { tp.Childf("cost: %.9g", cost) } } // Format functional dependencies. if !f.HasFlags(ExprFmtHideFuncDeps) { // Show the key separately from the rest of the FDs. if key, ok := relational.FuncDeps.StrictKey(); ok { tp.Childf("key: %s", key) } else if key, ok := relational.FuncDeps.LaxKey(); ok { tp.Childf("lax-key: %s", key) } if fdStr := relational.FuncDeps.StringOnlyFDs(); fdStr != "" { tp.Childf("fd: %s", fdStr) } } if !f.HasFlags(ExprFmtHidePhysProps) { if !required.Ordering.Any() { if f.HasFlags(ExprFmtHideMiscProps) { tp.Childf("ordering: %s", required.Ordering.String()) } else { // Show the provided ordering as well, unless it's exactly the same. 
provided := e.ProvidedPhysical().Ordering reqStr := required.Ordering.String() provStr := provided.String() if provStr == reqStr { tp.Childf("ordering: %s", required.Ordering.String()) } else { tp.Childf("ordering: %s [actual: %s]", required.Ordering.String(), provided.String()) } } } if required.LimitHint != 0 { tp.Childf("limit hint: %.2f", required.LimitHint) } } if !f.HasFlags(ExprFmtHideRuleProps) { r := &relational.Rule if !r.PruneCols.Empty() { tp.Childf("prune: %s", r.PruneCols.String()) } if !r.RejectNullCols.Empty() { tp.Childf("reject-nulls: %s", r.RejectNullCols.String()) } if len(r.InterestingOrderings) > 0 { tp.Childf("interesting orderings: %s", r.InterestingOrderings.String()) } if !r.UnfilteredCols.Empty() { tp.Childf("unfiltered-cols: %s", r.UnfilteredCols.String()) } if withUses := relational.Shared.Rule.WithUses; len(withUses) > 0 { n := tp.Childf("cte-uses") ids := make([]opt.WithID, 0, len(withUses)) for id := range withUses { ids = append(ids, id) } sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] }) for _, id := range ids { info := withUses[id] n.Childf("&%d: count=%d used-columns=%s", id, info.Count, info.UsedCols) } } } switch t := e.(type) { case *CreateTableExpr: // Do not print dummy input expression if there was no AS clause. if !t.Syntax.As() { return } } for i, n := 0, e.ChildCount(); i < n; i++ { f.formatExpr(e.Child(i), tp) } } func (f *ExprFmtCtx) formatScalar(scalar opt.ScalarExpr, tp treeprinter.Node) { f.formatScalarWithLabel("", scalar, tp) } func (f *ExprFmtCtx) formatScalarWithLabel( label string, scalar opt.ScalarExpr, tp treeprinter.Node, ) { f.Buffer.Reset() if label != "" { f.Buffer.WriteString(label) f.Buffer.WriteString(": ") } switch scalar.Op() { case opt.ProjectionsOp, opt.AggregationsOp, opt.UniqueChecksOp, opt.FKChecksOp, opt.KVOptionsOp: // Omit empty lists (except filters). if scalar.ChildCount() == 0 { return } case opt.FiltersOp: // Show empty Filters expression as "filters (true)". if scalar.ChildCount() == 0 { f.Buffer.WriteString("filters (true)") tp.Child(f.Buffer.String()) return } case opt.IfErrOp: fmt.Fprintf(f.Buffer, "%v", scalar.Op()) f.FormatScalarProps(scalar) tp = tp.Child(f.Buffer.String()) f.formatExpr(scalar.Child(0), tp) if scalar.Child(1).ChildCount() > 0 { f.formatExpr(scalar.Child(1), tp.Child("else")) } if scalar.Child(2).ChildCount() > 0 { f.formatExpr(scalar.Child(2), tp.Child("err-code")) } return case opt.AggFilterOp: fmt.Fprintf(f.Buffer, "%v", scalar.Op()) f.FormatScalarProps(scalar) tp = tp.Child(f.Buffer.String()) f.formatExpr(scalar.Child(0), tp) f.formatExpr(scalar.Child(1), tp.Child("filter")) return case opt.ScalarListOp: // Don't show scalar-list as a separate node, as it's redundant with its // parent. for i, n := 0, scalar.ChildCount(); i < n; i++ { f.formatExpr(scalar.Child(i), tp) } return } // Omit various list items from the output, but show some of their properties // along with the properties of their child. 
var scalarProps []string switch scalar.Op() { case opt.FiltersItemOp, opt.ProjectionsItemOp, opt.AggregationsItemOp, opt.ZipItemOp, opt.WindowsItemOp: emitProp := func(format string, args ...interface{}) { scalarProps = append(scalarProps, fmt.Sprintf(format, args...)) } switch item := scalar.(type) { case *ProjectionsItem: if !f.HasFlags(ExprFmtHideColumns) { emitProp("as=%s", f.ColumnString(item.Col)) } case *AggregationsItem: if !f.HasFlags(ExprFmtHideColumns) { emitProp("as=%s", f.ColumnString(item.Col)) } case *ZipItem: // TODO(radu): show the item.Cols case *WindowsItem: if !f.HasFlags(ExprFmtHideColumns) { emitProp("as=%s", f.ColumnString(item.Col)) } // Only show the frame if it differs from the default. def := WindowFrame{ Mode: tree.RANGE, StartBoundType: tree.UnboundedPreceding, EndBoundType: tree.CurrentRow, FrameExclusion: tree.NoExclusion, } if item.Frame != def { emitProp("frame=%q", item.Frame.String()) } } scalarProps = append(scalarProps, f.scalarPropsStrings(scalar)...) scalar = scalar.Child(0).(opt.ScalarExpr) default: scalarProps = f.scalarPropsStrings(scalar) } var intercepted bool if f.HasFlags(ExprFmtHideScalars) && ScalarFmtInterceptor != nil { if str := ScalarFmtInterceptor(f, scalar); str != "" { f.Buffer.WriteString(str) intercepted = true } } if !intercepted { fmt.Fprintf(f.Buffer, "%v", scalar.Op()) f.formatScalarPrivate(scalar) } if len(scalarProps) != 0 { f.Buffer.WriteString(" [") f.Buffer.WriteString(strings.Join(scalarProps, ", ")) f.Buffer.WriteByte(']') } tp = tp.Child(f.Buffer.String()) if !intercepted { for i, n := 0, scalar.ChildCount(); i < n; i++ { f.formatExpr(scalar.Child(i), tp) } } } // scalarPropsStrings returns a slice of strings, each describing a property; // for example: // {"type=bool", "outer=(1)", "constraints=(/1: [/1 - /1]; tight)"} func (f *ExprFmtCtx) scalarPropsStrings(scalar opt.ScalarExpr) []string { typ := scalar.DataType() if typ == nil { if scalar.Op() == opt.UniqueChecksItemOp || scalar.Op() == opt.FKChecksItemOp || scalar.Op() == opt.KVOptionsItemOp { // These are not true scalars and have no properties. return nil } // Don't panic if scalar properties don't yet exist when printing // expression. 
return []string{"type=undefined"} } var res []string emitProp := func(format string, args ...interface{}) { res = append(res, fmt.Sprintf(format, args...)) } if !f.HasFlags(ExprFmtHideTypes) && typ.Family() != types.AnyFamily { emitProp("type=%s", typ) } if propsExpr, ok := scalar.(ScalarPropsExpr); ok { scalarProps := propsExpr.ScalarProps() if !f.HasFlags(ExprFmtHideMiscProps) { if !scalarProps.OuterCols.Empty() { emitProp("outer=%s", scalarProps.OuterCols) } if !scalarProps.VolatilitySet.IsLeakProof() { emitProp(scalarProps.VolatilitySet.String()) } if scalarProps.HasCorrelatedSubquery { emitProp("correlated-subquery") } else if scalarProps.HasSubquery { emitProp("subquery") } } if !f.HasFlags(ExprFmtHideConstraints) { if scalarProps.Constraints != nil && !scalarProps.Constraints.IsUnconstrained() { var tight string if scalarProps.TightConstraints { tight = "; tight" } emitProp("constraints=(%s%s)", scalarProps.Constraints, tight) } } if !f.HasFlags(ExprFmtHideFuncDeps) && !scalarProps.FuncDeps.Empty() { emitProp("fd=%s", scalarProps.FuncDeps) } } return res } // FormatScalarProps writes out a string representation of the scalar // properties (with a preceding space); for example: // " [type=bool, outer=(1), constraints=(/1: [/1 - /1]; tight)]" func (f *ExprFmtCtx) FormatScalarProps(scalar opt.ScalarExpr) { props := f.scalarPropsStrings(scalar) if len(props) != 0 { f.Buffer.WriteString(" [") f.Buffer.WriteString(strings.Join(props, ", ")) f.Buffer.WriteByte(']') } } func (f *ExprFmtCtx) formatScalarPrivate(scalar opt.ScalarExpr) { var private interface{} switch t := scalar.(type) { case *NullExpr, *TupleExpr, *CollateExpr: // Private is redundant with logical type property. private = nil case *AnyExpr: // We don't want to show the OriginalExpr; just show Cmp. private = t.Cmp case *ArrayFlattenExpr: if t.Input.Relational().OutputCols.Len() != 1 { fmt.Fprintf(f.Buffer, " col=%v", t.RequestedCol) } case *SubqueryExpr, *ExistsExpr: // We don't want to show the OriginalExpr. private = nil case *CastExpr: private = t.Typ.SQLString() case *KVOptionsItem: fmt.Fprintf(f.Buffer, " %s", t.Key) case *UniqueChecksItem: tab := f.Memo.metadata.TableMeta(t.Table) constraint := tab.Table.Unique(t.CheckOrdinal) fmt.Fprintf(f.Buffer, ": %s(", tab.Alias.ObjectName) for i := 0; i < constraint.ColumnCount(); i++ { if i > 0 { f.Buffer.WriteByte(',') } col := tab.Table.Column(constraint.ColumnOrdinal(tab.Table, i)) f.Buffer.WriteString(string(col.ColName())) } f.Buffer.WriteByte(')') case *FKChecksItem: origin := f.Memo.metadata.TableMeta(t.OriginTable) referenced := f.Memo.metadata.TableMeta(t.ReferencedTable) var fk cat.ForeignKeyConstraint if t.FKOutbound { fk = origin.Table.OutboundForeignKey(t.FKOrdinal) } else { fk = referenced.Table.InboundForeignKey(t.FKOrdinal) } // Print the FK as: // child(a,b) -> parent(a,b) // // TODO(radu): maybe flip these if we are deleting from the parent (i.e. // FKOutbound=false)? 
fmt.Fprintf(f.Buffer, ": %s(", origin.Alias.ObjectName) for i := 0; i < fk.ColumnCount(); i++ { if i > 0 { f.Buffer.WriteByte(',') } col := origin.Table.Column(fk.OriginColumnOrdinal(origin.Table, i)) f.Buffer.WriteString(string(col.ColName())) } fmt.Fprintf(f.Buffer, ") -> %s(", referenced.Alias.ObjectName) for i := 0; i < fk.ColumnCount(); i++ { if i > 0 { f.Buffer.WriteByte(',') } col := referenced.Table.Column(fk.ReferencedColumnOrdinal(referenced.Table, i)) f.Buffer.WriteString(string(col.ColName())) } f.Buffer.WriteByte(')') default: private = scalar.Private() } if private != nil { f.Buffer.WriteRune(':') FormatPrivate(f, private, &physical.Required{}) } } // formatIndex outputs the specified index into the context's buffer with the // format: // // table_alias@index_name // // If reverse is true, ",rev" is appended. // // If the index is a partial index, ",partial" is appended. // // If the table is aliased, " [as=alias]" is appended. func (f *ExprFmtCtx) formatIndex(tabID opt.TableID, idxOrd cat.IndexOrdinal, reverse bool) { md := f.Memo.Metadata() tabMeta := md.TableMeta(tabID) index := tabMeta.Table.Index(idxOrd) if idxOrd == cat.PrimaryIndex { // Don't output the index name if it's the primary index. fmt.Fprintf(f.Buffer, " %s", tableName(f, tabID)) } else { fmt.Fprintf(f.Buffer, " %s@%s", tableName(f, tabID), index.Name()) } if reverse { f.Buffer.WriteString(",rev") } if _, isPartial := index.Predicate(); isPartial { f.Buffer.WriteString(",partial") } alias := md.TableMeta(tabID).Alias.Table() if alias != string(tabMeta.Table.Name()) { fmt.Fprintf(f.Buffer, " [as=%s]", alias) } } // formatArbiters constructs a new treeprinter child containing the // specified list of arbiter indexes. func (f *ExprFmtCtx) formatArbiters( tp treeprinter.Node, arbiters cat.IndexOrdinals, tabID opt.TableID, ) { md := f.Memo.Metadata() tab := md.Table(tabID) if len(arbiters) > 0 { f.Buffer.Reset() f.Buffer.WriteString("arbiter indexes:") for _, idx := range arbiters { name := string(tab.Index(idx).Name()) f.space() f.Buffer.WriteString(name) } tp.Child(f.Buffer.String()) } } func (f *ExprFmtCtx) formatColumns( nd RelExpr, tp treeprinter.Node, cols opt.ColList, presentation physical.Presentation, ) { if f.HasFlags(ExprFmtHideColumns) { return } if presentation.Any() { f.formatColList(nd, tp, "columns:", cols) return } // When a particular column presentation is required of the expression, then // print columns using that information. Include information about columns // that are hidden by the presentation separately. hidden := cols.ToSet() notNullCols := nd.Relational().NotNullCols f.Buffer.Reset() f.Buffer.WriteString("columns:") for _, col := range presentation { hidden.Remove(col.ID) f.space() f.formatCol(col.Alias, col.ID, notNullCols) } if !hidden.Empty() { f.Buffer.WriteString(" [hidden:") for _, col := range cols { if hidden.Contains(col) { f.space() f.formatCol("" /* label */, col, notNullCols) } } f.Buffer.WriteString("]") } tp.Child(f.Buffer.String()) } // formatColList constructs a new treeprinter child containing the specified // list of columns formatted using the formatCol method. 
func (f *ExprFmtCtx) formatColList( nd RelExpr, tp treeprinter.Node, heading string, colList opt.ColList, ) { if len(colList) > 0 { notNullCols := nd.Relational().NotNullCols f.Buffer.Reset() f.Buffer.WriteString(heading) for _, col := range colList { if col != 0 { f.space() f.formatCol("" /* label */, col, notNullCols) } } tp.Child(f.Buffer.String()) } } // formatMutationCols adds a new treeprinter child for each non-zero column in the // given list. Each child shows how the column will be mutated, with the id of // the "before" and "after" columns, similar to this: // // a:1 => x:4 // func (f *ExprFmtCtx) formatMutationCols( nd RelExpr, tp treeprinter.Node, heading string, colList opt.ColList, tabID opt.TableID, ) { if len(colList) == 0 { return } tpChild := tp.Child(heading) for i, col := range colList { if col != 0 { tpChild.Child(fmt.Sprintf("%s => %s", f.ColumnString(col), f.ColumnString(tabID.ColumnID(i)))) } } } // formatMutationCommon shows the MutationPrivate fields that format the same // for all types of mutations. func (f *ExprFmtCtx) formatMutationCommon(tp treeprinter.Node, p *MutationPrivate) { if p.WithID != 0 { tp.Childf("input binding: &%d", p.WithID) } if len(p.FKCascades) > 0 { c := tp.Childf("cascades") for i := range p.FKCascades { c.Child(p.FKCascades[i].FKName) } } } // ColumnString returns the column in the same format as formatColSimple. func (f *ExprFmtCtx) ColumnString(id opt.ColumnID) string { var buf bytes.Buffer f.formatColSimpleToBuffer(&buf, "" /* label */, id) return buf.String() } // formatColSimple outputs the specified column into the context's buffer using the // following format: // label:id // // The :id part is omitted if the formatting flags include ExprFmtHideColumns. // // If a label is given, then it is used. Otherwise, a "best effort" label is // used from query metadata. func (f *ExprFmtCtx) formatColSimple(label string, id opt.ColumnID) { f.formatColSimpleToBuffer(f.Buffer, label, id) } func (f *ExprFmtCtx) formatColSimpleToBuffer(buf *bytes.Buffer, label string, id opt.ColumnID) { if label == "" { if f.Memo != nil { md := f.Memo.metadata fullyQualify := !f.HasFlags(ExprFmtHideQualifications) label = md.QualifiedAlias(id, fullyQualify, f.Catalog) } else { label = fmt.Sprintf("unknown%d", id) } } if !isSimpleColumnName(label) { // Add quotations around the column name if it is not composed of simple // ASCII characters. label = "\"" + label + "\"" } buf.WriteString(label) if !f.HasFlags(ExprFmtHideColumns) { buf.WriteByte(':') fmt.Fprintf(buf, "%d", id) } } // formatCol outputs the specified column into the context's buffer using the // following format: // label:id(type) // // If the column is not nullable, then this is the format: // label:id(type!null) // // Some of the components can be omitted depending on formatting flags. // // If a label is given, then it is used. Otherwise, a "best effort" label is // used from query metadata. 
func (f *ExprFmtCtx) formatCol(label string, id opt.ColumnID, notNullCols opt.ColSet) { f.formatColSimple(label, id) parenOpen := false if !f.HasFlags(ExprFmtHideTypes) && f.Memo != nil { f.Buffer.WriteByte('(') parenOpen = true f.Buffer.WriteString(f.Memo.metadata.ColumnMeta(id).Type.String()) } if !f.HasFlags(ExprFmtHideNotNull) && notNullCols.Contains(id) { f.Buffer.WriteString("!null") } if parenOpen { f.Buffer.WriteByte(')') } } // ScanIsReverseFn is a callback that is used to figure out if a scan needs to // happen in reverse (the code lives in the ordering package, and depending on // that directly would be a dependency loop). var ScanIsReverseFn func(md *opt.Metadata, s *ScanPrivate, required *physical.OrderingChoice) bool // FormatPrivate outputs a description of the private to f.Buffer. func FormatPrivate(f *ExprFmtCtx, private interface{}, physProps *physical.Required) { if private == nil { return } switch t := private.(type) { case *opt.ColumnID: f.space() f.formatColSimple("" /* label */, *t) case *opt.ColList: for _, col := range *t { f.space() f.formatColSimple("" /* label */, col) } case *TupleOrdinal: fmt.Fprintf(f.Buffer, " %d", *t) case *ScanPrivate: f.formatIndex(t.Table, t.Index, ScanIsReverseFn(f.Memo.Metadata(), t, &physProps.Ordering)) case *SequenceSelectPrivate: seq := f.Memo.metadata.Sequence(t.Sequence) fmt.Fprintf(f.Buffer, " %s", seq.Name()) case *MutationPrivate: f.formatIndex(t.Table, cat.PrimaryIndex, false /* reverse */) case *OrdinalityPrivate: if !t.Ordering.Any() { fmt.Fprintf(f.Buffer, " ordering=%s", t.Ordering) } case *GroupingPrivate: fmt.Fprintf(f.Buffer, " cols=%s", t.GroupingCols.String()) if !t.Ordering.Any() { fmt.Fprintf(f.Buffer, ",ordering=%s", t.Ordering) } case *IndexJoinPrivate: tab := f.Memo.metadata.Table(t.Table) fmt.Fprintf(f.Buffer, " %s", tab.Name()) case *InvertedFilterPrivate: col := f.Memo.metadata.ColumnMeta(t.InvertedColumn) fmt.Fprintf(f.Buffer, " %s", col.Alias) case *LookupJoinPrivate: f.formatIndex(t.Table, t.Index, false /* reverse */) case *InvertedJoinPrivate: f.formatIndex(t.Table, t.Index, false /* reverse */) case *ValuesPrivate: fmt.Fprintf(f.Buffer, " id=v%d", t.ID) case *ZigzagJoinPrivate: f.formatIndex(t.LeftTable, t.LeftIndex, false /* reverse */) f.formatIndex(t.RightTable, t.RightIndex, false /* reverse */) case *MergeJoinPrivate: fmt.Fprintf(f.Buffer, " %s,%s,%s", t.JoinType, t.LeftEq, t.RightEq) case *FunctionPrivate: fmt.Fprintf(f.Buffer, " %s", t.Name) case *WindowsItemPrivate: fmt.Fprintf(f.Buffer, " frame=%q", &t.Frame) case *WindowPrivate: fmt.Fprintf(f.Buffer, " partition=%s", t.Partition) if !t.Ordering.Any() { fmt.Fprintf(f.Buffer, " ordering=%s", t.Ordering) } case *physical.OrderingChoice: if !t.Any() { fmt.Fprintf(f.Buffer, " ordering=%s", t) } case *OpaqueRelPrivate: f.space() f.Buffer.WriteString(t.Metadata.String()) case *AlterTableSplitPrivate: f.formatIndex(t.Table, t.Index, false /* reverse */) case *AlterTableRelocatePrivate: FormatPrivate(f, &t.AlterTableSplitPrivate, nil) if t.RelocateLease { f.Buffer.WriteString(" [lease]") } case *ControlJobsPrivate: fmt.Fprintf(f.Buffer, " (%s)", tree.JobCommandToStatement[t.Command]) case *CancelPrivate: if t.IfExists { f.Buffer.WriteString(" [if-exists]") } case *CreateViewPrivate: schema := f.Memo.Metadata().Schema(t.Schema) fmt.Fprintf(f.Buffer, " %s.%s", schema.Name(), t.ViewName) case *JoinPrivate: // Nothing to show; flags are shown separately. 
case *ExplainPrivate, *opt.ColSet, *SetPrivate, *types.T, *ExportPrivate: // Don't show anything, because it's mostly redundant. default: fmt.Fprintf(f.Buffer, " %v", private) } } // tableName returns the table name to be used for pretty-printing. If // ExprFmtHideQualifications is not set, the fully qualified table name is // returned. func tableName(f *ExprFmtCtx, tabID opt.TableID) string { tabMeta := f.Memo.metadata.TableMeta(tabID) if f.HasFlags(ExprFmtHideQualifications) { return string(tabMeta.Table.Name()) } tn, err := f.Catalog.FullyQualifiedName(context.TODO(), tabMeta.Table) if err != nil { panic(err) } return tn.FQString() } // isSimpleColumnName returns true if the given label consists of only ASCII // letters, numbers, underscores, quotation marks, and periods ("."). It is // used to determine whether to enclose a column name in quotation marks for // nicer display. func isSimpleColumnName(label string) bool { for i, r := range label { if r > unicode.MaxASCII { return false } if i == 0 { if r != '"' && !unicode.IsLetter(r) { // The first character must be a letter or quotation mark. return false } } else if r != '.' && r != '_' && r != '"' && !unicode.IsNumber(r) && !unicode.IsLetter(r) { return false } } return true }
pkg/sql/opt/memo/expr_format.go
1
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.5597359538078308, 0.004842621274292469, 0.00016287642938550562, 0.00017334245785605162, 0.04707406461238861 ]
{ "id": 8, "code_window": [ "\treturn &memo.SetPrivate{\n", "\t\tLeftCols: leftAndOutCols,\n", "\t\tRightCols: opt.ColSetToList(right.Cols),\n", "\t\tOutCols: leftAndOutCols,\n", "\t}\n", "}\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\t\tRightCols: right.Cols.ToList(),\n" ], "file_path": "pkg/sql/opt/xform/select_funcs.go", "type": "replace", "edit_start_line_idx": 1003 }
// Copyright 2016 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package rowcontainer import ( "container/heap" "context" "unsafe" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/diskmap" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/cancelchecker" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/cockroach/pkg/util/ring" "github.com/cockroachdb/cockroach/pkg/util/sort" "github.com/cockroachdb/errors" ) // SortableRowContainer is a container used to store rows and optionally sort // these. type SortableRowContainer interface { Len() int // AddRow adds a row to the container. If an error is returned, then the // row wasn't actually added. AddRow(context.Context, rowenc.EncDatumRow) error // Sort sorts the rows according to the current ordering (the one set either // at initialization or by the last call of Reorder() - if the container is // ReorderableRowContainer). Sort(context.Context) // NewIterator returns a RowIterator that can be used to iterate over // the rows. NewIterator(context.Context) RowIterator // NewFinalIterator returns a RowIterator that can be used to iterate over the // rows, possibly freeing resources along the way. Subsequent calls to // NewIterator or NewFinalIterator are not guaranteed to return any rows. NewFinalIterator(context.Context) RowIterator // UnsafeReset resets the container, allowing for reuse. It renders all // previously allocated rows unsafe. UnsafeReset(context.Context) error // InitTopK enables optimizations in cases where the caller cares only about // the top k rows where k is the size of the SortableRowContainer when // InitTopK is called. Once InitTopK is called, callers should not call // AddRow. Iterators created after calling InitTopK are guaranteed to read the // top k rows only. InitTopK() // MaybeReplaceMax checks whether the given row belongs in the top k rows, // potentially evicting a row in favor of the given row. MaybeReplaceMax(context.Context, rowenc.EncDatumRow) error // Close frees up resources held by the SortableRowContainer. Close(context.Context) } // ReorderableRowContainer is a SortableRowContainer that can change the // ordering on which the rows are sorted. type ReorderableRowContainer interface { SortableRowContainer // Reorder changes the ordering on which the rows are sorted. In order for // new ordering to take effect, Sort() must be called. It returns an error if // it occurs. Reorder(context.Context, colinfo.ColumnOrdering) error } // IndexedRowContainer is a ReorderableRowContainer which also implements // tree.IndexedRows. It allows retrieving a row at a particular index. type IndexedRowContainer interface { ReorderableRowContainer // GetRow returns a row at the given index or an error. 
GetRow(ctx context.Context, idx int) (tree.IndexedRow, error) } // DeDupingRowContainer is a container that de-duplicates rows added to the // container, and assigns them a dense index starting from 0, representing // when that row was first added. It only supports a configuration where all // the columns are encoded into the key -- relaxing this is not hard, but is // not worth adding the code without a use for it. type DeDupingRowContainer interface { // AddRowWithDeDup adds the given row if not already present in the // container. It returns the dense number of when the row is first // added. AddRowWithDeDup(context.Context, rowenc.EncDatumRow) (int, error) // UnsafeReset resets the container, allowing for reuse. It renders all // previously allocated rows unsafe. UnsafeReset(context.Context) error // Close frees up resources held by the container. Close(context.Context) } // RowIterator is a simple iterator used to iterate over sqlbase.EncDatumRows. // Example use: // var i RowIterator // for i.Rewind(); ; i.Next() { // if ok, err := i.Valid(); err != nil { // // Handle error. // } else if !ok { // break // } // row, err := i.Row() // if err != nil { // // Handle error. // } // // Do something. // } // type RowIterator interface { // Rewind seeks to the first row. Rewind() // Valid must be called after any call to Rewind() or Next(). It returns // (true, nil) if the iterator points to a valid row and (false, nil) if the // iterator has moved past the last row. // If an error has occurred, the returned bool is invalid. Valid() (bool, error) // Next advances the iterator to the next row in the iteration. Next() // Row returns the current row. The returned row is only valid until the // next call to Rewind() or Next(). Row() (rowenc.EncDatumRow, error) // Close frees up resources held by the iterator. Close() } // MemRowContainer is the wrapper around rowcontainer.RowContainer that // provides more functionality, especially around converting to/from // EncDatumRows and facilitating sorting. type MemRowContainer struct { RowContainer types []*types.T invertSorting bool // Inverts the sorting predicate. ordering colinfo.ColumnOrdering scratchRow tree.Datums scratchEncRow rowenc.EncDatumRow evalCtx *tree.EvalContext datumAlloc rowenc.DatumAlloc } var _ heap.Interface = &MemRowContainer{} var _ IndexedRowContainer = &MemRowContainer{} // Init initializes the MemRowContainer. The MemRowContainer uses evalCtx.Mon // to track memory usage. func (mc *MemRowContainer) Init( ordering colinfo.ColumnOrdering, types []*types.T, evalCtx *tree.EvalContext, ) { mc.InitWithMon(ordering, types, evalCtx, evalCtx.Mon) } // InitWithMon initializes the MemRowContainer with an explicit monitor. Only // use this if the default MemRowContainer.Init() function is insufficient. func (mc *MemRowContainer) InitWithMon( ordering colinfo.ColumnOrdering, types []*types.T, evalCtx *tree.EvalContext, mon *mon.BytesMonitor, ) { acc := mon.MakeBoundAccount() mc.RowContainer.Init(acc, colinfo.ColTypeInfoFromColTypes(types), 0 /* rowCapacity */) mc.types = types mc.ordering = ordering mc.scratchRow = make(tree.Datums, len(types)) mc.scratchEncRow = make(rowenc.EncDatumRow, len(types)) mc.evalCtx = evalCtx } // Less is part of heap.Interface and is only meant to be used internally. func (mc *MemRowContainer) Less(i, j int) bool { cmp := colinfo.CompareDatums(mc.ordering, mc.evalCtx, mc.At(i), mc.At(j)) if mc.invertSorting { cmp = -cmp } return cmp < 0 } // EncRow returns the idx-th row as an EncDatumRow. 
The slice itself is reused // so it is only valid until the next call to EncRow. func (mc *MemRowContainer) EncRow(idx int) rowenc.EncDatumRow { datums := mc.At(idx) for i, d := range datums { mc.scratchEncRow[i] = rowenc.DatumToEncDatum(mc.types[i], d) } return mc.scratchEncRow } // AddRow adds a row to the container. func (mc *MemRowContainer) AddRow(ctx context.Context, row rowenc.EncDatumRow) error { if len(row) != len(mc.types) { log.Fatalf(ctx, "invalid row length %d, expected %d", len(row), len(mc.types)) } for i := range row { err := row[i].EnsureDecoded(mc.types[i], &mc.datumAlloc) if err != nil { return err } mc.scratchRow[i] = row[i].Datum } _, err := mc.RowContainer.AddRow(ctx, mc.scratchRow) return err } // Sort is part of the SortableRowContainer interface. func (mc *MemRowContainer) Sort(ctx context.Context) { mc.invertSorting = false cancelChecker := cancelchecker.NewCancelChecker(ctx) sort.Sort(mc, cancelChecker) } // Reorder implements ReorderableRowContainer. We don't need to create a new // MemRowContainer and can just change the ordering on-the-fly. func (mc *MemRowContainer) Reorder(_ context.Context, ordering colinfo.ColumnOrdering) error { mc.ordering = ordering return nil } // Push is part of heap.Interface. func (mc *MemRowContainer) Push(_ interface{}) { panic("unimplemented") } // Pop is part of heap.Interface. func (mc *MemRowContainer) Pop() interface{} { panic("unimplemented") } // MaybeReplaceMax replaces the maximum element with the given row, if it is // smaller. Assumes InitTopK was called. func (mc *MemRowContainer) MaybeReplaceMax(ctx context.Context, row rowenc.EncDatumRow) error { max := mc.At(0) cmp, err := row.CompareToDatums(mc.types, &mc.datumAlloc, mc.ordering, mc.evalCtx, max) if err != nil { return err } if cmp < 0 { // row is smaller than the max; replace. for i := range row { if err := row[i].EnsureDecoded(mc.types[i], &mc.datumAlloc); err != nil { return err } mc.scratchRow[i] = row[i].Datum } if err := mc.Replace(ctx, 0, mc.scratchRow); err != nil { return err } heap.Fix(mc, 0) } return nil } // InitTopK rearranges the rows in the MemRowContainer into a Max-Heap. func (mc *MemRowContainer) InitTopK() { mc.invertSorting = true heap.Init(mc) } // memRowIterator is a RowIterator that iterates over a MemRowContainer. This // iterator doesn't iterate over a snapshot of MemRowContainer. type memRowIterator struct { *MemRowContainer curIdx int } var _ RowIterator = &memRowIterator{} // NewIterator returns an iterator that can be used to iterate over a // MemRowContainer. Note that this iterator doesn't iterate over a snapshot // of MemRowContainer. func (mc *MemRowContainer) NewIterator(_ context.Context) RowIterator { return &memRowIterator{MemRowContainer: mc} } // Rewind implements the RowIterator interface. func (i *memRowIterator) Rewind() { i.curIdx = 0 } // Valid implements the RowIterator interface. func (i *memRowIterator) Valid() (bool, error) { return i.curIdx < i.Len(), nil } // Next implements the RowIterator interface. func (i *memRowIterator) Next() { i.curIdx++ } // Row implements the RowIterator interface. func (i *memRowIterator) Row() (rowenc.EncDatumRow, error) { return i.EncRow(i.curIdx), nil } // Close implements the RowIterator interface. func (i *memRowIterator) Close() {} // memRowFinalIterator is a RowIterator that iterates over a MemRowContainer. // This iterator doesn't iterate over a snapshot of MemRowContainer and deletes // rows as soon as they are iterated over to free up memory eagerly. 
type memRowFinalIterator struct { *MemRowContainer ctx context.Context } // NewFinalIterator returns an iterator that can be used to iterate over a // MemRowContainer. Note that this iterator doesn't iterate over a snapshot // of MemRowContainer and that it deletes rows as soon as they are iterated // over. func (mc *MemRowContainer) NewFinalIterator(ctx context.Context) RowIterator { return memRowFinalIterator{MemRowContainer: mc, ctx: ctx} } // GetRow implements IndexedRowContainer. func (mc *MemRowContainer) GetRow(ctx context.Context, pos int) (tree.IndexedRow, error) { return IndexedRow{Idx: pos, Row: mc.EncRow(pos)}, nil } var _ RowIterator = memRowFinalIterator{} // Rewind implements the RowIterator interface. func (i memRowFinalIterator) Rewind() {} // Valid implements the RowIterator interface. func (i memRowFinalIterator) Valid() (bool, error) { return i.Len() > 0, nil } // Next implements the RowIterator interface. func (i memRowFinalIterator) Next() { i.PopFirst(i.ctx) } // Row implements the RowIterator interface. func (i memRowFinalIterator) Row() (rowenc.EncDatumRow, error) { return i.EncRow(0), nil } // Close implements the RowIterator interface. func (i memRowFinalIterator) Close() {} // DiskBackedRowContainer is a ReorderableRowContainer that uses a // MemRowContainer to store rows and spills back to disk automatically if // memory usage exceeds a given budget. type DiskBackedRowContainer struct { // src is the current ReorderableRowContainer that is being used to store // rows. All the ReorderableRowContainer methods are redefined rather than // delegated to an embedded struct because of how defer works: // rc.Init(...) // defer rc.Close(ctx) // The Close will call MemRowContainer.Close(ctx) even after spilling to disk. src ReorderableRowContainer mrc *MemRowContainer drc *DiskRowContainer // See comment in DoDeDuplicate(). deDuplicate bool keyToIndex map[string]int // Encoding helpers for de-duplication: // encodings keeps around the DatumEncoding equivalents of the encoding // directions in ordering to avoid conversions in hot paths. encodings []descpb.DatumEncoding datumAlloc rowenc.DatumAlloc scratchKey []byte spilled bool // The following fields are used to create a DiskRowContainer when spilling // to disk. engine diskmap.Factory diskMonitor *mon.BytesMonitor } var _ ReorderableRowContainer = &DiskBackedRowContainer{} var _ DeDupingRowContainer = &DiskBackedRowContainer{} // Init initializes a DiskBackedRowContainer. // Arguments: // - ordering is the output ordering; the order in which rows should be sorted. // - types is the schema of rows that will be added to this container. // - evalCtx defines the context in which to evaluate comparisons, only used // when storing rows in memory. // - engine is the store used for rows when spilling to disk. // - memoryMonitor is used to monitor the DiskBackedRowContainer's memory usage. // If this monitor denies an allocation, the DiskBackedRowContainer will // spill to disk. // - diskMonitor is used to monitor the DiskBackedRowContainer's disk usage if // and when it spills to disk. 
func (f *DiskBackedRowContainer) Init( ordering colinfo.ColumnOrdering, types []*types.T, evalCtx *tree.EvalContext, engine diskmap.Factory, memoryMonitor *mon.BytesMonitor, diskMonitor *mon.BytesMonitor, ) { mrc := MemRowContainer{} mrc.InitWithMon(ordering, types, evalCtx, memoryMonitor) f.mrc = &mrc f.src = &mrc f.engine = engine f.diskMonitor = diskMonitor f.encodings = make([]descpb.DatumEncoding, len(ordering)) for i, orderInfo := range ordering { f.encodings[i] = rowenc.EncodingDirToDatumEncoding(orderInfo.Direction) } } // DoDeDuplicate causes DiskBackedRowContainer to behave as an implementation // of DeDupingRowContainer. It should not be mixed with calls to AddRow(). It // de-duplicates the keys such that only the first row with the given key will // be stored. The index returned in AddRowWithDedup() is a dense index // starting from 0, representing when that key was first added. This feature // does not combine with Sort(), Reorder() etc., and only to be used for // assignment of these dense indexes. The main reason to add this to // DiskBackedRowContainer is to avoid significant code duplication in // constructing another row container. func (f *DiskBackedRowContainer) DoDeDuplicate() { f.deDuplicate = true f.keyToIndex = make(map[string]int) } // Len is part of the SortableRowContainer interface. func (f *DiskBackedRowContainer) Len() int { return f.src.Len() } // AddRow is part of the SortableRowContainer interface. func (f *DiskBackedRowContainer) AddRow(ctx context.Context, row rowenc.EncDatumRow) error { if err := f.src.AddRow(ctx, row); err != nil { if spilled, spillErr := f.spillIfMemErr(ctx, err); !spilled && spillErr == nil { // The error was not an out of memory error. return err } else if spillErr != nil { // A disk spill was attempted but there was an error in doing so. return spillErr } // Add the row that caused the memory error. return f.src.AddRow(ctx, row) } return nil } // AddRowWithDeDup is part of the DeDupingRowContainer interface. func (f *DiskBackedRowContainer) AddRowWithDeDup( ctx context.Context, row rowenc.EncDatumRow, ) (int, error) { if !f.UsingDisk() { if err := f.encodeKey(ctx, row); err != nil { return 0, err } encodedStr := string(f.scratchKey) idx, ok := f.keyToIndex[encodedStr] if ok { return idx, nil } idx = f.Len() if err := f.AddRow(ctx, row); err != nil { return 0, err } // AddRow may have spilled and deleted the map. if !f.UsingDisk() { f.keyToIndex[encodedStr] = idx } return idx, nil } // Using disk. return f.drc.AddRowWithDeDup(ctx, row) } func (f *DiskBackedRowContainer) encodeKey(ctx context.Context, row rowenc.EncDatumRow) error { if len(row) != len(f.mrc.types) { log.Fatalf(ctx, "invalid row length %d, expected %d", len(row), len(f.mrc.types)) } f.scratchKey = f.scratchKey[:0] for i, orderInfo := range f.mrc.ordering { col := orderInfo.ColIdx var err error f.scratchKey, err = row[col].Encode(f.mrc.types[col], &f.datumAlloc, f.encodings[i], f.scratchKey) if err != nil { return err } } return nil } // Sort is part of the SortableRowContainer interface. func (f *DiskBackedRowContainer) Sort(ctx context.Context) { f.src.Sort(ctx) } // Reorder implements ReorderableRowContainer. func (f *DiskBackedRowContainer) Reorder( ctx context.Context, ordering colinfo.ColumnOrdering, ) error { return f.src.Reorder(ctx, ordering) } // InitTopK is part of the SortableRowContainer interface. func (f *DiskBackedRowContainer) InitTopK() { f.src.InitTopK() } // MaybeReplaceMax is part of the SortableRowContainer interface. 
func (f *DiskBackedRowContainer) MaybeReplaceMax( ctx context.Context, row rowenc.EncDatumRow, ) error { return f.src.MaybeReplaceMax(ctx, row) } // NewIterator is part of the SortableRowContainer interface. func (f *DiskBackedRowContainer) NewIterator(ctx context.Context) RowIterator { return f.src.NewIterator(ctx) } // NewFinalIterator is part of the SortableRowContainer interface. func (f *DiskBackedRowContainer) NewFinalIterator(ctx context.Context) RowIterator { return f.src.NewFinalIterator(ctx) } // UnsafeReset resets the container for reuse. The DiskBackedRowContainer will // reset to use memory if it is using disk. func (f *DiskBackedRowContainer) UnsafeReset(ctx context.Context) error { if f.deDuplicate { f.keyToIndex = make(map[string]int) } if f.drc != nil { f.drc.Close(ctx) f.src = f.mrc f.drc = nil return nil } return f.mrc.UnsafeReset(ctx) } // Close is part of the SortableRowContainer interface. func (f *DiskBackedRowContainer) Close(ctx context.Context) { if f.drc != nil { f.drc.Close(ctx) } f.mrc.Close(ctx) if f.deDuplicate { f.keyToIndex = nil } } // Spilled returns whether or not the DiskBackedRowContainer spilled to disk // in its lifetime. func (f *DiskBackedRowContainer) Spilled() bool { return f.spilled } // UsingDisk returns whether or not the DiskBackedRowContainer is currently // using disk. func (f *DiskBackedRowContainer) UsingDisk() bool { return f.drc != nil } // spillIfMemErr checks err and calls SpillToDisk if the given err is an out of // memory error. Returns whether the DiskBackedRowContainer spilled to disk and // an error if one occurred while doing so. func (f *DiskBackedRowContainer) spillIfMemErr(ctx context.Context, err error) (bool, error) { if !sqlerrors.IsOutOfMemoryError(err) { return false, nil } if spillErr := f.SpillToDisk(ctx); spillErr != nil { return false, spillErr } log.VEventf(ctx, 2, "spilled to disk: %v", err) return true, nil } // SpillToDisk creates a disk row container, injects all the data from the // in-memory container into it, and clears the in-memory one afterwards. func (f *DiskBackedRowContainer) SpillToDisk(ctx context.Context) error { if f.UsingDisk() { return errors.New("already using disk") } drc := MakeDiskRowContainer(f.diskMonitor, f.mrc.types, f.mrc.ordering, f.engine) if f.deDuplicate { drc.DoDeDuplicate() // After spilling to disk we don't need this map to de-duplicate. The // DiskRowContainer will do the de-duplication. Calling AddRow() below // is correct since these rows are already de-duplicated. f.keyToIndex = nil } i := f.mrc.NewFinalIterator(ctx) defer i.Close() for i.Rewind(); ; i.Next() { if ok, err := i.Valid(); err != nil { return err } else if !ok { break } memRow, err := i.Row() if err != nil { return err } if err := drc.AddRow(ctx, memRow); err != nil { return err } } f.mrc.Clear(ctx) f.src = &drc f.drc = &drc f.spilled = true return nil } // DiskBackedIndexedRowContainer is a wrapper around DiskBackedRowContainer // that adds an index to each row added in the order of addition of those rows // by storing an extra int column at the end of each row. These indices can be // thought of as ordinals of the rows. // // Note: although DiskRowContainer appends unique rowIDs to the keys that the // rows are put at, MemRowContainer doesn't do something like that, so the code // that utilizes internal rowIDs of DiskRowContainer ends up being worse than // having this specialized container. 
type DiskBackedIndexedRowContainer struct { *DiskBackedRowContainer scratchEncRow rowenc.EncDatumRow storedTypes []*types.T datumAlloc rowenc.DatumAlloc rowAlloc rowenc.EncDatumRowAlloc idx uint64 // the index of the next row to be added into the container // These fields are for optimizations when container spilled to disk. diskRowIter RowIterator idxRowIter int // nextPosToCache is the index of the row to be cached next. If it is greater // than 0, the cache contains all rows with position in the range // [firstCachedRowPos, nextPosToCache). firstCachedRowPos int nextPosToCache int // indexedRowsCache is the cache of up to maxCacheSize contiguous rows. indexedRowsCache ring.Buffer // maxCacheSize indicates the maximum number of rows to be cached. It is // initialized to maxIndexedRowsCacheSize and dynamically adjusted if OOM // error is encountered. maxCacheSize int cacheMemAcc mon.BoundAccount hitCount int missCount int // DisableCache is intended for testing only. It can be set to true to // disable reading and writing from the row cache. DisableCache bool } var _ IndexedRowContainer = &DiskBackedIndexedRowContainer{} // NewDiskBackedIndexedRowContainer creates a DiskBackedIndexedRowContainer // with the given engine as the underlying store that rows are stored on when // it spills to disk. // Arguments: // - ordering is the output ordering; the order in which rows should be sorted. // - types is the schema of rows that will be added to this container. // - evalCtx defines the context in which to evaluate comparisons, only used // when storing rows in memory. // - engine is the underlying store that rows are stored on when the container // spills to disk. // - memoryMonitor is used to monitor this container's memory usage. // - diskMonitor is used to monitor this container's disk usage. func NewDiskBackedIndexedRowContainer( ordering colinfo.ColumnOrdering, typs []*types.T, evalCtx *tree.EvalContext, engine diskmap.Factory, memoryMonitor *mon.BytesMonitor, diskMonitor *mon.BytesMonitor, ) *DiskBackedIndexedRowContainer { d := DiskBackedIndexedRowContainer{} // We will be storing an index of each row as the last INT column. d.storedTypes = make([]*types.T, len(typs)+1) copy(d.storedTypes, typs) d.storedTypes[len(d.storedTypes)-1] = types.Int d.scratchEncRow = make(rowenc.EncDatumRow, len(d.storedTypes)) d.DiskBackedRowContainer = &DiskBackedRowContainer{} d.DiskBackedRowContainer.Init(ordering, d.storedTypes, evalCtx, engine, memoryMonitor, diskMonitor) d.maxCacheSize = maxIndexedRowsCacheSize d.cacheMemAcc = memoryMonitor.MakeBoundAccount() return &d } // AddRow implements SortableRowContainer. func (f *DiskBackedIndexedRowContainer) AddRow(ctx context.Context, row rowenc.EncDatumRow) error { copy(f.scratchEncRow, row) f.scratchEncRow[len(f.scratchEncRow)-1] = rowenc.DatumToEncDatum( types.Int, tree.NewDInt(tree.DInt(f.idx)), ) f.idx++ return f.DiskBackedRowContainer.AddRow(ctx, f.scratchEncRow) } // Reorder implements ReorderableRowContainer. func (f *DiskBackedIndexedRowContainer) Reorder( ctx context.Context, ordering colinfo.ColumnOrdering, ) error { if err := f.DiskBackedRowContainer.Reorder(ctx, ordering); err != nil { return err } f.resetCache(ctx) f.resetIterator() return nil } // resetCache resets cache-related fields allowing for reusing the underlying // already allocated memory. Since all rows in the cache are flushed, it also // clears the corresponding memory account. 
func (f *DiskBackedIndexedRowContainer) resetCache(ctx context.Context) { f.firstCachedRowPos = 0 f.nextPosToCache = 0 f.indexedRowsCache.Reset() f.cacheMemAcc.Clear(ctx) } func (f *DiskBackedIndexedRowContainer) resetIterator() { if f.diskRowIter != nil { f.diskRowIter.Close() f.diskRowIter = nil f.idxRowIter = 0 } } // UnsafeReset resets the underlying container (if it is using disk, it will be // reset to using memory). func (f *DiskBackedIndexedRowContainer) UnsafeReset(ctx context.Context) error { f.resetCache(ctx) f.resetIterator() f.idx = 0 return f.DiskBackedRowContainer.UnsafeReset(ctx) } // Close implements SortableRowContainer. func (f *DiskBackedIndexedRowContainer) Close(ctx context.Context) { if f.diskRowIter != nil { f.diskRowIter.Close() } f.cacheMemAcc.Close(ctx) f.DiskBackedRowContainer.Close(ctx) } const maxIndexedRowsCacheSize = 4096 // GetRow implements tree.IndexedRows. // // Getting a row by index is fast from an in-memory row container but is a lot // slower from a disk-backed one. In order to mitigate the impact we add // optimizations of maintaining a cache of tree.IndexedRow's and storing a disk // iterator along with the index of the row it currently points at. func (f *DiskBackedIndexedRowContainer) GetRow( ctx context.Context, pos int, ) (tree.IndexedRow, error) { var rowWithIdx rowenc.EncDatumRow var err error if f.UsingDisk() { if f.DisableCache { return f.getRowWithoutCache(ctx, pos), nil } // The cache contains all contiguous rows up to the biggest pos requested // so far (even if the rows were not requested explicitly). For example, // if the cache is empty and the request comes for a row at pos 3, the // cache will contain 4 rows at positions 0, 1, 2, and 3. if pos >= f.firstCachedRowPos && pos < f.nextPosToCache { requestedRowCachePos := pos - f.firstCachedRowPos f.hitCount++ return f.indexedRowsCache.Get(requestedRowCachePos).(tree.IndexedRow), nil } f.missCount++ if f.diskRowIter == nil { f.diskRowIter = f.DiskBackedRowContainer.drc.NewIterator(ctx) f.diskRowIter.Rewind() } if f.idxRowIter > pos { // The iterator has been advanced further than we need, so we need to // start iterating from the beginning. log.VEventf(ctx, 1, "rewinding: cache contains indices [%d, %d) but index %d requested", f.firstCachedRowPos, f.nextPosToCache, pos) f.idxRowIter = 0 f.diskRowIter.Rewind() f.resetCache(ctx) if pos-maxIndexedRowsCacheSize > f.nextPosToCache { // The requested pos is further away from the beginning of the // container for the cache to hold all the rows up to pos, so we need // to skip exactly pos-maxIndexedRowsCacheSize of them. f.nextPosToCache = pos - maxIndexedRowsCacheSize f.firstCachedRowPos = f.nextPosToCache } } for ; ; f.diskRowIter.Next() { if ok, err := f.diskRowIter.Valid(); err != nil { return nil, err } else if !ok { return nil, errors.Errorf("row at pos %d not found", pos) } if f.idxRowIter == f.nextPosToCache { rowWithIdx, err = f.diskRowIter.Row() if err != nil { return nil, err } for i := range rowWithIdx { if err := rowWithIdx[i].EnsureDecoded(f.storedTypes[i], &f.datumAlloc); err != nil { return nil, err } } row, rowIdx := rowWithIdx[:len(rowWithIdx)-1], rowWithIdx[len(rowWithIdx)-1].Datum if idx, ok := rowIdx.(*tree.DInt); ok { if f.indexedRowsCache.Len() == f.maxCacheSize { // The cache size is capped at f.maxCacheSize, so we reuse the row // with the smallest pos, put it as the last row, and advance // f.firstCachedRowPos. 
if err := f.reuseFirstRowInCache(ctx, int(*idx), row); err != nil { return nil, err } } else { // We choose to ignore minor details like IndexedRow overhead and // the cache overhead. usage := sizeOfInt + int64(row.Size()) if err := f.cacheMemAcc.Grow(ctx, usage); err != nil { if sqlerrors.IsOutOfMemoryError(err) { // We hit the memory limit, so we need to cap the cache size // and reuse the memory underlying first row in the cache. if f.indexedRowsCache.Len() == 0 { // The cache is empty, so there is no memory to be reused. return nil, err } f.maxCacheSize = f.indexedRowsCache.Len() if err := f.reuseFirstRowInCache(ctx, int(*idx), row); err != nil { return nil, err } } else { return nil, err } } else { // We actually need to copy the row into memory. ir := IndexedRow{int(*idx), f.rowAlloc.CopyRow(row)} f.indexedRowsCache.AddLast(ir) } } f.nextPosToCache++ } else { return nil, errors.Errorf("unexpected last column type: should be DInt but found %T", idx) } if f.idxRowIter == pos { return f.indexedRowsCache.GetLast().(tree.IndexedRow), nil } } f.idxRowIter++ } } rowWithIdx = f.DiskBackedRowContainer.mrc.EncRow(pos) row, rowIdx := rowWithIdx[:len(rowWithIdx)-1], rowWithIdx[len(rowWithIdx)-1].Datum if idx, ok := rowIdx.(*tree.DInt); ok { return IndexedRow{int(*idx), row}, nil } return nil, errors.Errorf("unexpected last column type: should be DInt but found %T", rowIdx) } // reuseFirstRowInCache reuses the underlying memory of the first row in the // cache to store 'row' and puts it as the last one in the cache. It adjusts // the memory account accordingly and, if necessary, removes some first rows. func (f *DiskBackedIndexedRowContainer) reuseFirstRowInCache( ctx context.Context, idx int, row rowenc.EncDatumRow, ) error { newRowSize := row.Size() for { if f.indexedRowsCache.Len() == 0 { return errors.Errorf("unexpectedly the cache of DiskBackedIndexedRowContainer contains zero rows") } indexedRowToReuse := f.indexedRowsCache.GetFirst().(IndexedRow) oldRowSize := indexedRowToReuse.Row.Size() delta := int64(newRowSize - oldRowSize) if delta > 0 { // New row takes up more memory than the old one. if err := f.cacheMemAcc.Grow(ctx, delta); err != nil { if sqlerrors.IsOutOfMemoryError(err) { // We need to actually reduce the cache size, so we remove the first // row and adjust the memory account, maxCacheSize, and // f.firstCachedRowPos accordingly. f.indexedRowsCache.RemoveFirst() f.cacheMemAcc.Shrink(ctx, int64(oldRowSize)) f.maxCacheSize-- f.firstCachedRowPos++ if f.indexedRowsCache.Len() == 0 { return err } continue } return err } } else if delta < 0 { f.cacheMemAcc.Shrink(ctx, -delta) } indexedRowToReuse.Idx = idx copy(indexedRowToReuse.Row, row) f.indexedRowsCache.RemoveFirst() f.indexedRowsCache.AddLast(indexedRowToReuse) f.firstCachedRowPos++ return nil } } // getRowWithoutCache returns the row at requested position without using the // cache. It utilizes the same disk row iterator along multiple consequent // calls and rewinds the iterator only when it has been advanced further than // the position requested. // // NOTE: this method should only be used for testing purposes. 
func (f *DiskBackedIndexedRowContainer) getRowWithoutCache( ctx context.Context, pos int, ) tree.IndexedRow { if !f.UsingDisk() { panic(errors.Errorf("getRowWithoutCache is called when the container is using memory")) } if f.diskRowIter == nil { f.diskRowIter = f.DiskBackedRowContainer.drc.NewIterator(ctx) f.diskRowIter.Rewind() } if f.idxRowIter > pos { // The iterator has been advanced further than we need, so we need to // start iterating from the beginning. f.idxRowIter = 0 f.diskRowIter.Rewind() } for ; ; f.diskRowIter.Next() { if ok, err := f.diskRowIter.Valid(); err != nil { panic(err) } else if !ok { panic(errors.AssertionFailedf("row at pos %d not found", pos)) } if f.idxRowIter == pos { rowWithIdx, err := f.diskRowIter.Row() if err != nil { panic(err) } for i := range rowWithIdx { if err := rowWithIdx[i].EnsureDecoded(f.storedTypes[i], &f.datumAlloc); err != nil { panic(err) } } row, rowIdx := rowWithIdx[:len(rowWithIdx)-1], rowWithIdx[len(rowWithIdx)-1].Datum if idx, ok := rowIdx.(*tree.DInt); ok { return IndexedRow{int(*idx), f.rowAlloc.CopyRow(row)} } panic(errors.Errorf("unexpected last column type: should be DInt but found %T", rowIdx)) } f.idxRowIter++ } } // IndexedRow is a row with a corresponding index. type IndexedRow struct { Idx int Row rowenc.EncDatumRow } // GetIdx implements tree.IndexedRow interface. func (ir IndexedRow) GetIdx() int { return ir.Idx } // GetDatum implements tree.IndexedRow interface. func (ir IndexedRow) GetDatum(colIdx int) (tree.Datum, error) { return ir.Row[colIdx].Datum, nil } // GetDatums implements tree.IndexedRow interface. func (ir IndexedRow) GetDatums(startColIdx, endColIdx int) (tree.Datums, error) { datums := make(tree.Datums, 0, endColIdx-startColIdx) for idx := startColIdx; idx < endColIdx; idx++ { datums = append(datums, ir.Row[idx].Datum) } return datums, nil } const sizeOfInt = int64(unsafe.Sizeof(int(0)))
pkg/sql/rowcontainer/row_container.go
0
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.00025452085537835956, 0.00017307783127762377, 0.00016188842710107565, 0.00017141937860287726, 0.000011284003448963631 ]
{ "id": 8, "code_window": [ "\treturn &memo.SetPrivate{\n", "\t\tLeftCols: leftAndOutCols,\n", "\t\tRightCols: opt.ColSetToList(right.Cols),\n", "\t\tOutCols: leftAndOutCols,\n", "\t}\n", "}\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\t\tRightCols: right.Cols.ToList(),\n" ], "file_path": "pkg/sql/opt/xform/select_funcs.go", "type": "replace", "edit_start_line_idx": 1003 }
# We are sending vt100 terminal sequences below, so inform readline # accordingly. set env(TERM) vt100 system "mkdir -p logs" # Keep the history in a test location, so as to not override the # developer's own history file when running out of Docker. set histfile "cockroach_sql_history" set ::env(COCKROACH_SKIP_ENABLING_DIAGNOSTIC_REPORTING) "true" set ::env(COCKROACH_CONNECT_TIMEOUT) 15 set ::env(COCKROACH_SQL_CLI_HISTORY) $histfile # Set client commands as insecure. The server uses --insecure. set ::env(COCKROACH_INSECURE) "true" system "rm -f $histfile" # Everything in this test should be fast. Don't be tolerant for long # waits. set timeout 30 # When run via Docker the enclosing terminal has 0 columns and 0 rows, # and this confuses readline. Ensure sane defaults here. set stty_init "cols 80 rows 25" # Convenience function to tag what's going on in log files. proc report {text} { system "echo; echo \$(date '+.%y%m%d %H:%M:%S.%N') EXPECT TEST: '$text' | tee -a logs/expect-cmd.log" # We really want to have all files erasable outside of the container # even though the commands here run with uid 0. # Docker is obnoxious in that it doesn't support setting `umask`. # Also CockroachDB doesn't honor umask anyway. # So we simply come after the fact and adjust the permissions. # # The find may race with a cockroach process shutting down in the # background; cockroach might be deleting files as they are being # found, causing chmod to not find its target file. We ignore # these errors. system "find logs -exec chmod a+rw '{}' \\; || true" } # Catch signals proc mysig {} { report "EXPECT KILLED BY SIGNAL" exit 130 } trap mysig SIGINT trap mysig SIGTERM # Convenience functions to tag a test proc start_test {text} { report "START TEST: $text" } proc end_test {} { report "END TEST" } # Convenience wrapper function, which ensures that all expects are # mandatory (i.e. with a mandatory fail if the expected output doesn't # show up fast). proc handle_timeout {text} { report "TIMEOUT WAITING FOR \"$text\"" exit 1 } proc eexpect {text} { expect { $text {} timeout { handle_timeout $text } } } # Convenience function that sends Ctrl+C to the monitored process. proc interrupt {} { report "INTERRUPT TO FOREGROUND PROCESS" send "\003" sleep 0.4 } # Convenience function that sends Ctrl+D to the monitored process. # Leaves some upfront delay to let the readline process the time # to initialize the key binding. proc send_eof {} { report "EOF TO FOREGROUND PROCESS" sleep 0.4 send "\004" } # Convenience functions to start/shutdown the server. # Preserves the invariant that the server's PID is saved # in `server_pid`. proc start_server {argv} { report "BEGIN START SERVER" # Note: when changing this command line, update the telemetry tests # in test_flags.tcl. system "$argv start-single-node --insecure --max-sql-memory=128MB --pid-file=server_pid --listening-url-file=server_url --background -s=path=logs/db >>logs/expect-cmd.log 2>&1; $argv sql --insecure -e 'select 1'" report "START SERVER DONE" } proc stop_server {argv} { report "BEGIN STOP SERVER" # Trigger a normal shutdown. # If after 30 seconds the server hasn't shut down, kill the process and trigger an error. # Note: kill -CONT tests whether the PID exists (SIGCONT is a no-op for the process). system "kill -TERM `cat server_pid` 2>/dev/null; for i in `seq 1 30`; do kill -CONT `cat server_pid` 2>/dev/null || exit 0 echo still waiting sleep 1 done echo 'server still running?' # Send an unclean shutdown signal to trigger a stack trace dump. 
kill -ABRT `cat server_pid` 2>/dev/null # Sleep to increase the probability that the stack trace actually # makes it to disk before we force-kill the process. sleep 1 kill -KILL `cat server_pid` 2>/dev/null exit 1" report "END STOP SERVER" } proc flush_server_logs {} { report "BEGIN FLUSH LOGS" system "kill -HUP `cat server_pid` 2>/dev/null" # Wait for flush to occur. system "for i in `seq 1 3`; do grep 'hangup received, flushing logs' logs/db/logs/cockroach.log && exit 0; echo still waiting sleep 1 done echo 'server failed to flush logs?' exit 1" report "END FLUSH LOGS" } proc force_stop_server {argv} { report "BEGIN FORCE STOP SERVER" system "kill -KILL `cat server_pid`" report "END FORCE STOP SERVER" }
pkg/cli/interactive_tests/common.tcl
0
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.00018029013881459832, 0.00017234070401173085, 0.0001623637945158407, 0.00017267755174543709, 0.000004087004072061973 ]
{ "id": 8, "code_window": [ "\treturn &memo.SetPrivate{\n", "\t\tLeftCols: leftAndOutCols,\n", "\t\tRightCols: opt.ColSetToList(right.Cols),\n", "\t\tOutCols: leftAndOutCols,\n", "\t}\n", "}\n" ], "labels": [ "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\t\tRightCols: right.Cols.ToList(),\n" ], "file_path": "pkg/sql/opt/xform/select_funcs.go", "type": "replace", "edit_start_line_idx": 1003 }
// Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package timeutil

import (
	"strings"
	"time"

	// embed tzdata in case system tzdata is not available.
	_ "time/tzdata"
)

//go:generate go run generate_lowercase_timezones.go

// LoadLocation returns the time.Location with the given name.
// The name is taken to be a location name corresponding to a file
// in the IANA Time Zone database, such as "America/New_York".
//
// We do not use Go's time.LoadLocation() directly because it maps
// "Local" to the local time zone, whereas we want UTC.
func LoadLocation(name string) (*time.Location, error) {
	loweredName := strings.ToLower(name)
	switch loweredName {
	case "local", "default":
		loweredName = "utc"
		name = "UTC"
	}
	// If we know this is a lowercase name in tzdata, use the uppercase form.
	if v, ok := lowercaseTimezones[loweredName]; ok {
		// If this location is not found, we may have a case where the tzdata
		// names have different values than the system tz names. If this is the
		// case, fall back onto the default logic, where the name is read from
		// other sources before tzdata.
		if loc, err := time.LoadLocation(v); err == nil {
			return loc, nil
		}
	}
	return time.LoadLocation(name)
}
pkg/util/timeutil/zoneinfo.go
0
https://github.com/cockroachdb/cockroach/commit/eb92b3b7b90aa7c5c97039f233ee64cd17f28295
[ 0.0001798461307771504, 0.00017454024055041373, 0.00016738183330744505, 0.00017617222329135984, 0.000004682089183916105 ]
{ "id": 1, "code_window": [ "\t\t\t\tmetaMu.Lock()\n", "\t\t\t\tmeta.endedCycle = intDataUpdateTracker.current()\n", "\t\t\t\tmeta, err = o.updateMetacacheListing(meta, rpc)\n", "\t\t\t\tif meta.status == scanStateError {\n", "\t\t\t\t\tcancel()\n", "\t\t\t\t\texit = true\n", "\t\t\t\t}\n", "\t\t\t\tmetaMu.Unlock()\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\t\t\tlogger.LogIf(ctx, err)\n" ], "file_path": "cmd/metacache-set.go", "type": "add", "edit_start_line_idx": 656 }
/* * MinIO Cloud Storage, (C) 2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package cmd import ( "bytes" "context" "encoding/gob" "encoding/json" "errors" "fmt" "io" "strconv" "strings" "sync" "time" jsoniter "github.com/json-iterator/go" "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/color" "github.com/minio/minio/pkg/console" "github.com/minio/minio/pkg/hash" ) type listPathOptions struct { // ID of the listing. // This will be used to persist the list. ID string // Bucket of the listing. Bucket string // Directory inside the bucket. BaseDir string // Scan/return only content with prefix. Prefix string // FilterPrefix will return only results with this prefix when scanning. // Should never contain a slash. // Prefix should still be set. FilterPrefix string // Marker to resume listing. // The response will be the first entry AFTER this object name. Marker string // Limit the number of results. Limit int // The number of disks to ask. Special values: // 0 uses default number of disks. // -1 use at least 50% of disks or at least the default number. AskDisks int // InclDeleted will keep all entries where latest version is a delete marker. InclDeleted bool // Scan recursively. // If false only main directory will be scanned. // Should always be true if Separator is n SlashSeparator. Recursive bool // Separator to use. Separator string // Create indicates that the lister should not attempt to load an existing cache. Create bool // CurrentCycle indicates the current bloom cycle. // Will be used if a new scan is started. CurrentCycle uint64 // OldestCycle indicates the oldest cycle acceptable. OldestCycle uint64 // Include pure directories. IncludeDirectories bool // Transient is set if the cache is transient due to an error or being a reserved bucket. // This means the cache metadata will not be persisted on disk. // A transient result will never be returned from the cache so knowing the list id is required. Transient bool // discardResult will not persist the cache to storage. // When the initial results are returned listing will be canceled. discardResult bool } func init() { gob.Register(listPathOptions{}) } // newMetacache constructs a new metacache from the options. func (o listPathOptions) newMetacache() metacache { return metacache{ id: o.ID, bucket: o.Bucket, root: o.BaseDir, recursive: o.Recursive, status: scanStateStarted, error: "", started: UTCNow(), lastHandout: UTCNow(), lastUpdate: UTCNow(), ended: time.Time{}, startedCycle: o.CurrentCycle, endedCycle: 0, dataVersion: metacacheStreamVersion, filter: o.FilterPrefix, } } func (o *listPathOptions) debugf(format string, data ...interface{}) { if serverDebugLog { console.Debugf(format+"\n", data...) } } func (o *listPathOptions) debugln(data ...interface{}) { if serverDebugLog { console.Debugln(data...) } } // gatherResults will collect all results on the input channel and filter results according to the options. // Caller should close the channel when done. 
// The returned function will return the results once there is enough or input is closed. func (o *listPathOptions) gatherResults(in <-chan metaCacheEntry) func() (metaCacheEntriesSorted, error) { var resultsDone = make(chan metaCacheEntriesSorted) // Copy so we can mutate resCh := resultsDone resErr := io.EOF go func() { var results metaCacheEntriesSorted for entry := range in { if resCh == nil { // past limit continue } if !o.IncludeDirectories && entry.isDir() { continue } o.debugln("gather got:", entry.name) if o.Marker != "" && entry.name <= o.Marker { o.debugln("pre marker") continue } if !strings.HasPrefix(entry.name, o.Prefix) { o.debugln("not in prefix") continue } if !o.Recursive && !entry.isInDir(o.Prefix, o.Separator) { o.debugln("not in dir", o.Prefix, o.Separator) continue } if !o.InclDeleted && entry.isObject() && entry.isLatestDeletemarker() { o.debugln("latest is delete marker") continue } if o.Limit > 0 && results.len() >= o.Limit { // We have enough and we have more. // Do not return io.EOF if resCh != nil { resErr = nil resCh <- results resCh = nil } continue } o.debugln("adding...") results.o = append(results.o, entry) } if resCh != nil { resErr = io.EOF resCh <- results } }() return func() (metaCacheEntriesSorted, error) { return <-resultsDone, resErr } } // findFirstPart will find the part with 0 being the first that corresponds to the marker in the options. // io.ErrUnexpectedEOF is returned if the place containing the marker hasn't been scanned yet. // io.EOF indicates the marker is beyond the end of the stream and does not exist. func (o *listPathOptions) findFirstPart(fi FileInfo) (int, error) { search := o.Marker if search == "" { search = o.Prefix } if search == "" { return 0, nil } o.debugln("searching for ", search) var tmp metacacheBlock var json = jsoniter.ConfigCompatibleWithStandardLibrary i := 0 for { partKey := fmt.Sprintf("%s-metacache-part-%d", ReservedMetadataPrefixLower, i) v, ok := fi.Metadata[partKey] if !ok { o.debugln("no match in metadata, waiting") return -1, io.ErrUnexpectedEOF } err := json.Unmarshal([]byte(v), &tmp) if !ok { logger.LogIf(context.Background(), err) return -1, err } if tmp.First == "" && tmp.Last == "" && tmp.EOS { return 0, errFileNotFound } if tmp.First >= search { o.debugln("First >= search", v) return i, nil } if tmp.Last >= search { o.debugln("Last >= search", v) return i, nil } if tmp.EOS { o.debugln("no match, at EOS", v) return -3, io.EOF } o.debugln("First ", tmp.First, "<", search, " search", i) i++ } } // updateMetacacheListing will update the metacache listing. func (o *listPathOptions) updateMetacacheListing(m metacache, rpc *peerRESTClient) (metacache, error) { if o.Transient { return localMetacacheMgr.getTransient().updateCacheEntry(m) } if rpc == nil { return localMetacacheMgr.updateCacheEntry(m) } return rpc.UpdateMetacacheListing(context.Background(), m) } func getMetacacheBlockInfo(fi FileInfo, block int) (*metacacheBlock, error) { var tmp metacacheBlock partKey := fmt.Sprintf("%s-metacache-part-%d", ReservedMetadataPrefixLower, block) v, ok := fi.Metadata[partKey] if !ok { return nil, io.ErrUnexpectedEOF } return &tmp, json.Unmarshal([]byte(v), &tmp) } const metacachePrefix = ".metacache" func metacachePrefixForID(bucket, id string) string { return pathJoin(bucketMetaPrefix, bucket, metacachePrefix, id) } // objectPath returns the object path of the cache. 
func (o *listPathOptions) objectPath(block int) string { return pathJoin(metacachePrefixForID(o.Bucket, o.ID), "block-"+strconv.Itoa(block)+".s2") } func (o *listPathOptions) SetFilter() { switch { case metacacheSharePrefix: return case o.CurrentCycle != o.OldestCycle: // We have a clean bloom filter return case o.Prefix == o.BaseDir: // No additional prefix return } // Remove basedir. o.FilterPrefix = strings.TrimPrefix(o.Prefix, o.BaseDir) // Remove leading and trailing slashes. o.FilterPrefix = strings.Trim(o.FilterPrefix, slashSeparator) if strings.Contains(o.FilterPrefix, slashSeparator) { // Sanity check, should not happen. o.FilterPrefix = "" } } // filter will apply the options and return the number of objects requested by the limit. // Will return io.EOF if there are no more entries with the same filter. // The last entry can be used as a marker to resume the listing. func (r *metacacheReader) filter(o listPathOptions) (entries metaCacheEntriesSorted, err error) { // Forward to prefix, if any err = r.forwardTo(o.Prefix) if err != nil { return entries, err } if o.Marker != "" { err = r.forwardTo(o.Marker) if err != nil { return entries, err } next, err := r.peek() if err != nil { return entries, err } if next.name == o.Marker { err := r.skip(1) if err != nil { return entries, err } } } o.debugln("forwarded to ", o.Prefix, "marker:", o.Marker, "sep:", o.Separator) // Filter if !o.Recursive { entries.o = make(metaCacheEntries, 0, o.Limit) pastPrefix := false err := r.readFn(func(entry metaCacheEntry) bool { if o.Prefix != "" && !strings.HasPrefix(entry.name, o.Prefix) { // We are past the prefix, don't continue. pastPrefix = true return false } if !o.IncludeDirectories && entry.isDir() { return true } if !entry.isInDir(o.Prefix, o.Separator) { return true } if !o.InclDeleted && entry.isObject() && entry.isLatestDeletemarker() { return entries.len() < o.Limit } entries.o = append(entries.o, entry) return entries.len() < o.Limit }) if (err != nil && err.Error() == io.EOF.Error()) || pastPrefix || r.nextEOF() { return entries, io.EOF } return entries, err } // We should not need to filter more. return r.readN(o.Limit, o.InclDeleted, o.IncludeDirectories, o.Prefix) } func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOptions) (entries metaCacheEntriesSorted, err error) { retries := 0 rpc := globalNotificationSys.restClientFromHash(o.Bucket) for { select { case <-ctx.Done(): return entries, ctx.Err() default: } // If many failures, check the cache state. if retries > 10 { err := o.checkMetacacheState(ctx, rpc) if err != nil { return entries, fmt.Errorf("remote listing canceled: %w", err) } retries = 1 } const retryDelay = 500 * time.Millisecond // Load first part metadata... // All operations are performed without locks, so we must be careful and allow for failures. // Read metadata associated with the object from a disk. if retries > 0 { disks := er.getOnlineDisks() if len(disks) == 0 { time.Sleep(retryDelay) retries++ continue } _, err := disks[0].ReadVersion(ctx, minioMetaBucket, o.objectPath(0), "", false) if err != nil { time.Sleep(retryDelay) retries++ continue } } // Read metadata associated with the object from all disks. 
fi, metaArr, onlineDisks, err := er.getObjectFileInfo(ctx, minioMetaBucket, o.objectPath(0), ObjectOptions{}, true) if err != nil { switch toObjectErr(err, minioMetaBucket, o.objectPath(0)).(type) { case ObjectNotFound: retries++ time.Sleep(retryDelay) continue case InsufficientReadQuorum: retries++ time.Sleep(retryDelay) continue default: return entries, fmt.Errorf("reading first part metadata: %w", err) } } partN, err := o.findFirstPart(fi) switch { case err == nil: case errors.Is(err, io.ErrUnexpectedEOF): if retries == 10 { err := o.checkMetacacheState(ctx, rpc) if err != nil { return entries, fmt.Errorf("remote listing canceled: %w", err) } retries = -1 } retries++ time.Sleep(retryDelay) continue case errors.Is(err, io.EOF): return entries, io.EOF } // We got a stream to start at. loadedPart := 0 buf := bufferPool.Get().(*bytes.Buffer) defer func() { buf.Reset() bufferPool.Put(buf) }() for { select { case <-ctx.Done(): return entries, ctx.Err() default: } if partN != loadedPart { if retries > 10 { err := o.checkMetacacheState(ctx, rpc) if err != nil { return entries, fmt.Errorf("waiting for next part %d: %w", partN, err) } retries = 1 } if retries > 0 { // Load from one disk only disks := er.getOnlineDisks() if len(disks) == 0 { time.Sleep(retryDelay) retries++ continue } _, err := disks[0].ReadVersion(ctx, minioMetaBucket, o.objectPath(partN), "", false) if err != nil { time.Sleep(retryDelay) retries++ continue } } // Load first part metadata... fi, metaArr, onlineDisks, err = er.getObjectFileInfo(ctx, minioMetaBucket, o.objectPath(partN), ObjectOptions{}, true) if err != nil { time.Sleep(retryDelay) retries++ continue } loadedPart = partN bi, err := getMetacacheBlockInfo(fi, partN) logger.LogIf(ctx, err) if err == nil { if bi.pastPrefix(o.Prefix) { return entries, io.EOF } } } buf.Reset() err := er.getObjectWithFileInfo(ctx, minioMetaBucket, o.objectPath(partN), 0, fi.Size, buf, fi, metaArr, onlineDisks) if err != nil { switch toObjectErr(err, minioMetaBucket, o.objectPath(partN)).(type) { case ObjectNotFound: retries++ time.Sleep(retryDelay) continue case InsufficientReadQuorum: retries++ time.Sleep(retryDelay) continue default: logger.LogIf(ctx, err) return entries, err } } tmp, err := newMetacacheReader(buf) if err != nil { return entries, err } e, err := tmp.filter(o) entries.o = append(entries.o, e.o...) if o.Limit > 0 && entries.len() > o.Limit { entries.truncate(o.Limit) return entries, nil } if err == nil { // We stopped within the listing, we are done for now... return entries, nil } if !errors.Is(err, io.EOF) { logger.LogIf(ctx, err) return entries, err } // We finished at the end of the block. // And should not expect any more results. bi, err := getMetacacheBlockInfo(fi, partN) logger.LogIf(ctx, err) if err != nil || bi.EOS { // We are done and there are no more parts. return entries, io.EOF } if bi.endedPrefix(o.Prefix) { // Nothing more for prefix. return entries, io.EOF } partN++ retries = 0 } } } func (er erasureObjects) SetDriveCount() int { return er.setDriveCount } // Will return io.EOF if continuing would not yield more results. func (er *erasureObjects) listPath(ctx context.Context, o listPathOptions) (entries metaCacheEntriesSorted, err error) { o.debugf(color.Green("listPath:")+" with options: %#v", o) // See if we have the listing stored. if !o.Create && !o.discardResult { entries, err := er.streamMetadataParts(ctx, o) if IsErr(err, []error{ nil, context.Canceled, context.DeadlineExceeded, }...) { // Expected good errors we don't need to return error. 
return entries, nil } if !errors.Is(err, io.EOF) { // io.EOF is expected and should be returned but no need to log it. // Log an return errors on unexpected errors. logger.LogIf(ctx, err) } return entries, err } meta := o.newMetacache() rpc := globalNotificationSys.restClientFromHash(o.Bucket) var metaMu sync.Mutex o.debugln(color.Green("listPath:")+" scanning bucket:", o.Bucket, "basedir:", o.BaseDir, "prefix:", o.Prefix, "marker:", o.Marker) // Disconnect from call above, but cancel on exit. ctx, cancel := context.WithCancel(GlobalContext) // We need to ask disks. disks := er.getOnlineDisks() defer func() { o.debugln(color.Green("listPath:")+" returning:", entries.len(), "err:", err) if err != nil && !errors.Is(err, io.EOF) { go func(err string) { metaMu.Lock() if meta.status != scanStateError { meta.error = err meta.status = scanStateError } meta, _ = o.updateMetacacheListing(meta, rpc) metaMu.Unlock() }(err.Error()) cancel() } }() askDisks := o.AskDisks listingQuorum := askDisks - 1 // Special case: ask all disks if the drive count is 4 if askDisks == -1 || er.SetDriveCount() == 4 { askDisks = len(disks) // with 'strict' quorum list on all online disks. listingQuorum = getReadQuorum(er.SetDriveCount()) } if len(disks) < askDisks { err = InsufficientReadQuorum{} logger.LogIf(ctx, fmt.Errorf("listPath: Insufficient disks, %d of %d needed are available", len(disks), askDisks)) cancel() return } // Select askDisks random disks. if len(disks) > askDisks { disks = disks[:askDisks] } // Create output for our results. var cacheCh chan metaCacheEntry if !o.discardResult { cacheCh = make(chan metaCacheEntry, metacacheBlockSize) } // Create filter for results. filterCh := make(chan metaCacheEntry, 100) filteredResults := o.gatherResults(filterCh) closeChannels := func() { if !o.discardResult { close(cacheCh) } close(filterCh) } // Cancel listing on return if non-saved list. if o.discardResult { defer cancel() } go func() { defer cancel() // Save continuous updates go func() { var err error ticker := time.NewTicker(10 * time.Second) defer ticker.Stop() var exit bool for !exit { select { case <-ticker.C: case <-ctx.Done(): exit = true } metaMu.Lock() meta.endedCycle = intDataUpdateTracker.current() meta, err = o.updateMetacacheListing(meta, rpc) if meta.status == scanStateError { cancel() exit = true } metaMu.Unlock() logger.LogIf(ctx, err) } }() const retryDelay = 200 * time.Millisecond const maxTries = 5 var bw *metacacheBlockWriter // Don't save single object listings. if !o.discardResult { // Write results to disk. bw = newMetacacheBlockWriter(cacheCh, func(b *metacacheBlock) error { // if the block is 0 bytes and its a first block skip it. // skip only this for Transient caches. if len(b.data) == 0 && b.n == 0 && o.Transient { return nil } o.debugln(color.Green("listPath:")+" saving block", b.n, "to", o.objectPath(b.n)) r, err := hash.NewReader(bytes.NewReader(b.data), int64(len(b.data)), "", "", int64(len(b.data)), false) logger.LogIf(ctx, err) custom := b.headerKV() _, err = er.putObject(ctx, minioMetaBucket, o.objectPath(b.n), NewPutObjReader(r, nil, nil), ObjectOptions{ UserDefined: custom, NoLock: true, // No need to hold namespace lock, each prefix caches uniquely. }) if err != nil { metaMu.Lock() if meta.error != "" { meta.status = scanStateError meta.error = err.Error() } metaMu.Unlock() cancel() return err } if b.n == 0 { return nil } // Update block 0 metadata. 
var retries int for { err := er.updateObjectMeta(ctx, minioMetaBucket, o.objectPath(0), b.headerKV(), ObjectOptions{}) if err == nil { break } switch err.(type) { case ObjectNotFound: return err case InsufficientReadQuorum: default: logger.LogIf(ctx, err) } if retries >= maxTries { return err } retries++ time.Sleep(retryDelay) } return nil }) } // How to resolve results. resolver := metadataResolutionParams{ dirQuorum: listingQuorum, objQuorum: listingQuorum, bucket: o.Bucket, } err := listPathRaw(ctx, listPathRawOptions{ disks: disks, bucket: o.Bucket, path: o.BaseDir, recursive: o.Recursive, filterPrefix: o.FilterPrefix, minDisks: listingQuorum, agreed: func(entry metaCacheEntry) { if !o.discardResult { cacheCh <- entry } filterCh <- entry }, partial: func(entries metaCacheEntries, nAgreed int, errs []error) { // Results Disagree :-( entry, ok := entries.resolve(&resolver) if ok { if !o.discardResult { cacheCh <- *entry } filterCh <- *entry } }, }) metaMu.Lock() if err != nil { meta.status = scanStateError meta.error = err.Error() } // Save success if meta.error == "" { meta.status = scanStateSuccess meta.endedCycle = intDataUpdateTracker.current() } meta, _ = o.updateMetacacheListing(meta, rpc) metaMu.Unlock() closeChannels() if !o.discardResult { if err := bw.Close(); err != nil { metaMu.Lock() meta.error = err.Error() meta.status = scanStateError meta, err = o.updateMetacacheListing(meta, rpc) metaMu.Unlock() } } }() return filteredResults() } type listPathRawOptions struct { disks []StorageAPI bucket, path string recursive bool filterPrefix string // Minimum number of good disks to continue. // An error will be returned if this many disks returned an error. minDisks int reportNotFound bool // Callbacks with results: // If set to nil, it will not be called. // agreed is called if all disks agreed. agreed func(entry metaCacheEntry) // partial will be returned when there is disagreement between disks. // if disk did not return any result, but also haven't errored // the entry will be empty and errs will partial func(entries metaCacheEntries, nAgreed int, errs []error) // finished will be called when all streams have finished and // more than one disk returned an error. // Will not be called if everything operates as expected. finished func(errs []error) } // listPathRaw will list a path on the provided drives. // See listPathRawOptions on how results are delivered. // Directories are always returned. // Cache will be bypassed. // Context cancellation will be respected but may take a while to effectuate. func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) { disks := opts.disks if len(disks) == 0 { return fmt.Errorf("listPathRaw: 0 drives provided") } // Disconnect from call above, but cancel on exit. ctx, cancel := context.WithCancel(GlobalContext) defer cancel() askDisks := len(disks) readers := make([]*metacacheReader, askDisks) for i := range disks { r, w := io.Pipe() d := disks[i] readers[i], err = newMetacacheReader(r) if err != nil { return err } // Send request to each disk. 
go func() { werr := d.WalkDir(ctx, WalkDirOptions{ Bucket: opts.bucket, BaseDir: opts.path, Recursive: opts.recursive, ReportNotFound: opts.reportNotFound, FilterPrefix: opts.filterPrefix}, w) w.CloseWithError(werr) if werr != io.EOF && werr != nil && werr.Error() != errFileNotFound.Error() && werr.Error() != errVolumeNotFound.Error() { logger.LogIf(ctx, werr) } }() } topEntries := make(metaCacheEntries, len(readers)) errs := make([]error, len(readers)) for { // Get the top entry from each var current metaCacheEntry var atEOF, fnf, hasErr, agree int for i := range topEntries { topEntries[i] = metaCacheEntry{} } select { case <-ctx.Done(): return ctx.Err() default: } for i, r := range readers { if errs[i] != nil { hasErr++ continue } entry, err := r.peek() switch err { case io.EOF: atEOF++ continue case nil: default: if err.Error() == errFileNotFound.Error() { atEOF++ fnf++ continue } if err.Error() == errVolumeNotFound.Error() { atEOF++ fnf++ continue } hasErr++ errs[i] = err continue } // If no current, add it. if current.name == "" { topEntries[i] = entry current = entry agree++ continue } // If exact match, we agree. if current.matches(&entry, opts.bucket) { topEntries[i] = entry agree++ continue } // If only the name matches we didn't agree, but add it for resolution. if entry.name == current.name { topEntries[i] = entry continue } // We got different entries if entry.name > current.name { continue } // We got a new, better current. // Clear existing entries. for i := range topEntries[:i] { topEntries[i] = metaCacheEntry{} } agree = 1 current = entry topEntries[i] = entry } // Stop if we exceed number of bad disks if hasErr > len(disks)-opts.minDisks && hasErr > 0 { if opts.finished != nil { opts.finished(errs) } var combinedErr []string for i, err := range errs { if err != nil { combinedErr = append(combinedErr, fmt.Sprintf("disk %d returned: %s", i, err)) } } return errors.New(strings.Join(combinedErr, ", ")) } // Break if all at EOF or error. if atEOF+hasErr == len(readers) { if hasErr > 0 && opts.finished != nil { opts.finished(errs) } break } if fnf == len(readers) { return errFileNotFound } if agree == len(readers) { // Everybody agreed for _, r := range readers { r.skip(1) } if opts.agreed != nil { opts.agreed(current) } continue } if opts.partial != nil { opts.partial(topEntries, agree, errs) } // Skip the inputs we used. for i, r := range readers { if topEntries[i].name != "" { r.skip(1) } } } return nil }
cmd/metacache-set.go
1
https://github.com/minio/minio/commit/4593b146bec40cc062fe921f2d47ca4c0ab98b9a
[ 0.9982183575630188, 0.09412689507007599, 0.00016010491526685655, 0.00017508406017441303, 0.28432542085647583 ]
{ "id": 1, "code_window": [ "\t\t\t\tmetaMu.Lock()\n", "\t\t\t\tmeta.endedCycle = intDataUpdateTracker.current()\n", "\t\t\t\tmeta, err = o.updateMetacacheListing(meta, rpc)\n", "\t\t\t\tif meta.status == scanStateError {\n", "\t\t\t\t\tcancel()\n", "\t\t\t\t\texit = true\n", "\t\t\t\t}\n", "\t\t\t\tmetaMu.Unlock()\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\t\t\tlogger.LogIf(ctx, err)\n" ], "file_path": "cmd/metacache-set.go", "type": "add", "edit_start_line_idx": 656 }
/*
 * MinIO Cloud Storage (C) 2016 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import { minioBrowserPrefix, SORT_ORDER_DESC } from "./constants.js"

export const sortObjectsByName = (objects, order) => {
  let folders = objects.filter(object => object.name.endsWith("/"))
  let files = objects.filter(object => !object.name.endsWith("/"))
  folders = folders.sort((a, b) => {
    if (a.name.toLowerCase() < b.name.toLowerCase()) return -1
    if (a.name.toLowerCase() > b.name.toLowerCase()) return 1
    return 0
  })
  files = files.sort((a, b) => {
    if (a.name.toLowerCase() < b.name.toLowerCase()) return -1
    if (a.name.toLowerCase() > b.name.toLowerCase()) return 1
    return 0
  })
  if (order === SORT_ORDER_DESC) {
    folders = folders.reverse()
    files = files.reverse()
  }
  return [...folders, ...files]
}

export const sortObjectsBySize = (objects, order) => {
  let folders = objects.filter(object => object.name.endsWith("/"))
  let files = objects.filter(object => !object.name.endsWith("/"))
  files = files.sort((a, b) => a.size - b.size)
  if (order === SORT_ORDER_DESC) files = files.reverse()
  return [...folders, ...files]
}

export const sortObjectsByDate = (objects, order) => {
  let folders = objects.filter(object => object.name.endsWith("/"))
  let files = objects.filter(object => !object.name.endsWith("/"))
  files = files.sort(
    (a, b) =>
      new Date(a.lastModified).getTime() - new Date(b.lastModified).getTime()
  )
  if (order === SORT_ORDER_DESC) files = files.reverse()
  return [...folders, ...files]
}

export const pathSlice = path => {
  path = path.replace(minioBrowserPrefix, "")
  let prefix = ""
  let bucket = ""
  if (!path) return { bucket, prefix }
  let objectIndex = path.indexOf("/", 1)
  if (objectIndex == -1) {
    bucket = path.slice(1)
    return { bucket, prefix }
  }
  bucket = path.slice(1, objectIndex)
  prefix = path.slice(objectIndex + 1)
  return { bucket, prefix }
}

export const pathJoin = (bucket, prefix) => {
  if (!prefix) prefix = ""
  return minioBrowserPrefix + "/" + bucket + "/" + prefix
}

export const getRandomAccessKey = () => {
  const alphaNumericTable = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
  let arr = new Uint8Array(20)
  window.crypto.getRandomValues(arr)
  const random = Array.prototype.map.call(arr, v => {
    const i = v % alphaNumericTable.length
    return alphaNumericTable.charAt(i)
  })
  return random.join("")
}

export const getRandomSecretKey = () => {
  let arr = new Uint8Array(40)
  window.crypto.getRandomValues(arr)
  const binStr = Array.prototype.map
    .call(arr, v => {
      return String.fromCharCode(v)
    })
    .join("")
  const base64Str = btoa(binStr)
  return base64Str.replace(/\//g, "+").substr(0, 40)
}

export const getRandomString = length => {
  var text = ""
  var possible =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
  for (var i = 0; i < length; i++) {
    text += possible.charAt(Math.floor(Math.random() * possible.length))
  }
  return text
}
browser/app/js/utils.js
0
https://github.com/minio/minio/commit/4593b146bec40cc062fe921f2d47ca4c0ab98b9a
[ 0.0001787678338587284, 0.0001757093268679455, 0.00017101294361054897, 0.00017618332640267909, 0.0000021569812815869227 ]
{ "id": 1, "code_window": [ "\t\t\t\tmetaMu.Lock()\n", "\t\t\t\tmeta.endedCycle = intDataUpdateTracker.current()\n", "\t\t\t\tmeta, err = o.updateMetacacheListing(meta, rpc)\n", "\t\t\t\tif meta.status == scanStateError {\n", "\t\t\t\t\tcancel()\n", "\t\t\t\t\texit = true\n", "\t\t\t\t}\n", "\t\t\t\tmetaMu.Unlock()\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\t\t\tlogger.LogIf(ctx, err)\n" ], "file_path": "cmd/metacache-set.go", "type": "add", "edit_start_line_idx": 656 }
/*
 * MinIO Cloud Storage, (C) 2016 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"os"
	"os/exec"
	"syscall"
)

// Type of service signals currently supported.
type serviceSignal int

const (
	serviceRestart       serviceSignal = iota // Restarts the server.
	serviceStop                               // Stops the server.
	serviceReloadDynamic                      // Reload dynamic config values.
	// Add new service requests here.
)

// Global service signal channel.
var globalServiceSignalCh chan serviceSignal

// GlobalServiceDoneCh - Global service done channel.
var GlobalServiceDoneCh <-chan struct{}

// GlobalContext context that is canceled when server is requested to shut down.
var GlobalContext context.Context

// cancelGlobalContext can be used to indicate server shutdown.
var cancelGlobalContext context.CancelFunc

func initGlobalContext() {
	GlobalContext, cancelGlobalContext = context.WithCancel(context.Background())
	GlobalServiceDoneCh = GlobalContext.Done()
	globalServiceSignalCh = make(chan serviceSignal)
}

// restartProcess starts a new process passing it the active fd's. It
// doesn't fork, but starts a new process using the same environment and
// arguments as when it was originally started. This allows for a newly
// deployed binary to be started. It returns the pid of the newly started
// process when successful.
func restartProcess() error {
	// Use the original binary location. This works with symlinks such that if
	// the file it points to has been changed we will use the updated symlink.
	argv0, err := exec.LookPath(os.Args[0])
	if err != nil {
		return err
	}

	// Invokes the execve system call.
	// Re-uses the same pid. This preserves the pid over multiple server-respawns.
	return syscall.Exec(argv0, os.Args, os.Environ())
}
cmd/service.go
0
https://github.com/minio/minio/commit/4593b146bec40cc062fe921f2d47ca4c0ab98b9a
[ 0.0013749315403401852, 0.0003217090561520308, 0.00016515959578100592, 0.00017228428623639047, 0.0003980974433943629 ]
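restartProcess above relies on the execve pattern: resolve the binary again through exec.LookPath (so an updated symlink target is picked up) and replace the current process image while keeping the PID, arguments and environment. A minimal standalone sketch of that technique, assuming a Unix-like system (syscall.Exec is not available on Windows):

package main

import (
	"fmt"
	"os"
	"os/exec"
	"syscall"
)

// restartSelf replaces the current process image with a freshly resolved copy
// of our own binary, keeping the same PID, arguments and environment.
func restartSelf() error {
	argv0, err := exec.LookPath(os.Args[0])
	if err != nil {
		return err
	}
	// syscall.Exec never returns on success; the new image takes over this PID.
	return syscall.Exec(argv0, os.Args, os.Environ())
}

func main() {
	if os.Getenv("RESTARTED") == "" {
		os.Setenv("RESTARTED", "1")
		fmt.Println("restarting in place, pid:", os.Getpid())
		if err := restartSelf(); err != nil {
			fmt.Fprintln(os.Stderr, "restart failed:", err)
		}
		return
	}
	fmt.Println("running the restarted image, pid:", os.Getpid())
}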
{ "id": 1, "code_window": [ "\t\t\t\tmetaMu.Lock()\n", "\t\t\t\tmeta.endedCycle = intDataUpdateTracker.current()\n", "\t\t\t\tmeta, err = o.updateMetacacheListing(meta, rpc)\n", "\t\t\t\tif meta.status == scanStateError {\n", "\t\t\t\t\tcancel()\n", "\t\t\t\t\texit = true\n", "\t\t\t\t}\n", "\t\t\t\tmetaMu.Unlock()\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\t\t\t\tlogger.LogIf(ctx, err)\n" ], "file_path": "cmd/metacache-set.go", "type": "add", "edit_start_line_idx": 656 }
/* * MinIO Cloud Storage, (C) 2018,2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package dns import ( "context" "encoding/json" "errors" "fmt" "net" "sort" "strings" "time" "github.com/coredns/coredns/plugin/etcd/msg" "github.com/minio/minio-go/v7/pkg/set" "go.etcd.io/etcd/clientv3" ) // ErrNoEntriesFound - Indicates no entries were found for the given key (directory) var ErrNoEntriesFound = errors.New("No entries found for this key") const etcdPathSeparator = "/" // create a new coredns service record for the bucket. func newCoreDNSMsg(ip string, port string, ttl uint32, t time.Time) ([]byte, error) { return json.Marshal(&SrvRecord{ Host: ip, Port: json.Number(port), TTL: ttl, CreationDate: t, }) } // Close closes the internal etcd client and cannot be used further func (c *CoreDNS) Close() error { c.etcdClient.Close() return nil } // List - Retrieves list of DNS entries for the domain. func (c *CoreDNS) List() (map[string][]SrvRecord, error) { var srvRecords = map[string][]SrvRecord{} for _, domainName := range c.domainNames { key := msg.Path(fmt.Sprintf("%s.", domainName), c.prefixPath) records, err := c.list(key) if err != nil { return nil, err } for _, record := range records { if record.Key == "" { continue } srvRecords[record.Key] = append(srvRecords[record.Key], record) } } return srvRecords, nil } // Get - Retrieves DNS records for a bucket. func (c *CoreDNS) Get(bucket string) ([]SrvRecord, error) { var srvRecords []SrvRecord for _, domainName := range c.domainNames { key := msg.Path(fmt.Sprintf("%s.%s.", bucket, domainName), c.prefixPath) records, err := c.list(key) if err != nil { return nil, err } // Make sure we have record.Key is empty // this can only happen when record.Key // has bucket entry with exact prefix // match any record.Key which do not // match the prefixes we skip them. for _, record := range records { if record.Key != "" { continue } srvRecords = append(srvRecords, record) } } if len(srvRecords) == 0 { return nil, ErrNoEntriesFound } return srvRecords, nil } // msgUnPath converts a etcd path to domainname. func msgUnPath(s string) string { ks := strings.Split(strings.Trim(s, etcdPathSeparator), etcdPathSeparator) for i, j := 0, len(ks)-1; i < j; i, j = i+1, j-1 { ks[i], ks[j] = ks[j], ks[i] } return strings.Join(ks, ".") } // Retrieves list of entries under the key passed. // Note that this method fetches entries upto only two levels deep. 
func (c *CoreDNS) list(key string) ([]SrvRecord, error) { ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout) r, err := c.etcdClient.Get(ctx, key, clientv3.WithPrefix()) defer cancel() if err != nil { return nil, err } if r.Count == 0 { key = strings.TrimSuffix(key, etcdPathSeparator) r, err = c.etcdClient.Get(ctx, key) if err != nil { return nil, err } } var srvRecords []SrvRecord for _, n := range r.Kvs { var srvRecord SrvRecord if err = json.Unmarshal([]byte(n.Value), &srvRecord); err != nil { return nil, err } srvRecord.Key = strings.TrimPrefix(string(n.Key), key) srvRecord.Key = strings.TrimSuffix(srvRecord.Key, srvRecord.Host) // Skip non-bucket entry like for a key // /skydns/net/miniocloud/10.0.0.1 that may exist as // dns entry for the server (rather than the bucket // itself). if srvRecord.Key == "" { continue } srvRecord.Key = msgUnPath(srvRecord.Key) srvRecords = append(srvRecords, srvRecord) } sort.Slice(srvRecords, func(i int, j int) bool { return srvRecords[i].Key < srvRecords[j].Key }) return srvRecords, nil } // Put - Adds DNS entries into etcd endpoint in CoreDNS etcd message format. func (c *CoreDNS) Put(bucket string) error { c.Delete(bucket) // delete any existing entries. t := time.Now().UTC() for ip := range c.domainIPs { bucketMsg, err := newCoreDNSMsg(ip, c.domainPort, defaultTTL, t) if err != nil { return err } for _, domainName := range c.domainNames { key := msg.Path(fmt.Sprintf("%s.%s", bucket, domainName), c.prefixPath) key = key + etcdPathSeparator + ip ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout) _, err = c.etcdClient.Put(ctx, key, string(bucketMsg)) defer cancel() if err != nil { ctx, cancel = context.WithTimeout(context.Background(), defaultContextTimeout) c.etcdClient.Delete(ctx, key) defer cancel() return err } } } return nil } // Delete - Removes DNS entries added in Put(). func (c *CoreDNS) Delete(bucket string) error { for _, domainName := range c.domainNames { key := msg.Path(fmt.Sprintf("%s.%s.", bucket, domainName), c.prefixPath) srvRecords, err := c.list(key) if err != nil { return err } for _, record := range srvRecords { dctx, dcancel := context.WithTimeout(context.Background(), defaultContextTimeout) if _, err = c.etcdClient.Delete(dctx, key+etcdPathSeparator+record.Host); err != nil { dcancel() return err } dcancel() } } return nil } // DeleteRecord - Removes a specific DNS entry func (c *CoreDNS) DeleteRecord(record SrvRecord) error { for _, domainName := range c.domainNames { key := msg.Path(fmt.Sprintf("%s.%s.", record.Key, domainName), c.prefixPath) dctx, dcancel := context.WithTimeout(context.Background(), defaultContextTimeout) if _, err := c.etcdClient.Delete(dctx, key+etcdPathSeparator+record.Host); err != nil { dcancel() return err } dcancel() } return nil } // String stringer name for this implementation of dns.Store func (c *CoreDNS) String() string { return "etcdDNS" } // CoreDNS - represents dns config for coredns server. type CoreDNS struct { domainNames []string domainIPs set.StringSet domainPort string prefixPath string etcdClient *clientv3.Client } // EtcdOption - functional options pattern style type EtcdOption func(*CoreDNS) // DomainNames set a list of domain names used by this CoreDNS // client setting, note this will fail if set to empty when // constructor initializes. 
func DomainNames(domainNames []string) EtcdOption { return func(args *CoreDNS) { args.domainNames = domainNames } } // DomainIPs set a list of custom domain IPs, note this will // fail if set to empty when constructor initializes. func DomainIPs(domainIPs set.StringSet) EtcdOption { return func(args *CoreDNS) { args.domainIPs = domainIPs } } // DomainPort - is a string version of server port func DomainPort(domainPort string) EtcdOption { return func(args *CoreDNS) { args.domainPort = domainPort } } // CoreDNSPath - custom prefix on etcd to populate DNS // service records, optional and can be empty. // if empty then c.prefixPath is used i.e "/skydns" func CoreDNSPath(prefix string) EtcdOption { return func(args *CoreDNS) { args.prefixPath = prefix } } // NewCoreDNS - initialize a new coreDNS set/unset values. func NewCoreDNS(cfg clientv3.Config, setters ...EtcdOption) (Store, error) { etcdClient, err := clientv3.New(cfg) if err != nil { return nil, err } args := &CoreDNS{ etcdClient: etcdClient, } for _, setter := range setters { setter(args) } if len(args.domainNames) == 0 || args.domainIPs.IsEmpty() { return nil, errors.New("invalid argument") } // strip ports off of domainIPs domainIPsWithoutPorts := args.domainIPs.ApplyFunc(func(ip string) string { host, _, err := net.SplitHostPort(ip) if err != nil { if strings.Contains(err.Error(), "missing port in address") { host = ip } } return host }) args.domainIPs = domainIPsWithoutPorts return args, nil }
cmd/config/dns/etcd_dns.go
0
https://github.com/minio/minio/commit/4593b146bec40cc062fe921f2d47ca4c0ab98b9a
[ 0.0019785950426012278, 0.0004500114300753921, 0.00016338241402991116, 0.00017254590056836605, 0.0005534435622394085 ]
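msgUnPath above recovers a domain name from an etcd key by trimming the path separators and reversing the segments, so a key stored under net/miniocloud/mybucket becomes mybucket.miniocloud.net. A small self-contained sketch of that reversal (the example key is illustrative; the real code also strips the configured prefix path and the host suffix first):

package main

import (
	"fmt"
	"strings"
)

// msgUnPath reverses the etcd key segments into a domain name, as in the
// CoreDNS helper above.
func msgUnPath(s string) string {
	ks := strings.Split(strings.Trim(s, "/"), "/")
	for i, j := 0, len(ks)-1; i < j; i, j = i+1, j-1 {
		ks[i], ks[j] = ks[j], ks[i]
	}
	return strings.Join(ks, ".")
}

func main() {
	// The prefix path (e.g. /skydns) is assumed to be stripped already by the
	// surrounding record handling; the remaining segments reverse into a DNS name.
	fmt.Println(msgUnPath("/net/miniocloud/mybucket")) // mybucket.miniocloud.net
}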
{ "id": 3, "code_window": [ "\t\t\t\tmetaMu.Lock()\n", "\t\t\t\tmeta.error = err.Error()\n", "\t\t\t\tmeta.status = scanStateError\n", "\t\t\t\tmeta, err = o.updateMetacacheListing(meta, rpc)\n", "\t\t\t\tmetaMu.Unlock()\n", "\t\t\t}\n", "\t\t}\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\t\t\t\tmeta, _ = o.updateMetacacheListing(meta, rpc)\n" ], "file_path": "cmd/metacache-set.go", "type": "replace", "edit_start_line_idx": 774 }
// +build ignore package gorules import "github.com/quasilyte/go-ruleguard/dsl/fluent" // This is a collection of rules for ruleguard: https://github.com/quasilyte/go-ruleguard // Remove extra conversions: mdempsky/unconvert func unconvert(m fluent.Matcher) { m.Match("int($x)").Where(m["x"].Type.Is("int") && !m["x"].Const).Report("unnecessary conversion").Suggest("$x") m.Match("float32($x)").Where(m["x"].Type.Is("float32") && !m["x"].Const).Report("unnecessary conversion").Suggest("$x") m.Match("float64($x)").Where(m["x"].Type.Is("float64") && !m["x"].Const).Report("unnecessary conversion").Suggest("$x") // m.Match("byte($x)").Where(m["x"].Type.Is("byte")).Report("unnecessary conversion").Suggest("$x") // m.Match("rune($x)").Where(m["x"].Type.Is("rune")).Report("unnecessary conversion").Suggest("$x") m.Match("bool($x)").Where(m["x"].Type.Is("bool") && !m["x"].Const).Report("unnecessary conversion").Suggest("$x") m.Match("int8($x)").Where(m["x"].Type.Is("int8") && !m["x"].Const).Report("unnecessary conversion").Suggest("$x") m.Match("int16($x)").Where(m["x"].Type.Is("int16") && !m["x"].Const).Report("unnecessary conversion").Suggest("$x") m.Match("int32($x)").Where(m["x"].Type.Is("int32") && !m["x"].Const).Report("unnecessary conversion").Suggest("$x") m.Match("int64($x)").Where(m["x"].Type.Is("int64") && !m["x"].Const).Report("unnecessary conversion").Suggest("$x") m.Match("uint8($x)").Where(m["x"].Type.Is("uint8") && !m["x"].Const).Report("unnecessary conversion").Suggest("$x") m.Match("uint16($x)").Where(m["x"].Type.Is("uint16") && !m["x"].Const).Report("unnecessary conversion").Suggest("$x") m.Match("uint32($x)").Where(m["x"].Type.Is("uint32") && !m["x"].Const).Report("unnecessary conversion").Suggest("$x") m.Match("uint64($x)").Where(m["x"].Type.Is("uint64") && !m["x"].Const).Report("unnecessary conversion").Suggest("$x") m.Match("time.Duration($x)").Where(m["x"].Type.Is("time.Duration") && !m["x"].Text.Matches("^[0-9]*$")).Report("unnecessary conversion").Suggest("$x") } // Don't use == or != with time.Time // https://github.com/dominikh/go-tools/issues/47 : Wontfix func timeeq(m fluent.Matcher) { m.Match("$t0 == $t1").Where(m["t0"].Type.Is("time.Time")).Report("using == with time.Time") m.Match("$t0 != $t1").Where(m["t0"].Type.Is("time.Time")).Report("using != with time.Time") m.Match(`map[$k]$v`).Where(m["k"].Type.Is("time.Time")).Report("map with time.Time keys are easy to misuse") } // Wrong err in error check func wrongerr(m fluent.Matcher) { m.Match("if $*_, $err0 := $*_; $err1 != nil { $*_ }"). Where(m["err0"].Text == "err" && m["err0"].Type.Is("error") && m["err1"].Text != "err" && m["err1"].Type.Is("error")). Report("maybe wrong err in error check") m.Match("if $*_, $err0 := $*_; $err1 != nil { $*_ }"). Where(m["err0"].Text != "err" && m["err0"].Type.Is("error") && m["err1"].Text == "err" && m["err1"].Type.Is("error")). Report("maybe wrong err in error check") m.Match("if $*_, $err0 = $*_; $err1 != nil { $*_ }"). Where(m["err0"].Text == "err" && m["err0"].Type.Is("error") && m["err1"].Text != "err" && m["err1"].Type.Is("error")). Report("maybe wrong err in error check") m.Match("if $*_, $err0 = $*_; $err1 != nil { $*_ }"). Where(m["err0"].Text != "err" && m["err0"].Type.Is("error") && m["err1"].Text == "err" && m["err1"].Type.Is("error")). Report("maybe wrong err in error check") m.Match("if $*_, $err0 := $*_; $err1 == nil { $*_ }"). Where(m["err0"].Text == "err" && m["err0"].Type.Is("error") && m["err1"].Text != "err" && m["err1"].Type.Is("error")). 
Report("maybe wrong err in error check") m.Match("if $*_, $err0 := $*_; $err1 == nil { $*_ }"). Where(m["err0"].Text != "err" && m["err0"].Type.Is("error") && m["err1"].Text == "err" && m["err1"].Type.Is("error")). Report("maybe wrong err in error check") m.Match("if $*_, $err0 = $*_; $err1 == nil { $*_ }"). Where(m["err0"].Text == "err" && m["err0"].Type.Is("error") && m["err1"].Text != "err" && m["err1"].Type.Is("error")). Report("maybe wrong err in error check") m.Match("if $*_, $err0 = $*_; $err1 == nil { $*_ }"). Where(m["err0"].Text != "err" && m["err0"].Type.Is("error") && m["err1"].Text == "err" && m["err1"].Type.Is("error")). Report("maybe wrong err in error check") m.Match("$*_, $err0 := $*_; if $err1 != nil { $*_ }"). Where(m["err0"].Text == "err" && m["err0"].Type.Is("error") && m["err1"].Text != "err" && m["err1"].Type.Is("error")). Report("maybe wrong err in error check") m.Match("$*_, $err0 := $*_; if $err1 != nil { $*_ }"). Where(m["err0"].Text != "err" && m["err0"].Type.Is("error") && m["err1"].Text == "err" && m["err1"].Type.Is("error")). Report("maybe wrong err in error check") m.Match("$*_, $err0 := $*_; if $err1 == nil { $*_ }"). Where(m["err0"].Text == "err" && m["err0"].Type.Is("error") && m["err1"].Text != "err" && m["err1"].Type.Is("error")). Report("maybe wrong err in error check") m.Match("$*_, $err0 := $*_; if $err1 == nil { $*_ }"). Where(m["err0"].Text != "err" && m["err0"].Type.Is("error") && m["err1"].Text == "err" && m["err1"].Type.Is("error")). Report("maybe wrong err in error check") m.Match("$*_, $err0 = $*_; if $err1 != nil { $*_ }"). Where(m["err0"].Text == "err" && m["err0"].Type.Is("error") && m["err1"].Text != "err" && m["err1"].Type.Is("error")). Report("maybe wrong err in error check") m.Match("$*_, $err0 = $*_; if $err1 != nil { $*_ }"). Where(m["err0"].Text != "err" && m["err0"].Type.Is("error") && m["err1"].Text == "err" && m["err1"].Type.Is("error")). Report("maybe wrong err in error check") m.Match("$*_, $err0 = $*_; if $err1 == nil { $*_ }"). Where(m["err0"].Text == "err" && m["err0"].Type.Is("error") && m["err1"].Text != "err" && m["err1"].Type.Is("error")). Report("maybe wrong err in error check") m.Match("$*_, $err0 = $*_; if $err1 == nil { $*_ }"). Where(m["err0"].Text != "err" && m["err0"].Type.Is("error") && m["err1"].Text == "err" && m["err1"].Type.Is("error")). Report("maybe wrong err in error check") } // err but no an error func errnoterror(m fluent.Matcher) { // Would be easier to check for all err identifiers instead, but then how do we get the type from m[] ? m.Match( "if $*_, err := $x; $err != nil { $*_ } else if $_ { $*_ }", "if $*_, err := $x; $err != nil { $*_ } else { $*_ }", "if $*_, err := $x; $err != nil { $*_ }", "if $*_, err = $x; $err != nil { $*_ } else if $_ { $*_ }", "if $*_, err = $x; $err != nil { $*_ } else { $*_ }", "if $*_, err = $x; $err != nil { $*_ }", "$*_, err := $x; if $err != nil { $*_ } else if $_ { $*_ }", "$*_, err := $x; if $err != nil { $*_ } else { $*_ }", "$*_, err := $x; if $err != nil { $*_ }", "$*_, err = $x; if $err != nil { $*_ } else if $_ { $*_ }", "$*_, err = $x; if $err != nil { $*_ } else { $*_ }", "$*_, err = $x; if $err != nil { $*_ }", ). Where(m["err"].Text == "err" && !m["err"].Type.Is("error") && m["x"].Text != "recover()"). Report("err variable not error type") } // Identical if and else bodies func ifbodythenbody(m fluent.Matcher) { m.Match("if $*_ { $body } else { $body }"). Report("identical if and else bodies") // Lots of false positives. 
// m.Match("if $*_ { $body } else if $*_ { $body }"). // Report("identical if and else bodies") } // Odd inequality: A - B < 0 instead of != // Too many false positives. /* func subtractnoteq(m fluent.Matcher) { m.Match("$a - $b < 0").Report("consider $a != $b") m.Match("$a - $b > 0").Report("consider $a != $b") m.Match("0 < $a - $b").Report("consider $a != $b") m.Match("0 > $a - $b").Report("consider $a != $b") } */ // Self-assignment func selfassign(m fluent.Matcher) { m.Match("$x = $x").Report("useless self-assignment") } // Odd nested ifs func oddnestedif(m fluent.Matcher) { m.Match("if $x { if $x { $*_ }; $*_ }", "if $x == $y { if $x != $y {$*_ }; $*_ }", "if $x != $y { if $x == $y {$*_ }; $*_ }", "if $x { if !$x { $*_ }; $*_ }", "if !$x { if $x { $*_ }; $*_ }"). Report("odd nested ifs") m.Match("for $x { if $x { $*_ }; $*_ }", "for $x == $y { if $x != $y {$*_ }; $*_ }", "for $x != $y { if $x == $y {$*_ }; $*_ }", "for $x { if !$x { $*_ }; $*_ }", "for !$x { if $x { $*_ }; $*_ }"). Report("odd nested for/ifs") } // odd bitwise expressions func oddbitwise(m fluent.Matcher) { m.Match("$x | $x", "$x | ^$x", "^$x | $x"). Report("odd bitwise OR") m.Match("$x & $x", "$x & ^$x", "^$x & $x"). Report("odd bitwise AND") m.Match("$x &^ $x"). Report("odd bitwise AND-NOT") } // odd sequence of if tests with return func ifreturn(m fluent.Matcher) { m.Match("if $x { return $*_ }; if $x {$*_ }").Report("odd sequence of if test") m.Match("if $x { return $*_ }; if !$x {$*_ }").Report("odd sequence of if test") m.Match("if !$x { return $*_ }; if $x {$*_ }").Report("odd sequence of if test") m.Match("if $x == $y { return $*_ }; if $x != $y {$*_ }").Report("odd sequence of if test") m.Match("if $x != $y { return $*_ }; if $x == $y {$*_ }").Report("odd sequence of if test") } func oddifsequence(m fluent.Matcher) { /* m.Match("if $x { $*_ }; if $x {$*_ }").Report("odd sequence of if test") m.Match("if $x == $y { $*_ }; if $y == $x {$*_ }").Report("odd sequence of if tests") m.Match("if $x != $y { $*_ }; if $y != $x {$*_ }").Report("odd sequence of if tests") m.Match("if $x < $y { $*_ }; if $y > $x {$*_ }").Report("odd sequence of if tests") m.Match("if $x <= $y { $*_ }; if $y >= $x {$*_ }").Report("odd sequence of if tests") m.Match("if $x > $y { $*_ }; if $y < $x {$*_ }").Report("odd sequence of if tests") m.Match("if $x >= $y { $*_ }; if $y <= $x {$*_ }").Report("odd sequence of if tests") */ } // odd sequence of nested if tests func nestedifsequence(m fluent.Matcher) { /* m.Match("if $x < $y { if $x >= $y {$*_ }; $*_ }").Report("odd sequence of nested if tests") m.Match("if $x <= $y { if $x > $y {$*_ }; $*_ }").Report("odd sequence of nested if tests") m.Match("if $x > $y { if $x <= $y {$*_ }; $*_ }").Report("odd sequence of nested if tests") m.Match("if $x >= $y { if $x < $y {$*_ }; $*_ }").Report("odd sequence of nested if tests") */ } // odd sequence of assignments func identicalassignments(m fluent.Matcher) { m.Match("$x = $y; $y = $x").Report("odd sequence of assignments") } func oddcompoundop(m fluent.Matcher) { m.Match("$x += $x + $_", "$x += $x - $_"). Report("odd += expression") m.Match("$x -= $x + $_", "$x -= $x - $_"). Report("odd -= expression") } func constswitch(m fluent.Matcher) { m.Match("switch $x { $*_ }", "switch $*_; $x { $*_ }"). Where(m["x"].Const && !m["x"].Text.Matches(`^runtime\.`)). 
Report("constant switch") } func oddcomparisons(m fluent.Matcher) { m.Match( "$x - $y == 0", "$x - $y != 0", "$x - $y < 0", "$x - $y <= 0", "$x - $y > 0", "$x - $y >= 0", "$x ^ $y == 0", "$x ^ $y != 0", ).Report("odd comparison") } func oddmathbits(m fluent.Matcher) { m.Match( "64 - bits.LeadingZeros64($x)", "32 - bits.LeadingZeros32($x)", "16 - bits.LeadingZeros16($x)", "8 - bits.LeadingZeros8($x)", ).Report("odd math/bits expression: use bits.Len*() instead?") } func floateq(m fluent.Matcher) { m.Match( "$x == $y", "$x != $y", ). Where(m["x"].Type.Is("float32") && !m["x"].Const && !m["y"].Text.Matches("0(.0+)?")). Report("floating point tested for equality") m.Match( "$x == $y", "$x != $y", ). Where(m["x"].Type.Is("float64") && !m["x"].Const && !m["y"].Text.Matches("0(.0+)?")). Report("floating point tested for equality") m.Match("switch $x { $*_ }", "switch $*_; $x { $*_ }"). Where(m["x"].Type.Is("float32")). Report("floating point as switch expression") m.Match("switch $x { $*_ }", "switch $*_; $x { $*_ }"). Where(m["x"].Type.Is("float64")). Report("floating point as switch expression") } func badexponent(m fluent.Matcher) { m.Match( "2 ^ $x", "10 ^ $x", ). Report("caret (^) is not exponentiation") } func floatloop(m fluent.Matcher) { m.Match( "for $i := $x; $i < $y; $i += $z { $*_ }", "for $i = $x; $i < $y; $i += $z { $*_ }", ). Where(m["i"].Type.Is("float64")). Report("floating point for loop counter") m.Match( "for $i := $x; $i < $y; $i += $z { $*_ }", "for $i = $x; $i < $y; $i += $z { $*_ }", ). Where(m["i"].Type.Is("float32")). Report("floating point for loop counter") } func urlredacted(m fluent.Matcher) { m.Match( "log.Println($x, $*_)", "log.Println($*_, $x, $*_)", "log.Println($*_, $x)", "log.Printf($*_, $x, $*_)", "log.Printf($*_, $x)", "log.Println($x, $*_)", "log.Println($*_, $x, $*_)", "log.Println($*_, $x)", "log.Printf($*_, $x, $*_)", "log.Printf($*_, $x)", ). Where(m["x"].Type.Is("*url.URL")). Report("consider $x.Redacted() when outputting URLs") } func sprinterr(m fluent.Matcher) { m.Match(`fmt.Sprint($err)`, `fmt.Sprintf("%s", $err)`, `fmt.Sprintf("%v", $err)`, ). Where(m["err"].Type.Is("error")). Report("maybe call $err.Error() instead of fmt.Sprint()?") } func largeloopcopy(m fluent.Matcher) { m.Match( `for $_, $v := range $_ { $*_ }`, ). Where(m["v"].Type.Size > 512). Report(`loop copies large value each iteration`) } func joinpath(m fluent.Matcher) { m.Match( `strings.Join($_, "/")`, `strings.Join($_, "\\")`, "strings.Join($_, `\\`)", ). Report(`did you mean path.Join() or filepath.Join() ?`) } func readfull(m fluent.Matcher) { m.Match(`$n, $err := io.ReadFull($_, $slice) if $err != nil || $n != len($slice) { $*_ }`, `$n, $err := io.ReadFull($_, $slice) if $n != len($slice) || $err != nil { $*_ }`, `$n, $err = io.ReadFull($_, $slice) if $err != nil || $n != len($slice) { $*_ }`, `$n, $err = io.ReadFull($_, $slice) if $n != len($slice) || $err != nil { $*_ }`, `if $n, $err := io.ReadFull($_, $slice); $n != len($slice) || $err != nil { $*_ }`, `if $n, $err := io.ReadFull($_, $slice); $err != nil || $n != len($slice) { $*_ }`, `if $n, $err = io.ReadFull($_, $slice); $n != len($slice) || $err != nil { $*_ }`, `if $n, $err = io.ReadFull($_, $slice); $err != nil || $n != len($slice) { $*_ }`, ).Report("io.ReadFull() returns err == nil iff n == len(slice)") } func nilerr(m fluent.Matcher) { m.Match( `if err == nil { return err }`, `if err == nil { return $*_, err }`, ). 
Report(`return nil error instead of nil value`) } func mailaddress(m fluent.Matcher) { m.Match( "fmt.Sprintf(`\"%s\" <%s>`, $NAME, $EMAIL)", "fmt.Sprintf(`\"%s\"<%s>`, $NAME, $EMAIL)", "fmt.Sprintf(`%s <%s>`, $NAME, $EMAIL)", "fmt.Sprintf(`%s<%s>`, $NAME, $EMAIL)", `fmt.Sprintf("\"%s\"<%s>", $NAME, $EMAIL)`, `fmt.Sprintf("\"%s\" <%s>", $NAME, $EMAIL)`, `fmt.Sprintf("%s<%s>", $NAME, $EMAIL)`, `fmt.Sprintf("%s <%s>", $NAME, $EMAIL)`, ). Report("use net/mail Address.String() instead of fmt.Sprintf()"). Suggest("(&mail.Address{Name:$NAME, Address:$EMAIL}).String()") } func errnetclosed(m fluent.Matcher) { m.Match( `strings.Contains($err.Error(), $text)`, ). Where(m["text"].Text.Matches("\".*closed network connection.*\"")). Report(`String matching against error texts is fragile; use net.ErrClosed instead`). Suggest(`errors.Is($err, net.ErrClosed)`) } func httpheaderadd(m fluent.Matcher) { m.Match( `$H.Add($KEY, $VALUE)`, ). Where(m["H"].Type.Is("http.Header")). Report("use http.Header.Set method instead of Add to overwrite all existing header values"). Suggest(`$H.Set($KEY, $VALUE)`) } func hmacnew(m fluent.Matcher) { m.Match("hmac.New(func() hash.Hash { return $x }, $_)", `$f := func() hash.Hash { return $x } $*_ hmac.New($f, $_)`, ).Where(m["x"].Pure). Report("invalid hash passed to hmac.New()") } func writestring(m fluent.Matcher) { m.Match(`io.WriteString($w, string($b))`). Where(m["b"].Type.Is("[]byte")). Suggest("$w.Write($b)") } func badlock(m fluent.Matcher) { // Shouldn't give many false positives without type filter // as Lock+Unlock pairs in combination with defer gives us pretty // a good chance to guess correctly. If we constrain the type to sync.Mutex // then it'll be harder to match embedded locks and custom methods // that may forward the call to the sync.Mutex (or other synchronization primitive). m.Match(`$mu.Lock(); defer $mu.RUnlock()`).Report(`maybe $mu.RLock() was intended?`) m.Match(`$mu.RLock(); defer $mu.Unlock()`).Report(`maybe $mu.Lock() was intended?`) }
ruleguard.rules.go
1
https://github.com/minio/minio/commit/4593b146bec40cc062fe921f2d47ca4c0ab98b9a
[ 0.003202756168320775, 0.00024017832765821368, 0.00016335236432496458, 0.0001737733109621331, 0.0004373062402009964 ]
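The timeeq rule above flags == and != on time.Time because two values can denote the same instant yet differ as structs (monotonic clock reading, location), so time.Time.Equal is the reliable comparison. A short sketch of the pitfall the rule guards against:

package main

import (
	"fmt"
	"time"
)

func main() {
	t1 := time.Now()
	t2 := t1.Round(0) // drops the monotonic clock reading; same wall-clock instant

	fmt.Println(t1 == t2)     // usually false: struct comparison sees different internals
	fmt.Println(t1.Equal(t2)) // true: compares the instant itself
}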
{ "id": 3, "code_window": [ "\t\t\t\tmetaMu.Lock()\n", "\t\t\t\tmeta.error = err.Error()\n", "\t\t\t\tmeta.status = scanStateError\n", "\t\t\t\tmeta, err = o.updateMetacacheListing(meta, rpc)\n", "\t\t\t\tmetaMu.Unlock()\n", "\t\t\t}\n", "\t\t}\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\t\t\t\tmeta, _ = o.updateMetacacheListing(meta, rpc)\n" ], "file_path": "cmd/metacache-set.go", "type": "replace", "edit_start_line_idx": 774 }
/* * MinIO Cloud Storage, (C) 2015, 2016, 2017, 2018 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package cmd import ( "context" "encoding/base64" "encoding/xml" "fmt" "net/http" "net/url" "path" "strconv" "strings" "time" xhttp "github.com/minio/minio/cmd/http" "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/handlers" ) const ( // RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision. maxObjectList = metacacheBlockSize - (metacacheBlockSize / 10) // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse. maxDeleteList = 10000 // Limit number of objects deleted in a delete call. maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse. maxPartsList = 10000 // Limit number of parts in a listPartsResponse. ) // LocationResponse - format for location response. type LocationResponse struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LocationConstraint" json:"-"` Location string `xml:",chardata"` } // ListVersionsResponse - format for list bucket versions response. type ListVersionsResponse struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListVersionsResult" json:"-"` Name string Prefix string KeyMarker string // When response is truncated (the IsTruncated element value in the response // is true), you can use the key name in this field as marker in the subsequent // request to get next set of objects. Server lists objects in alphabetical // order Note: This element is returned only if you have delimiter request parameter // specified. If response does not include the NextMaker and it is truncated, // you can use the value of the last Key in the response as the marker in the // subsequent request to get the next set of object keys. NextKeyMarker string `xml:"NextKeyMarker,omitempty"` // When the number of responses exceeds the value of MaxKeys, // NextVersionIdMarker specifies the first object version not // returned that satisfies the search criteria. Use this value // for the version-id-marker request parameter in a subsequent request. NextVersionIDMarker string `xml:"NextVersionIdMarker"` // Marks the last version of the Key returned in a truncated response. VersionIDMarker string `xml:"VersionIdMarker"` MaxKeys int Delimiter string // A flag that indicates whether or not ListObjects returned all of the results // that satisfied the search criteria. IsTruncated bool CommonPrefixes []CommonPrefix Versions []ObjectVersion // Encoding type used to encode object keys in the response. EncodingType string `xml:"EncodingType,omitempty"` } // ListObjectsResponse - format for list objects response. 
type ListObjectsResponse struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult" json:"-"` Name string Prefix string Marker string // When response is truncated (the IsTruncated element value in the response // is true), you can use the key name in this field as marker in the subsequent // request to get next set of objects. Server lists objects in alphabetical // order Note: This element is returned only if you have delimiter request parameter // specified. If response does not include the NextMaker and it is truncated, // you can use the value of the last Key in the response as the marker in the // subsequent request to get the next set of object keys. NextMarker string `xml:"NextMarker,omitempty"` MaxKeys int Delimiter string // A flag that indicates whether or not ListObjects returned all of the results // that satisfied the search criteria. IsTruncated bool Contents []Object CommonPrefixes []CommonPrefix // Encoding type used to encode object keys in the response. EncodingType string `xml:"EncodingType,omitempty"` } // ListObjectsV2Response - format for list objects response. type ListObjectsV2Response struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult" json:"-"` Name string Prefix string StartAfter string `xml:"StartAfter,omitempty"` // When response is truncated (the IsTruncated element value in the response // is true), you can use the key name in this field as marker in the subsequent // request to get next set of objects. Server lists objects in alphabetical // order Note: This element is returned only if you have delimiter request parameter // specified. If response does not include the NextMaker and it is truncated, // you can use the value of the last Key in the response as the marker in the // subsequent request to get the next set of object keys. ContinuationToken string `xml:"ContinuationToken,omitempty"` NextContinuationToken string `xml:"NextContinuationToken,omitempty"` KeyCount int MaxKeys int Delimiter string // A flag that indicates whether or not ListObjects returned all of the results // that satisfied the search criteria. IsTruncated bool Contents []Object CommonPrefixes []CommonPrefix // Encoding type used to encode object keys in the response. EncodingType string `xml:"EncodingType,omitempty"` } // Part container for part metadata. type Part struct { PartNumber int LastModified string ETag string Size int64 } // ListPartsResponse - format for list parts response. type ListPartsResponse struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListPartsResult" json:"-"` Bucket string Key string UploadID string `xml:"UploadId"` Initiator Initiator Owner Owner // The class of storage used to store the object. StorageClass string PartNumberMarker int NextPartNumberMarker int MaxParts int IsTruncated bool // List of parts. Parts []Part `xml:"Part"` } // ListMultipartUploadsResponse - format for list multipart uploads response. type ListMultipartUploadsResponse struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListMultipartUploadsResult" json:"-"` Bucket string KeyMarker string UploadIDMarker string `xml:"UploadIdMarker"` NextKeyMarker string NextUploadIDMarker string `xml:"NextUploadIdMarker"` Delimiter string Prefix string EncodingType string `xml:"EncodingType,omitempty"` MaxUploads int IsTruncated bool // List of pending uploads. Uploads []Upload `xml:"Upload"` // Delimed common prefixes. 
CommonPrefixes []CommonPrefix } // ListBucketsResponse - format for list buckets response type ListBucketsResponse struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResult" json:"-"` Owner Owner // Container for one or more buckets. Buckets struct { Buckets []Bucket `xml:"Bucket"` } // Buckets are nested } // Upload container for in progress multipart upload type Upload struct { Key string UploadID string `xml:"UploadId"` Initiator Initiator Owner Owner StorageClass string Initiated string } // CommonPrefix container for prefix response in ListObjectsResponse type CommonPrefix struct { Prefix string } // Bucket container for bucket metadata type Bucket struct { Name string CreationDate string // time string of format "2006-01-02T15:04:05.000Z" } // ObjectVersion container for object version metadata type ObjectVersion struct { Object IsLatest bool VersionID string `xml:"VersionId"` isDeleteMarker bool } // MarshalXML - marshal ObjectVersion func (o ObjectVersion) MarshalXML(e *xml.Encoder, start xml.StartElement) error { if o.isDeleteMarker { start.Name.Local = "DeleteMarker" } else { start.Name.Local = "Version" } type objectVersionWrapper ObjectVersion return e.EncodeElement(objectVersionWrapper(o), start) } // StringMap is a map[string]string. type StringMap map[string]string // MarshalXML - StringMap marshals into XML. func (s StringMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error { tokens := []xml.Token{start} for key, value := range s { t := xml.StartElement{} t.Name = xml.Name{ Space: "", Local: key, } tokens = append(tokens, t, xml.CharData(value), xml.EndElement{Name: t.Name}) } tokens = append(tokens, xml.EndElement{ Name: start.Name, }) for _, t := range tokens { if err := e.EncodeToken(t); err != nil { return err } } // flush to ensure tokens are written return e.Flush() } // Object container for object metadata type Object struct { Key string LastModified string // time string of format "2006-01-02T15:04:05.000Z" ETag string Size int64 // Owner of the object. Owner Owner // The class of storage used to store the object. StorageClass string // UserMetadata user-defined metadata UserMetadata StringMap `xml:"UserMetadata,omitempty"` } // CopyObjectResponse container returns ETag and LastModified of the successfully copied object type CopyObjectResponse struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyObjectResult" json:"-"` LastModified string // time string of format "2006-01-02T15:04:05.000Z" ETag string // md5sum of the copied object. } // CopyObjectPartResponse container returns ETag and LastModified of the successfully copied object type CopyObjectPartResponse struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyPartResult" json:"-"` LastModified string // time string of format "2006-01-02T15:04:05.000Z" ETag string // md5sum of the copied object part. 
} // Initiator inherit from Owner struct, fields are same type Initiator Owner // Owner - bucket owner/principal type Owner struct { ID string DisplayName string } // InitiateMultipartUploadResponse container for InitiateMultiPartUpload response, provides uploadID to start MultiPart upload type InitiateMultipartUploadResponse struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InitiateMultipartUploadResult" json:"-"` Bucket string Key string UploadID string `xml:"UploadId"` } // CompleteMultipartUploadResponse container for completed multipart upload response type CompleteMultipartUploadResponse struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUploadResult" json:"-"` Location string Bucket string Key string ETag string } // DeleteError structure. type DeleteError struct { Code string Message string Key string VersionID string `xml:"VersionId"` } // DeleteObjectsResponse container for multiple object deletes. type DeleteObjectsResponse struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"` // Collection of all deleted objects DeletedObjects []DeletedObject `xml:"Deleted,omitempty"` // Collection of errors deleting certain objects. Errors []DeleteError `xml:"Error,omitempty"` } // PostResponse container for POST object request when success_action_status is set to 201 type PostResponse struct { Bucket string Key string ETag string Location string } // returns "https" if the tls boolean is true, "http" otherwise. func getURLScheme(tls bool) string { if tls { return httpsScheme } return httpScheme } // getObjectLocation gets the fully qualified URL of an object. func getObjectLocation(r *http.Request, domains []string, bucket, object string) string { // unit tests do not have host set. if r.Host == "" { return path.Clean(r.URL.Path) } proto := handlers.GetSourceScheme(r) if proto == "" { proto = getURLScheme(globalIsTLS) } u := &url.URL{ Host: r.Host, Path: path.Join(SlashSeparator, bucket, object), Scheme: proto, } // If domain is set then we need to use bucket DNS style. for _, domain := range domains { if strings.HasPrefix(r.Host, bucket+"."+domain) { u.Path = path.Join(SlashSeparator, object) break } } return u.String() } // generates ListBucketsResponse from array of BucketInfo which can be // serialized to match XML and JSON API spec output. func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse { listbuckets := make([]Bucket, 0, len(buckets)) var data = ListBucketsResponse{} var owner = Owner{} owner.ID = globalMinioDefaultOwnerID for _, bucket := range buckets { var listbucket = Bucket{} listbucket.Name = bucket.Name listbucket.CreationDate = bucket.Created.UTC().Format(iso8601TimeFormat) listbuckets = append(listbuckets, listbucket) } data.Owner = owner data.Buckets.Buckets = listbuckets return data } // generates an ListBucketVersions response for the said bucket with other enumerated options. 
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse { versions := make([]ObjectVersion, 0, len(resp.Objects)) var owner = Owner{} var data = ListVersionsResponse{} owner.ID = globalMinioDefaultOwnerID for _, object := range resp.Objects { var content = ObjectVersion{} if object.Name == "" { continue } content.Key = s3EncodeName(object.Name, encodingType) content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat) if object.ETag != "" { content.ETag = "\"" + object.ETag + "\"" } content.Size = object.Size if object.StorageClass != "" { content.StorageClass = object.StorageClass } else { content.StorageClass = globalMinioDefaultStorageClass } content.Owner = owner content.VersionID = object.VersionID if content.VersionID == "" { content.VersionID = nullVersionID } content.IsLatest = object.IsLatest content.isDeleteMarker = object.DeleteMarker versions = append(versions, content) } data.Name = bucket data.Versions = versions data.EncodingType = encodingType data.Prefix = s3EncodeName(prefix, encodingType) data.KeyMarker = s3EncodeName(marker, encodingType) data.Delimiter = s3EncodeName(delimiter, encodingType) data.MaxKeys = maxKeys data.NextKeyMarker = s3EncodeName(resp.NextMarker, encodingType) data.NextVersionIDMarker = resp.NextVersionIDMarker data.VersionIDMarker = versionIDMarker data.IsTruncated = resp.IsTruncated prefixes := make([]CommonPrefix, 0, len(resp.Prefixes)) for _, prefix := range resp.Prefixes { var prefixItem = CommonPrefix{} prefixItem.Prefix = s3EncodeName(prefix, encodingType) prefixes = append(prefixes, prefixItem) } data.CommonPrefixes = prefixes return data } // generates an ListObjectsV1 response for the said bucket with other enumerated options. func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse { contents := make([]Object, 0, len(resp.Objects)) var owner = Owner{} var data = ListObjectsResponse{} owner.ID = globalMinioDefaultOwnerID for _, object := range resp.Objects { var content = Object{} if object.Name == "" { continue } content.Key = s3EncodeName(object.Name, encodingType) content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat) if object.ETag != "" { content.ETag = "\"" + object.ETag + "\"" } content.Size = object.Size if object.StorageClass != "" { content.StorageClass = object.StorageClass } else { content.StorageClass = globalMinioDefaultStorageClass } content.Owner = owner contents = append(contents, content) } data.Name = bucket data.Contents = contents data.EncodingType = encodingType data.Prefix = s3EncodeName(prefix, encodingType) data.Marker = s3EncodeName(marker, encodingType) data.Delimiter = s3EncodeName(delimiter, encodingType) data.MaxKeys = maxKeys data.NextMarker = s3EncodeName(resp.NextMarker, encodingType) data.IsTruncated = resp.IsTruncated prefixes := make([]CommonPrefix, 0, len(resp.Prefixes)) for _, prefix := range resp.Prefixes { var prefixItem = CommonPrefix{} prefixItem.Prefix = s3EncodeName(prefix, encodingType) prefixes = append(prefixes, prefixItem) } data.CommonPrefixes = prefixes return data } // generates an ListObjectsV2 response for the said bucket with other enumerated options. 
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response { contents := make([]Object, 0, len(objects)) var owner = Owner{} var data = ListObjectsV2Response{} if fetchOwner { owner.ID = globalMinioDefaultOwnerID } for _, object := range objects { var content = Object{} if object.Name == "" { continue } content.Key = s3EncodeName(object.Name, encodingType) content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat) if object.ETag != "" { content.ETag = "\"" + object.ETag + "\"" } content.Size = object.Size if object.StorageClass != "" { content.StorageClass = object.StorageClass } else { content.StorageClass = globalMinioDefaultStorageClass } content.Owner = owner if metadata { content.UserMetadata = make(StringMap) for k, v := range CleanMinioInternalMetadataKeys(object.UserDefined) { if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) { // Do not need to send any internal metadata // values to client. continue } // https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w if strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentLength) || strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentMD5) { continue } content.UserMetadata[k] = v } } contents = append(contents, content) } data.Name = bucket data.Contents = contents data.EncodingType = encodingType data.StartAfter = s3EncodeName(startAfter, encodingType) data.Delimiter = s3EncodeName(delimiter, encodingType) data.Prefix = s3EncodeName(prefix, encodingType) data.MaxKeys = maxKeys data.ContinuationToken = base64.StdEncoding.EncodeToString([]byte(token)) data.NextContinuationToken = base64.StdEncoding.EncodeToString([]byte(nextToken)) data.IsTruncated = isTruncated commonPrefixes := make([]CommonPrefix, 0, len(prefixes)) for _, prefix := range prefixes { var prefixItem = CommonPrefix{} prefixItem.Prefix = s3EncodeName(prefix, encodingType) commonPrefixes = append(commonPrefixes, prefixItem) } data.CommonPrefixes = commonPrefixes data.KeyCount = len(data.Contents) + len(data.CommonPrefixes) return data } // generates CopyObjectResponse from etag and lastModified time. func generateCopyObjectResponse(etag string, lastModified time.Time) CopyObjectResponse { return CopyObjectResponse{ ETag: "\"" + etag + "\"", LastModified: lastModified.UTC().Format(iso8601TimeFormat), } } // generates CopyObjectPartResponse from etag and lastModified time. func generateCopyObjectPartResponse(etag string, lastModified time.Time) CopyObjectPartResponse { return CopyObjectPartResponse{ ETag: "\"" + etag + "\"", LastModified: lastModified.UTC().Format(iso8601TimeFormat), } } // generates InitiateMultipartUploadResponse for given bucket, key and uploadID. func generateInitiateMultipartUploadResponse(bucket, key, uploadID string) InitiateMultipartUploadResponse { return InitiateMultipartUploadResponse{ Bucket: bucket, Key: key, UploadID: uploadID, } } // generates CompleteMultipartUploadResponse for given bucket, key, location and ETag. func generateCompleteMultpartUploadResponse(bucket, key, location, etag string) CompleteMultipartUploadResponse { return CompleteMultipartUploadResponse{ Location: location, Bucket: bucket, Key: key, // AWS S3 quotes the ETag in XML, make sure we are compatible here. ETag: "\"" + etag + "\"", } } // generates ListPartsResponse from ListPartsInfo. 
func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) ListPartsResponse { listPartsResponse := ListPartsResponse{} listPartsResponse.Bucket = partsInfo.Bucket listPartsResponse.Key = s3EncodeName(partsInfo.Object, encodingType) listPartsResponse.UploadID = partsInfo.UploadID listPartsResponse.StorageClass = globalMinioDefaultStorageClass listPartsResponse.Initiator.ID = globalMinioDefaultOwnerID listPartsResponse.Owner.ID = globalMinioDefaultOwnerID listPartsResponse.MaxParts = partsInfo.MaxParts listPartsResponse.PartNumberMarker = partsInfo.PartNumberMarker listPartsResponse.IsTruncated = partsInfo.IsTruncated listPartsResponse.NextPartNumberMarker = partsInfo.NextPartNumberMarker listPartsResponse.Parts = make([]Part, len(partsInfo.Parts)) for index, part := range partsInfo.Parts { newPart := Part{} newPart.PartNumber = part.PartNumber newPart.ETag = "\"" + part.ETag + "\"" newPart.Size = part.Size newPart.LastModified = part.LastModified.UTC().Format(iso8601TimeFormat) listPartsResponse.Parts[index] = newPart } return listPartsResponse } // generates ListMultipartUploadsResponse for given bucket and ListMultipartsInfo. func generateListMultipartUploadsResponse(bucket string, multipartsInfo ListMultipartsInfo, encodingType string) ListMultipartUploadsResponse { listMultipartUploadsResponse := ListMultipartUploadsResponse{} listMultipartUploadsResponse.Bucket = bucket listMultipartUploadsResponse.Delimiter = s3EncodeName(multipartsInfo.Delimiter, encodingType) listMultipartUploadsResponse.IsTruncated = multipartsInfo.IsTruncated listMultipartUploadsResponse.EncodingType = encodingType listMultipartUploadsResponse.Prefix = s3EncodeName(multipartsInfo.Prefix, encodingType) listMultipartUploadsResponse.KeyMarker = s3EncodeName(multipartsInfo.KeyMarker, encodingType) listMultipartUploadsResponse.NextKeyMarker = s3EncodeName(multipartsInfo.NextKeyMarker, encodingType) listMultipartUploadsResponse.MaxUploads = multipartsInfo.MaxUploads listMultipartUploadsResponse.NextUploadIDMarker = multipartsInfo.NextUploadIDMarker listMultipartUploadsResponse.UploadIDMarker = multipartsInfo.UploadIDMarker listMultipartUploadsResponse.CommonPrefixes = make([]CommonPrefix, len(multipartsInfo.CommonPrefixes)) for index, commonPrefix := range multipartsInfo.CommonPrefixes { listMultipartUploadsResponse.CommonPrefixes[index] = CommonPrefix{ Prefix: s3EncodeName(commonPrefix, encodingType), } } listMultipartUploadsResponse.Uploads = make([]Upload, len(multipartsInfo.Uploads)) for index, upload := range multipartsInfo.Uploads { newUpload := Upload{} newUpload.UploadID = upload.UploadID newUpload.Key = s3EncodeName(upload.Object, encodingType) newUpload.Initiated = upload.Initiated.UTC().Format(iso8601TimeFormat) listMultipartUploadsResponse.Uploads[index] = newUpload } return listMultipartUploadsResponse } // generate multi objects delete response. 
func generateMultiDeleteResponse(quiet bool, deletedObjects []DeletedObject, errs []DeleteError) DeleteObjectsResponse { deleteResp := DeleteObjectsResponse{} if !quiet { deleteResp.DeletedObjects = deletedObjects } if len(errs) == len(deletedObjects) { deleteResp.DeletedObjects = nil } deleteResp.Errors = errs return deleteResp } func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) { setCommonHeaders(w) if mType != mimeNone { w.Header().Set(xhttp.ContentType, string(mType)) } w.Header().Set(xhttp.ContentLength, strconv.Itoa(len(response))) w.WriteHeader(statusCode) if response != nil { w.Write(response) w.(http.Flusher).Flush() } } // mimeType represents various MIME type used API responses. type mimeType string const ( // Means no response type. mimeNone mimeType = "" // Means response type is JSON. mimeJSON mimeType = "application/json" // Means response type is XML. mimeXML mimeType = "application/xml" ) // writeSuccessResponseJSON writes success headers and response if any, // with content-type set to `application/json`. func writeSuccessResponseJSON(w http.ResponseWriter, response []byte) { writeResponse(w, http.StatusOK, response, mimeJSON) } // writeSuccessResponseXML writes success headers and response if any, // with content-type set to `application/xml`. func writeSuccessResponseXML(w http.ResponseWriter, response []byte) { writeResponse(w, http.StatusOK, response, mimeXML) } // writeSuccessNoContent writes success headers with http status 204 func writeSuccessNoContent(w http.ResponseWriter) { writeResponse(w, http.StatusNoContent, nil, mimeNone) } // writeRedirectSeeOther writes Location header with http status 303 func writeRedirectSeeOther(w http.ResponseWriter, location string) { w.Header().Set(xhttp.Location, location) writeResponse(w, http.StatusSeeOther, nil, mimeNone) } func writeSuccessResponseHeadersOnly(w http.ResponseWriter) { writeResponse(w, http.StatusOK, nil, mimeNone) } // writeErrorRespone writes error headers func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL, browser bool) { switch err.Code { case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum": // Set retry-after header to indicate user-agents to retry request after 120secs. // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After w.Header().Set(xhttp.RetryAfter, "120") case "InvalidRegion": err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalServerRegion) case "AuthorizationHeaderMalformed": err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalServerRegion) case "AccessDenied": // The request is from browser and also if browser // is enabled we need to redirect. if browser && globalBrowserEnabled { w.Header().Set(xhttp.Location, minioReservedBucketPath+reqURL.Path) w.WriteHeader(http.StatusTemporaryRedirect) return } } // Generate error response. errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path, w.Header().Get(xhttp.AmzRequestID), globalDeploymentID) encodedErrorResponse := encodeResponse(errorResponse) writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeXML) } func writeErrorResponseHeadersOnly(w http.ResponseWriter, err APIError) { writeResponse(w, err.HTTPStatusCode, nil, mimeNone) } func writeErrorResponseString(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) { // Generate string error response. 
writeResponse(w, err.HTTPStatusCode, []byte(err.Description), mimeNone) } // writeErrorResponseJSON - writes error response in JSON format; // useful for admin APIs. func writeErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) { // Generate error response. errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path, w.Header().Get(xhttp.AmzRequestID), globalDeploymentID) encodedErrorResponse := encodeResponseJSON(errorResponse) writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON) } // writeCustomErrorResponseJSON - similar to writeErrorResponseJSON, // but accepts the error message directly (this allows messages to be // dynamically generated.) func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIError, errBody string, reqURL *url.URL) { reqInfo := logger.GetReqInfo(ctx) errorResponse := APIErrorResponse{ Code: err.Code, Message: errBody, Resource: reqURL.Path, BucketName: reqInfo.BucketName, Key: reqInfo.ObjectName, RequestID: w.Header().Get(xhttp.AmzRequestID), HostID: globalDeploymentID, } encodedErrorResponse := encodeResponseJSON(errorResponse) writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON) }
cmd/api-response.go
0
https://github.com/minio/minio/commit/4593b146bec40cc062fe921f2d47ca4c0ab98b9a
[ 0.0009771394543349743, 0.00019253947539255023, 0.0001605888392077759, 0.00017230059893336147, 0.00009466397750657052 ]
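The response structs above lean on encoding/xml: the XMLName field's tag carries both the S3 namespace and the element name, timestamps use the iso8601TimeFormat layout, and ETags are pre-quoted to match AWS S3 output. A trimmed-down sketch showing how such a struct marshals (the struct below is a stand-in, not the actual CopyObjectResponse):

package main

import (
	"encoding/xml"
	"fmt"
	"time"
)

// copyResult is a reduced stand-in for the CopyObjectResponse struct above.
type copyResult struct {
	XMLName      xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyObjectResult"`
	LastModified string
	ETag         string
}

func main() {
	resp := copyResult{
		LastModified: time.Date(2016, 4, 29, 18, 30, 38, 0, time.UTC).Format("2006-01-02T15:04:05.000Z"),
		ETag:         `"d41d8cd98f00b204e9800998ecf8427e"`,
	}
	out, err := xml.MarshalIndent(resp, "", "  ")
	if err != nil {
		panic(err)
	}
	// Emits <CopyObjectResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">...</CopyObjectResult>
	fmt.Println(xml.Header + string(out))
}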
{ "id": 3, "code_window": [ "\t\t\t\tmetaMu.Lock()\n", "\t\t\t\tmeta.error = err.Error()\n", "\t\t\t\tmeta.status = scanStateError\n", "\t\t\t\tmeta, err = o.updateMetacacheListing(meta, rpc)\n", "\t\t\t\tmetaMu.Unlock()\n", "\t\t\t}\n", "\t\t}\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\t\t\t\tmeta, _ = o.updateMetacacheListing(meta, rpc)\n" ], "file_path": "cmd/metacache-set.go", "type": "replace", "edit_start_line_idx": 774 }
/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package storageclass

import (
	"encoding/json"
	"errors"
	"fmt"
	"strconv"
	"strings"

	"github.com/minio/minio/cmd/config"
	"github.com/minio/minio/pkg/env"
)

// Standard constants for all storage class
const (
	// Reduced redundancy storage class
	RRS = "REDUCED_REDUNDANCY"
	// Standard storage class
	STANDARD = "STANDARD"
	// DMA storage class
	DMA = "DMA"

	// Valid values are "write" and "read+write"
	DMAWrite     = "write"
	DMAReadWrite = "read+write"
)

// Standard constats for config info storage class
const (
	ClassStandard = "standard"
	ClassRRS      = "rrs"
	ClassDMA      = "dma"

	// Reduced redundancy storage class environment variable
	RRSEnv = "MINIO_STORAGE_CLASS_RRS"
	// Standard storage class environment variable
	StandardEnv = "MINIO_STORAGE_CLASS_STANDARD"
	// DMA storage class environment variable
	DMAEnv = "MINIO_STORAGE_CLASS_DMA"

	// Supported storage class scheme is EC
	schemePrefix = "EC"

	// Min parity disks
	minParityDisks = 2

	// Default RRS parity is always minimum parity.
	defaultRRSParity = minParityDisks

	// Default DMA value
	defaultDMA = DMAWrite
)

// DefaultKVS - default storage class config
var (
	DefaultKVS = config.KVS{
		config.KV{
			Key:   ClassStandard,
			Value: "",
		},
		config.KV{
			Key:   ClassRRS,
			Value: "EC:2",
		},
		config.KV{
			Key:   ClassDMA,
			Value: defaultDMA,
		},
	}
)

// StorageClass - holds storage class information
type StorageClass struct {
	Parity int
	DMA    string
}

// Config storage class configuration
type Config struct {
	Standard StorageClass `json:"standard"`
	RRS      StorageClass `json:"rrs"`
	DMA      StorageClass `json:"dma"`
}

// UnmarshalJSON - Validate SS and RRS parity when unmarshalling JSON.
func (sCfg *Config) UnmarshalJSON(data []byte) error {
	type Alias Config
	aux := &struct {
		*Alias
	}{
		Alias: (*Alias)(sCfg),
	}
	return json.Unmarshal(data, &aux)
}

// IsValid - returns true if input string is a valid
// storage class kind supported.
func IsValid(sc string) bool {
	return sc == RRS || sc == STANDARD || sc == DMA
}

// UnmarshalText unmarshals storage class from its textual form into
// storageClass structure.
func (sc *StorageClass) UnmarshalText(b []byte) error {
	scStr := string(b)
	if scStr == "" {
		return nil
	}
	if scStr == DMAWrite {
		sc.DMA = DMAWrite
		return nil
	}
	if scStr == DMAReadWrite {
		sc.DMA = DMAReadWrite
		return nil
	}
	s, err := parseStorageClass(scStr)
	if err != nil {
		return err
	}
	sc.Parity = s.Parity
	return nil
}

// MarshalText - marshals storage class string.
func (sc *StorageClass) MarshalText() ([]byte, error) {
	if sc.Parity != 0 {
		return []byte(fmt.Sprintf("%s:%d", schemePrefix, sc.Parity)), nil
	}
	return []byte(sc.DMA), nil
}

func (sc *StorageClass) String() string {
	if sc.Parity != 0 {
		return fmt.Sprintf("%s:%d", schemePrefix, sc.Parity)
	}
	return sc.DMA
}

// Parses given storageClassEnv and returns a storageClass structure.
// Supported Storage Class format is "Scheme:Number of parity disks".
// Currently only supported scheme is "EC".
func parseStorageClass(storageClassEnv string) (sc StorageClass, err error) {
	s := strings.Split(storageClassEnv, ":")

	// only two elements allowed in the string - "scheme" and "number of parity disks"
	if len(s) > 2 {
		return StorageClass{}, config.ErrStorageClassValue(nil).Msg("Too many sections in " + storageClassEnv)
	} else if len(s) < 2 {
		return StorageClass{}, config.ErrStorageClassValue(nil).Msg("Too few sections in " + storageClassEnv)
	}

	// only allowed scheme is "EC"
	if s[0] != schemePrefix {
		return StorageClass{}, config.ErrStorageClassValue(nil).Msg("Unsupported scheme " + s[0] + ". Supported scheme is EC")
	}

	// Number of parity disks should be integer
	parityDisks, err := strconv.Atoi(s[1])
	if err != nil {
		return StorageClass{}, config.ErrStorageClassValue(err)
	}

	return StorageClass{
		Parity: parityDisks,
	}, nil
}

// Validates the parity disks.
func validateParity(ssParity, rrsParity, setDriveCount int) (err error) {
	if ssParity == 0 && rrsParity == 0 {
		return nil
	}

	// SS parity disks should be greater than or equal to minParityDisks.
	// Parity below minParityDisks is not supported.
	if ssParity < minParityDisks {
		return fmt.Errorf("Standard storage class parity %d should be greater than or equal to %d", ssParity, minParityDisks)
	}

	// RRS parity disks should be greater than or equal to minParityDisks.
	// Parity below minParityDisks is not supported.
	if rrsParity < minParityDisks {
		return fmt.Errorf("Reduced redundancy storage class parity %d should be greater than or equal to %d", rrsParity, minParityDisks)
	}

	if ssParity > setDriveCount/2 {
		return fmt.Errorf("Standard storage class parity %d should be less than or equal to %d", ssParity, setDriveCount/2)
	}

	if rrsParity > setDriveCount/2 {
		return fmt.Errorf("Reduced redundancy storage class parity %d should be less than or equal to %d", rrsParity, setDriveCount/2)
	}

	if ssParity > 0 && rrsParity > 0 {
		if ssParity < rrsParity {
			return fmt.Errorf("Standard storage class parity disks %d should be greater than or equal to Reduced redundancy storage class parity disks %d", ssParity, rrsParity)
		}
	}
	return nil
}

// GetParityForSC - Returns the data and parity drive count based on storage class
// If storage class is set using the env vars MINIO_STORAGE_CLASS_RRS and MINIO_STORAGE_CLASS_STANDARD
// or config.json fields
// -- corresponding values are returned
// If storage class is not set during startup, default values are returned
// -- Default for Reduced Redundancy Storage class is, parity = 2 and data = N-Parity
// -- Default for Standard Storage class is, parity = N/2, data = N/2
// If storage class is empty
// -- standard storage class is assumed and corresponding data and parity is returned
func (sCfg Config) GetParityForSC(sc string) (parity int) {
	switch strings.TrimSpace(sc) {
	case RRS:
		// set the rrs parity if available
		if sCfg.RRS.Parity == 0 {
			return defaultRRSParity
		}
		return sCfg.RRS.Parity
	default:
		return sCfg.Standard.Parity
	}
}

// GetDMA - returns DMA configuration.
func (sCfg Config) GetDMA() string {
	return sCfg.DMA.DMA
}

// Enabled returns if etcd is enabled.
func Enabled(kvs config.KVS) bool {
	ssc := kvs.Get(ClassStandard)
	rrsc := kvs.Get(ClassRRS)
	return ssc != "" || rrsc != ""
}

// LookupConfig - lookup storage class config and override with valid environment settings if any.
func LookupConfig(kvs config.KVS, setDriveCount int) (cfg Config, err error) {
	cfg = Config{}
	cfg.Standard.Parity = setDriveCount / 2
	cfg.RRS.Parity = defaultRRSParity

	if err = config.CheckValidKeys(config.StorageClassSubSys, kvs, DefaultKVS); err != nil {
		return Config{}, err
	}

	ssc := env.Get(StandardEnv, kvs.Get(ClassStandard))
	rrsc := env.Get(RRSEnv, kvs.Get(ClassRRS))
	dma := env.Get(DMAEnv, kvs.Get(ClassDMA))

	// Check for environment variables and parse into storageClass struct
	if ssc != "" {
		cfg.Standard, err = parseStorageClass(ssc)
		if err != nil {
			return Config{}, err
		}
	}
	if cfg.Standard.Parity == 0 {
		cfg.Standard.Parity = setDriveCount / 2
	}

	if rrsc != "" {
		cfg.RRS, err = parseStorageClass(rrsc)
		if err != nil {
			return Config{}, err
		}
	}
	if cfg.RRS.Parity == 0 {
		cfg.RRS.Parity = defaultRRSParity
	}

	if dma == "" {
		dma = defaultDMA
	}
	if dma != DMAReadWrite && dma != DMAWrite {
		return Config{}, errors.New(`valid dma values are "read-write" and "write"`)
	}
	cfg.DMA.DMA = dma

	// Validation is done after parsing both the storage classes. This is needed because we need one
	// storage class value to deduce the correct value of the other storage class.
	if err = validateParity(cfg.Standard.Parity, cfg.RRS.Parity, setDriveCount); err != nil {
		return Config{}, err
	}

	return cfg, nil
}
cmd/config/storageclass/storage-class.go
0
https://github.com/minio/minio/commit/4593b146bec40cc062fe921f2d47ca4c0ab98b9a
[ 0.00022693038044963032, 0.00017124747682828456, 0.0001615197688806802, 0.00016872254491318017, 0.00001107995558413677 ]
{ "id": 3, "code_window": [ "\t\t\t\tmetaMu.Lock()\n", "\t\t\t\tmeta.error = err.Error()\n", "\t\t\t\tmeta.status = scanStateError\n", "\t\t\t\tmeta, err = o.updateMetacacheListing(meta, rpc)\n", "\t\t\t\tmetaMu.Unlock()\n", "\t\t\t}\n", "\t\t}\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep" ], "after_edit": [ "\t\t\t\tmeta, _ = o.updateMetacacheListing(meta, rpc)\n" ], "file_path": "cmd/metacache-set.go", "type": "replace", "edit_start_line_idx": 774 }
/*
 * Minio Cloud Storage, (C) 2019 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bufio"
	"context"
	"errors"
	"math/rand"
	"net/http"
	"sort"
	"strconv"
	"time"

	"github.com/gorilla/mux"
	"github.com/minio/minio/pkg/dsync"
)

const (
	// Lock maintenance interval.
	lockMaintenanceInterval = 30 * time.Second

	// Lock validity check interval.
	lockValidityCheckInterval = 5 * time.Second
)

// To abstract a node over network.
type lockRESTServer struct {
	ll *localLocker
}

func (l *lockRESTServer) writeErrorResponse(w http.ResponseWriter, err error) {
	w.WriteHeader(http.StatusForbidden)
	w.Write([]byte(err.Error()))
}

// IsValid - To authenticate and verify the time difference.
func (l *lockRESTServer) IsValid(w http.ResponseWriter, r *http.Request) bool {
	if l.ll == nil {
		l.writeErrorResponse(w, errLockNotInitialized)
		return false
	}

	if err := storageServerRequestValidate(r); err != nil {
		l.writeErrorResponse(w, err)
		return false
	}
	return true
}

func getLockArgs(r *http.Request) (args dsync.LockArgs, err error) {
	quorum, err := strconv.Atoi(r.URL.Query().Get(lockRESTQuorum))
	if err != nil {
		return args, err
	}

	args = dsync.LockArgs{
		Owner:  r.URL.Query().Get(lockRESTOwner),
		UID:    r.URL.Query().Get(lockRESTUID),
		Source: r.URL.Query().Get(lockRESTSource),
		Quorum: quorum,
	}

	var resources []string
	bio := bufio.NewScanner(r.Body)
	for bio.Scan() {
		resources = append(resources, bio.Text())
	}

	if err := bio.Err(); err != nil {
		return args, err
	}

	sort.Strings(resources)
	args.Resources = resources
	return args, nil
}

// HealthHandler returns success if request is authenticated.
func (l *lockRESTServer) HealthHandler(w http.ResponseWriter, r *http.Request) {
	l.IsValid(w, r)
}

// LockHandler - Acquires a lock.
func (l *lockRESTServer) LockHandler(w http.ResponseWriter, r *http.Request) {
	if !l.IsValid(w, r) {
		l.writeErrorResponse(w, errors.New("Invalid request"))
		return
	}

	args, err := getLockArgs(r)
	if err != nil {
		l.writeErrorResponse(w, err)
		return
	}

	success, err := l.ll.Lock(r.Context(), args)
	if err == nil && !success {
		err = errLockConflict
	}
	if err != nil {
		l.writeErrorResponse(w, err)
		return
	}
}

// UnlockHandler - releases the acquired lock.
func (l *lockRESTServer) UnlockHandler(w http.ResponseWriter, r *http.Request) {
	if !l.IsValid(w, r) {
		l.writeErrorResponse(w, errors.New("Invalid request"))
		return
	}

	args, err := getLockArgs(r)
	if err != nil {
		l.writeErrorResponse(w, err)
		return
	}

	_, err = l.ll.Unlock(args)
	// Ignore the Unlock() "reply" return value because if err == nil, "reply" is always true
	// Consequently, if err != nil, reply is always false
	if err != nil {
		l.writeErrorResponse(w, err)
		return
	}
}

// LockHandler - Acquires an RLock.
func (l *lockRESTServer) RLockHandler(w http.ResponseWriter, r *http.Request) {
	if !l.IsValid(w, r) {
		l.writeErrorResponse(w, errors.New("Invalid request"))
		return
	}

	args, err := getLockArgs(r)
	if err != nil {
		l.writeErrorResponse(w, err)
		return
	}

	success, err := l.ll.RLock(r.Context(), args)
	if err == nil && !success {
		err = errLockConflict
	}
	if err != nil {
		l.writeErrorResponse(w, err)
		return
	}
}

// RUnlockHandler - releases the acquired read lock.
func (l *lockRESTServer) RUnlockHandler(w http.ResponseWriter, r *http.Request) {
	if !l.IsValid(w, r) {
		l.writeErrorResponse(w, errors.New("Invalid request"))
		return
	}

	args, err := getLockArgs(r)
	if err != nil {
		l.writeErrorResponse(w, err)
		return
	}

	// Ignore the RUnlock() "reply" return value because if err == nil, "reply" is always true.
	// Consequently, if err != nil, reply is always false
	if _, err = l.ll.RUnlock(args); err != nil {
		l.writeErrorResponse(w, err)
		return
	}
}

// ExpiredHandler - query expired lock status.
func (l *lockRESTServer) ExpiredHandler(w http.ResponseWriter, r *http.Request) {
	if !l.IsValid(w, r) {
		l.writeErrorResponse(w, errors.New("Invalid request"))
		return
	}

	args, err := getLockArgs(r)
	if err != nil {
		l.writeErrorResponse(w, err)
		return
	}

	expired, err := l.ll.Expired(r.Context(), args)
	if err != nil {
		l.writeErrorResponse(w, err)
		return
	}
	if !expired {
		l.writeErrorResponse(w, errLockNotExpired)
		return
	}
}

// nameLockRequesterInfoPair is a helper type for lock maintenance
type nameLockRequesterInfoPair struct {
	name string
	lri  lockRequesterInfo
}

// getLongLivedLocks returns locks that are older than a certain time and
// have not been 'checked' for validity too soon enough
func getLongLivedLocks(interval time.Duration) []nameLockRequesterInfoPair {
	nlrip := []nameLockRequesterInfoPair{}
	globalLockServer.mutex.Lock()
	for name, lriArray := range globalLockServer.lockMap {
		for idx := range lriArray {
			// Check whether enough time has gone by since last check
			if time.Since(lriArray[idx].TimeLastCheck) >= interval {
				nlrip = append(nlrip, nameLockRequesterInfoPair{
					name: name,
					lri:  lriArray[idx],
				})
				lriArray[idx].TimeLastCheck = UTCNow()
			}
		}
	}
	globalLockServer.mutex.Unlock()
	return nlrip
}

// lockMaintenance loops over locks that have been active for some time and checks back
// with the original server whether it is still alive or not
//
// Following logic inside ignores the errors generated for Dsync.Active operation.
// - server at client down
// - some network error (and server is up normally)
//
// We will ignore the error, and we will retry later to get a resolve on this lock
func lockMaintenance(ctx context.Context, interval time.Duration) error {
	objAPI := newObjectLayerFn()
	if objAPI == nil {
		return nil
	}

	z, ok := objAPI.(*erasureServerPools)
	if !ok {
		return nil
	}

	type nlock struct {
		locks  int
		writer bool
	}

	updateNlocks := func(nlripsMap map[string]nlock, name string, writer bool) {
		nlk, ok := nlripsMap[name]
		if !ok {
			nlripsMap[name] = nlock{
				locks:  1,
				writer: writer,
			}
		} else {
			nlk.locks++
			nlripsMap[name] = nlk
		}
	}

	// Validate if long lived locks are indeed clean.
	// Get list of long lived locks to check for staleness.
	nlrips := getLongLivedLocks(interval)
	nlripsMap := make(map[string]nlock, len(nlrips))
	for _, nlrip := range nlrips {
		for _, c := range z.GetAllLockers() {
			ctx, cancel := context.WithTimeout(GlobalContext, 5*time.Second)

			// Call back to original server verify whether the lock is
			// still active (based on name & uid)
			expired, err := c.Expired(ctx, dsync.LockArgs{
				Owner:     nlrip.lri.Owner,
				UID:       nlrip.lri.UID,
				Resources: []string{nlrip.name},
			})
			cancel()
			if err != nil {
				updateNlocks(nlripsMap, nlrip.name, nlrip.lri.Writer)
				continue
			}

			if !expired {
				updateNlocks(nlripsMap, nlrip.name, nlrip.lri.Writer)
			}
		}

		// less than the quorum, we have locks expired.
		if nlripsMap[nlrip.name].locks < nlrip.lri.Quorum {
			// Purge the stale entry if it exists.
			globalLockServer.removeEntryIfExists(nlrip)
		}
	}

	return nil
}

// Start lock maintenance from all lock servers.
func startLockMaintenance(ctx context.Context) {
	// Wait until the object API is ready
	// no need to start the lock maintenance
	// if ObjectAPI is not initialized.
	for {
		objAPI := newObjectLayerFn()
		if objAPI == nil {
			time.Sleep(time.Second)
			continue
		}
		break
	}

	// Initialize a new ticker with a minute between each ticks.
	ticker := time.NewTicker(lockMaintenanceInterval)
	// Stop the timer upon service closure and cleanup the go-routine.
	defer ticker.Stop()

	r := rand.New(rand.NewSource(UTCNow().UnixNano()))
	for {
		// Verifies every minute for locks held more than 2 minutes.
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			// Start with random sleep time, so as to avoid
			// "synchronous checks" between servers
			duration := time.Duration(r.Float64() * float64(lockMaintenanceInterval))
			time.Sleep(duration)

			if err := lockMaintenance(ctx, lockValidityCheckInterval); err != nil {
				// Sleep right after an error.
				duration := time.Duration(r.Float64() * float64(lockMaintenanceInterval))
				time.Sleep(duration)
			}
		}
	}
}

// registerLockRESTHandlers - register lock rest router.
func registerLockRESTHandlers(router *mux.Router) {
	lockServer := &lockRESTServer{
		ll: newLocker(),
	}

	subrouter := router.PathPrefix(lockRESTPrefix).Subrouter()
	subrouter.Methods(http.MethodPost).Path(lockRESTVersionPrefix + lockRESTMethodHealth).HandlerFunc(httpTraceHdrs(lockServer.HealthHandler))
	subrouter.Methods(http.MethodPost).Path(lockRESTVersionPrefix + lockRESTMethodLock).HandlerFunc(httpTraceHdrs(lockServer.LockHandler))
	subrouter.Methods(http.MethodPost).Path(lockRESTVersionPrefix + lockRESTMethodRLock).HandlerFunc(httpTraceHdrs(lockServer.RLockHandler))
	subrouter.Methods(http.MethodPost).Path(lockRESTVersionPrefix + lockRESTMethodUnlock).HandlerFunc(httpTraceHdrs(lockServer.UnlockHandler))
	subrouter.Methods(http.MethodPost).Path(lockRESTVersionPrefix + lockRESTMethodRUnlock).HandlerFunc(httpTraceHdrs(lockServer.RUnlockHandler))
	subrouter.Methods(http.MethodPost).Path(lockRESTVersionPrefix + lockRESTMethodExpired).HandlerFunc(httpTraceAll(lockServer.ExpiredHandler))

	globalLockServer = lockServer.ll

	go startLockMaintenance(GlobalContext)
}
cmd/lock-rest-server.go
0
https://github.com/minio/minio/commit/4593b146bec40cc062fe921f2d47ca4c0ab98b9a
[ 0.0009473938262090087, 0.00027055703685618937, 0.0001623003336135298, 0.00018178379104938358, 0.00019193673506379128 ]