| hunk | file | file_path | label | commit_url | dependency_score |
|---|---|---|---|---|---|
| dict | string (length 0 to 11.8M) | string (length 2 to 234) | int64 (0 to 1) | string (length 74 to 103) | sequence (length 5) |
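Each row below pairs one hunk with a candidate file from the same repository. A minimal sketch of how such a row might be decoded in Go follows; the field names are taken from the JSON keys and columns visible in the records, while the concrete Go types and the `row`/`hunk` struct names are assumptions for illustration only.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// hunk mirrors the JSON object shown in the "hunk" column of each record.
// Field names come from the records themselves; the Go types are assumed.
type hunk struct {
	ID               int      `json:"id"`
	CodeWindow       []string `json:"code_window"`
	Labels           []string `json:"labels"`
	AfterEdit        []string `json:"after_edit"`
	FilePath         string   `json:"file_path"`
	Type             string   `json:"type"`
	EditStartLineIdx int      `json:"edit_start_line_idx"`
}

// row mirrors one full record: the hunk plus a candidate file, its label,
// the source commit URL and the dependency scores.
type row struct {
	Hunk            hunk      `json:"hunk"`
	File            string    `json:"file"`
	FilePath        string    `json:"file_path"`
	Label           int64     `json:"label"`
	CommitURL       string    `json:"commit_url"`
	DependencyScore []float64 `json:"dependency_score"`
}

func main() {
	raw := `{"hunk": {"id": 4, "labels": ["keep", "replace"], "type": "replace", "edit_start_line_idx": 1000},
	         "file_path": "cmd/metacache-set.go", "label": 1,
	         "dependency_score": [0.1, 0.2, 0.3, 0.4, 0.5]}`
	var r row
	if err := json.Unmarshal([]byte(raw), &r); err != nil {
		panic(err)
	}
	fmt.Printf("hunk %d -> %s (label %d, %d scores)\n", r.Hunk.ID, r.FilePath, r.Label, len(r.DependencyScore))
}
```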
{
"id": 4,
"code_window": [
"\tif offset == 0 && globalStorageClass.GetDMA() == storageclass.DMAReadWrite {\n",
"\t\tfile, err = disk.OpenFileDirectIO(filePath, os.O_RDONLY, 0666)\n",
"\t} else {\n",
"\t\t// Open the fileile fileor reading.\n",
"\t\tfile, err = os.Open(filePath)\n",
"\t}\n",
"\tif err != nil {\n",
"\t\tswitch {\n",
"\t\tcase osIsNotExist(err):\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t// Open the file for reading.\n"
],
"file_path": "cmd/xl-storage.go",
"type": "replace",
"edit_start_line_idx": 1000
} | /*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"context"
"encoding/gob"
"encoding/json"
"errors"
"fmt"
"io"
"strconv"
"strings"
"sync"
"time"
jsoniter "github.com/json-iterator/go"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/color"
"github.com/minio/minio/pkg/console"
"github.com/minio/minio/pkg/hash"
)
type listPathOptions struct {
// ID of the listing.
// This will be used to persist the list.
ID string
// Bucket of the listing.
Bucket string
// Directory inside the bucket.
BaseDir string
// Scan/return only content with prefix.
Prefix string
// FilterPrefix will return only results with this prefix when scanning.
// Should never contain a slash.
// Prefix should still be set.
FilterPrefix string
// Marker to resume listing.
// The response will be the first entry AFTER this object name.
Marker string
// Limit the number of results.
Limit int
// The number of disks to ask. Special values:
// 0 uses default number of disks.
// -1 use at least 50% of disks or at least the default number.
AskDisks int
// InclDeleted will keep all entries where latest version is a delete marker.
InclDeleted bool
// Scan recursively.
// If false only main directory will be scanned.
// Should always be true if Separator is not SlashSeparator.
Recursive bool
// Separator to use.
Separator string
// Create indicates that the lister should not attempt to load an existing cache.
Create bool
// CurrentCycle indicates the current bloom cycle.
// Will be used if a new scan is started.
CurrentCycle uint64
// OldestCycle indicates the oldest cycle acceptable.
OldestCycle uint64
// Include pure directories.
IncludeDirectories bool
// Transient is set if the cache is transient due to an error or being a reserved bucket.
// This means the cache metadata will not be persisted on disk.
// A transient result will never be returned from the cache so knowing the list id is required.
Transient bool
// discardResult will not persist the cache to storage.
// When the initial results are returned listing will be canceled.
discardResult bool
}
func init() {
gob.Register(listPathOptions{})
}
// newMetacache constructs a new metacache from the options.
func (o listPathOptions) newMetacache() metacache {
return metacache{
id: o.ID,
bucket: o.Bucket,
root: o.BaseDir,
recursive: o.Recursive,
status: scanStateStarted,
error: "",
started: UTCNow(),
lastHandout: UTCNow(),
lastUpdate: UTCNow(),
ended: time.Time{},
startedCycle: o.CurrentCycle,
endedCycle: 0,
dataVersion: metacacheStreamVersion,
filter: o.FilterPrefix,
}
}
func (o *listPathOptions) debugf(format string, data ...interface{}) {
if serverDebugLog {
console.Debugf(format+"\n", data...)
}
}
func (o *listPathOptions) debugln(data ...interface{}) {
if serverDebugLog {
console.Debugln(data...)
}
}
// gatherResults will collect all results on the input channel and filter results according to the options.
// Caller should close the channel when done.
// The returned function will return the results once there is enough or input is closed.
func (o *listPathOptions) gatherResults(in <-chan metaCacheEntry) func() (metaCacheEntriesSorted, error) {
var resultsDone = make(chan metaCacheEntriesSorted)
// Copy so we can mutate
resCh := resultsDone
resErr := io.EOF
go func() {
var results metaCacheEntriesSorted
for entry := range in {
if resCh == nil {
// past limit
continue
}
if !o.IncludeDirectories && entry.isDir() {
continue
}
o.debugln("gather got:", entry.name)
if o.Marker != "" && entry.name <= o.Marker {
o.debugln("pre marker")
continue
}
if !strings.HasPrefix(entry.name, o.Prefix) {
o.debugln("not in prefix")
continue
}
if !o.Recursive && !entry.isInDir(o.Prefix, o.Separator) {
o.debugln("not in dir", o.Prefix, o.Separator)
continue
}
if !o.InclDeleted && entry.isObject() && entry.isLatestDeletemarker() {
o.debugln("latest is delete marker")
continue
}
if o.Limit > 0 && results.len() >= o.Limit {
// We have enough and we have more.
// Do not return io.EOF
if resCh != nil {
resErr = nil
resCh <- results
resCh = nil
}
continue
}
o.debugln("adding...")
results.o = append(results.o, entry)
}
if resCh != nil {
resErr = io.EOF
resCh <- results
}
}()
return func() (metaCacheEntriesSorted, error) {
return <-resultsDone, resErr
}
}
// findFirstPart will find the part with 0 being the first that corresponds to the marker in the options.
// io.ErrUnexpectedEOF is returned if the place containing the marker hasn't been scanned yet.
// io.EOF indicates the marker is beyond the end of the stream and does not exist.
func (o *listPathOptions) findFirstPart(fi FileInfo) (int, error) {
search := o.Marker
if search == "" {
search = o.Prefix
}
if search == "" {
return 0, nil
}
o.debugln("searching for ", search)
var tmp metacacheBlock
var json = jsoniter.ConfigCompatibleWithStandardLibrary
i := 0
for {
partKey := fmt.Sprintf("%s-metacache-part-%d", ReservedMetadataPrefixLower, i)
v, ok := fi.Metadata[partKey]
if !ok {
o.debugln("no match in metadata, waiting")
return -1, io.ErrUnexpectedEOF
}
err := json.Unmarshal([]byte(v), &tmp)
if err != nil {
logger.LogIf(context.Background(), err)
return -1, err
}
if tmp.First == "" && tmp.Last == "" && tmp.EOS {
return 0, errFileNotFound
}
if tmp.First >= search {
o.debugln("First >= search", v)
return i, nil
}
if tmp.Last >= search {
o.debugln("Last >= search", v)
return i, nil
}
if tmp.EOS {
o.debugln("no match, at EOS", v)
return -3, io.EOF
}
o.debugln("First ", tmp.First, "<", search, " search", i)
i++
}
}
// updateMetacacheListing will update the metacache listing.
func (o *listPathOptions) updateMetacacheListing(m metacache, rpc *peerRESTClient) (metacache, error) {
if o.Transient {
return localMetacacheMgr.getTransient().updateCacheEntry(m)
}
if rpc == nil {
return localMetacacheMgr.updateCacheEntry(m)
}
return rpc.UpdateMetacacheListing(context.Background(), m)
}
func getMetacacheBlockInfo(fi FileInfo, block int) (*metacacheBlock, error) {
var tmp metacacheBlock
partKey := fmt.Sprintf("%s-metacache-part-%d", ReservedMetadataPrefixLower, block)
v, ok := fi.Metadata[partKey]
if !ok {
return nil, io.ErrUnexpectedEOF
}
return &tmp, json.Unmarshal([]byte(v), &tmp)
}
const metacachePrefix = ".metacache"
func metacachePrefixForID(bucket, id string) string {
return pathJoin(bucketMetaPrefix, bucket, metacachePrefix, id)
}
// objectPath returns the object path of the cache.
func (o *listPathOptions) objectPath(block int) string {
return pathJoin(metacachePrefixForID(o.Bucket, o.ID), "block-"+strconv.Itoa(block)+".s2")
}
func (o *listPathOptions) SetFilter() {
switch {
case metacacheSharePrefix:
return
case o.CurrentCycle != o.OldestCycle:
// We have a clean bloom filter
return
case o.Prefix == o.BaseDir:
// No additional prefix
return
}
// Remove basedir.
o.FilterPrefix = strings.TrimPrefix(o.Prefix, o.BaseDir)
// Remove leading and trailing slashes.
o.FilterPrefix = strings.Trim(o.FilterPrefix, slashSeparator)
if strings.Contains(o.FilterPrefix, slashSeparator) {
// Sanity check, should not happen.
o.FilterPrefix = ""
}
}
// filter will apply the options and return the number of objects requested by the limit.
// Will return io.EOF if there are no more entries with the same filter.
// The last entry can be used as a marker to resume the listing.
func (r *metacacheReader) filter(o listPathOptions) (entries metaCacheEntriesSorted, err error) {
// Forward to prefix, if any
err = r.forwardTo(o.Prefix)
if err != nil {
return entries, err
}
if o.Marker != "" {
err = r.forwardTo(o.Marker)
if err != nil {
return entries, err
}
next, err := r.peek()
if err != nil {
return entries, err
}
if next.name == o.Marker {
err := r.skip(1)
if err != nil {
return entries, err
}
}
}
o.debugln("forwarded to ", o.Prefix, "marker:", o.Marker, "sep:", o.Separator)
// Filter
if !o.Recursive {
entries.o = make(metaCacheEntries, 0, o.Limit)
pastPrefix := false
err := r.readFn(func(entry metaCacheEntry) bool {
if o.Prefix != "" && !strings.HasPrefix(entry.name, o.Prefix) {
// We are past the prefix, don't continue.
pastPrefix = true
return false
}
if !o.IncludeDirectories && entry.isDir() {
return true
}
if !entry.isInDir(o.Prefix, o.Separator) {
return true
}
if !o.InclDeleted && entry.isObject() && entry.isLatestDeletemarker() {
return entries.len() < o.Limit
}
entries.o = append(entries.o, entry)
return entries.len() < o.Limit
})
if (err != nil && err.Error() == io.EOF.Error()) || pastPrefix || r.nextEOF() {
return entries, io.EOF
}
return entries, err
}
// We should not need to filter more.
return r.readN(o.Limit, o.InclDeleted, o.IncludeDirectories, o.Prefix)
}
func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOptions) (entries metaCacheEntriesSorted, err error) {
retries := 0
rpc := globalNotificationSys.restClientFromHash(o.Bucket)
for {
select {
case <-ctx.Done():
return entries, ctx.Err()
default:
}
// If many failures, check the cache state.
if retries > 10 {
err := o.checkMetacacheState(ctx, rpc)
if err != nil {
return entries, fmt.Errorf("remote listing canceled: %w", err)
}
retries = 1
}
const retryDelay = 500 * time.Millisecond
// Load first part metadata...
// All operations are performed without locks, so we must be careful and allow for failures.
// Read metadata associated with the object from a disk.
if retries > 0 {
disks := er.getOnlineDisks()
if len(disks) == 0 {
time.Sleep(retryDelay)
retries++
continue
}
_, err := disks[0].ReadVersion(ctx, minioMetaBucket, o.objectPath(0), "", false)
if err != nil {
time.Sleep(retryDelay)
retries++
continue
}
}
// Read metadata associated with the object from all disks.
fi, metaArr, onlineDisks, err := er.getObjectFileInfo(ctx, minioMetaBucket, o.objectPath(0), ObjectOptions{}, true)
if err != nil {
switch toObjectErr(err, minioMetaBucket, o.objectPath(0)).(type) {
case ObjectNotFound:
retries++
time.Sleep(retryDelay)
continue
case InsufficientReadQuorum:
retries++
time.Sleep(retryDelay)
continue
default:
return entries, fmt.Errorf("reading first part metadata: %w", err)
}
}
partN, err := o.findFirstPart(fi)
switch {
case err == nil:
case errors.Is(err, io.ErrUnexpectedEOF):
if retries == 10 {
err := o.checkMetacacheState(ctx, rpc)
if err != nil {
return entries, fmt.Errorf("remote listing canceled: %w", err)
}
retries = -1
}
retries++
time.Sleep(retryDelay)
continue
case errors.Is(err, io.EOF):
return entries, io.EOF
}
// We got a stream to start at.
loadedPart := 0
buf := bufferPool.Get().(*bytes.Buffer)
defer func() {
buf.Reset()
bufferPool.Put(buf)
}()
for {
select {
case <-ctx.Done():
return entries, ctx.Err()
default:
}
if partN != loadedPart {
if retries > 10 {
err := o.checkMetacacheState(ctx, rpc)
if err != nil {
return entries, fmt.Errorf("waiting for next part %d: %w", partN, err)
}
retries = 1
}
if retries > 0 {
// Load from one disk only
disks := er.getOnlineDisks()
if len(disks) == 0 {
time.Sleep(retryDelay)
retries++
continue
}
_, err := disks[0].ReadVersion(ctx, minioMetaBucket, o.objectPath(partN), "", false)
if err != nil {
time.Sleep(retryDelay)
retries++
continue
}
}
// Load first part metadata...
fi, metaArr, onlineDisks, err = er.getObjectFileInfo(ctx, minioMetaBucket, o.objectPath(partN), ObjectOptions{}, true)
if err != nil {
time.Sleep(retryDelay)
retries++
continue
}
loadedPart = partN
bi, err := getMetacacheBlockInfo(fi, partN)
logger.LogIf(ctx, err)
if err == nil {
if bi.pastPrefix(o.Prefix) {
return entries, io.EOF
}
}
}
buf.Reset()
err := er.getObjectWithFileInfo(ctx, minioMetaBucket, o.objectPath(partN), 0, fi.Size, buf, fi, metaArr, onlineDisks)
if err != nil {
switch toObjectErr(err, minioMetaBucket, o.objectPath(partN)).(type) {
case ObjectNotFound:
retries++
time.Sleep(retryDelay)
continue
case InsufficientReadQuorum:
retries++
time.Sleep(retryDelay)
continue
default:
logger.LogIf(ctx, err)
return entries, err
}
}
tmp, err := newMetacacheReader(buf)
if err != nil {
return entries, err
}
e, err := tmp.filter(o)
entries.o = append(entries.o, e.o...)
if o.Limit > 0 && entries.len() > o.Limit {
entries.truncate(o.Limit)
return entries, nil
}
if err == nil {
// We stopped within the listing, we are done for now...
return entries, nil
}
if !errors.Is(err, io.EOF) {
logger.LogIf(ctx, err)
return entries, err
}
// We finished at the end of the block.
// And should not expect any more results.
bi, err := getMetacacheBlockInfo(fi, partN)
logger.LogIf(ctx, err)
if err != nil || bi.EOS {
// We are done and there are no more parts.
return entries, io.EOF
}
if bi.endedPrefix(o.Prefix) {
// Nothing more for prefix.
return entries, io.EOF
}
partN++
retries = 0
}
}
}
func (er erasureObjects) SetDriveCount() int {
return er.setDriveCount
}
// Will return io.EOF if continuing would not yield more results.
func (er *erasureObjects) listPath(ctx context.Context, o listPathOptions) (entries metaCacheEntriesSorted, err error) {
o.debugf(color.Green("listPath:")+" with options: %#v", o)
// See if we have the listing stored.
if !o.Create && !o.discardResult {
entries, err := er.streamMetadataParts(ctx, o)
if IsErr(err, []error{
nil,
context.Canceled,
context.DeadlineExceeded,
}...) {
// Expected good errors; we don't need to return an error.
return entries, nil
}
if !errors.Is(err, io.EOF) { // io.EOF is expected and should be returned but no need to log it.
// Log and return errors on unexpected errors.
logger.LogIf(ctx, err)
}
return entries, err
}
meta := o.newMetacache()
rpc := globalNotificationSys.restClientFromHash(o.Bucket)
var metaMu sync.Mutex
o.debugln(color.Green("listPath:")+" scanning bucket:", o.Bucket, "basedir:", o.BaseDir, "prefix:", o.Prefix, "marker:", o.Marker)
// Disconnect from call above, but cancel on exit.
ctx, cancel := context.WithCancel(GlobalContext)
// We need to ask disks.
disks := er.getOnlineDisks()
defer func() {
o.debugln(color.Green("listPath:")+" returning:", entries.len(), "err:", err)
if err != nil && !errors.Is(err, io.EOF) {
go func(err string) {
metaMu.Lock()
if meta.status != scanStateError {
meta.error = err
meta.status = scanStateError
}
meta, _ = o.updateMetacacheListing(meta, rpc)
metaMu.Unlock()
}(err.Error())
cancel()
}
}()
askDisks := o.AskDisks
listingQuorum := askDisks - 1
// Special case: ask all disks if the drive count is 4
if askDisks == -1 || er.SetDriveCount() == 4 {
askDisks = len(disks) // with 'strict' quorum list on all online disks.
listingQuorum = getReadQuorum(er.SetDriveCount())
}
if len(disks) < askDisks {
err = InsufficientReadQuorum{}
logger.LogIf(ctx, fmt.Errorf("listPath: Insufficient disks, %d of %d needed are available", len(disks), askDisks))
cancel()
return
}
// Select askDisks random disks.
if len(disks) > askDisks {
disks = disks[:askDisks]
}
// Create output for our results.
var cacheCh chan metaCacheEntry
if !o.discardResult {
cacheCh = make(chan metaCacheEntry, metacacheBlockSize)
}
// Create filter for results.
filterCh := make(chan metaCacheEntry, 100)
filteredResults := o.gatherResults(filterCh)
closeChannels := func() {
if !o.discardResult {
close(cacheCh)
}
close(filterCh)
}
// Cancel listing on return if non-saved list.
if o.discardResult {
defer cancel()
}
go func() {
defer cancel()
// Save continuous updates
go func() {
var err error
ticker := time.NewTicker(10 * time.Second)
defer ticker.Stop()
var exit bool
for !exit {
select {
case <-ticker.C:
case <-ctx.Done():
exit = true
}
metaMu.Lock()
meta.endedCycle = intDataUpdateTracker.current()
meta, err = o.updateMetacacheListing(meta, rpc)
if meta.status == scanStateError {
cancel()
exit = true
}
metaMu.Unlock()
logger.LogIf(ctx, err)
}
}()
const retryDelay = 200 * time.Millisecond
const maxTries = 5
var bw *metacacheBlockWriter
// Don't save single object listings.
if !o.discardResult {
// Write results to disk.
bw = newMetacacheBlockWriter(cacheCh, func(b *metacacheBlock) error {
// If the block is 0 bytes and it's the first block, skip it.
// skip only this for Transient caches.
if len(b.data) == 0 && b.n == 0 && o.Transient {
return nil
}
o.debugln(color.Green("listPath:")+" saving block", b.n, "to", o.objectPath(b.n))
r, err := hash.NewReader(bytes.NewReader(b.data), int64(len(b.data)), "", "", int64(len(b.data)), false)
logger.LogIf(ctx, err)
custom := b.headerKV()
_, err = er.putObject(ctx, minioMetaBucket, o.objectPath(b.n), NewPutObjReader(r, nil, nil), ObjectOptions{
UserDefined: custom,
NoLock: true, // No need to hold namespace lock, each prefix caches uniquely.
})
if err != nil {
metaMu.Lock()
if meta.error == "" {
meta.status = scanStateError
meta.error = err.Error()
}
metaMu.Unlock()
cancel()
return err
}
if b.n == 0 {
return nil
}
// Update block 0 metadata.
var retries int
for {
err := er.updateObjectMeta(ctx, minioMetaBucket, o.objectPath(0), b.headerKV(), ObjectOptions{})
if err == nil {
break
}
switch err.(type) {
case ObjectNotFound:
return err
case InsufficientReadQuorum:
default:
logger.LogIf(ctx, err)
}
if retries >= maxTries {
return err
}
retries++
time.Sleep(retryDelay)
}
return nil
})
}
// How to resolve results.
resolver := metadataResolutionParams{
dirQuorum: listingQuorum,
objQuorum: listingQuorum,
bucket: o.Bucket,
}
err := listPathRaw(ctx, listPathRawOptions{
disks: disks,
bucket: o.Bucket,
path: o.BaseDir,
recursive: o.Recursive,
filterPrefix: o.FilterPrefix,
minDisks: listingQuorum,
agreed: func(entry metaCacheEntry) {
if !o.discardResult {
cacheCh <- entry
}
filterCh <- entry
},
partial: func(entries metaCacheEntries, nAgreed int, errs []error) {
// Results Disagree :-(
entry, ok := entries.resolve(&resolver)
if ok {
if !o.discardResult {
cacheCh <- *entry
}
filterCh <- *entry
}
},
})
metaMu.Lock()
if err != nil {
meta.status = scanStateError
meta.error = err.Error()
}
// Save success
if meta.error == "" {
meta.status = scanStateSuccess
meta.endedCycle = intDataUpdateTracker.current()
}
meta, _ = o.updateMetacacheListing(meta, rpc)
metaMu.Unlock()
closeChannels()
if !o.discardResult {
if err := bw.Close(); err != nil {
metaMu.Lock()
meta.error = err.Error()
meta.status = scanStateError
meta, err = o.updateMetacacheListing(meta, rpc)
metaMu.Unlock()
}
}
}()
return filteredResults()
}
type listPathRawOptions struct {
disks []StorageAPI
bucket, path string
recursive bool
filterPrefix string
// Minimum number of good disks to continue.
// An error will be returned if this many disks returned an error.
minDisks int
reportNotFound bool
// Callbacks with results:
// If set to nil, it will not be called.
// agreed is called if all disks agreed.
agreed func(entry metaCacheEntry)
// partial will be returned when there is disagreement between disks.
// If a disk did not return any result, but also hasn't errored,
// the entry will be empty and errs[i] will be nil.
partial func(entries metaCacheEntries, nAgreed int, errs []error)
// finished will be called when all streams have finished and
// more than one disk returned an error.
// Will not be called if everything operates as expected.
finished func(errs []error)
}
// listPathRaw will list a path on the provided drives.
// See listPathRawOptions on how results are delivered.
// Directories are always returned.
// Cache will be bypassed.
// Context cancellation will be respected but may take a while to take effect.
func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) {
disks := opts.disks
if len(disks) == 0 {
return fmt.Errorf("listPathRaw: 0 drives provided")
}
// Disconnect from call above, but cancel on exit.
ctx, cancel := context.WithCancel(GlobalContext)
defer cancel()
askDisks := len(disks)
readers := make([]*metacacheReader, askDisks)
for i := range disks {
r, w := io.Pipe()
d := disks[i]
readers[i], err = newMetacacheReader(r)
if err != nil {
return err
}
// Send request to each disk.
go func() {
werr := d.WalkDir(ctx, WalkDirOptions{
Bucket: opts.bucket,
BaseDir: opts.path,
Recursive: opts.recursive,
ReportNotFound: opts.reportNotFound,
FilterPrefix: opts.filterPrefix}, w)
w.CloseWithError(werr)
if werr != io.EOF && werr != nil && werr.Error() != errFileNotFound.Error() && werr.Error() != errVolumeNotFound.Error() {
logger.LogIf(ctx, werr)
}
}()
}
topEntries := make(metaCacheEntries, len(readers))
errs := make([]error, len(readers))
for {
// Get the top entry from each
var current metaCacheEntry
var atEOF, fnf, hasErr, agree int
for i := range topEntries {
topEntries[i] = metaCacheEntry{}
}
select {
case <-ctx.Done():
return ctx.Err()
default:
}
for i, r := range readers {
if errs[i] != nil {
hasErr++
continue
}
entry, err := r.peek()
switch err {
case io.EOF:
atEOF++
continue
case nil:
default:
if err.Error() == errFileNotFound.Error() {
atEOF++
fnf++
continue
}
if err.Error() == errVolumeNotFound.Error() {
atEOF++
fnf++
continue
}
hasErr++
errs[i] = err
continue
}
// If no current, add it.
if current.name == "" {
topEntries[i] = entry
current = entry
agree++
continue
}
// If exact match, we agree.
if current.matches(&entry, opts.bucket) {
topEntries[i] = entry
agree++
continue
}
// If only the name matches we didn't agree, but add it for resolution.
if entry.name == current.name {
topEntries[i] = entry
continue
}
// We got different entries
if entry.name > current.name {
continue
}
// We got a new, better current.
// Clear existing entries.
for i := range topEntries[:i] {
topEntries[i] = metaCacheEntry{}
}
agree = 1
current = entry
topEntries[i] = entry
}
// Stop if we exceed number of bad disks
if hasErr > len(disks)-opts.minDisks && hasErr > 0 {
if opts.finished != nil {
opts.finished(errs)
}
var combinedErr []string
for i, err := range errs {
if err != nil {
combinedErr = append(combinedErr, fmt.Sprintf("disk %d returned: %s", i, err))
}
}
return errors.New(strings.Join(combinedErr, ", "))
}
// Break if all at EOF or error.
if atEOF+hasErr == len(readers) {
if hasErr > 0 && opts.finished != nil {
opts.finished(errs)
}
break
}
if fnf == len(readers) {
return errFileNotFound
}
if agree == len(readers) {
// Everybody agreed
for _, r := range readers {
r.skip(1)
}
if opts.agreed != nil {
opts.agreed(current)
}
continue
}
if opts.partial != nil {
opts.partial(topEntries, agree, errs)
}
// Skip the inputs we used.
for i, r := range readers {
if topEntries[i].name != "" {
r.skip(1)
}
}
}
return nil
}
| cmd/metacache-set.go | 1 | https://github.com/minio/minio/commit/4593b146bec40cc062fe921f2d47ca4c0ab98b9a | [
0.0006970961112529039,
0.00018359938985668123,
0.00015575169527437538,
0.0001701335422694683,
0.00006409100751625374
] |
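In the record above, `code_window` holds the lines of the edited region, `labels` marks each of those lines as `keep` or `replace`, and `after_edit` holds the replacement for the `replace` run, with `edit_start_line_idx` giving its position in `cmd/xl-storage.go`. The sketch below is an illustration rather than code from the dataset; it shows one way such a labelled hunk could be applied.

```go
package main

import "fmt"

// applyHunk applies a labelled hunk of the kind shown above: lines labelled
// "keep" pass through unchanged and the contiguous run of "replace" lines is
// substituted with the after_edit block. The function name and signature are
// illustrative assumptions, not part of the dataset.
func applyHunk(codeWindow, labels, afterEdit []string) []string {
	out := make([]string, 0, len(codeWindow))
	replaced := false
	for i, line := range codeWindow {
		if labels[i] == "keep" {
			out = append(out, line)
			continue
		}
		// Emit the replacement once for the whole "replace" run.
		if !replaced {
			out = append(out, afterEdit...)
			replaced = true
		}
	}
	return out
}

func main() {
	codeWindow := []string{
		"\t} else {\n",
		"\t\t// Open the fileile fileor reading.\n",
		"\t\tfile, err = os.Open(filePath)\n",
	}
	labels := []string{"keep", "replace", "keep"}
	afterEdit := []string{"\t\t// Open the file for reading.\n"}
	for _, line := range applyHunk(codeWindow, labels, afterEdit) {
		fmt.Print(line)
	}
}
```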
{
"id": 4,
"code_window": [
"\tif offset == 0 && globalStorageClass.GetDMA() == storageclass.DMAReadWrite {\n",
"\t\tfile, err = disk.OpenFileDirectIO(filePath, os.O_RDONLY, 0666)\n",
"\t} else {\n",
"\t\t// Open the fileile fileor reading.\n",
"\t\tfile, err = os.Open(filePath)\n",
"\t}\n",
"\tif err != nil {\n",
"\t\tswitch {\n",
"\t\tcase osIsNotExist(err):\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t// Open the file for reading.\n"
],
"file_path": "cmd/xl-storage.go",
"type": "replace",
"edit_start_line_idx": 1000
} | /*
* MinIO Cloud Storage, (C) 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"fmt"
"strings"
"testing"
"github.com/minio/cli"
)
// Test RegisterGatewayCommand
func TestRegisterGatewayCommand(t *testing.T) {
var err error
cmd := cli.Command{Name: "test"}
err = RegisterGatewayCommand(cmd)
if err != nil {
t.Errorf("RegisterGatewayCommand got unexpected error: %s", err)
}
}
// Test running a registered gateway command with a flag
func TestRunRegisteredGatewayCommand(t *testing.T) {
var err error
flagName := "test-flag"
flagValue := "foo"
cmd := cli.Command{
Name: "test-run-with-flag",
Flags: []cli.Flag{
cli.StringFlag{Name: flagName},
},
Action: func(ctx *cli.Context) {
if actual := ctx.String(flagName); actual != flagValue {
t.Errorf("value of %s expects %s, but got %s", flagName, flagValue, actual)
}
},
}
err = RegisterGatewayCommand(cmd)
if err != nil {
t.Errorf("RegisterGatewayCommand got unexpected error: %s", err)
}
if err = newApp("minio").Run(
[]string{"minio", "gateway", cmd.Name, fmt.Sprintf("--%s", flagName), flagValue}); err != nil {
t.Errorf("running registered gateway command got unexpected error: %s", err)
}
}
// Test parseGatewayEndpoint
func TestParseGatewayEndpoint(t *testing.T) {
testCases := []struct {
arg string
endPoint string
secure bool
errReturned bool
}{
{"http://127.0.0.1:9000", "127.0.0.1:9000", false, false},
{"https://127.0.0.1:9000", "127.0.0.1:9000", true, false},
{"http://play.min.io:9000", "play.min.io:9000", false, false},
{"https://play.min.io:9000", "play.min.io:9000", true, false},
{"ftp://127.0.0.1:9000", "", false, true},
{"ftp://play.min.io:9000", "", false, true},
{"play.min.io:9000", "play.min.io:9000", true, false},
}
for i, test := range testCases {
endPoint, secure, err := ParseGatewayEndpoint(test.arg)
errReturned := err != nil
if endPoint != test.endPoint ||
secure != test.secure ||
errReturned != test.errReturned {
t.Errorf("Test %d: expected %s,%t,%t got %s,%t,%t",
i+1, test.endPoint, test.secure, test.errReturned,
endPoint, secure, errReturned)
}
}
}
// Test validateGatewayArguments
func TestValidateGatewayArguments(t *testing.T) {
nonLoopBackIPs := localIP4.FuncMatch(func(ip string, matchString string) bool {
return !strings.HasPrefix(ip, "127.")
}, "")
if len(nonLoopBackIPs) == 0 {
t.Fatalf("No non-loop back IP address found for this host")
}
nonLoopBackIP := nonLoopBackIPs.ToSlice()[0]
testCases := []struct {
serverAddr string
endpointAddr string
valid bool
}{
{":9000", "http://localhost:9001", true},
{":9000", "http://google.com", true},
{"123.123.123.123:9000", "http://localhost:9000", false},
{":9000", "http://localhost:9000", false},
{":9000", nonLoopBackIP + ":9000", false},
}
for i, test := range testCases {
err := ValidateGatewayArguments(test.serverAddr, test.endpointAddr)
if test.valid && err != nil {
t.Errorf("Test %d expected not to return error but got %s", i+1, err)
}
if !test.valid && err == nil {
t.Errorf("Test %d expected to fail but it did not", i+1)
}
}
}
| cmd/gateway-main_test.go | 0 | https://github.com/minio/minio/commit/4593b146bec40cc062fe921f2d47ca4c0ab98b9a | [
0.0007553440518677235,
0.00021476540132425725,
0.00016476446762681007,
0.00017027067951858044,
0.00015609462570864707
] |
{
"id": 4,
"code_window": [
"\tif offset == 0 && globalStorageClass.GetDMA() == storageclass.DMAReadWrite {\n",
"\t\tfile, err = disk.OpenFileDirectIO(filePath, os.O_RDONLY, 0666)\n",
"\t} else {\n",
"\t\t// Open the fileile fileor reading.\n",
"\t\tfile, err = os.Open(filePath)\n",
"\t}\n",
"\tif err != nil {\n",
"\t\tswitch {\n",
"\t\tcase osIsNotExist(err):\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t// Open the file for reading.\n"
],
"file_path": "cmd/xl-storage.go",
"type": "replace",
"edit_start_line_idx": 1000
} | /*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"encoding/hex"
"encoding/json"
"testing"
"github.com/dustin/go-humanize"
jsoniter "github.com/json-iterator/go"
)
func TestIsXLMetaFormatValid(t *testing.T) {
tests := []struct {
name int
version string
format string
want bool
}{
{1, "123", "fs", false},
{2, "123", xlMetaFormat, false},
{3, xlMetaVersion100, "test", false},
{4, xlMetaVersion101, "hello", false},
{5, xlMetaVersion100, xlMetaFormat, true},
{6, xlMetaVersion101, xlMetaFormat, true},
}
for _, tt := range tests {
if got := isXLMetaFormatValid(tt.version, tt.format); got != tt.want {
t.Errorf("Test %d: Expected %v but received %v", tt.name, got, tt.want)
}
}
}
func TestIsXLMetaErasureInfoValid(t *testing.T) {
tests := []struct {
name int
data int
parity int
want bool
}{
{1, 5, 6, false},
{2, 5, 5, true},
{3, 0, 5, false},
{4, 5, 0, false},
{5, 5, 0, false},
{6, 5, 4, true},
}
for _, tt := range tests {
if got := isXLMetaErasureInfoValid(tt.data, tt.parity); got != tt.want {
t.Errorf("Test %d: Expected %v but received %v", tt.name, got, tt.want)
}
}
}
// newTestXLMetaV1 - initializes new xlMetaV1Object, adds version, allocates a fresh erasure info and metadata.
func newTestXLMetaV1() xlMetaV1Object {
xlMeta := xlMetaV1Object{}
xlMeta.Version = xlMetaVersion101
xlMeta.Format = xlMetaFormat
xlMeta.Minio.Release = "test"
xlMeta.Erasure = ErasureInfo{
Algorithm: "klauspost/reedsolomon/vandermonde",
DataBlocks: 5,
ParityBlocks: 5,
BlockSize: 10485760,
Index: 10,
Distribution: []int{9, 10, 1, 2, 3, 4, 5, 6, 7, 8},
}
xlMeta.Stat = StatInfo{
Size: int64(20),
ModTime: UTCNow(),
}
// Set meta data.
xlMeta.Meta = make(map[string]string)
xlMeta.Meta["testKey1"] = "val1"
xlMeta.Meta["testKey2"] = "val2"
return xlMeta
}
func (m *xlMetaV1Object) AddTestObjectCheckSum(partNumber int, algorithm BitrotAlgorithm, hash string) {
checksum, err := hex.DecodeString(hash)
if err != nil {
panic(err)
}
m.Erasure.Checksums[partNumber-1] = ChecksumInfo{partNumber, algorithm, checksum}
}
// AddTestObjectPart - add a new object part in order.
func (m *xlMetaV1Object) AddTestObjectPart(partNumber int, partSize int64) {
partInfo := ObjectPartInfo{
Number: partNumber,
Size: partSize,
}
// Proceed to include new part info.
m.Parts[partNumber-1] = partInfo
}
// Constructs xlMetaV1Object{} for given number of parts and converts it into bytes.
func getXLMetaBytes(totalParts int) []byte {
xlSampleMeta := getSampleXLMeta(totalParts)
xlMetaBytes, err := json.Marshal(xlSampleMeta)
if err != nil {
panic(err)
}
return xlMetaBytes
}
// Returns sample xlMetaV1Object{} for number of parts.
func getSampleXLMeta(totalParts int) xlMetaV1Object {
xlMeta := newTestXLMetaV1()
// Number of checksum info == total parts.
xlMeta.Erasure.Checksums = make([]ChecksumInfo, totalParts)
// total number of parts.
xlMeta.Parts = make([]ObjectPartInfo, totalParts)
for i := 0; i < totalParts; i++ {
// Hard-code the hash and algorithm value for the checksum. Since we are benchmarking
// the parsing of xl.meta, the magnitude of the values doesn't affect the test, only the size does.
xlMeta.AddTestObjectCheckSum(i+1, BLAKE2b512, "a23f5eff248c4372badd9f3b2455a285cd4ca86c3d9a570b091d3fc5cd7ca6d9484bbea3f8c5d8d4f84daae96874419eda578fd736455334afbac2c924b3915a")
xlMeta.AddTestObjectPart(i+1, 67108864)
}
return xlMeta
}
// Compare the unmarshaled XLMetaV1 with the one obtained from jsoniter parsing.
func compareXLMetaV1(t *testing.T, unMarshalXLMeta, jsoniterXLMeta xlMetaV1Object) {
// Start comparing the fields of xlMetaV1Object obtained from jsoniter parsing with one parsed using json unmarshaling.
if unMarshalXLMeta.Version != jsoniterXLMeta.Version {
t.Errorf("Expected the Version to be \"%s\", but got \"%s\".", unMarshalXLMeta.Version, jsoniterXLMeta.Version)
}
if unMarshalXLMeta.Format != jsoniterXLMeta.Format {
t.Errorf("Expected the format to be \"%s\", but got \"%s\".", unMarshalXLMeta.Format, jsoniterXLMeta.Format)
}
if unMarshalXLMeta.Stat.Size != jsoniterXLMeta.Stat.Size {
t.Errorf("Expected the stat size to be %v, but got %v.", unMarshalXLMeta.Stat.Size, jsoniterXLMeta.Stat.Size)
}
if !unMarshalXLMeta.Stat.ModTime.Equal(jsoniterXLMeta.Stat.ModTime) {
t.Errorf("Expected the modTime to be \"%v\", but got \"%v\".", unMarshalXLMeta.Stat.ModTime, jsoniterXLMeta.Stat.ModTime)
}
if unMarshalXLMeta.Erasure.Algorithm != jsoniterXLMeta.Erasure.Algorithm {
t.Errorf("Expected the erasure algorithm to be \"%v\", but got \"%v\".", unMarshalXLMeta.Erasure.Algorithm, jsoniterXLMeta.Erasure.Algorithm)
}
if unMarshalXLMeta.Erasure.DataBlocks != jsoniterXLMeta.Erasure.DataBlocks {
t.Errorf("Expected the erasure data blocks to be %v, but got %v.", unMarshalXLMeta.Erasure.DataBlocks, jsoniterXLMeta.Erasure.DataBlocks)
}
if unMarshalXLMeta.Erasure.ParityBlocks != jsoniterXLMeta.Erasure.ParityBlocks {
t.Errorf("Expected the erasure parity blocks to be %v, but got %v.", unMarshalXLMeta.Erasure.ParityBlocks, jsoniterXLMeta.Erasure.ParityBlocks)
}
if unMarshalXLMeta.Erasure.BlockSize != jsoniterXLMeta.Erasure.BlockSize {
t.Errorf("Expected the erasure block size to be %v, but got %v.", unMarshalXLMeta.Erasure.BlockSize, jsoniterXLMeta.Erasure.BlockSize)
}
if unMarshalXLMeta.Erasure.Index != jsoniterXLMeta.Erasure.Index {
t.Errorf("Expected the erasure index to be %v, but got %v.", unMarshalXLMeta.Erasure.Index, jsoniterXLMeta.Erasure.Index)
}
if len(unMarshalXLMeta.Erasure.Distribution) != len(jsoniterXLMeta.Erasure.Distribution) {
t.Errorf("Expected the size of Erasure Distribution to be %d, but got %d.", len(unMarshalXLMeta.Erasure.Distribution), len(jsoniterXLMeta.Erasure.Distribution))
} else {
for i := 0; i < len(unMarshalXLMeta.Erasure.Distribution); i++ {
if unMarshalXLMeta.Erasure.Distribution[i] != jsoniterXLMeta.Erasure.Distribution[i] {
t.Errorf("Expected the Erasure Distribution to be %d, got %d.", unMarshalXLMeta.Erasure.Distribution[i], jsoniterXLMeta.Erasure.Distribution[i])
}
}
}
if len(unMarshalXLMeta.Erasure.Checksums) != len(jsoniterXLMeta.Erasure.Checksums) {
t.Errorf("Expected the size of Erasure Checksums to be %d, but got %d.", len(unMarshalXLMeta.Erasure.Checksums), len(jsoniterXLMeta.Erasure.Checksums))
} else {
for i := 0; i < len(unMarshalXLMeta.Erasure.Checksums); i++ {
if unMarshalXLMeta.Erasure.Checksums[i].PartNumber != jsoniterXLMeta.Erasure.Checksums[i].PartNumber {
t.Errorf("Expected the Erasure Checksum PartNumber to be \"%d\", got \"%d\".", unMarshalXLMeta.Erasure.Checksums[i].PartNumber, jsoniterXLMeta.Erasure.Checksums[i].PartNumber)
}
if unMarshalXLMeta.Erasure.Checksums[i].Algorithm != jsoniterXLMeta.Erasure.Checksums[i].Algorithm {
t.Errorf("Expected the Erasure Checksum Algorithm to be \"%s\", got \"%s\".", unMarshalXLMeta.Erasure.Checksums[i].Algorithm, jsoniterXLMeta.Erasure.Checksums[i].Algorithm)
}
if !bytes.Equal(unMarshalXLMeta.Erasure.Checksums[i].Hash, jsoniterXLMeta.Erasure.Checksums[i].Hash) {
t.Errorf("Expected the Erasure Checksum Hash to be \"%s\", got \"%s\".", unMarshalXLMeta.Erasure.Checksums[i].Hash, jsoniterXLMeta.Erasure.Checksums[i].Hash)
}
}
}
if unMarshalXLMeta.Minio.Release != jsoniterXLMeta.Minio.Release {
t.Errorf("Expected the Release string to be \"%s\", but got \"%s\".", unMarshalXLMeta.Minio.Release, jsoniterXLMeta.Minio.Release)
}
if len(unMarshalXLMeta.Parts) != len(jsoniterXLMeta.Parts) {
t.Errorf("Expected info of %d parts to be present, but got %d instead.", len(unMarshalXLMeta.Parts), len(jsoniterXLMeta.Parts))
} else {
for i := 0; i < len(unMarshalXLMeta.Parts); i++ {
if unMarshalXLMeta.Parts[i].Number != jsoniterXLMeta.Parts[i].Number {
t.Errorf("Expected the number of part %d to be \"%d\", got \"%d\".", i+1, unMarshalXLMeta.Parts[i].Number, jsoniterXLMeta.Parts[i].Number)
}
if unMarshalXLMeta.Parts[i].Size != jsoniterXLMeta.Parts[i].Size {
t.Errorf("Expected the size of part %d to be %v, got %v.", i+1, unMarshalXLMeta.Parts[i].Size, jsoniterXLMeta.Parts[i].Size)
}
}
}
for key, val := range unMarshalXLMeta.Meta {
jsoniterVal, exists := jsoniterXLMeta.Meta[key]
if !exists {
t.Errorf("No meta data entry for Key \"%s\" exists.", key)
}
if val != jsoniterVal {
t.Errorf("Expected the value for Meta data key \"%s\" to be \"%s\", but got \"%s\".", key, val, jsoniterVal)
}
}
}
// Tests the correctness of constructing XLMetaV1 using jsoniter lib.
// The result will be compared with the result obtained from json.unMarshal of the byte data.
func TestGetXLMetaV1Jsoniter1(t *testing.T) {
xlMetaJSON := getXLMetaBytes(1)
var unMarshalXLMeta xlMetaV1Object
if err := json.Unmarshal(xlMetaJSON, &unMarshalXLMeta); err != nil {
t.Errorf("Unmarshalling failed: %v", err)
}
var jsoniterXLMeta xlMetaV1Object
var json = jsoniter.ConfigCompatibleWithStandardLibrary
if err := json.Unmarshal(xlMetaJSON, &jsoniterXLMeta); err != nil {
t.Errorf("jsoniter parsing of XLMeta failed: %v", err)
}
compareXLMetaV1(t, unMarshalXLMeta, jsoniterXLMeta)
}
// Tests the correctness of constructing XLMetaV1 using jsoniter lib for XLMetaV1 of size 10 parts.
// The result will be compared with the result obtained from json.unMarshal of the byte data.
func TestGetXLMetaV1Jsoniter10(t *testing.T) {
xlMetaJSON := getXLMetaBytes(10)
var unMarshalXLMeta xlMetaV1Object
if err := json.Unmarshal(xlMetaJSON, &unMarshalXLMeta); err != nil {
t.Errorf("Unmarshalling failed: %v", err)
}
var jsoniterXLMeta xlMetaV1Object
var json = jsoniter.ConfigCompatibleWithStandardLibrary
if err := json.Unmarshal(xlMetaJSON, &jsoniterXLMeta); err != nil {
t.Errorf("jsoniter parsing of XLMeta failed: %v", err)
}
compareXLMetaV1(t, unMarshalXLMeta, jsoniterXLMeta)
}
// Test the predicted part size from the part index
func TestGetPartSizeFromIdx(t *testing.T) {
// Create test cases
testCases := []struct {
totalSize int64
partSize int64
partIndex int
expectedSize int64
}{
// Total size is zero
{0, 10, 1, 0},
// part size 2MiB, total size 4MiB
{4 * humanize.MiByte, 2 * humanize.MiByte, 1, 2 * humanize.MiByte},
{4 * humanize.MiByte, 2 * humanize.MiByte, 2, 2 * humanize.MiByte},
{4 * humanize.MiByte, 2 * humanize.MiByte, 3, 0},
// part size 2MiB, total size 5MiB
{5 * humanize.MiByte, 2 * humanize.MiByte, 1, 2 * humanize.MiByte},
{5 * humanize.MiByte, 2 * humanize.MiByte, 2, 2 * humanize.MiByte},
{5 * humanize.MiByte, 2 * humanize.MiByte, 3, 1 * humanize.MiByte},
{5 * humanize.MiByte, 2 * humanize.MiByte, 4, 0},
}
for i, testCase := range testCases {
s, err := calculatePartSizeFromIdx(GlobalContext, testCase.totalSize, testCase.partSize, testCase.partIndex)
if err != nil {
t.Errorf("Test %d: Expected to pass but failed. %s", i+1, err)
}
if err == nil && s != testCase.expectedSize {
t.Errorf("Test %d: The calculated part size is incorrect: expected = %d, found = %d\n", i+1, testCase.expectedSize, s)
}
}
testCasesFailure := []struct {
totalSize int64
partSize int64
partIndex int
err error
}{
// partSize is 0, returns error.
{10, 0, 1, errPartSizeZero},
// partIndex is 0, returns error.
{10, 1, 0, errPartSizeIndex},
// Total size is -1, returns error.
{-2, 10, 1, errInvalidArgument},
}
for i, testCaseFailure := range testCasesFailure {
_, err := calculatePartSizeFromIdx(GlobalContext, testCaseFailure.totalSize, testCaseFailure.partSize, testCaseFailure.partIndex)
if err == nil {
t.Errorf("Test %d: Expected to failed but passed. %s", i+1, err)
}
if err != nil && err != testCaseFailure.err {
t.Errorf("Test %d: Expected err %s, but got %s", i+1, testCaseFailure.err, err)
}
}
}
| cmd/xl-storage-format_test.go | 0 | https://github.com/minio/minio/commit/4593b146bec40cc062fe921f2d47ca4c0ab98b9a | [
0.00017833983292803168,
0.00017167895566672087,
0.00016064489318523556,
0.0001724070607451722,
0.000004533438641374232
] |
{
"id": 4,
"code_window": [
"\tif offset == 0 && globalStorageClass.GetDMA() == storageclass.DMAReadWrite {\n",
"\t\tfile, err = disk.OpenFileDirectIO(filePath, os.O_RDONLY, 0666)\n",
"\t} else {\n",
"\t\t// Open the fileile fileor reading.\n",
"\t\tfile, err = os.Open(filePath)\n",
"\t}\n",
"\tif err != nil {\n",
"\t\tswitch {\n",
"\t\tcase osIsNotExist(err):\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t// Open the file for reading.\n"
],
"file_path": "cmd/xl-storage.go",
"type": "replace",
"edit_start_line_idx": 1000
} | /*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/madmin"
)
// BucketQuotaSys - map of bucket and quota configuration.
type BucketQuotaSys struct {
bucketStorageCache timedValue
}
// Get - Get quota configuration.
func (sys *BucketQuotaSys) Get(bucketName string) (*madmin.BucketQuota, error) {
if globalIsGateway {
objAPI := newObjectLayerFn()
if objAPI == nil {
return nil, errServerNotInitialized
}
return &madmin.BucketQuota{}, nil
}
return globalBucketMetadataSys.GetQuotaConfig(bucketName)
}
// NewBucketQuotaSys returns initialized BucketQuotaSys
func NewBucketQuotaSys() *BucketQuotaSys {
return &BucketQuotaSys{}
}
// parseBucketQuota parses BucketQuota from json
func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota, err error) {
quotaCfg = &madmin.BucketQuota{}
if err = json.Unmarshal(data, quotaCfg); err != nil {
return quotaCfg, err
}
if !quotaCfg.IsValid() {
return quotaCfg, fmt.Errorf("Invalid quota config %#v", quotaCfg)
}
return
}
func (sys *BucketQuotaSys) check(ctx context.Context, bucket string, size int64) error {
objAPI := newObjectLayerFn()
if objAPI == nil {
return errServerNotInitialized
}
sys.bucketStorageCache.Once.Do(func() {
sys.bucketStorageCache.TTL = 1 * time.Second
sys.bucketStorageCache.Update = func() (interface{}, error) {
ctx, done := context.WithTimeout(context.Background(), 5*time.Second)
defer done()
return loadDataUsageFromBackend(ctx, objAPI)
}
})
q, err := sys.Get(bucket)
if err != nil {
return err
}
if q != nil && q.Type == madmin.HardQuota && q.Quota > 0 {
v, err := sys.bucketStorageCache.Get()
if err != nil {
return err
}
dui := v.(DataUsageInfo)
bui, ok := dui.BucketsUsage[bucket]
if !ok {
// bucket not found, cannot enforce quota
// call will fail anyways later.
return nil
}
if (bui.Size + uint64(size)) >= q.Quota {
return BucketQuotaExceeded{Bucket: bucket}
}
}
return nil
}
func enforceBucketQuota(ctx context.Context, bucket string, size int64) error {
if size < 0 {
return nil
}
return globalBucketQuotaSys.check(ctx, bucket, size)
}
// enforceFIFOQuotaBucket deletes objects in FIFO order until sufficient objects
// have been deleted so as to bring bucket usage within quota.
func enforceFIFOQuotaBucket(ctx context.Context, objectAPI ObjectLayer, bucket string, bui BucketUsageInfo) {
// Check if the current bucket has quota restrictions, if not skip it
cfg, err := globalBucketQuotaSys.Get(bucket)
if err != nil {
return
}
if cfg.Type != madmin.FIFOQuota {
return
}
var toFree uint64
if bui.Size > cfg.Quota && cfg.Quota > 0 {
toFree = bui.Size - cfg.Quota
}
if toFree <= 0 {
return
}
// Allocate new results channel to receive ObjectInfo.
objInfoCh := make(chan ObjectInfo)
versioned := globalBucketVersioningSys.Enabled(bucket)
// Walk through all objects
if err := objectAPI.Walk(ctx, bucket, "", objInfoCh, ObjectOptions{WalkVersions: versioned}); err != nil {
logger.LogIf(ctx, err)
return
}
// reuse the fileScorer used by disk cache to score entries by
// ModTime to find the oldest objects in bucket to delete. In
// the context of bucket quota enforcement - number of hits are
// irrelevant.
scorer, err := newFileScorer(toFree, time.Now().Unix(), 1)
if err != nil {
logger.LogIf(ctx, err)
return
}
rcfg, _ := globalBucketObjectLockSys.Get(bucket)
for obj := range objInfoCh {
if obj.DeleteMarker {
// Delete markers are automatically added for FIFO purge.
scorer.addFileWithObjInfo(obj, 1)
continue
}
// skip objects currently under retention
if rcfg.LockEnabled && enforceRetentionForDeletion(ctx, obj) {
continue
}
scorer.addFileWithObjInfo(obj, 1)
}
// If we saw less than quota we are good.
if scorer.seenBytes <= cfg.Quota {
return
}
// Calculate how much we want to delete now.
toFreeNow := scorer.seenBytes - cfg.Quota
// We were less over quota than we thought. Adjust so we delete less.
// If we are more over, leave it for the next run to pick up.
if toFreeNow < toFree {
if !scorer.adjustSaveBytes(int64(toFreeNow) - int64(toFree)) {
// We got below or at quota.
return
}
}
var objects []ObjectToDelete
numKeys := len(scorer.fileObjInfos())
for i, obj := range scorer.fileObjInfos() {
objects = append(objects, ObjectToDelete{
ObjectName: obj.Name,
VersionID: obj.VersionID,
})
if len(objects) < maxDeleteList && (i < numKeys-1) {
// skip deletion until maxDeleteList or end of slice
continue
}
if len(objects) == 0 {
break
}
// Deletes a list of objects.
_, deleteErrs := objectAPI.DeleteObjects(ctx, bucket, objects, ObjectOptions{
Versioned: versioned,
})
for i := range deleteErrs {
if deleteErrs[i] != nil {
logger.LogIf(ctx, deleteErrs[i])
continue
}
// Notify object deleted event.
sendEvent(eventArgs{
EventName: event.ObjectRemovedDelete,
BucketName: bucket,
Object: obj,
Host: "Internal: [FIFO-QUOTA-EXPIRY]",
})
}
objects = nil
}
}
| cmd/bucket-quota.go | 0 | https://github.com/minio/minio/commit/4593b146bec40cc062fe921f2d47ca4c0ab98b9a | [
0.0006109121604822576,
0.0001943152310559526,
0.0001621011324459687,
0.00017113742069341242,
0.00009196901373798028
] |
{
"id": 5,
"code_window": [
"\tm.Match(\n",
"\t\t`for $_, $v := range $_ { $*_ }`,\n",
"\t).\n",
"\t\tWhere(m[\"v\"].Type.Size > 512).\n",
"\t\tReport(`loop copies large value each iteration`)\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tWhere(m[\"v\"].Type.Size > 1024).\n"
],
"file_path": "ruleguard.rules.go",
"type": "replace",
"edit_start_line_idx": 352
} | /*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"context"
"encoding/gob"
"encoding/json"
"errors"
"fmt"
"io"
"strconv"
"strings"
"sync"
"time"
jsoniter "github.com/json-iterator/go"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/color"
"github.com/minio/minio/pkg/console"
"github.com/minio/minio/pkg/hash"
)
type listPathOptions struct {
// ID of the listing.
// This will be used to persist the list.
ID string
// Bucket of the listing.
Bucket string
// Directory inside the bucket.
BaseDir string
// Scan/return only content with prefix.
Prefix string
// FilterPrefix will return only results with this prefix when scanning.
// Should never contain a slash.
// Prefix should still be set.
FilterPrefix string
// Marker to resume listing.
// The response will be the first entry AFTER this object name.
Marker string
// Limit the number of results.
Limit int
// The number of disks to ask. Special values:
// 0 uses default number of disks.
// -1 use at least 50% of disks or at least the default number.
AskDisks int
// InclDeleted will keep all entries where latest version is a delete marker.
InclDeleted bool
// Scan recursively.
// If false only main directory will be scanned.
// Should always be true if Separator is not SlashSeparator.
Recursive bool
// Separator to use.
Separator string
// Create indicates that the lister should not attempt to load an existing cache.
Create bool
// CurrentCycle indicates the current bloom cycle.
// Will be used if a new scan is started.
CurrentCycle uint64
// OldestCycle indicates the oldest cycle acceptable.
OldestCycle uint64
// Include pure directories.
IncludeDirectories bool
// Transient is set if the cache is transient due to an error or being a reserved bucket.
// This means the cache metadata will not be persisted on disk.
// A transient result will never be returned from the cache so knowing the list id is required.
Transient bool
// discardResult will not persist the cache to storage.
// When the initial results are returned listing will be canceled.
discardResult bool
}
func init() {
gob.Register(listPathOptions{})
}
// newMetacache constructs a new metacache from the options.
func (o listPathOptions) newMetacache() metacache {
return metacache{
id: o.ID,
bucket: o.Bucket,
root: o.BaseDir,
recursive: o.Recursive,
status: scanStateStarted,
error: "",
started: UTCNow(),
lastHandout: UTCNow(),
lastUpdate: UTCNow(),
ended: time.Time{},
startedCycle: o.CurrentCycle,
endedCycle: 0,
dataVersion: metacacheStreamVersion,
filter: o.FilterPrefix,
}
}
func (o *listPathOptions) debugf(format string, data ...interface{}) {
if serverDebugLog {
console.Debugf(format+"\n", data...)
}
}
func (o *listPathOptions) debugln(data ...interface{}) {
if serverDebugLog {
console.Debugln(data...)
}
}
// gatherResults will collect all results on the input channel and filter results according to the options.
// Caller should close the channel when done.
// The returned function will return the results once there is enough or input is closed.
func (o *listPathOptions) gatherResults(in <-chan metaCacheEntry) func() (metaCacheEntriesSorted, error) {
var resultsDone = make(chan metaCacheEntriesSorted)
// Copy so we can mutate
resCh := resultsDone
resErr := io.EOF
go func() {
var results metaCacheEntriesSorted
for entry := range in {
if resCh == nil {
// past limit
continue
}
if !o.IncludeDirectories && entry.isDir() {
continue
}
o.debugln("gather got:", entry.name)
if o.Marker != "" && entry.name <= o.Marker {
o.debugln("pre marker")
continue
}
if !strings.HasPrefix(entry.name, o.Prefix) {
o.debugln("not in prefix")
continue
}
if !o.Recursive && !entry.isInDir(o.Prefix, o.Separator) {
o.debugln("not in dir", o.Prefix, o.Separator)
continue
}
if !o.InclDeleted && entry.isObject() && entry.isLatestDeletemarker() {
o.debugln("latest is delete marker")
continue
}
if o.Limit > 0 && results.len() >= o.Limit {
// We have enough and we have more.
// Do not return io.EOF
if resCh != nil {
resErr = nil
resCh <- results
resCh = nil
}
continue
}
o.debugln("adding...")
results.o = append(results.o, entry)
}
if resCh != nil {
resErr = io.EOF
resCh <- results
}
}()
return func() (metaCacheEntriesSorted, error) {
return <-resultsDone, resErr
}
}
// findFirstPart will find the part with 0 being the first that corresponds to the marker in the options.
// io.ErrUnexpectedEOF is returned if the place containing the marker hasn't been scanned yet.
// io.EOF indicates the marker is beyond the end of the stream and does not exist.
func (o *listPathOptions) findFirstPart(fi FileInfo) (int, error) {
search := o.Marker
if search == "" {
search = o.Prefix
}
if search == "" {
return 0, nil
}
o.debugln("searching for ", search)
var tmp metacacheBlock
var json = jsoniter.ConfigCompatibleWithStandardLibrary
i := 0
for {
partKey := fmt.Sprintf("%s-metacache-part-%d", ReservedMetadataPrefixLower, i)
v, ok := fi.Metadata[partKey]
if !ok {
o.debugln("no match in metadata, waiting")
return -1, io.ErrUnexpectedEOF
}
err := json.Unmarshal([]byte(v), &tmp)
if err != nil {
logger.LogIf(context.Background(), err)
return -1, err
}
if tmp.First == "" && tmp.Last == "" && tmp.EOS {
return 0, errFileNotFound
}
if tmp.First >= search {
o.debugln("First >= search", v)
return i, nil
}
if tmp.Last >= search {
o.debugln("Last >= search", v)
return i, nil
}
if tmp.EOS {
o.debugln("no match, at EOS", v)
return -3, io.EOF
}
o.debugln("First ", tmp.First, "<", search, " search", i)
i++
}
}
// updateMetacacheListing will update the metacache listing.
func (o *listPathOptions) updateMetacacheListing(m metacache, rpc *peerRESTClient) (metacache, error) {
if o.Transient {
return localMetacacheMgr.getTransient().updateCacheEntry(m)
}
if rpc == nil {
return localMetacacheMgr.updateCacheEntry(m)
}
return rpc.UpdateMetacacheListing(context.Background(), m)
}
func getMetacacheBlockInfo(fi FileInfo, block int) (*metacacheBlock, error) {
var tmp metacacheBlock
partKey := fmt.Sprintf("%s-metacache-part-%d", ReservedMetadataPrefixLower, block)
v, ok := fi.Metadata[partKey]
if !ok {
return nil, io.ErrUnexpectedEOF
}
return &tmp, json.Unmarshal([]byte(v), &tmp)
}
const metacachePrefix = ".metacache"
func metacachePrefixForID(bucket, id string) string {
return pathJoin(bucketMetaPrefix, bucket, metacachePrefix, id)
}
// objectPath returns the object path of the cache.
func (o *listPathOptions) objectPath(block int) string {
return pathJoin(metacachePrefixForID(o.Bucket, o.ID), "block-"+strconv.Itoa(block)+".s2")
}
func (o *listPathOptions) SetFilter() {
switch {
case metacacheSharePrefix:
return
case o.CurrentCycle != o.OldestCycle:
// We have a clean bloom filter
return
case o.Prefix == o.BaseDir:
// No additional prefix
return
}
// Remove basedir.
o.FilterPrefix = strings.TrimPrefix(o.Prefix, o.BaseDir)
// Remove leading and trailing slashes.
o.FilterPrefix = strings.Trim(o.FilterPrefix, slashSeparator)
if strings.Contains(o.FilterPrefix, slashSeparator) {
// Sanity check, should not happen.
o.FilterPrefix = ""
}
}
// filter will apply the options and return the number of objects requested by the limit.
// Will return io.EOF if there are no more entries with the same filter.
// The last entry can be used as a marker to resume the listing.
func (r *metacacheReader) filter(o listPathOptions) (entries metaCacheEntriesSorted, err error) {
// Forward to prefix, if any
err = r.forwardTo(o.Prefix)
if err != nil {
return entries, err
}
if o.Marker != "" {
err = r.forwardTo(o.Marker)
if err != nil {
return entries, err
}
next, err := r.peek()
if err != nil {
return entries, err
}
if next.name == o.Marker {
err := r.skip(1)
if err != nil {
return entries, err
}
}
}
o.debugln("forwarded to ", o.Prefix, "marker:", o.Marker, "sep:", o.Separator)
// Filter
if !o.Recursive {
entries.o = make(metaCacheEntries, 0, o.Limit)
pastPrefix := false
err := r.readFn(func(entry metaCacheEntry) bool {
if o.Prefix != "" && !strings.HasPrefix(entry.name, o.Prefix) {
// We are past the prefix, don't continue.
pastPrefix = true
return false
}
if !o.IncludeDirectories && entry.isDir() {
return true
}
if !entry.isInDir(o.Prefix, o.Separator) {
return true
}
if !o.InclDeleted && entry.isObject() && entry.isLatestDeletemarker() {
return entries.len() < o.Limit
}
entries.o = append(entries.o, entry)
return entries.len() < o.Limit
})
if (err != nil && err.Error() == io.EOF.Error()) || pastPrefix || r.nextEOF() {
return entries, io.EOF
}
return entries, err
}
// We should not need to filter more.
return r.readN(o.Limit, o.InclDeleted, o.IncludeDirectories, o.Prefix)
}
func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOptions) (entries metaCacheEntriesSorted, err error) {
retries := 0
rpc := globalNotificationSys.restClientFromHash(o.Bucket)
for {
select {
case <-ctx.Done():
return entries, ctx.Err()
default:
}
// If many failures, check the cache state.
if retries > 10 {
err := o.checkMetacacheState(ctx, rpc)
if err != nil {
return entries, fmt.Errorf("remote listing canceled: %w", err)
}
retries = 1
}
const retryDelay = 500 * time.Millisecond
// Load first part metadata...
// All operations are performed without locks, so we must be careful and allow for failures.
// Read metadata associated with the object from a disk.
if retries > 0 {
disks := er.getOnlineDisks()
if len(disks) == 0 {
time.Sleep(retryDelay)
retries++
continue
}
_, err := disks[0].ReadVersion(ctx, minioMetaBucket, o.objectPath(0), "", false)
if err != nil {
time.Sleep(retryDelay)
retries++
continue
}
}
// Read metadata associated with the object from all disks.
fi, metaArr, onlineDisks, err := er.getObjectFileInfo(ctx, minioMetaBucket, o.objectPath(0), ObjectOptions{}, true)
if err != nil {
switch toObjectErr(err, minioMetaBucket, o.objectPath(0)).(type) {
case ObjectNotFound:
retries++
time.Sleep(retryDelay)
continue
case InsufficientReadQuorum:
retries++
time.Sleep(retryDelay)
continue
default:
return entries, fmt.Errorf("reading first part metadata: %w", err)
}
}
partN, err := o.findFirstPart(fi)
switch {
case err == nil:
case errors.Is(err, io.ErrUnexpectedEOF):
if retries == 10 {
err := o.checkMetacacheState(ctx, rpc)
if err != nil {
return entries, fmt.Errorf("remote listing canceled: %w", err)
}
retries = -1
}
retries++
time.Sleep(retryDelay)
continue
case errors.Is(err, io.EOF):
return entries, io.EOF
}
// We got a stream to start at.
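		// loadedPart tracks which metacache part currently has its metadata
		// loaded into fi/metaArr/onlineDisks.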
loadedPart := 0
buf := bufferPool.Get().(*bytes.Buffer)
defer func() {
buf.Reset()
bufferPool.Put(buf)
}()
for {
select {
case <-ctx.Done():
return entries, ctx.Err()
default:
}
if partN != loadedPart {
if retries > 10 {
err := o.checkMetacacheState(ctx, rpc)
if err != nil {
return entries, fmt.Errorf("waiting for next part %d: %w", partN, err)
}
retries = 1
}
if retries > 0 {
// Load from one disk only
disks := er.getOnlineDisks()
if len(disks) == 0 {
time.Sleep(retryDelay)
retries++
continue
}
_, err := disks[0].ReadVersion(ctx, minioMetaBucket, o.objectPath(partN), "", false)
if err != nil {
time.Sleep(retryDelay)
retries++
continue
}
}
				// Load metadata for this part...
fi, metaArr, onlineDisks, err = er.getObjectFileInfo(ctx, minioMetaBucket, o.objectPath(partN), ObjectOptions{}, true)
if err != nil {
time.Sleep(retryDelay)
retries++
continue
}
loadedPart = partN
bi, err := getMetacacheBlockInfo(fi, partN)
logger.LogIf(ctx, err)
if err == nil {
if bi.pastPrefix(o.Prefix) {
return entries, io.EOF
}
}
}
buf.Reset()
err := er.getObjectWithFileInfo(ctx, minioMetaBucket, o.objectPath(partN), 0, fi.Size, buf, fi, metaArr, onlineDisks)
if err != nil {
switch toObjectErr(err, minioMetaBucket, o.objectPath(partN)).(type) {
case ObjectNotFound:
retries++
time.Sleep(retryDelay)
continue
case InsufficientReadQuorum:
retries++
time.Sleep(retryDelay)
continue
default:
logger.LogIf(ctx, err)
return entries, err
}
}
tmp, err := newMetacacheReader(buf)
if err != nil {
return entries, err
}
e, err := tmp.filter(o)
entries.o = append(entries.o, e.o...)
if o.Limit > 0 && entries.len() > o.Limit {
entries.truncate(o.Limit)
return entries, nil
}
if err == nil {
// We stopped within the listing, we are done for now...
return entries, nil
}
if !errors.Is(err, io.EOF) {
logger.LogIf(ctx, err)
return entries, err
}
// We finished at the end of the block.
// And should not expect any more results.
bi, err := getMetacacheBlockInfo(fi, partN)
logger.LogIf(ctx, err)
if err != nil || bi.EOS {
// We are done and there are no more parts.
return entries, io.EOF
}
if bi.endedPrefix(o.Prefix) {
// Nothing more for prefix.
return entries, io.EOF
}
partN++
retries = 0
}
}
}
func (er erasureObjects) SetDriveCount() int {
return er.setDriveCount
}
// Will return io.EOF if continuing would not yield more results.
func (er *erasureObjects) listPath(ctx context.Context, o listPathOptions) (entries metaCacheEntriesSorted, err error) {
o.debugf(color.Green("listPath:")+" with options: %#v", o)
// See if we have the listing stored.
if !o.Create && !o.discardResult {
entries, err := er.streamMetadataParts(ctx, o)
if IsErr(err, []error{
nil,
context.Canceled,
context.DeadlineExceeded,
}...) {
// Expected good errors we don't need to return error.
return entries, nil
}
if !errors.Is(err, io.EOF) { // io.EOF is expected and should be returned but no need to log it.
			// Log and return errors on unexpected errors.
logger.LogIf(ctx, err)
}
return entries, err
}
meta := o.newMetacache()
rpc := globalNotificationSys.restClientFromHash(o.Bucket)
var metaMu sync.Mutex
o.debugln(color.Green("listPath:")+" scanning bucket:", o.Bucket, "basedir:", o.BaseDir, "prefix:", o.Prefix, "marker:", o.Marker)
// Disconnect from call above, but cancel on exit.
ctx, cancel := context.WithCancel(GlobalContext)
// We need to ask disks.
disks := er.getOnlineDisks()
defer func() {
o.debugln(color.Green("listPath:")+" returning:", entries.len(), "err:", err)
if err != nil && !errors.Is(err, io.EOF) {
go func(err string) {
metaMu.Lock()
if meta.status != scanStateError {
meta.error = err
meta.status = scanStateError
}
meta, _ = o.updateMetacacheListing(meta, rpc)
metaMu.Unlock()
}(err.Error())
cancel()
}
}()
askDisks := o.AskDisks
listingQuorum := askDisks - 1
// Special case: ask all disks if the drive count is 4
if askDisks == -1 || er.SetDriveCount() == 4 {
askDisks = len(disks) // with 'strict' quorum list on all online disks.
listingQuorum = getReadQuorum(er.SetDriveCount())
}
if len(disks) < askDisks {
err = InsufficientReadQuorum{}
logger.LogIf(ctx, fmt.Errorf("listPath: Insufficient disks, %d of %d needed are available", len(disks), askDisks))
cancel()
return
}
// Select askDisks random disks.
if len(disks) > askDisks {
disks = disks[:askDisks]
}
// Create output for our results.
var cacheCh chan metaCacheEntry
if !o.discardResult {
cacheCh = make(chan metaCacheEntry, metacacheBlockSize)
}
// Create filter for results.
filterCh := make(chan metaCacheEntry, 100)
filteredResults := o.gatherResults(filterCh)
closeChannels := func() {
if !o.discardResult {
close(cacheCh)
}
close(filterCh)
}
// Cancel listing on return if non-saved list.
if o.discardResult {
defer cancel()
}
go func() {
defer cancel()
// Save continuous updates
go func() {
var err error
ticker := time.NewTicker(10 * time.Second)
defer ticker.Stop()
var exit bool
for !exit {
select {
case <-ticker.C:
case <-ctx.Done():
exit = true
}
metaMu.Lock()
meta.endedCycle = intDataUpdateTracker.current()
meta, err = o.updateMetacacheListing(meta, rpc)
if meta.status == scanStateError {
cancel()
exit = true
}
metaMu.Unlock()
logger.LogIf(ctx, err)
}
}()
const retryDelay = 200 * time.Millisecond
const maxTries = 5
var bw *metacacheBlockWriter
// Don't save single object listings.
if !o.discardResult {
// Write results to disk.
bw = newMetacacheBlockWriter(cacheCh, func(b *metacacheBlock) error {
			// If the block is 0 bytes and it is the first block, skip it.
			// Only skip it for Transient caches.
if len(b.data) == 0 && b.n == 0 && o.Transient {
return nil
}
o.debugln(color.Green("listPath:")+" saving block", b.n, "to", o.objectPath(b.n))
r, err := hash.NewReader(bytes.NewReader(b.data), int64(len(b.data)), "", "", int64(len(b.data)), false)
logger.LogIf(ctx, err)
custom := b.headerKV()
_, err = er.putObject(ctx, minioMetaBucket, o.objectPath(b.n), NewPutObjReader(r, nil, nil), ObjectOptions{
UserDefined: custom,
NoLock: true, // No need to hold namespace lock, each prefix caches uniquely.
})
if err != nil {
metaMu.Lock()
if meta.error != "" {
meta.status = scanStateError
meta.error = err.Error()
}
metaMu.Unlock()
cancel()
return err
}
if b.n == 0 {
return nil
}
// Update block 0 metadata.
var retries int
for {
err := er.updateObjectMeta(ctx, minioMetaBucket, o.objectPath(0), b.headerKV(), ObjectOptions{})
if err == nil {
break
}
switch err.(type) {
case ObjectNotFound:
return err
case InsufficientReadQuorum:
default:
logger.LogIf(ctx, err)
}
if retries >= maxTries {
return err
}
retries++
time.Sleep(retryDelay)
}
return nil
})
}
// How to resolve results.
resolver := metadataResolutionParams{
dirQuorum: listingQuorum,
objQuorum: listingQuorum,
bucket: o.Bucket,
}
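		// Ask the selected disks to walk the path; agreed and resolved entries
		// are fed both to the cache writer (unless discarded) and the filter.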
err := listPathRaw(ctx, listPathRawOptions{
disks: disks,
bucket: o.Bucket,
path: o.BaseDir,
recursive: o.Recursive,
filterPrefix: o.FilterPrefix,
minDisks: listingQuorum,
agreed: func(entry metaCacheEntry) {
if !o.discardResult {
cacheCh <- entry
}
filterCh <- entry
},
partial: func(entries metaCacheEntries, nAgreed int, errs []error) {
// Results Disagree :-(
entry, ok := entries.resolve(&resolver)
if ok {
if !o.discardResult {
cacheCh <- *entry
}
filterCh <- *entry
}
},
})
metaMu.Lock()
if err != nil {
meta.status = scanStateError
meta.error = err.Error()
}
// Save success
if meta.error == "" {
meta.status = scanStateSuccess
meta.endedCycle = intDataUpdateTracker.current()
}
meta, _ = o.updateMetacacheListing(meta, rpc)
metaMu.Unlock()
closeChannels()
if !o.discardResult {
if err := bw.Close(); err != nil {
metaMu.Lock()
meta.error = err.Error()
meta.status = scanStateError
meta, err = o.updateMetacacheListing(meta, rpc)
metaMu.Unlock()
}
}
}()
return filteredResults()
}
type listPathRawOptions struct {
disks []StorageAPI
bucket, path string
recursive bool
filterPrefix string
// Minimum number of good disks to continue.
// An error will be returned if this many disks returned an error.
minDisks int
reportNotFound bool
// Callbacks with results:
// If set to nil, it will not be called.
// agreed is called if all disks agreed.
agreed func(entry metaCacheEntry)
// partial will be returned when there is disagreement between disks.
	// if a disk did not return any result, but also hasn't errored,
	// the entry will be empty and errs will be nil for that disk.
partial func(entries metaCacheEntries, nAgreed int, errs []error)
// finished will be called when all streams have finished and
// more than one disk returned an error.
// Will not be called if everything operates as expected.
finished func(errs []error)
}
// listPathRaw will list a path on the provided drives.
// See listPathRawOptions on how results are delivered.
// Directories are always returned.
// Cache will be bypassed.
// Context cancellation will be respected but may take a while to take effect.
func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) {
disks := opts.disks
if len(disks) == 0 {
return fmt.Errorf("listPathRaw: 0 drives provided")
}
// Disconnect from call above, but cancel on exit.
ctx, cancel := context.WithCancel(GlobalContext)
defer cancel()
askDisks := len(disks)
readers := make([]*metacacheReader, askDisks)
for i := range disks {
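		// Each disk gets a pipe: WalkDir streams entries into the write end
		// while a metacacheReader consumes the read end.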
r, w := io.Pipe()
d := disks[i]
readers[i], err = newMetacacheReader(r)
if err != nil {
return err
}
// Send request to each disk.
go func() {
werr := d.WalkDir(ctx, WalkDirOptions{
Bucket: opts.bucket,
BaseDir: opts.path,
Recursive: opts.recursive,
ReportNotFound: opts.reportNotFound,
FilterPrefix: opts.filterPrefix}, w)
w.CloseWithError(werr)
if werr != io.EOF && werr != nil && werr.Error() != errFileNotFound.Error() && werr.Error() != errVolumeNotFound.Error() {
logger.LogIf(ctx, werr)
}
}()
}
topEntries := make(metaCacheEntries, len(readers))
errs := make([]error, len(readers))
for {
// Get the top entry from each
var current metaCacheEntry
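		// Per-round counters: readers at EOF, readers reporting not found,
		// readers that returned an error, and readers agreeing with current.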
var atEOF, fnf, hasErr, agree int
for i := range topEntries {
topEntries[i] = metaCacheEntry{}
}
select {
case <-ctx.Done():
return ctx.Err()
default:
}
for i, r := range readers {
if errs[i] != nil {
hasErr++
continue
}
entry, err := r.peek()
switch err {
case io.EOF:
atEOF++
continue
case nil:
default:
if err.Error() == errFileNotFound.Error() {
atEOF++
fnf++
continue
}
if err.Error() == errVolumeNotFound.Error() {
atEOF++
fnf++
continue
}
hasErr++
errs[i] = err
continue
}
// If no current, add it.
if current.name == "" {
topEntries[i] = entry
current = entry
agree++
continue
}
// If exact match, we agree.
if current.matches(&entry, opts.bucket) {
topEntries[i] = entry
agree++
continue
}
// If only the name matches we didn't agree, but add it for resolution.
if entry.name == current.name {
topEntries[i] = entry
continue
}
// We got different entries
if entry.name > current.name {
continue
}
// We got a new, better current.
// Clear existing entries.
for i := range topEntries[:i] {
topEntries[i] = metaCacheEntry{}
}
agree = 1
current = entry
topEntries[i] = entry
}
		// Stop if too many disks have returned errors.
if hasErr > len(disks)-opts.minDisks && hasErr > 0 {
if opts.finished != nil {
opts.finished(errs)
}
var combinedErr []string
for i, err := range errs {
if err != nil {
combinedErr = append(combinedErr, fmt.Sprintf("disk %d returned: %s", i, err))
}
}
return errors.New(strings.Join(combinedErr, ", "))
}
// Break if all at EOF or error.
if atEOF+hasErr == len(readers) {
if hasErr > 0 && opts.finished != nil {
opts.finished(errs)
}
break
}
if fnf == len(readers) {
return errFileNotFound
}
if agree == len(readers) {
// Everybody agreed
for _, r := range readers {
r.skip(1)
}
if opts.agreed != nil {
opts.agreed(current)
}
continue
}
if opts.partial != nil {
opts.partial(topEntries, agree, errs)
}
// Skip the inputs we used.
for i, r := range readers {
if topEntries[i].name != "" {
r.skip(1)
}
}
}
return nil
}
| cmd/metacache-set.go | 1 | https://github.com/minio/minio/commit/4593b146bec40cc062fe921f2d47ca4c0ab98b9a | [
0.9788637757301331,
0.02022361010313034,
0.00016038607282098383,
0.00017261007451452315,
0.13790224492549896
] |
{
"id": 5,
"code_window": [
"\tm.Match(\n",
"\t\t`for $_, $v := range $_ { $*_ }`,\n",
"\t).\n",
"\t\tWhere(m[\"v\"].Type.Size > 512).\n",
"\t\tReport(`loop copies large value each iteration`)\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tWhere(m[\"v\"].Type.Size > 1024).\n"
],
"file_path": "ruleguard.rules.go",
"type": "replace",
"edit_start_line_idx": 352
} | /*
* MinIO Cloud Storage, (C) 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
// Format related consts
const (
// Format config file carries backend format specific details.
formatConfigFile = "format.json"
)
const (
// Version of the formatMetaV1
formatMetaVersionV1 = "1"
)
// format.json currently has the format:
// {
// "version": "1",
// "format": "XXXXX",
// "XXXXX": {
//
// }
// }
// Here "XXXXX" depends on the backend, currently we have "fs" and "xl" implementations.
// formatMetaV1 should be inherited by backend format structs. Please look at format-fs.go
// and format-xl.go for details.
// Ideally we will never have a situation where we will have to change the
// fields of this struct and deal with related migration.
type formatMetaV1 struct {
// Version of the format config.
Version string `json:"version"`
// Format indicates the backend format type, supports two values 'xl' and 'fs'.
Format string `json:"format"`
// ID is the identifier for the minio deployment
ID string `json:"id"`
}
| cmd/format-meta.go | 0 | https://github.com/minio/minio/commit/4593b146bec40cc062fe921f2d47ca4c0ab98b9a | [
0.00017859354557003826,
0.00017267295334022492,
0.00016688209143467247,
0.0001718532294034958,
0.000004502844149101293
] |
{
"id": 5,
"code_window": [
"\tm.Match(\n",
"\t\t`for $_, $v := range $_ { $*_ }`,\n",
"\t).\n",
"\t\tWhere(m[\"v\"].Type.Size > 512).\n",
"\t\tReport(`loop copies large value each iteration`)\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tWhere(m[\"v\"].Type.Size > 1024).\n"
],
"file_path": "ruleguard.rules.go",
"type": "replace",
"edit_start_line_idx": 352
} | /*
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"context"
"encoding/json"
"path"
"sort"
"strings"
"unicode/utf8"
jsoniter "github.com/json-iterator/go"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/pkg/madmin"
)
const (
minioConfigPrefix = "config"
kvPrefix = ".kv"
// Captures all the previous SetKV operations and allows rollback.
minioConfigHistoryPrefix = minioConfigPrefix + "/history"
// MinIO configuration file.
minioConfigFile = "config.json"
)
func listServerConfigHistory(ctx context.Context, objAPI ObjectLayer, withData bool, count int) (
[]madmin.ConfigHistoryEntry, error) {
var configHistory []madmin.ConfigHistoryEntry
// List all kvs
marker := ""
for {
res, err := objAPI.ListObjects(ctx, minioMetaBucket, minioConfigHistoryPrefix, marker, "", maxObjectList)
if err != nil {
return nil, err
}
for _, obj := range res.Objects {
cfgEntry := madmin.ConfigHistoryEntry{
RestoreID: strings.TrimSuffix(path.Base(obj.Name), kvPrefix),
CreateTime: obj.ModTime, // ModTime is createTime for config history entries.
}
if withData {
data, err := readConfig(ctx, objAPI, obj.Name)
if err != nil {
return nil, err
}
if globalConfigEncrypted && !utf8.Valid(data) {
data, err = madmin.DecryptData(globalActiveCred.String(), bytes.NewReader(data))
if err != nil {
return nil, err
}
}
cfgEntry.Data = string(data)
}
configHistory = append(configHistory, cfgEntry)
count--
if count == 0 {
break
}
}
if !res.IsTruncated {
// We are done here
break
}
marker = res.NextMarker
}
sort.Slice(configHistory, func(i, j int) bool {
return configHistory[i].CreateTime.Before(configHistory[j].CreateTime)
})
return configHistory, nil
}
func delServerConfigHistory(ctx context.Context, objAPI ObjectLayer, uuidKV string) error {
historyFile := pathJoin(minioConfigHistoryPrefix, uuidKV+kvPrefix)
_, err := objAPI.DeleteObject(ctx, minioMetaBucket, historyFile, ObjectOptions{})
return err
}
func readServerConfigHistory(ctx context.Context, objAPI ObjectLayer, uuidKV string) ([]byte, error) {
historyFile := pathJoin(minioConfigHistoryPrefix, uuidKV+kvPrefix)
data, err := readConfig(ctx, objAPI, historyFile)
if err != nil {
return nil, err
}
if globalConfigEncrypted && !utf8.Valid(data) {
data, err = madmin.DecryptData(globalActiveCred.String(), bytes.NewReader(data))
}
return data, err
}
func saveServerConfigHistory(ctx context.Context, objAPI ObjectLayer, kv []byte) error {
uuidKV := mustGetUUID() + kvPrefix
historyFile := pathJoin(minioConfigHistoryPrefix, uuidKV)
var err error
if globalConfigEncrypted {
kv, err = madmin.EncryptData(globalActiveCred.String(), kv)
if err != nil {
return err
}
}
// Save the new config KV settings into the history path.
return saveConfig(ctx, objAPI, historyFile, kv)
}
func saveServerConfig(ctx context.Context, objAPI ObjectLayer, config interface{}) error {
data, err := json.Marshal(config)
if err != nil {
return err
}
if globalConfigEncrypted {
data, err = madmin.EncryptData(globalActiveCred.String(), data)
if err != nil {
return err
}
}
configFile := path.Join(minioConfigPrefix, minioConfigFile)
// Save the new config in the std config path
return saveConfig(ctx, objAPI, configFile, data)
}
func readServerConfig(ctx context.Context, objAPI ObjectLayer) (config.Config, error) {
configFile := path.Join(minioConfigPrefix, minioConfigFile)
configData, err := readConfig(ctx, objAPI, configFile)
if err != nil {
// Config not found for some reason, allow things to continue
// by initializing a new fresh config in safe mode.
if err == errConfigNotFound && newObjectLayerFn() == nil {
return newServerConfig(), nil
}
return nil, err
}
if globalConfigEncrypted && !utf8.Valid(configData) {
configData, err = madmin.DecryptData(globalActiveCred.String(), bytes.NewReader(configData))
if err != nil {
if err == madmin.ErrMaliciousData {
return nil, config.ErrInvalidCredentialsBackendEncrypted(nil)
}
return nil, err
}
}
var srvCfg = config.New()
var json = jsoniter.ConfigCompatibleWithStandardLibrary
if err = json.Unmarshal(configData, &srvCfg); err != nil {
return nil, err
}
// Add any missing entries
return srvCfg.Merge(), nil
}
// ConfigSys - config system.
type ConfigSys struct{}
// Load - load config.json.
func (sys *ConfigSys) Load(objAPI ObjectLayer) error {
return sys.Init(objAPI)
}
// Init - initializes config system from config.json.
func (sys *ConfigSys) Init(objAPI ObjectLayer) error {
if objAPI == nil {
return errInvalidArgument
}
return initConfig(objAPI)
}
// NewConfigSys - creates new config system object.
func NewConfigSys() *ConfigSys {
return &ConfigSys{}
}
// Initialize and load config from remote etcd or local config directory
func initConfig(objAPI ObjectLayer) error {
if objAPI == nil {
return errServerNotInitialized
}
if isFile(getConfigFile()) {
if err := migrateConfig(); err != nil {
return err
}
}
// Migrates ${HOME}/.minio/config.json or config.json.deprecated
// to '<export_path>/.minio.sys/config/config.json'
// ignore if the file doesn't exist.
// If etcd is set then migrates /config/config.json
// to '<export_path>/.minio.sys/config/config.json'
if err := migrateConfigToMinioSys(objAPI); err != nil {
return err
}
// Migrates backend '<export_path>/.minio.sys/config/config.json' to latest version.
if err := migrateMinioSysConfig(objAPI); err != nil {
return err
}
// Migrates backend '<export_path>/.minio.sys/config/config.json' to
// latest config format.
if err := migrateMinioSysConfigToKV(objAPI); err != nil {
return err
}
return loadConfig(objAPI)
}
| cmd/config.go | 0 | https://github.com/minio/minio/commit/4593b146bec40cc062fe921f2d47ca4c0ab98b9a | [
0.0003622635267674923,
0.00018184678629040718,
0.000162964963237755,
0.0001715879625407979,
0.0000397353433072567
] |
{
"id": 5,
"code_window": [
"\tm.Match(\n",
"\t\t`for $_, $v := range $_ { $*_ }`,\n",
"\t).\n",
"\t\tWhere(m[\"v\"].Type.Size > 512).\n",
"\t\tReport(`loop copies large value each iteration`)\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tWhere(m[\"v\"].Type.Size > 1024).\n"
],
"file_path": "ruleguard.rules.go",
"type": "replace",
"edit_start_line_idx": 352
} | /*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3select
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"math"
"net/http"
"os"
"reflect"
"strings"
"testing"
"github.com/klauspost/cpuid"
"github.com/minio/minio-go/v7"
"github.com/minio/simdjson-go"
)
type testResponseWriter struct {
statusCode int
response []byte
}
func (w *testResponseWriter) Header() http.Header {
return nil
}
func (w *testResponseWriter) Write(p []byte) (int, error) {
w.response = append(w.response, p...)
return len(p), nil
}
func (w *testResponseWriter) WriteHeader(statusCode int) {
w.statusCode = statusCode
}
func (w *testResponseWriter) Flush() {
}
func TestJSONQueries(t *testing.T) {
input := `{"id": 0,"title": "Test Record","desc": "Some text","synonyms": ["foo", "bar", "whatever"]}
{"id": 1,"title": "Second Record","desc": "another text","synonyms": ["some", "synonym", "value"]}
{"id": 2,"title": "Second Record","desc": "another text","numbers": [2, 3.0, 4]}
{"id": 3,"title": "Second Record","desc": "another text","nested": [[2, 3.0, 4], [7, 8.5, 9]]}`
var testTable = []struct {
name string
query string
requestXML []byte // override request XML
wantResult string
withJSON string // Override JSON input
}{
{
name: "select-in-array-full",
query: `SELECT * from s3object s WHERE 'bar' IN s.synonyms[*]`,
wantResult: `{"id":0,"title":"Test Record","desc":"Some text","synonyms":["foo","bar","whatever"]}`,
},
{
name: "simple-in-array",
query: `SELECT * from s3object s WHERE s.id IN (1,3)`,
wantResult: `{"id":1,"title":"Second Record","desc":"another text","synonyms":["some","synonym","value"]}
{"id":3,"title":"Second Record","desc":"another text","nested":[[2,3,4],[7,8.5,9]]}`,
},
{
name: "select-in-array-single",
query: `SELECT synonyms from s3object s WHERE 'bar' IN s.synonyms[*] `,
wantResult: `{"synonyms":["foo","bar","whatever"]}`,
},
{
name: "donatello-1",
query: `SELECT * from s3object s WHERE 'bar' in s.synonyms`,
wantResult: `{"id":0,"title":"Test Record","desc":"Some text","synonyms":["foo","bar","whatever"]}`,
},
{
name: "donatello-2",
query: `SELECT * from s3object s WHERE 'bar' in s.synonyms[*]`,
wantResult: `{"id":0,"title":"Test Record","desc":"Some text","synonyms":["foo","bar","whatever"]}`,
},
{
name: "bignum-1",
query: `SELECT id from s3object s WHERE s.id <= 9223372036854775807`,
wantResult: `{"id":0}
{"id":1}
{"id":2}
{"id":3}`},
{
name: "bignum-2",
query: `SELECT id from s3object s WHERE s.id >= -9223372036854775808`,
wantResult: `{"id":0}
{"id":1}
{"id":2}
{"id":3}`},
{
name: "donatello-3",
query: `SELECT * from s3object s WHERE 'value' IN s.synonyms[*]`,
wantResult: `{"id":1,"title":"Second Record","desc":"another text","synonyms":["some","synonym","value"]}`,
},
{
name: "select-in-number",
query: `SELECT * from s3object s WHERE 4 in s.numbers[*]`,
wantResult: `{"id":2,"title":"Second Record","desc":"another text","numbers":[2,3,4]}`,
},
{
name: "select-in-number-float",
query: `SELECT * from s3object s WHERE 3 in s.numbers[*]`,
wantResult: `{"id":2,"title":"Second Record","desc":"another text","numbers":[2,3,4]}`,
},
{
name: "select-in-number-float-in-sql",
query: `SELECT * from s3object s WHERE 3.0 in s.numbers[*]`,
wantResult: `{"id":2,"title":"Second Record","desc":"another text","numbers":[2,3,4]}`,
},
{
name: "select-in-list-match",
query: `SELECT * from s3object s WHERE (2,3,4) IN s.nested[*]`,
wantResult: `{"id":3,"title":"Second Record","desc":"another text","nested":[[2,3,4],[7,8.5,9]]}`,
},
{
name: "select-in-nested-float",
query: `SELECT s.nested from s3object s WHERE 8.5 IN s.nested[*][*]`,
wantResult: `{"nested":[[2,3,4],[7,8.5,9]]}`,
},
{
name: "select-in-combine-and",
query: `SELECT s.nested from s3object s WHERE (8.5 IN s.nested[*][*]) AND (s.id > 0)`,
wantResult: `{"nested":[[2,3,4],[7,8.5,9]]}`,
},
{
name: "select-in-combine-and-no",
query: `SELECT s.nested from s3object s WHERE (8.5 IN s.nested[*][*]) AND (s.id = 0)`,
wantResult: ``,
},
{
name: "select-in-nested-float-no-flat",
query: `SELECT s.nested from s3object s WHERE 8.5 IN s.nested[*]`,
wantResult: ``,
},
{
name: "select-empty-field-result",
query: `SELECT * from s3object s WHERE s.nested[0][0] = 2`,
wantResult: `{"id":3,"title":"Second Record","desc":"another text","nested":[[2,3,4],[7,8.5,9]]}`,
},
{
name: "select-arrays-specific",
query: `SELECT * from s3object s WHERE s.nested[1][0] = 7`,
wantResult: `{"id":3,"title":"Second Record","desc":"another text","nested":[[2,3,4],[7,8.5,9]]}`,
},
{
name: "wrong-index-no-result",
query: `SELECT * from s3object s WHERE s.nested[0][0] = 7`,
wantResult: ``,
},
{
name: "not-equal-result",
query: `SELECT * from s3object s WHERE s.nested[1][0] != 7`,
wantResult: `{"id":0,"title":"Test Record","desc":"Some text","synonyms":["foo","bar","whatever"]}
{"id":1,"title":"Second Record","desc":"another text","synonyms":["some","synonym","value"]}
{"id":2,"title":"Second Record","desc":"another text","numbers":[2,3,4]}`,
},
{
name: "indexed-list-match",
query: `SELECT * from s3object s WHERE (7,8.5,9) IN s.nested[1]`,
wantResult: ``,
},
{
name: "indexed-list-match-equals",
query: `SELECT * from s3object s WHERE (7,8.5,9) = s.nested[1]`,
wantResult: `{"id":3,"title":"Second Record","desc":"another text","nested":[[2,3,4],[7,8.5,9]]}`,
},
{
name: "indexed-list-match-equals-s-star",
query: `SELECT s.* from s3object s WHERE (7,8.5,9) = s.nested[1]`,
wantResult: `{"id":3,"title":"Second Record","desc":"another text","nested":[[2,3,4],[7,8.5,9]]}`,
},
{
name: "indexed-list-match-equals-s-index",
query: `SELECT s.nested[1], s.nested[0] from s3object s WHERE (7,8.5,9) = s.nested[1]`,
wantResult: `{"_1":[7,8.5,9],"_2":[2,3,4]}`,
},
{
name: "indexed-list-match-not-equals",
query: `SELECT * from s3object s WHERE (7,8.5,9) != s.nested[1]`,
wantResult: `{"id":0,"title":"Test Record","desc":"Some text","synonyms":["foo","bar","whatever"]}
{"id":1,"title":"Second Record","desc":"another text","synonyms":["some","synonym","value"]}
{"id":2,"title":"Second Record","desc":"another text","numbers":[2,3,4]}`,
},
{
name: "indexed-list-square-bracket",
query: `SELECT * from s3object s WHERE [7,8.5,9] = s.nested[1]`,
wantResult: `{"id":3,"title":"Second Record","desc":"another text","nested":[[2,3,4],[7,8.5,9]]}`,
},
{
name: "indexed-list-square-bracket",
query: `SELECT * from s3object s WHERE [7,8.5,9] IN s.nested`,
wantResult: `{"id":3,"title":"Second Record","desc":"another text","nested":[[2,3,4],[7,8.5,9]]}`,
},
{
name: "indexed-list-square-bracket",
query: `SELECT * from s3object s WHERE id IN [3,2]`,
wantResult: `{"id":2,"title":"Second Record","desc":"another text","numbers":[2,3,4]}
{"id":3,"title":"Second Record","desc":"another text","nested":[[2,3,4],[7,8.5,9]]}`,
},
{
name: "index-wildcard-in",
query: `SELECT * from s3object s WHERE (8.5) IN s.nested[1][*]`,
wantResult: `{"id":3,"title":"Second Record","desc":"another text","nested":[[2,3,4],[7,8.5,9]]}`,
},
{
name: "index-wildcard-in",
query: `SELECT * from s3object s WHERE (8.0+0.5) IN s.nested[1][*]`,
wantResult: `{"id":3,"title":"Second Record","desc":"another text","nested":[[2,3,4],[7,8.5,9]]}`,
},
{
name: "compare-mixed",
query: `SELECT id from s3object s WHERE value = true`,
wantResult: `{"id":1}`,
withJSON: `{"id":0, "value": false}
{"id":1, "value": true}
{"id":2, "value": 42}
{"id":3, "value": "true"}
`,
},
{
name: "compare-mixed-not",
query: `SELECT COUNT(id) as n from s3object s WHERE value != true`,
wantResult: `{"n":3}`,
withJSON: `{"id":0, "value": false}
{"id":1, "value": true}
{"id":2, "value": 42}
{"id":3, "value": "true"}
`,
},
{
name: "index-wildcard-in",
query: `SELECT * from s3object s WHERE title = 'Test Record'`,
wantResult: `{"id":0,"title":"Test Record","desc":"Some text","synonyms":["foo","bar","whatever"]}`,
},
{
name: "select-output-field-as-csv",
requestXML: []byte(`<?xml version="1.0" encoding="UTF-8"?>
<SelectObjectContentRequest>
<Expression>SELECT s.synonyms from s3object s WHERE 'whatever' IN s.synonyms</Expression>
<ExpressionType>SQL</ExpressionType>
<InputSerialization>
<CompressionType>NONE</CompressionType>
<JSON>
<Type>DOCUMENT</Type>
</JSON>
</InputSerialization>
<OutputSerialization>
<CSV>
<QuoteCharacter>"</QuoteCharacter>
</CSV>
</OutputSerialization>
<RequestProgress>
<Enabled>FALSE</Enabled>
</RequestProgress>
</SelectObjectContentRequest>`),
wantResult: `"[""foo"",""bar"",""whatever""]"`,
},
{
name: "document",
query: "",
requestXML: []byte(`
<?xml version="1.0" encoding="UTF-8"?>
<SelectObjectContentRequest>
<Expression>select * from s3object[*].elements[*] s where s.element_type = '__elem__merfu'</Expression>
<ExpressionType>SQL</ExpressionType>
<InputSerialization>
<CompressionType>NONE</CompressionType>
<JSON>
<Type>DOCUMENT</Type>
</JSON>
</InputSerialization>
<OutputSerialization>
<JSON>
</JSON>
</OutputSerialization>
<RequestProgress>
<Enabled>FALSE</Enabled>
</RequestProgress>
</SelectObjectContentRequest>`),
withJSON: `
{
"name": "small_pdf1.pdf",
"lume_id": "9507193e-572d-4f95-bcf1-e9226d96be65",
"elements": [
{
"element_type": "__elem__image",
"element_id": "859d09c4-7cf1-4a37-9674-3a7de8b56abc",
"attributes": {
"__attr__image_dpi": 300,
"__attr__image_size": [
2550,
3299
],
"__attr__image_index": 1,
"__attr__image_format": "JPEG",
"__attr__file_extension": "jpg",
"__attr__data": null
}
},
{
"element_type": "__elem__merfu",
"element_id": "d868aefe-ef9a-4be2-b9b2-c9fd89cc43eb",
"attributes": {
"__attr__image_dpi": 300,
"__attr__image_size": [
2550,
3299
],
"__attr__image_index": 2,
"__attr__image_format": "JPEG",
"__attr__file_extension": "jpg",
"__attr__data": null
}
}
],
"data": "asdascasdc1234e123erdasdas"
}`,
wantResult: `{"element_type":"__elem__merfu","element_id":"d868aefe-ef9a-4be2-b9b2-c9fd89cc43eb","attributes":{"__attr__image_dpi":300,"__attr__image_size":[2550,3299],"__attr__image_index":2,"__attr__image_format":"JPEG","__attr__file_extension":"jpg","__attr__data":null}}`,
},
}
defRequest := `<?xml version="1.0" encoding="UTF-8"?>
<SelectObjectContentRequest>
<Expression>%s</Expression>
<ExpressionType>SQL</ExpressionType>
<InputSerialization>
<CompressionType>NONE</CompressionType>
<JSON>
<Type>LINES</Type>
</JSON>
</InputSerialization>
<OutputSerialization>
<JSON>
</JSON>
</OutputSerialization>
<RequestProgress>
<Enabled>FALSE</Enabled>
</RequestProgress>
</SelectObjectContentRequest>`
for _, testCase := range testTable {
t.Run(testCase.name, func(t *testing.T) {
			// Hack cpuid so the CPU doesn't appear to support AVX2.
			// Restore the original flags whatever happens.
defer func(f cpuid.Flags) {
cpuid.CPU.Features = f
}(cpuid.CPU.Features)
cpuid.CPU.Features &= math.MaxUint64 - cpuid.AVX2
testReq := testCase.requestXML
if len(testReq) == 0 {
var escaped bytes.Buffer
xml.EscapeText(&escaped, []byte(testCase.query))
testReq = []byte(fmt.Sprintf(defRequest, escaped.String()))
}
s3Select, err := NewS3Select(bytes.NewReader(testReq))
if err != nil {
t.Fatal(err)
}
if err = s3Select.Open(func(offset, length int64) (io.ReadCloser, error) {
in := input
if len(testCase.withJSON) > 0 {
in = testCase.withJSON
}
return ioutil.NopCloser(bytes.NewBufferString(in)), nil
}); err != nil {
t.Fatal(err)
}
w := &testResponseWriter{}
s3Select.Evaluate(w)
s3Select.Close()
resp := http.Response{
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(bytes.NewReader(w.response)),
ContentLength: int64(len(w.response)),
}
res, err := minio.NewSelectResults(&resp, "testbucket")
if err != nil {
t.Error(err)
return
}
got, err := ioutil.ReadAll(res)
if err != nil {
t.Error(err)
return
}
gotS := strings.TrimSpace(string(got))
if !reflect.DeepEqual(gotS, testCase.wantResult) {
t.Errorf("received response does not match with expected reply. Query: %s\ngot: %s\nwant:%s", testCase.query, gotS, testCase.wantResult)
}
})
t.Run("simd-"+testCase.name, func(t *testing.T) {
if !simdjson.SupportedCPU() {
t.Skip("No CPU support")
}
testReq := testCase.requestXML
if len(testReq) == 0 {
var escaped bytes.Buffer
xml.EscapeText(&escaped, []byte(testCase.query))
testReq = []byte(fmt.Sprintf(defRequest, escaped.String()))
}
s3Select, err := NewS3Select(bytes.NewReader(testReq))
if err != nil {
t.Fatal(err)
}
if err = s3Select.Open(func(offset, length int64) (io.ReadCloser, error) {
in := input
if len(testCase.withJSON) > 0 {
in = testCase.withJSON
}
return ioutil.NopCloser(bytes.NewBufferString(in)), nil
}); err != nil {
t.Fatal(err)
}
w := &testResponseWriter{}
s3Select.Evaluate(w)
s3Select.Close()
resp := http.Response{
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(bytes.NewReader(w.response)),
ContentLength: int64(len(w.response)),
}
res, err := minio.NewSelectResults(&resp, "testbucket")
if err != nil {
t.Error(err)
return
}
got, err := ioutil.ReadAll(res)
if err != nil {
t.Error(err)
return
}
gotS := strings.TrimSpace(string(got))
if !reflect.DeepEqual(gotS, testCase.wantResult) {
t.Errorf("received response does not match with expected reply. Query: %s\ngot: %s\nwant:%s", testCase.query, gotS, testCase.wantResult)
}
})
}
}
func TestCSVQueries(t *testing.T) {
input := `index,ID,CaseNumber,Date,Day,Month,Year,Block,IUCR,PrimaryType,Description,LocationDescription,Arrest,Domestic,Beat,District,Ward,CommunityArea,FBI Code,XCoordinate,YCoordinate,UpdatedOn,Latitude,Longitude,Location
2700763,7732229,,2010-05-26 00:00:00,26,May,2010,113XX S HALSTED ST,1150,,CREDIT CARD FRAUD,,False,False,2233,22.0,34.0,,11,,,,41.688043288,-87.6422444,"(41.688043288, -87.6422444)"`
var testTable = []struct {
name string
query string
requestXML []byte
wantResult string
}{
{
name: "select-in-text-simple",
query: `SELECT index FROM s3Object s WHERE "Month"='May'`,
wantResult: `2700763`,
},
}
defRequest := `<?xml version="1.0" encoding="UTF-8"?>
<SelectObjectContentRequest>
<Expression>%s</Expression>
<ExpressionType>SQL</ExpressionType>
<InputSerialization>
<CompressionType>NONE</CompressionType>
<CSV>
<FieldDelimiter>,</FieldDelimiter>
<FileHeaderInfo>USE</FileHeaderInfo>
<QuoteCharacter>"</QuoteCharacter>
<QuoteEscapeCharacter>"</QuoteEscapeCharacter>
<RecordDelimiter>\n</RecordDelimiter>
</CSV>
</InputSerialization>
<OutputSerialization>
<CSV>
</CSV>
</OutputSerialization>
<RequestProgress>
<Enabled>FALSE</Enabled>
</RequestProgress>
</SelectObjectContentRequest>`
for _, testCase := range testTable {
t.Run(testCase.name, func(t *testing.T) {
testReq := testCase.requestXML
if len(testReq) == 0 {
testReq = []byte(fmt.Sprintf(defRequest, testCase.query))
}
s3Select, err := NewS3Select(bytes.NewReader(testReq))
if err != nil {
t.Fatal(err)
}
if err = s3Select.Open(func(offset, length int64) (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewBufferString(input)), nil
}); err != nil {
t.Fatal(err)
}
w := &testResponseWriter{}
s3Select.Evaluate(w)
s3Select.Close()
resp := http.Response{
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(bytes.NewReader(w.response)),
ContentLength: int64(len(w.response)),
}
res, err := minio.NewSelectResults(&resp, "testbucket")
if err != nil {
t.Error(err)
return
}
got, err := ioutil.ReadAll(res)
if err != nil {
t.Error(err)
return
}
gotS := strings.TrimSpace(string(got))
if !reflect.DeepEqual(gotS, testCase.wantResult) {
t.Errorf("received response does not match with expected reply. Query: %s\ngot: %s\nwant:%s", testCase.query, gotS, testCase.wantResult)
}
})
}
}
func TestCSVQueries2(t *testing.T) {
input := `id,time,num,num2,text
1,2010-01-01T,7867786,4565.908123,"a text, with comma"
2,2017-01-02T03:04Z,-5, 0.765111,
`
var testTable = []struct {
name string
query string
requestXML []byte // override request XML
wantResult string
}{
{
name: "select-all",
query: `SELECT * from s3object AS s WHERE id = '1'`,
wantResult: `{"id":"1","time":"2010-01-01T","num":"7867786","num2":"4565.908123","text":"a text, with comma"}`,
},
{
name: "select-all-2",
query: `SELECT * from s3object s WHERE id = 2`,
wantResult: `{"id":"2","time":"2017-01-02T03:04Z","num":"-5","num2":" 0.765111","text":""}`,
},
{
name: "select-text-convert",
query: `SELECT CAST(text AS STRING) AS text from s3object s WHERE id = 1`,
wantResult: `{"text":"a text, with comma"}`,
},
{
name: "select-text-direct",
query: `SELECT text from s3object s WHERE id = 1`,
wantResult: `{"text":"a text, with comma"}`,
},
{
name: "select-time-direct",
query: `SELECT time from s3object s WHERE id = 2`,
wantResult: `{"time":"2017-01-02T03:04Z"}`,
},
{
name: "select-int-direct",
query: `SELECT num from s3object s WHERE id = 2`,
wantResult: `{"num":"-5"}`,
},
{
name: "select-float-direct",
query: `SELECT num2 from s3object s WHERE id = 2`,
wantResult: `{"num2":" 0.765111"}`,
},
{
name: "select-in-array",
query: `select id from S3Object s WHERE id in [1,3]`,
wantResult: `{"id":"1"}`,
},
{
name: "select-in-array-matchnone",
query: `select id from S3Object s WHERE s.id in [4,3]`,
wantResult: ``,
},
{
name: "select-float-by-val",
query: `SELECT num2 from s3object s WHERE num2 = 0.765111`,
wantResult: `{"num2":" 0.765111"}`,
},
}
defRequest := `<?xml version="1.0" encoding="UTF-8"?>
<SelectObjectContentRequest>
<Expression>%s</Expression>
<ExpressionType>SQL</ExpressionType>
<InputSerialization>
<CompressionType>NONE</CompressionType>
<CSV>
<FileHeaderInfo>USE</FileHeaderInfo>
<QuoteCharacter>"</QuoteCharacter>
</CSV>
</InputSerialization>
<OutputSerialization>
<JSON>
</JSON>
</OutputSerialization>
<RequestProgress>
<Enabled>FALSE</Enabled>
</RequestProgress>
</SelectObjectContentRequest>`
for _, testCase := range testTable {
t.Run(testCase.name, func(t *testing.T) {
testReq := testCase.requestXML
if len(testReq) == 0 {
testReq = []byte(fmt.Sprintf(defRequest, testCase.query))
}
s3Select, err := NewS3Select(bytes.NewReader(testReq))
if err != nil {
t.Fatal(err)
}
if err = s3Select.Open(func(offset, length int64) (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewBufferString(input)), nil
}); err != nil {
t.Fatal(err)
}
w := &testResponseWriter{}
s3Select.Evaluate(w)
s3Select.Close()
resp := http.Response{
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(bytes.NewReader(w.response)),
ContentLength: int64(len(w.response)),
}
res, err := minio.NewSelectResults(&resp, "testbucket")
if err != nil {
t.Error(err)
return
}
got, err := ioutil.ReadAll(res)
if err != nil {
t.Error(err)
return
}
gotS := strings.TrimSpace(string(got))
if !reflect.DeepEqual(gotS, testCase.wantResult) {
t.Errorf("received response does not match with expected reply. Query: %s\ngot: %s\nwant:%s", testCase.query, gotS, testCase.wantResult)
}
})
}
}
func TestCSVInput(t *testing.T) {
var testTable = []struct {
requestXML []byte
expectedResult []byte
}{
{
[]byte(`
<?xml version="1.0" encoding="UTF-8"?>
<SelectObjectContentRequest>
<Expression>SELECT one, two, three from S3Object</Expression>
<ExpressionType>SQL</ExpressionType>
<InputSerialization>
<CompressionType>NONE</CompressionType>
<CSV>
<FileHeaderInfo>USE</FileHeaderInfo>
</CSV>
</InputSerialization>
<OutputSerialization>
<CSV>
</CSV>
</OutputSerialization>
<RequestProgress>
<Enabled>FALSE</Enabled>
</RequestProgress>
</SelectObjectContentRequest>
`), []byte{
0, 0, 0, 137, 0, 0, 0, 85, 194, 213, 168, 241, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 24, 97, 112, 112, 108, 105, 99, 97, 116, 105, 111, 110, 47, 111, 99, 116, 101, 116, 45, 115, 116, 114, 101, 97, 109, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 7, 82, 101, 99, 111, 114, 100, 115, 45, 49, 44, 102, 111, 111, 44, 116, 114, 117, 101, 10, 44, 98, 97, 114, 44, 102, 97, 108, 115, 101, 10, 50, 46, 53, 44, 98, 97, 122, 44, 116, 114, 117, 101, 10, 75, 182, 193, 80, 0, 0, 0, 235, 0, 0, 0, 67, 213, 243, 57, 141, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 8, 116, 101, 120, 116, 47, 120, 109, 108, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 5, 83, 116, 97, 116, 115, 60, 63, 120, 109, 108, 32, 118, 101, 114, 115, 105, 111, 110, 61, 34, 49, 46, 48, 34, 32, 101, 110, 99, 111, 100, 105, 110, 103, 61, 34, 85, 84, 70, 45, 56, 34, 63, 62, 60, 83, 116, 97, 116, 115, 62, 60, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 53, 48, 60, 47, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 60, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 53, 48, 60, 47, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 60, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 51, 54, 60, 47, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 60, 47, 83, 116, 97, 116, 115, 62, 253, 105, 8, 216, 0, 0, 0, 56, 0, 0, 0, 40, 193, 198, 132, 212, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 3, 69, 110, 100, 207, 151, 211, 146,
},
},
{
[]byte(`
<?xml version="1.0" encoding="UTF-8"?>
<SelectObjectContentRequest>
<Expression>SELECT COUNT(*) AS total_record_count from S3Object</Expression>
<ExpressionType>SQL</ExpressionType>
<InputSerialization>
<CompressionType>NONE</CompressionType>
<CSV>
<FileHeaderInfo>USE</FileHeaderInfo>
</CSV>
</InputSerialization>
<OutputSerialization>
<JSON>
</JSON>
</OutputSerialization>
<RequestProgress>
<Enabled>FALSE</Enabled>
</RequestProgress>
</SelectObjectContentRequest>
`), []byte{
0, 0, 0, 126, 0, 0, 0, 85, 56, 193, 36, 188, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 24, 97, 112, 112, 108, 105, 99, 97, 116, 105, 111, 110, 47, 111, 99, 116, 101, 116, 45, 115, 116, 114, 101, 97, 109, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 7, 82, 101, 99, 111, 114, 100, 115, 123, 34, 116, 111, 116, 97, 108, 95, 114, 101, 99, 111, 114, 100, 95, 99, 111, 117, 110, 116, 34, 58, 51, 125, 10, 196, 183, 134, 242, 0, 0, 0, 235, 0, 0, 0, 67, 213, 243, 57, 141, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 8, 116, 101, 120, 116, 47, 120, 109, 108, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 5, 83, 116, 97, 116, 115, 60, 63, 120, 109, 108, 32, 118, 101, 114, 115, 105, 111, 110, 61, 34, 49, 46, 48, 34, 32, 101, 110, 99, 111, 100, 105, 110, 103, 61, 34, 85, 84, 70, 45, 56, 34, 63, 62, 60, 83, 116, 97, 116, 115, 62, 60, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 53, 48, 60, 47, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 60, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 53, 48, 60, 47, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 60, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 50, 53, 60, 47, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 60, 47, 83, 116, 97, 116, 115, 62, 47, 153, 24, 28, 0, 0, 0, 56, 0, 0, 0, 40, 193, 198, 132, 212, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 3, 69, 110, 100, 207, 151, 211, 146,
},
},
{
[]byte(`
<?xml version="1.0" encoding="UTF-8"?>
<SelectObjectContentRequest>
<Expression>SELECT * from S3Object</Expression>
<ExpressionType>SQL</ExpressionType>
<InputSerialization>
<CompressionType>NONE</CompressionType>
<CSV>
<FileHeaderInfo>USE</FileHeaderInfo>
</CSV>
</InputSerialization>
<OutputSerialization>
<JSON>
</JSON>
</OutputSerialization>
<RequestProgress>
<Enabled>FALSE</Enabled>
</RequestProgress>
</SelectObjectContentRequest>
`), []byte{0x0, 0x0, 0x0, 0xdd, 0x0, 0x0, 0x0, 0x55, 0xf, 0x46, 0xc1, 0xfa, 0xd, 0x3a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x5, 0x65, 0x76, 0x65, 0x6e, 0x74, 0xd, 0x3a, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x18, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f, 0x63, 0x74, 0x65, 0x74, 0x2d, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0xb, 0x3a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x7, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x7b, 0x22, 0x6f, 0x6e, 0x65, 0x22, 0x3a, 0x22, 0x2d, 0x31, 0x22, 0x2c, 0x22, 0x74, 0x77, 0x6f, 0x22, 0x3a, 0x22, 0x66, 0x6f, 0x6f, 0x22, 0x2c, 0x22, 0x74, 0x68, 0x72, 0x65, 0x65, 0x22, 0x3a, 0x22, 0x74, 0x72, 0x75, 0x65, 0x22, 0x7d, 0xa, 0x7b, 0x22, 0x6f, 0x6e, 0x65, 0x22, 0x3a, 0x22, 0x22, 0x2c, 0x22, 0x74, 0x77, 0x6f, 0x22, 0x3a, 0x22, 0x62, 0x61, 0x72, 0x22, 0x2c, 0x22, 0x74, 0x68, 0x72, 0x65, 0x65, 0x22, 0x3a, 0x22, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x22, 0x7d, 0xa, 0x7b, 0x22, 0x6f, 0x6e, 0x65, 0x22, 0x3a, 0x22, 0x32, 0x2e, 0x35, 0x22, 0x2c, 0x22, 0x74, 0x77, 0x6f, 0x22, 0x3a, 0x22, 0x62, 0x61, 0x7a, 0x22, 0x2c, 0x22, 0x74, 0x68, 0x72, 0x65, 0x65, 0x22, 0x3a, 0x22, 0x74, 0x72, 0x75, 0x65, 0x22, 0x7d, 0xa, 0x7e, 0xb5, 0x99, 0xfb, 0x0, 0x0, 0x0, 0xec, 0x0, 0x0, 0x0, 0x43, 0x67, 0xd3, 0xe5, 0x9d, 0xd, 0x3a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x5, 0x65, 0x76, 0x65, 0x6e, 0x74, 0xd, 0x3a, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x8, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x78, 0x6d, 0x6c, 0xb, 0x3a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x5, 0x53, 0x74, 0x61, 0x74, 0x73, 0x3c, 0x3f, 0x78, 0x6d, 0x6c, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3d, 0x22, 0x31, 0x2e, 0x30, 0x22, 0x20, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x3d, 0x22, 0x55, 0x54, 0x46, 0x2d, 0x38, 0x22, 0x3f, 0x3e, 0x3c, 0x53, 0x74, 0x61, 0x74, 0x73, 0x3e, 0x3c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x3e, 0x35, 0x30, 0x3c, 0x2f, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x3e, 0x3c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x3e, 0x35, 0x30, 0x3c, 0x2f, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x3e, 0x3c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x65, 0x64, 0x3e, 0x31, 0x32, 0x30, 0x3c, 0x2f, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x65, 0x64, 0x3e, 0x3c, 0x2f, 0x53, 0x74, 0x61, 0x74, 0x73, 0x3e, 0x5a, 0xe5, 0xd, 0x84, 0x0, 0x0, 0x0, 0x38, 0x0, 0x0, 0x0, 0x28, 0xc1, 0xc6, 0x84, 0xd4, 0xd, 0x3a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x5, 0x65, 0x76, 0x65, 0x6e, 0x74, 0xb, 0x3a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x3, 0x45, 0x6e, 0x64, 0xcf, 0x97, 0xd3, 0x92},
},
{
[]byte(`
<?xml version="1.0" encoding="UTF-8"?>
<SelectObjectContentRequest>
<Expression>SELECT one from S3Object limit 1</Expression>
<ExpressionType>SQL</ExpressionType>
<InputSerialization>
<CompressionType>NONE</CompressionType>
<CSV>
<FileHeaderInfo>USE</FileHeaderInfo>
</CSV>
</InputSerialization>
<OutputSerialization>
<CSV>
</CSV>
</OutputSerialization>
<RequestProgress>
<Enabled>FALSE</Enabled>
</RequestProgress>
</SelectObjectContentRequest>
`), []byte{
0x0, 0x0, 0x0, 0x68, 0x0, 0x0, 0x0, 0x55, 0xd7, 0x61, 0x46, 0x9e, 0xd, 0x3a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x5, 0x65, 0x76, 0x65, 0x6e, 0x74, 0xd, 0x3a, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x18, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f, 0x63, 0x74, 0x65, 0x74, 0x2d, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0xb, 0x3a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x7, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x2d, 0x31, 0xa, 0x17, 0xfb, 0x1, 0x90, 0x0, 0x0, 0x0, 0xea, 0x0, 0x0, 0x0, 0x43, 0xe8, 0x93, 0x10, 0x3d, 0xd, 0x3a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x5, 0x65, 0x76, 0x65, 0x6e, 0x74, 0xd, 0x3a, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x8, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x78, 0x6d, 0x6c, 0xb, 0x3a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x5, 0x53, 0x74, 0x61, 0x74, 0x73, 0x3c, 0x3f, 0x78, 0x6d, 0x6c, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3d, 0x22, 0x31, 0x2e, 0x30, 0x22, 0x20, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x3d, 0x22, 0x55, 0x54, 0x46, 0x2d, 0x38, 0x22, 0x3f, 0x3e, 0x3c, 0x53, 0x74, 0x61, 0x74, 0x73, 0x3e, 0x3c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x3e, 0x35, 0x30, 0x3c, 0x2f, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x3e, 0x3c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x3e, 0x35, 0x30, 0x3c, 0x2f, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x3e, 0x3c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x65, 0x64, 0x3e, 0x33, 0x3c, 0x2f, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x65, 0x64, 0x3e, 0x3c, 0x2f, 0x53, 0x74, 0x61, 0x74, 0x73, 0x3e, 0x15, 0x72, 0x19, 0x94, 0x0, 0x0, 0x0, 0x38, 0x0, 0x0, 0x0, 0x28, 0xc1, 0xc6, 0x84, 0xd4, 0xd, 0x3a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x5, 0x65, 0x76, 0x65, 0x6e, 0x74, 0xb, 0x3a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x3, 0x45, 0x6e, 0x64, 0xcf, 0x97, 0xd3, 0x92,
},
},
}
var csvData = []byte(`one,two,three
-1,foo,true
,bar,false
2.5,baz,true
`)
for i, testCase := range testTable {
t.Run(fmt.Sprint(i), func(t *testing.T) {
s3Select, err := NewS3Select(bytes.NewReader(testCase.requestXML))
if err != nil {
t.Fatal(err)
}
if err = s3Select.Open(func(offset, length int64) (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(csvData)), nil
}); err != nil {
t.Fatal(err)
}
w := &testResponseWriter{}
s3Select.Evaluate(w)
s3Select.Close()
if !reflect.DeepEqual(w.response, testCase.expectedResult) {
resp := http.Response{
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(bytes.NewReader(w.response)),
ContentLength: int64(len(w.response)),
}
res, err := minio.NewSelectResults(&resp, "testbucket")
if err != nil {
t.Error(err)
return
}
got, err := ioutil.ReadAll(res)
if err != nil {
t.Error(err)
return
}
t.Errorf("received response does not match with expected reply\ngot: %#v\nwant:%#v\ndecoded:%s", w.response, testCase.expectedResult, string(got))
}
})
}
}
func TestJSONInput(t *testing.T) {
var testTable = []struct {
requestXML []byte
expectedResult []byte
}{
{
[]byte(`
<?xml version="1.0" encoding="UTF-8"?>
<SelectObjectContentRequest>
<Expression>SELECT one, two, three from S3Object</Expression>
<ExpressionType>SQL</ExpressionType>
<InputSerialization>
<CompressionType>NONE</CompressionType>
<JSON>
<Type>DOCUMENT</Type>
</JSON>
</InputSerialization>
<OutputSerialization>
<CSV>
</CSV>
</OutputSerialization>
<RequestProgress>
<Enabled>FALSE</Enabled>
</RequestProgress>
</SelectObjectContentRequest>
`), []byte{
0, 0, 0, 137, 0, 0, 0, 85, 194, 213, 168, 241, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 24, 97, 112, 112, 108, 105, 99, 97, 116, 105, 111, 110, 47, 111, 99, 116, 101, 116, 45, 115, 116, 114, 101, 97, 109, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 7, 82, 101, 99, 111, 114, 100, 115, 45, 49, 44, 102, 111, 111, 44, 116, 114, 117, 101, 10, 44, 98, 97, 114, 44, 102, 97, 108, 115, 101, 10, 50, 46, 53, 44, 98, 97, 122, 44, 116, 114, 117, 101, 10, 75, 182, 193, 80, 0, 0, 0, 237, 0, 0, 0, 67, 90, 179, 204, 45, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 8, 116, 101, 120, 116, 47, 120, 109, 108, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 5, 83, 116, 97, 116, 115, 60, 63, 120, 109, 108, 32, 118, 101, 114, 115, 105, 111, 110, 61, 34, 49, 46, 48, 34, 32, 101, 110, 99, 111, 100, 105, 110, 103, 61, 34, 85, 84, 70, 45, 56, 34, 63, 62, 60, 83, 116, 97, 116, 115, 62, 60, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 49, 49, 50, 60, 47, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 60, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 49, 49, 50, 60, 47, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 60, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 51, 54, 60, 47, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 60, 47, 83, 116, 97, 116, 115, 62, 181, 40, 50, 250, 0, 0, 0, 56, 0, 0, 0, 40, 193, 198, 132, 212, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 3, 69, 110, 100, 207, 151, 211, 146,
},
},
{
[]byte(`
<?xml version="1.0" encoding="UTF-8"?>
<SelectObjectContentRequest>
<Expression>SELECT COUNT(*) AS total_record_count from S3Object</Expression>
<ExpressionType>SQL</ExpressionType>
<InputSerialization>
<CompressionType>NONE</CompressionType>
<JSON>
<Type>DOCUMENT</Type>
</JSON>
</InputSerialization>
<OutputSerialization>
<CSV>
</CSV>
</OutputSerialization>
<RequestProgress>
<Enabled>FALSE</Enabled>
</RequestProgress>
</SelectObjectContentRequest>
`), []byte{
0, 0, 0, 103, 0, 0, 0, 85, 85, 49, 209, 79, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 24, 97, 112, 112, 108, 105, 99, 97, 116, 105, 111, 110, 47, 111, 99, 116, 101, 116, 45, 115, 116, 114, 101, 97, 109, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 7, 82, 101, 99, 111, 114, 100, 115, 51, 10, 175, 58, 213, 152, 0, 0, 0, 236, 0, 0, 0, 67, 103, 211, 229, 157, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 8, 116, 101, 120, 116, 47, 120, 109, 108, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 5, 83, 116, 97, 116, 115, 60, 63, 120, 109, 108, 32, 118, 101, 114, 115, 105, 111, 110, 61, 34, 49, 46, 48, 34, 32, 101, 110, 99, 111, 100, 105, 110, 103, 61, 34, 85, 84, 70, 45, 56, 34, 63, 62, 60, 83, 116, 97, 116, 115, 62, 60, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 49, 49, 50, 60, 47, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 60, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 49, 49, 50, 60, 47, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 60, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 50, 60, 47, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 60, 47, 83, 116, 97, 116, 115, 62, 52, 192, 77, 114, 0, 0, 0, 56, 0, 0, 0, 40, 193, 198, 132, 212, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 3, 69, 110, 100, 207, 151, 211, 146,
},
},
{
[]byte(`
<?xml version="1.0" encoding="UTF-8"?>
<SelectObjectContentRequest>
<Expression>SELECT * from S3Object</Expression>
<ExpressionType>SQL</ExpressionType>
<InputSerialization>
<CompressionType>NONE</CompressionType>
<JSON>
<Type>DOCUMENT</Type>
</JSON>
</InputSerialization>
<OutputSerialization>
<CSV>
</CSV>
</OutputSerialization>
<RequestProgress>
<Enabled>FALSE</Enabled>
</RequestProgress>
</SelectObjectContentRequest>
`), []byte{0x0, 0x0, 0x0, 0x89, 0x0, 0x0, 0x0, 0x55, 0xc2, 0xd5, 0xa8, 0xf1, 0xd, 0x3a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x5, 0x65, 0x76, 0x65, 0x6e, 0x74, 0xd, 0x3a, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x18, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f, 0x63, 0x74, 0x65, 0x74, 0x2d, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0xb, 0x3a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x7, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x74, 0x72, 0x75, 0x65, 0x2c, 0x66, 0x6f, 0x6f, 0x2c, 0x2d, 0x31, 0xa, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x2c, 0x62, 0x61, 0x72, 0x2c, 0xa, 0x74, 0x72, 0x75, 0x65, 0x2c, 0x62, 0x61, 0x7a, 0x2c, 0x32, 0x2e, 0x35, 0xa, 0xef, 0x22, 0x13, 0xa3, 0x0, 0x0, 0x0, 0xed, 0x0, 0x0, 0x0, 0x43, 0x5a, 0xb3, 0xcc, 0x2d, 0xd, 0x3a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x5, 0x65, 0x76, 0x65, 0x6e, 0x74, 0xd, 0x3a, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x8, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x78, 0x6d, 0x6c, 0xb, 0x3a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x5, 0x53, 0x74, 0x61, 0x74, 0x73, 0x3c, 0x3f, 0x78, 0x6d, 0x6c, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3d, 0x22, 0x31, 0x2e, 0x30, 0x22, 0x20, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x3d, 0x22, 0x55, 0x54, 0x46, 0x2d, 0x38, 0x22, 0x3f, 0x3e, 0x3c, 0x53, 0x74, 0x61, 0x74, 0x73, 0x3e, 0x3c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x3e, 0x31, 0x31, 0x32, 0x3c, 0x2f, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x3e, 0x3c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x3e, 0x31, 0x31, 0x32, 0x3c, 0x2f, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x3e, 0x3c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x65, 0x64, 0x3e, 0x33, 0x36, 0x3c, 0x2f, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x65, 0x64, 0x3e, 0x3c, 0x2f, 0x53, 0x74, 0x61, 0x74, 0x73, 0x3e, 0xb5, 0x28, 0x32, 0xfa, 0x0, 0x0, 0x0, 0x38, 0x0, 0x0, 0x0, 0x28, 0xc1, 0xc6, 0x84, 0xd4, 0xd, 0x3a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x5, 0x65, 0x76, 0x65, 0x6e, 0x74, 0xb, 0x3a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x3, 0x45, 0x6e, 0x64, 0xcf, 0x97, 0xd3, 0x92},
},
}
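	// jsonData is the NDJSON document that every request in testTable is
	// evaluated against; each record lists its keys as three, two, one.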
var jsonData = []byte(`{"three":true,"two":"foo","one":-1}
{"three":false,"two":"bar","one":null}
{"three":true,"two":"baz","one":2.5}
`)
for i, testCase := range testTable {
t.Run(fmt.Sprint(i), func(t *testing.T) {
s3Select, err := NewS3Select(bytes.NewReader(testCase.requestXML))
if err != nil {
t.Fatal(err)
}
if err = s3Select.Open(func(offset, length int64) (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(jsonData)), nil
}); err != nil {
t.Fatal(err)
}
w := &testResponseWriter{}
s3Select.Evaluate(w)
s3Select.Close()
if !reflect.DeepEqual(w.response, testCase.expectedResult) {
resp := http.Response{
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(bytes.NewReader(w.response)),
ContentLength: int64(len(w.response)),
}
res, err := minio.NewSelectResults(&resp, "testbucket")
if err != nil {
t.Error(err)
return
}
got, err := ioutil.ReadAll(res)
if err != nil {
t.Error(err)
return
}
t.Errorf("received response does not match with expected reply\ngot: %#v\nwant:%#v\ndecoded:%s", w.response, testCase.expectedResult, string(got))
}
})
}
}
func TestParquetInput(t *testing.T) {
os.Setenv("MINIO_API_SELECT_PARQUET", "on")
defer os.Setenv("MINIO_API_SELECT_PARQUET", "off")
var testTable = []struct {
requestXML []byte
expectedResult []byte
}{
{
[]byte(`
<?xml version="1.0" encoding="UTF-8"?>
<SelectObjectContentRequest>
<Expression>SELECT one, two, three from S3Object</Expression>
<ExpressionType>SQL</ExpressionType>
<InputSerialization>
<CompressionType>NONE</CompressionType>
<Parquet>
</Parquet>
</InputSerialization>
<OutputSerialization>
<CSV>
</CSV>
</OutputSerialization>
<RequestProgress>
<Enabled>FALSE</Enabled>
</RequestProgress>
</SelectObjectContentRequest>
`), []byte{
0, 0, 0, 137, 0, 0, 0, 85, 194, 213, 168, 241, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 24, 97, 112, 112, 108, 105, 99, 97, 116, 105, 111, 110, 47, 111, 99, 116, 101, 116, 45, 115, 116, 114, 101, 97, 109, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 7, 82, 101, 99, 111, 114, 100, 115, 45, 49, 44, 102, 111, 111, 44, 116, 114, 117, 101, 10, 44, 98, 97, 114, 44, 102, 97, 108, 115, 101, 10, 50, 46, 53, 44, 98, 97, 122, 44, 116, 114, 117, 101, 10, 75, 182, 193, 80, 0, 0, 0, 235, 0, 0, 0, 67, 213, 243, 57, 141, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 8, 116, 101, 120, 116, 47, 120, 109, 108, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 5, 83, 116, 97, 116, 115, 60, 63, 120, 109, 108, 32, 118, 101, 114, 115, 105, 111, 110, 61, 34, 49, 46, 48, 34, 32, 101, 110, 99, 111, 100, 105, 110, 103, 61, 34, 85, 84, 70, 45, 56, 34, 63, 62, 60, 83, 116, 97, 116, 115, 62, 60, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 45, 49, 60, 47, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 60, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 45, 49, 60, 47, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 60, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 51, 54, 60, 47, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 60, 47, 83, 116, 97, 116, 115, 62, 128, 96, 253, 66, 0, 0, 0, 56, 0, 0, 0, 40, 193, 198, 132, 212, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 3, 69, 110, 100, 207, 151, 211, 146,
},
},
{
[]byte(`
<?xml version="1.0" encoding="UTF-8"?>
<SelectObjectContentRequest>
<Expression>SELECT COUNT(*) AS total_record_count from S3Object</Expression>
<ExpressionType>SQL</ExpressionType>
<InputSerialization>
<CompressionType>NONE</CompressionType>
<Parquet>
</Parquet>
</InputSerialization>
<OutputSerialization>
<CSV>
</CSV>
</OutputSerialization>
<RequestProgress>
<Enabled>FALSE</Enabled>
</RequestProgress>
</SelectObjectContentRequest>
`), []byte{
0, 0, 0, 103, 0, 0, 0, 85, 85, 49, 209, 79, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 24, 97, 112, 112, 108, 105, 99, 97, 116, 105, 111, 110, 47, 111, 99, 116, 101, 116, 45, 115, 116, 114, 101, 97, 109, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 7, 82, 101, 99, 111, 114, 100, 115, 51, 10, 175, 58, 213, 152, 0, 0, 0, 234, 0, 0, 0, 67, 232, 147, 16, 61, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 8, 116, 101, 120, 116, 47, 120, 109, 108, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 5, 83, 116, 97, 116, 115, 60, 63, 120, 109, 108, 32, 118, 101, 114, 115, 105, 111, 110, 61, 34, 49, 46, 48, 34, 32, 101, 110, 99, 111, 100, 105, 110, 103, 61, 34, 85, 84, 70, 45, 56, 34, 63, 62, 60, 83, 116, 97, 116, 115, 62, 60, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 45, 49, 60, 47, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 60, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 45, 49, 60, 47, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 60, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 50, 60, 47, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 60, 47, 83, 116, 97, 116, 115, 62, 190, 146, 162, 21, 0, 0, 0, 56, 0, 0, 0, 40, 193, 198, 132, 212, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 3, 69, 110, 100, 207, 151, 211, 146,
},
},
}
for i, testCase := range testTable {
t.Run(fmt.Sprint(i), func(t *testing.T) {
getReader := func(offset int64, length int64) (io.ReadCloser, error) {
testdataFile := "testdata.parquet"
file, err := os.Open(testdataFile)
if err != nil {
return nil, err
}
fi, err := file.Stat()
if err != nil {
return nil, err
}
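				// A negative offset is interpreted as relative to the end of the file,
				// which lets the reader fetch the parquet footer (stored at the end of
				// the file) without knowing the object size up front.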
if offset < 0 {
offset = fi.Size() + offset
}
if _, err = file.Seek(offset, io.SeekStart); err != nil {
return nil, err
}
return file, nil
}
s3Select, err := NewS3Select(bytes.NewReader(testCase.requestXML))
if err != nil {
t.Fatal(err)
}
if err = s3Select.Open(getReader); err != nil {
t.Fatal(err)
}
w := &testResponseWriter{}
s3Select.Evaluate(w)
s3Select.Close()
if !reflect.DeepEqual(w.response, testCase.expectedResult) {
resp := http.Response{
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(bytes.NewReader(w.response)),
ContentLength: int64(len(w.response)),
}
res, err := minio.NewSelectResults(&resp, "testbucket")
if err != nil {
t.Error(err)
return
}
got, err := ioutil.ReadAll(res)
if err != nil {
t.Error(err)
return
}
t.Errorf("received response does not match with expected reply\ngot: %#v\nwant:%#v\ndecoded:%s", w.response, testCase.expectedResult, string(got))
}
})
}
}
| pkg/s3select/select_test.go | 0 | https://github.com/minio/minio/commit/4593b146bec40cc062fe921f2d47ca4c0ab98b9a | [
0.002058310667052865,
0.00020104885334149003,
0.0001621696283109486,
0.00017139935516752303,
0.00020144438894931227
] |
{
"id": 0,
"code_window": [
"\tfor i := range aggCols {\n",
"\t\taggCols[i] = uint32(numGroupCol + i)\n",
"\t}\n",
"\ttc := aggregatorTestCase{\n",
"\t\ttyps: typs,\n",
"\t\tgroupCols: groupCols,\n",
"\t\taggCols: [][]uint32{aggCols},\n",
"\t\taggFns: []execinfrapb.AggregatorSpec_Func{aggFn},\n",
"\t}\n",
"\tif distinctProb > 0 {\n",
"\t\tif !typs[0].Identical(types.Int) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\ttyps: typs,\n",
"\t\tgroupCols: groupCols,\n",
"\t\taggCols: [][]uint32{aggCols},\n",
"\t\taggFns: []execinfrapb.AggregatorSpec_Func{aggFn},\n",
"\t\tunorderedInput: agg.order == unordered,\n"
],
"file_path": "pkg/sql/colexec/aggregators_test.go",
"type": "replace",
"edit_start_line_idx": 1000
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colexec
import (
"context"
"fmt"
"math"
"testing"
"github.com/cockroachdb/apd/v2"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/col/coldatatestutils"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecagg"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexectestutils"
"github.com/cockroachdb/cockroach/pkg/sql/colexecerror"
"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/util/duration"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
"github.com/cockroachdb/cockroach/pkg/util/timeofday"
"github.com/stretchr/testify/require"
)
type aggregatorTestCase struct {
name string
typs []*types.T
input colexectestutils.Tuples
groupCols []uint32
aggCols [][]uint32
aggFns []execinfrapb.AggregatorSpec_Func
expected colexectestutils.Tuples
constArguments [][]execinfrapb.Expression
// spec will be populated during init().
spec *execinfrapb.AggregatorSpec
aggDistinct []bool
aggFilter []int
unorderedInput bool
orderedCols []uint32
// convToDecimal will convert any float64s to apd.Decimals. If a string is
// encountered, a best effort is made to convert that string to an
// apd.Decimal.
convToDecimal bool
}
type ordering int64
const (
ordered ordering = iota
partial
unordered
)
// aggType is a helper struct that allows tests to test both the ordered and
// hash aggregators at the same time.
type aggType struct {
new func(*colexecagg.NewAggregatorArgs) (colexecop.ResettableOperator, error)
name string
order ordering
}
var aggTypesWithPartial = []aggType{
{
// This is a wrapper around NewHashAggregator so its signature is
// compatible with NewOrderedAggregator.
new: func(args *colexecagg.NewAggregatorArgs) (colexecop.ResettableOperator, error) {
return NewHashAggregator(args, nil /* newSpillingQueueArgs */, testAllocator, math.MaxInt64)
},
name: "hash",
order: unordered,
},
{
new: NewOrderedAggregator,
name: "ordered",
order: ordered,
},
{
// This is a wrapper around NewHashAggregator so its signature is
// compatible with NewOrderedAggregator.
new: func(args *colexecagg.NewAggregatorArgs) (colexecop.ResettableOperator, error) {
return NewHashAggregator(args, nil /* newSpillingQueueArgs */, testAllocator, math.MaxInt64)
},
name: "hash-partial-order",
order: partial,
},
}
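// aggTypes is the subset of aggregator variants exercised by TestAggregators
// and the benchmarks below; TestAggregatorRandom additionally exercises the
// partial-order variant via aggTypesWithPartial.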
var aggTypes = aggTypesWithPartial[:1]
func (tc *aggregatorTestCase) init() error {
if tc.convToDecimal {
for _, tuples := range []colexectestutils.Tuples{tc.input, tc.expected} {
for _, tuple := range tuples {
for i, e := range tuple {
switch v := e.(type) {
case float64:
d := &apd.Decimal{}
d, err := d.SetFloat64(v)
if err != nil {
return err
}
tuple[i] = *d
case string:
d := &apd.Decimal{}
d, _, err := d.SetString(v)
if err != nil {
// If there was an error converting the string to decimal, just
// leave the datum as is.
continue
}
tuple[i] = *d
}
}
}
}
}
aggregations := make([]execinfrapb.AggregatorSpec_Aggregation, len(tc.aggFns))
for i, aggFn := range tc.aggFns {
aggregations[i].Func = aggFn
aggregations[i].ColIdx = tc.aggCols[i]
if tc.constArguments != nil {
aggregations[i].Arguments = tc.constArguments[i]
}
if tc.aggDistinct != nil {
aggregations[i].Distinct = tc.aggDistinct[i]
}
if tc.aggFilter != nil && tc.aggFilter[i] != tree.NoColumnIdx {
filterColIdx := uint32(tc.aggFilter[i])
aggregations[i].FilterColIdx = &filterColIdx
}
}
tc.spec = &execinfrapb.AggregatorSpec{
GroupCols: tc.groupCols,
Aggregations: aggregations,
}
if !tc.unorderedInput {
var outputOrderCols []uint32
if len(tc.orderedCols) == 0 {
outputOrderCols = tc.spec.GroupCols
} else {
outputOrderCols = tc.orderedCols
tc.spec.OrderedGroupCols = tc.orderedCols
}
// If input grouping columns have an ordering, then we'll require the
// output to also have the same ordering.
outputOrdering := execinfrapb.Ordering{Columns: make([]execinfrapb.Ordering_Column, len(outputOrderCols))}
for i, col := range outputOrderCols {
outputOrdering.Columns[i].ColIdx = col
}
tc.spec.OutputOrdering = outputOrdering
}
return nil
}
var aggregatorsTestCases = []aggregatorTestCase{
{
name: "OneTuple",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{0, 1},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 1},
},
},
{
name: "OneGroup",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{0, 1},
{0, 1},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 2},
},
},
{
name: "MultiGroup",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{0, 1},
{0, 0},
{0, 1},
{1, 4},
{2, 5},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 2},
{1, 4},
{2, 5},
},
},
{
name: "CarryBetweenInputBatches",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{0, 1},
{0, 2},
{0, 3},
{1, 4},
{1, 5},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 6},
{1, 9},
},
},
{
name: "CarryBetweenOutputBatches",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{0, 1},
{0, 2},
{0, 3},
{0, 4},
{1, 5},
{2, 6},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 10},
{1, 5},
{2, 6},
},
},
{
name: "CarryBetweenInputAndOutputBatches",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{0, 1},
{0, 1},
{1, 2},
{2, 3},
{2, 3},
{3, 4},
{3, 4},
{4, 5},
{5, 6},
{6, 7},
{7, 8},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 2},
{1, 2},
{2, 6},
{3, 8},
{4, 5},
{5, 6},
{6, 7},
{7, 8},
},
},
{
name: "NoGroupingCols",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{0, 1},
{0, 2},
{0, 3},
{0, 4},
},
groupCols: []uint32{},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 10},
},
},
{
name: "UnorderedWithNullsInGroupingCol",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{nil, 1},
{4, 42},
{nil, 2},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{nil, 3},
{4, 42},
},
unorderedInput: true,
},
{
name: "CountRows",
typs: types.OneIntCol,
input: colexectestutils.Tuples{
{1},
{2},
{1},
{nil},
{3},
{1},
{3},
{4},
{1},
{nil},
{2},
{4},
{2},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.CountRows,
},
expected: colexectestutils.Tuples{
{nil, 2},
{1, 4},
{2, 3},
{3, 2},
{4, 2},
},
unorderedInput: true,
},
{
name: "OutputOrder",
typs: types.ThreeIntCols,
input: colexectestutils.Tuples{
{0, 1, 2},
{0, 1, 2},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {2}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 4, 2},
},
},
{
name: "SumMultiType",
typs: []*types.T{types.Int, types.Int, types.Decimal},
input: colexectestutils.Tuples{
{0, 1, 1.3},
{0, 1, 1.6},
{0, 1, 0.5},
{1, 1, 1.2},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {2}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.Sum,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 3.4, 3},
{1, 1.2, 1},
},
convToDecimal: true,
},
{
name: "AvgSumSingleInputBatch",
typs: []*types.T{types.Int, types.Decimal},
input: colexectestutils.Tuples{
{0, 1.1},
{0, 1.2},
{0, 2.3},
{1, 6.21},
{1, 2.43},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.Avg,
execinfrapb.Sum,
},
expected: colexectestutils.Tuples{
{0, "1.5333333333333333333", 4.6},
{1, 4.32, 8.64},
},
convToDecimal: true,
},
{
name: "BoolAndOrBatch",
typs: []*types.T{types.Int, types.Bool},
input: colexectestutils.Tuples{
{0, true},
{1, false},
{2, true},
{2, false},
{3, true},
{3, true},
{4, false},
{4, false},
{5, false},
{5, nil},
{6, nil},
{6, true},
{7, nil},
{7, false},
{7, true},
{8, nil},
{8, nil},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.BoolAnd,
execinfrapb.BoolOr,
},
expected: colexectestutils.Tuples{
{0, true, true},
{1, false, false},
{2, false, true},
{3, true, true},
{4, false, false},
{5, false, false},
{6, true, true},
{7, false, true},
{8, nil, nil},
},
},
{
name: "MultiGroupColsWithPointerTypes",
typs: []*types.T{types.Int, types.Decimal, types.Bytes, types.Decimal},
input: colexectestutils.Tuples{
{2, 1.0, "1.0", 2.0},
{2, 1.0, "1.0", 4.0},
{2, 2.0, "2.0", 6.0},
},
groupCols: []uint32{0, 1, 2},
aggCols: [][]uint32{{0}, {1}, {2}, {3}, {3}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.AnyNotNull,
execinfrapb.AnyNotNull,
execinfrapb.Min,
execinfrapb.Sum,
},
expected: colexectestutils.Tuples{
{2, 1.0, "1.0", 2.0, 6.0},
{2, 2.0, "2.0", 6.0, 6.0},
},
},
{
name: "GroupOnTimeTZColumns",
typs: []*types.T{types.TimeTZ, types.Int},
input: colexectestutils.Tuples{
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 0), -1},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 1), 1},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 1), 2},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 2), 10},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 2), 11},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 3), 100},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 3), 101},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 4), 102},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 0), -1},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 1), 3},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 2), 21},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 3), 201},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 4), 102},
},
},
{
name: "AVG on all types",
typs: []*types.T{types.Int, types.Int2, types.Int4, types.Int, types.Decimal, types.Float, types.Interval},
input: colexectestutils.Tuples{
{0, nil, 1, 1, 1.0, 1.0, duration.MakeDuration(1, 1, 1)},
{0, 1, nil, 2, 2.0, 2.0, duration.MakeDuration(2, 2, 2)},
{0, 2, 2, nil, 3.0, 3.0, duration.MakeDuration(3, 3, 3)},
{0, 3, 3, 3, nil, 4.0, duration.MakeDuration(4, 4, 4)},
{0, 4, 4, 4, 4.0, nil, duration.MakeDuration(5, 5, 5)},
{0, 5, 5, 5, 5.0, 5.0, nil},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}, {2}, {3}, {4}, {5}, {6}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.Avg,
execinfrapb.Avg,
execinfrapb.Avg,
execinfrapb.Avg,
execinfrapb.Avg,
execinfrapb.Avg,
},
expected: colexectestutils.Tuples{
{0, 3.0, 3.0, 3.0, 3.0, 3.0, duration.MakeDuration(3, 3, 3)},
},
},
{
name: "ConcatAgg",
typs: []*types.T{types.Int, types.Bytes},
input: colexectestutils.Tuples{
{1, "1"},
{1, "2"},
{1, "3"},
{2, nil},
{2, "1"},
{2, "2"},
{3, "1"},
{3, nil},
{3, "2"},
{4, nil},
{4, nil},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.ConcatAgg,
},
expected: colexectestutils.Tuples{
{1, "123"},
{2, "12"},
{3, "12"},
{4, nil},
},
},
{
name: "All",
typs: []*types.T{types.Int, types.Decimal, types.Int, types.Bool, types.Bytes},
input: colexectestutils.Tuples{
{0, 3.1, 2, true, "zero"},
{0, 1.1, 3, false, "zero"},
{1, 1.1, 1, false, "one"},
{1, 4.1, 0, false, "one"},
{2, 1.1, 1, true, "two"},
{3, 4.1, 0, false, "three"},
{3, 5.1, 0, true, "three"},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {}, {1}, {1}, {1}, {2}, {2}, {2}, {3}, {3}, {4}, {4}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.CountRows,
execinfrapb.Avg,
execinfrapb.Count,
execinfrapb.Sum,
execinfrapb.SumInt,
execinfrapb.Min,
execinfrapb.Max,
execinfrapb.BoolAnd,
execinfrapb.BoolOr,
execinfrapb.AnyNotNull,
execinfrapb.ConcatAgg,
},
expected: colexectestutils.Tuples{
{0, 2, 2.1, 2, 4.2, 5, 2, 3, false, true, "zero", "zerozero"},
{1, 2, 2.6, 2, 5.2, 1, 0, 1, false, false, "one", "oneone"},
{2, 1, 1.1, 1, 1.1, 1, 1, 1, true, true, "two", "two"},
{3, 2, 4.6, 2, 9.2, 0, 0, 0, false, true, "three", "threethree"},
},
convToDecimal: true,
},
{
name: "NullHandling",
typs: []*types.T{types.Int, types.Decimal, types.Int, types.Bool, types.Bytes},
input: colexectestutils.Tuples{
{nil, 1.1, 4, true, "a"},
{0, nil, nil, nil, nil},
{0, 3.1, 5, nil, "b"},
{1, nil, nil, nil, nil},
{1, nil, nil, false, nil},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {}, {1}, {1}, {1}, {1}, {2}, {2}, {2}, {3}, {3}, {4}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.CountRows,
execinfrapb.AnyNotNull,
execinfrapb.Count,
execinfrapb.Sum,
execinfrapb.Avg,
execinfrapb.SumInt,
execinfrapb.Min,
execinfrapb.Max,
execinfrapb.BoolAnd,
execinfrapb.BoolOr,
execinfrapb.ConcatAgg,
},
expected: colexectestutils.Tuples{
{nil, 1, 1.1, 1, 1.1, 1.1, 4, 4, 4, true, true, "a"},
{0, 2, 3.1, 1, 3.1, 3.1, 5, 5, 5, nil, nil, "b"},
{1, 2, nil, 0, nil, nil, nil, nil, nil, false, false, nil},
},
convToDecimal: true,
},
{
name: "DistinctAggregation",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{0, 1},
{0, 2},
{0, 2},
{0, nil},
{0, 1},
{0, nil},
{1, 1},
{1, 2},
{1, 2},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}, {1}, {1}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.Count,
execinfrapb.Count,
execinfrapb.SumInt,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 4, 2, 6, 3},
{1, 3, 2, 5, 3},
},
aggDistinct: []bool{false, false, true, false, true},
},
{
name: "FilteringAggregation",
typs: []*types.T{types.Int, types.Int, types.Bool},
input: colexectestutils.Tuples{
{0, 1, false},
{0, 2, true},
{0, 2, true},
{0, nil, nil},
{0, 1, nil},
{0, nil, true},
{1, 1, true},
{1, 2, nil},
{1, 2, true},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.CountRows,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 3, 4},
{1, 2, 3},
},
aggFilter: []int{tree.NoColumnIdx, 2, 2},
},
{
name: "AllGroupsFilteredOut",
typs: []*types.T{types.Int, types.Int, types.Bool},
input: colexectestutils.Tuples{
{0, 1, false},
{0, nil, nil},
{0, 2, false},
{1, 1, true},
{1, 2, nil},
{1, 2, true},
{2, 1, false},
{2, nil, nil},
{2, 2, nil},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.CountRows,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 0, nil},
{1, 2, 3},
{2, 0, nil},
},
aggFilter: []int{tree.NoColumnIdx, 2, 2},
},
{
name: "DistinctFilteringAggregation",
typs: []*types.T{types.Int, types.Int, types.Bool},
input: colexectestutils.Tuples{
{0, 1, false},
{0, 2, true},
{0, 2, true},
{0, nil, nil},
{0, 1, nil},
{0, nil, true},
{1, 1, true},
{1, 2, nil},
{1, 2, true},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}, {1}, {1}, {1}, {1}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.Count,
execinfrapb.Count,
execinfrapb.Count,
execinfrapb.SumInt,
execinfrapb.SumInt,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 2, 2, 1, 4, 3, 2},
{1, 2, 2, 2, 3, 3, 3},
},
aggDistinct: []bool{false, false, true, true, false, true, true},
aggFilter: []int{tree.NoColumnIdx, 2, tree.NoColumnIdx, 2, 2, tree.NoColumnIdx, 2},
},
}
func init() {
for i := range aggregatorsTestCases {
if err := aggregatorsTestCases[i].init(); err != nil {
colexecerror.InternalError(err)
}
}
}
func TestAggregators(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
evalCtx := tree.MakeTestingEvalContext(cluster.MakeTestingClusterSettings())
defer evalCtx.Stop(context.Background())
ctx := context.Background()
for _, tc := range aggregatorsTestCases {
constructors, constArguments, outputTypes, err := colexecagg.ProcessAggregations(
&evalCtx, nil /* semaCtx */, tc.spec.Aggregations, tc.typs,
)
require.NoError(t, err)
for _, agg := range aggTypes {
if tc.unorderedInput && agg.order == ordered {
				// This test case has unordered input, so we skip the ordered
				// aggregator.
continue
}
if agg.order == ordered && tc.aggFilter != nil {
				// Filtering aggregation is only supported with the hash aggregator.
continue
}
log.Infof(ctx, "%s/%s", tc.name, agg.name)
verifier := colexectestutils.OrderedVerifier
if tc.unorderedInput {
verifier = colexectestutils.UnorderedVerifier
}
colexectestutils.RunTestsWithTyps(t, testAllocator, []colexectestutils.Tuples{tc.input}, [][]*types.T{tc.typs}, tc.expected, verifier,
func(input []colexecop.Operator) (colexecop.Operator, error) {
return agg.new(&colexecagg.NewAggregatorArgs{
Allocator: testAllocator,
MemAccount: testMemAcc,
Input: input[0],
InputTypes: tc.typs,
Spec: tc.spec,
EvalCtx: &evalCtx,
Constructors: constructors,
ConstArguments: constArguments,
OutputTypes: outputTypes,
})
})
}
}
}
func TestAggregatorRandom(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
evalCtx := tree.MakeTestingEvalContext(cluster.MakeTestingClusterSettings())
defer evalCtx.Stop(context.Background())
// This test aggregates random inputs, keeping track of the expected results
// to make sure the aggregations are correct.
rng, _ := randutil.NewTestRand()
for _, groupSize := range []int{1, 2, coldata.BatchSize() / 4, coldata.BatchSize() / 2} {
if groupSize == 0 {
			// We might be varying coldata.BatchSize() so that when it is divided by
			// 4, groupSize is 0. We want to skip such a configuration.
continue
}
for _, numInputBatches := range []int{1, 2, 64} {
for _, hasNulls := range []bool{true, false} {
for _, agg := range aggTypesWithPartial {
log.Infof(context.Background(), "%s/groupSize=%d/numInputBatches=%d/hasNulls=%t", agg.name, groupSize, numInputBatches, hasNulls)
nTuples := coldata.BatchSize() * numInputBatches
typs := []*types.T{types.Int, types.Float}
cols := []coldata.Vec{
testAllocator.NewMemColumn(typs[0], nTuples),
testAllocator.NewMemColumn(typs[1], nTuples),
}
if agg.order == partial {
typs = append(typs, types.Int)
cols = append(cols, testAllocator.NewMemColumn(typs[2], nTuples))
}
groups, aggCol, aggColNulls := cols[0].Int64(), cols[1].Float64(), cols[1].Nulls()
expectedTuples := colexectestutils.Tuples{}
var expRowCounts, expCounts []int64
var expSums, expMins, expMaxs []float64
// SUM, MIN, MAX, and AVG aggregators can output null.
var expNulls []bool
curGroup := -1
for i := range groups {
if i%groupSize == 0 {
if curGroup != -1 {
if expNulls[curGroup] {
expectedTuples = append(expectedTuples, colexectestutils.Tuple{
expRowCounts[curGroup], expCounts[curGroup], nil, nil, nil, nil,
})
} else {
expectedTuples = append(expectedTuples, colexectestutils.Tuple{
expRowCounts[curGroup], expCounts[curGroup], expSums[curGroup], expMins[curGroup], expMaxs[curGroup], expSums[curGroup] / float64(expCounts[curGroup]),
})
}
}
expRowCounts = append(expRowCounts, 0)
expCounts = append(expCounts, 0)
expSums = append(expSums, 0)
expMins = append(expMins, 2048)
expMaxs = append(expMaxs, -2048)
expNulls = append(expNulls, true)
curGroup++
}
// Keep the inputs small so they are a realistic size. Using a
// large range is not realistic and makes decimal operations
// slower.
aggCol[i] = 2048 * (rng.Float64() - 0.5)
// NULL values contribute to the row count, so we're updating
// the row counts outside of the if block.
expRowCounts[curGroup]++
if hasNulls && rng.Float64() < nullProbability {
aggColNulls.SetNull(i)
} else {
expNulls[curGroup] = false
expCounts[curGroup]++
expSums[curGroup] += aggCol[i]
expMins[curGroup] = min64(aggCol[i], expMins[curGroup])
expMaxs[curGroup] = max64(aggCol[i], expMaxs[curGroup])
}
groups[i] = int64(curGroup)
}
// Add result for last group.
if expNulls[curGroup] {
expectedTuples = append(expectedTuples, colexectestutils.Tuple{
expRowCounts[curGroup], expCounts[curGroup], nil, nil, nil, nil,
})
} else {
expectedTuples = append(expectedTuples, colexectestutils.Tuple{
expRowCounts[curGroup], expCounts[curGroup], expSums[curGroup], expMins[curGroup], expMaxs[curGroup], expSums[curGroup] / float64(expCounts[curGroup]),
})
}
source := colexectestutils.NewChunkingBatchSource(testAllocator, typs, cols, nTuples)
tc := aggregatorTestCase{
typs: typs,
groupCols: []uint32{0},
aggCols: [][]uint32{{}, {1}, {1}, {1}, {1}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.CountRows,
execinfrapb.Count,
execinfrapb.Sum,
execinfrapb.Min,
execinfrapb.Max,
execinfrapb.Avg,
},
}
if agg.order == partial {
tc.groupCols = []uint32{0, 2}
tc.orderedCols = []uint32{0}
}
require.NoError(t, tc.init())
constructors, constArguments, outputTypes, err := colexecagg.ProcessAggregations(
&evalCtx, nil /* semaCtx */, tc.spec.Aggregations, tc.typs,
)
require.NoError(t, err)
a, err := agg.new(&colexecagg.NewAggregatorArgs{
Allocator: testAllocator,
MemAccount: testMemAcc,
Input: source,
InputTypes: tc.typs,
Spec: tc.spec,
EvalCtx: &evalCtx,
Constructors: constructors,
ConstArguments: constArguments,
OutputTypes: outputTypes,
})
if err != nil {
t.Fatal(err)
}
a.Init(context.Background())
testOutput := colexectestutils.NewOpTestOutput(a, expectedTuples)
if agg.order == ordered {
err = testOutput.Verify()
} else if agg.order == partial {
err = testOutput.VerifyPartialOrder()
} else {
err = testOutput.VerifyAnyOrder()
}
if err != nil {
t.Fatal(err)
}
}
}
}
}
}
// benchmarkAggregateFunction runs aggregator microbenchmarks. numGroupCol is
// the number of grouping columns. groupSize is the number of tuples to target
// in each distinct aggregation group. chunkSize is the number of tuples to
// target in each distinct partially ordered group column, and is intended for
// use with partial order. Limit is the number of rows to retrieve from the
// aggregation function before ending the microbenchmark.
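// distinctProb, when positive, makes the aggregation DISTINCT and reduces the
// values in the aggregated column modulo 1/distinctProb so that roughly that
// fraction of them is distinct; it is supported only on an INT argument.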
func benchmarkAggregateFunction(
b *testing.B,
agg aggType,
aggFn execinfrapb.AggregatorSpec_Func,
aggInputTypes []*types.T,
numGroupCol int,
groupSize int,
distinctProb float64,
numInputRows int,
chunkSize int,
limit int,
) {
defer log.Scope(b).Close(b)
if groupSize > numInputRows {
		// In this case all tuples will be part of the same group, and we have
		// likely already benchmarked such a scenario with this value of
		// numInputRows, so we short-circuit.
return
}
if numGroupCol < 1 {
// We should always have at least one group column.
return
}
if agg.order == partial {
if chunkSize > numInputRows || groupSize > chunkSize {
return
}
}
rng, _ := randutil.NewTestRand()
ctx := context.Background()
evalCtx := tree.MakeTestingEvalContext(cluster.MakeTestingClusterSettings())
defer evalCtx.Stop(ctx)
aggMemAcc := evalCtx.Mon.MakeBoundAccount()
defer aggMemAcc.Close(ctx)
evalCtx.SingleDatumAggMemAccount = &aggMemAcc
const bytesFixedLength = 8
typs := []*types.T{types.Int}
groupCols := []uint32{0}
for g := 1; g < numGroupCol; g++ {
typs = append(typs, types.Int)
groupCols = append(groupCols, uint32(g))
}
typs = append(typs, aggInputTypes...)
cols := make([]coldata.Vec, len(typs))
for i := range typs {
cols[i] = testAllocator.NewMemColumn(typs[i], numInputRows)
}
groups := cols[0].Int64()
if agg.order == ordered {
curGroup := -1
for i := 0; i < numInputRows; i++ {
if i%groupSize == 0 {
curGroup++
}
groups[i] = int64(curGroup)
}
} else if agg.order == unordered {
numGroups := numInputRows / groupSize
for i := 0; i < numInputRows; i++ {
groups[i] = int64(rng.Intn(numGroups))
}
} else {
// partial order.
chunks := cols[0].Int64()
groups = cols[1].Int64()
curChunk := -1
numGroups := chunkSize / groupSize
for i := 0; i < numInputRows; i++ {
if i%chunkSize == 0 {
curChunk++
}
chunks[i] = int64(curChunk)
groups[i] = int64(rng.Intn(numGroups))
}
}
for _, col := range cols[numGroupCol:] {
coldatatestutils.RandomVec(coldatatestutils.RandomVecArgs{
Rand: rng,
Vec: col,
N: numInputRows,
NullProbability: 0,
BytesFixedLength: bytesFixedLength,
})
}
if aggFn == execinfrapb.SumInt {
		// Integer summation of random Int64 values can lead
		// to overflow, and we will panic. To work around this, we
		// restrict the range of values.
vals := cols[numGroupCol].Int64()
for i := range vals {
vals[i] = vals[i] % 1024
}
}
source := colexectestutils.NewChunkingBatchSource(testAllocator, typs, cols, numInputRows)
aggCols := make([]uint32, len(aggInputTypes))
for i := range aggCols {
aggCols[i] = uint32(numGroupCol + i)
}
tc := aggregatorTestCase{
typs: typs,
groupCols: groupCols,
aggCols: [][]uint32{aggCols},
aggFns: []execinfrapb.AggregatorSpec_Func{aggFn},
}
if distinctProb > 0 {
if !typs[0].Identical(types.Int) {
skip.IgnoreLint(b, "benchmarking distinct aggregation is supported only on an INT argument")
}
tc.aggDistinct = []bool{true}
distinctModulo := int64(1.0 / distinctProb)
vals := cols[1].Int64()
for i := range vals {
vals[i] = vals[i] % distinctModulo
}
}
if agg.order == partial {
tc.unorderedInput = false
tc.orderedCols = []uint32{0}
}
require.NoError(b, tc.init())
constructors, constArguments, outputTypes, err := colexecagg.ProcessAggregations(
&evalCtx, nil /* semaCtx */, tc.spec.Aggregations, tc.typs,
)
require.NoError(b, err)
fName := execinfrapb.AggregatorSpec_Func_name[int32(aggFn)]
// Only count the aggregation columns.
var argumentsSize int
if len(aggInputTypes) > 0 {
for _, typ := range aggInputTypes {
if typ.Identical(types.Bool) {
argumentsSize++
} else {
argumentsSize += 8
}
}
} else {
// For COUNT_ROWS we'll just use 8 bytes.
argumentsSize = 8
}
var inputTypesString string
switch len(aggInputTypes) {
case 1:
		// Override the string so that the name of the benchmark is the same
		// as in pre-20.2 releases (which allows us to compare against old
		// numbers).
inputTypesString = aggInputTypes[0].String()
default:
inputTypesString = fmt.Sprintf("%s", aggInputTypes)
}
distinctProbString := ""
if distinctProb > 0 {
distinctProbString = fmt.Sprintf("/distinctProb=%.2f", distinctProb)
}
b.Run(fmt.Sprintf(
"%s/%s/%s/groupSize=%d%s/numInputRows=%d",
fName, agg.name, inputTypesString, groupSize, distinctProbString, numInputRows),
func(b *testing.B) {
b.SetBytes(int64(argumentsSize * numInputRows))
b.ResetTimer()
for i := 0; i < b.N; i++ {
a, err := agg.new(&colexecagg.NewAggregatorArgs{
Allocator: testAllocator,
MemAccount: testMemAcc,
Input: source,
InputTypes: tc.typs,
Spec: tc.spec,
EvalCtx: &evalCtx,
Constructors: constructors,
ConstArguments: constArguments,
OutputTypes: outputTypes,
})
if err != nil {
b.Fatal(err)
}
a.Init(ctx)
// Exhaust aggregator until all batches have been read or limit, if
// non-zero, is reached.
tupleCount := 0
for b := a.Next(); b.Length() != 0; b = a.Next() {
tupleCount += b.Length()
if limit > 0 && tupleCount >= limit {
break
}
}
if err = a.(colexecop.Closer).Close(); err != nil {
b.Fatal(err)
}
source.Reset(ctx)
}
},
)
}
// BenchmarkAggregator runs the benchmark for both aggregators with diverse data
// source parameters but using a single aggregate function. The goal of this
// benchmark is to measure the performance of the aggregators themselves
// depending on the parameters of the input.
func BenchmarkAggregator(b *testing.B) {
aggFn := execinfrapb.Min
numRows := []int{1, 32, coldata.BatchSize(), 32 * coldata.BatchSize(), 1024 * coldata.BatchSize()}
groupSizes := []int{1, 2, 32, 128, coldata.BatchSize()}
if testing.Short() {
numRows = []int{32, 32 * coldata.BatchSize()}
groupSizes = []int{1, coldata.BatchSize()}
}
for _, agg := range aggTypes {
for _, numInputRows := range numRows {
for _, groupSize := range groupSizes {
benchmarkAggregateFunction(
b, agg, aggFn, []*types.T{types.Int}, 1, /* numGroupCol */
groupSize, 0 /* distinctProb */, numInputRows,
0 /* chunkSize */, 0 /* limit */)
}
}
}
}
// BenchmarkAllOptimizedAggregateFunctions runs the benchmark of all optimized
// aggregate functions in 4 configurations (hash vs ordered, and small groups
// vs big groups). Such configurations were chosen since they provide good
// enough signal on the speeds of aggregate functions. For more diverse
// configurations look at BenchmarkAggregator.
func BenchmarkAllOptimizedAggregateFunctions(b *testing.B) {
var numInputRows = 32 * coldata.BatchSize()
numFnsToRun := len(execinfrapb.AggregatorSpec_Func_name)
if testing.Short() {
numFnsToRun = 1
}
for aggFnNumber := 0; aggFnNumber < numFnsToRun; aggFnNumber++ {
aggFn := execinfrapb.AggregatorSpec_Func(aggFnNumber)
if !colexecagg.IsAggOptimized(aggFn) {
continue
}
for _, agg := range aggTypes {
var aggInputTypes []*types.T
switch aggFn {
case execinfrapb.BoolAnd, execinfrapb.BoolOr:
aggInputTypes = []*types.T{types.Bool}
case execinfrapb.ConcatAgg:
aggInputTypes = []*types.T{types.Bytes}
case execinfrapb.CountRows:
default:
aggInputTypes = []*types.T{types.Int}
}
for _, groupSize := range []int{1, coldata.BatchSize()} {
benchmarkAggregateFunction(b, agg, aggFn, aggInputTypes,
1 /* numGroupCol */, groupSize,
0 /* distinctProb */, numInputRows,
0 /* chunkSize */, 0 /* limit */)
}
}
}
}
func BenchmarkDistinctAggregation(b *testing.B) {
aggFn := execinfrapb.Count
for _, agg := range aggTypes {
for _, numInputRows := range []int{32, 32 * coldata.BatchSize()} {
for _, groupSize := range []int{1, 2, 32, 128, coldata.BatchSize()} {
for _, distinctProb := range []float64{0.01, 0.1, 1.0} {
distinctModulo := int(1.0 / distinctProb)
if (groupSize == 1 && distinctProb != 1.0) || float64(groupSize)/float64(distinctModulo) < 0.1 {
						// We have such a combination of groupSize and distinctProb
						// parameters that we are very unlikely to satisfy it
						// (for example, with groupSize=1 and distinctProb=0.01,
						// every value will be distinct within the group), so we
						// skip such a configuration.
continue
}
benchmarkAggregateFunction(b, agg, aggFn, []*types.T{types.Int},
1 /* numGroupCol */, groupSize,
0 /* distinctProb */, numInputRows,
0 /* chunkSize */, 0 /* limit */)
}
}
}
}
}
func min64(a, b float64) float64 {
if a < b {
return a
}
return b
}
func max64(a, b float64) float64 {
if a > b {
return a
}
return b
}
| pkg/sql/colexec/aggregators_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/6a1b9c891f20b32d985a451118f690de01dc6e37 | [
0.9979459643363953,
0.11034099012613297,
0.0001617781090317294,
0.0010265762684866786,
0.3035691976547241
] |
{
"id": 0,
"code_window": [
"\tfor i := range aggCols {\n",
"\t\taggCols[i] = uint32(numGroupCol + i)\n",
"\t}\n",
"\ttc := aggregatorTestCase{\n",
"\t\ttyps: typs,\n",
"\t\tgroupCols: groupCols,\n",
"\t\taggCols: [][]uint32{aggCols},\n",
"\t\taggFns: []execinfrapb.AggregatorSpec_Func{aggFn},\n",
"\t}\n",
"\tif distinctProb > 0 {\n",
"\t\tif !typs[0].Identical(types.Int) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\ttyps: typs,\n",
"\t\tgroupCols: groupCols,\n",
"\t\taggCols: [][]uint32{aggCols},\n",
"\t\taggFns: []execinfrapb.AggregatorSpec_Func{aggFn},\n",
"\t\tunorderedInput: agg.order == unordered,\n"
],
"file_path": "pkg/sql/colexec/aggregators_test.go",
"type": "replace",
"edit_start_line_idx": 1000
} | // Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
syntax = "proto3";
package cockroach.server.serverpb;
option go_package = "serverpb";
import "config/zonepb/zone.proto";
import "jobs/jobspb/jobs.proto";
import "server/serverpb/status.proto";
import "storage/enginepb/mvcc.proto";
import "kv/kvserver/liveness/livenesspb/liveness.proto";
import "kv/kvserver/kvserverpb/log.proto";
import "ts/catalog/chart_catalog.proto";
import "util/metric/metric.proto";
import "gogoproto/gogo.proto";
import "google/api/annotations.proto";
import "google/protobuf/timestamp.proto";
// ZoneConfigurationLevel indicates, for objects with a Zone Configuration,
// the object level at which the configuration is defined. This is needed
// because objects without a specifically indicated Zone Configuration will
// inherit the configuration of their "parent".
enum ZoneConfigurationLevel {
UNKNOWN = 0;
// CLUSTER indicates that this object uses the cluster default Zone Configuration.
CLUSTER = 1;
// DATABASE indicates that this object uses a database-level Zone Configuration.
DATABASE = 2;
// TABLE indicates that this object uses a table-level Zone Configuration.
TABLE = 3;
}
// DatabasesRequest requests a list of databases.
message DatabasesRequest {
}
// DatabasesResponse contains a list of databases.
message DatabasesResponse {
repeated string databases = 1;
}
// DatabaseDetailsRequest requests detailed information about the specified
// database
message DatabaseDetailsRequest {
// database is the name of the database we are querying.
string database = 1;
// Setting this flag includes a computationally-expensive stats field
// in the response.
bool include_stats = 2;
}
// DatabaseDetailsResponse contains grant information, table names,
// zone configuration, and size statistics for a database.
message DatabaseDetailsResponse {
message Grant {
// user is the user that this grant applies to.
string user = 1;
// privileges are the abilities this grant gives to the user.
repeated string privileges = 2;
}
message Stats {
// A table which exists in the database, but for which we could not load stats
// during this request.
message MissingTable {
// The name of the table for which we could not load stats.
string name = 1;
// The error message that resulted when the request for this table failed.
string error_message = 2;
}
// A list of tables that exist in the database, but for which stats could
// not be loaded due to failures during this request.
repeated MissingTable missing_tables = 1;
// The number of ranges, as determined from a query of range meta keys,
// across all tables.
int64 range_count = 2;
// An approximation of the disk space (in bytes) used for all replicas
// of all tables across the cluster.
uint64 approximate_disk_bytes = 3;
// node_ids is the ordered list of node ids on which data is stored.
repeated int32 node_ids = 4 [(gogoproto.customname) = "NodeIDs",
(gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"];
}
// grants are the results of SHOW GRANTS for this database.
repeated Grant grants = 1 [(gogoproto.nullable) = false];
// table_names contains the names of all tables in this database. Note that
// all responses will be schema-qualified (schema.table) and that every schema
// or table that contains a "sql unsafe character" such as uppercase letters
// or dots will be surrounded with double quotes, such as "naughty schema".table.
repeated string table_names = 2;
// descriptor_id is an identifier used to uniquely identify this database.
// It can be used to find events pertaining to this database by filtering on
// the 'target_id' field of events.
int64 descriptor_id = 3 [(gogoproto.customname) = "DescriptorID"];
// The zone configuration in effect for this database.
cockroach.config.zonepb.ZoneConfig zone_config = 4 [(gogoproto.nullable) = false];
// The level at which this object's zone configuration is set.
ZoneConfigurationLevel zone_config_level = 5;
// Size information about the database, present only when explicitly requested.
Stats stats = 6;
}
// TableDetailsRequest is a request for detailed information about a table.
message TableDetailsRequest {
// database is the database that contains the table we're interested in.
string database = 1;
// table is the name of the table that we're querying. Table may be
// schema-qualified (schema.table) and each name component that contains
// sql unsafe characters such as . or uppercase letters must be surrounded
// in double quotes like "naughty schema".table.
string table = 2;
}
// TableDetailsResponse contains grants, column names, and indexes for
// a table.
message TableDetailsResponse {
// Grant is an entry from SHOW GRANTS.
message Grant {
// user is the user that this grant applies to.
string user = 1;
// privileges are the abilities this grant gives to the user.
repeated string privileges = 2;
}
message Column {
// name is the name of the column.
string name = 1;
// type is the SQL type (INT, STRING, etc.) of this column.
string type = 2;
// nullable is whether this column can contain NULL.
bool nullable = 3;
// default_value is the default value of this column.
string default_value = 4;
// generation_expression is the generator expression if the column is computed.
string generation_expression = 5;
// hidden is whether this column is hidden.
bool hidden = 6;
}
message Index {
// name is the name of this index.
string name = 1;
    // unique is whether this is a unique index (i.e. CREATE UNIQUE INDEX).
bool unique = 2;
// seq is an internal variable that's passed along.
int64 seq = 3;
// column is the column that this index indexes.
string column = 4;
// direction is either "ASC" (ascending) or "DESC" (descending).
string direction = 5;
// storing is an internal variable that's passed along.
bool storing = 6;
// implicit is an internal variable that's passed along.
bool implicit = 7;
}
repeated Grant grants = 1 [(gogoproto.nullable) = false];
repeated Column columns = 2 [(gogoproto.nullable) = false];
repeated Index indexes = 3 [(gogoproto.nullable) = false];
// range_count is the size of the table in ranges. This provides a rough
// estimate of the storage requirements for the table.
// TODO(mrtracy): The TableStats method also returns a range_count field which
// is more accurate than this one; TableDetails calculates this number using
// a potentially faster method that is subject to cache staleness. We should
// consider removing or renaming this field to reflect that difference. See
// GitHub issue #5435 for more information.
int64 range_count = 4;
// create_table_statement is the output of "SHOW CREATE" for this table;
// it is a SQL statement that would re-create the table's current schema if
// executed.
string create_table_statement = 5;
// The zone configuration in effect for this table.
cockroach.config.zonepb.ZoneConfig zone_config = 6 [(gogoproto.nullable) = false];
// The level at which this object's zone configuration is set.
ZoneConfigurationLevel zone_config_level = 7;
// descriptor_id is an identifier used to uniquely identify this table.
// It can be used to find events pertaining to this table by filtering on
// the 'target_id' field of events.
int64 descriptor_id = 8 [(gogoproto.customname) = "DescriptorID"];
// configure_zone_statement is the output of "SHOW ZONE CONFIGURATION FOR TABLE"
// for this table. It is a SQL statement that would re-configure the table's current
// zone if executed.
string configure_zone_statement = 9;
}
// TableStatsRequest is a request for detailed, computationally expensive
// information about a table.
message TableStatsRequest {
// database is the database that contains the table we're interested in.
string database = 1;
// table is the name of the table that we're querying. Table may be
// schema-qualified (schema.table) and each name component that contains
// sql unsafe characters such as . or uppercase letters must be surrounded
// in double quotes like "naughty schema".table.
string table = 2;
}
// TableStatsResponse contains detailed, computationally expensive information
// about a table.
message TableStatsResponse {
// range_count is the number of ranges, as determined from a query of range
// meta keys.
int64 range_count = 1;
// replica_count is the number of replicas of any range of this table, as
// found by querying nodes which are known to have replicas. When compared
// with range_count, this can be used to estimate the current replication
// factor of the table.
int64 replica_count = 2;
// node_count is the number of nodes which contain data for this table,
// according to a query of range meta keys.
int64 node_count = 3;
// stats is the summation of MVCCStats for all replicas of this table
// across the cluster.
cockroach.storage.enginepb.MVCCStats stats = 4 [(gogoproto.nullable) = false];
// approximate_disk_bytes is an approximation of the disk space (in bytes)
// used for all replicas of this table across the cluster.
uint64 approximate_disk_bytes = 6;
// MissingNode represents information on a node which should contain data
// for this table, but could not be contacted during this request.
message MissingNode {
// The ID of the missing node.
string node_id = 1 [(gogoproto.customname) = "NodeID"];
// The error message that resulted when the query sent to this node failed.
string error_message = 2;
}
// A list of nodes which should contain data for this table (according to
// cluster metadata), but could not be contacted during this request.
repeated MissingNode missing_nodes = 5 [(gogoproto.nullable) = false];
// node_ids is the ordered list of node ids on which the table data is stored.
repeated int32 node_ids = 7 [(gogoproto.customname) = "NodeIDs",
(gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"];
}
// NonTableStatsRequest requests statistics on cluster data ranges that do not
// belong to SQL tables.
message NonTableStatsRequest {
}
// NonTableStatsResponse returns statistics on various cluster data ranges
// that do not belong to SQL tables. The statistics for each range are returned
// as a TableStatsResponse.
message NonTableStatsResponse {
// Information on time series ranges.
TableStatsResponse time_series_stats = 1;
// Information for remaining (non-table, non-time-series) ranges.
TableStatsResponse internal_use_stats = 2;
}
// UsersRequest requests a list of users.
message UsersRequest {
}
// UsersResponse returns a list of users.
message UsersResponse {
// User is a CockroachDB user.
message User {
string username = 1;
}
// usernames is a list of users for the CockroachDB cluster.
repeated User users = 1 [(gogoproto.nullable) = false];
}
// EventsRequest is a request for event log entries, optionally filtered
// by the specified event type and/or target_id.
message EventsRequest {
string type = 1;
int64 target_id = 2;
// limit is the total number of results that are retrieved by the query. If
// this is omitted or set to 0, the default maximum number of results are
// returned. When set to > 0, at most only that number of results are
// returned. When set to < 0, an unlimited number of results are returned.
int32 limit = 3;
// unredacted_events indicates that the values in the events should
// not be redacted. The default is to redact, so that older versions
// of `cockroach zip` do not see un-redacted values by default.
// For good security, this field is only obeyed by the server after
// checking that the client of the RPC is an admin user.
bool unredacted_events = 4;
}
// EventsResponse contains a set of event log entries. This is always limited
// to the latest N entries (N is enforced in the associated endpoint).
message EventsResponse {
message Event {
// timestamp is the time at which the event occurred.
google.protobuf.Timestamp timestamp = 1 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
// event_type is the type of the event (e.g. "create_table", "drop_table").
string event_type = 2;
// target_id is the target for this event.
int64 target_id = 3 [(gogoproto.customname) = "TargetID"];
// reporting_id is the reporting ID for this event.
int64 reporting_id = 4 [(gogoproto.customname) = "ReportingID"];
// info has more detailed information for the event. The contents vary
// depending on the event.
string info = 5;
// unique_id is a unique identifier for this event.
bytes unique_id = 6 [(gogoproto.customname) = "UniqueID"];
}
repeated Event events = 1 [(gogoproto.nullable) = false];
}
// SetUIDataRequest stores the given key/value pairs in the system.ui table.
message SetUIDataRequest {
// key_values is a map of keys to bytes values. Each key will be stored
// with its corresponding value as a separate row in system.ui.
map<string, bytes> key_values = 1;
}
// SetUIDataResponse is currently an empty response.
message SetUIDataResponse {
}
// GetUIDataRequest requests the values for the given keys from the system.ui
// table.
message GetUIDataRequest {
repeated string keys = 1;
}
// GetUIDataResponse contains the requested values and the times at which
// the values were last updated.
message GetUIDataResponse {
message Value {
// value is the value of the requested key.
bytes value = 1;
// last_updated is the time at which the value was last updated.
google.protobuf.Timestamp last_updated = 2 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
}
// key_values maps keys to their retrieved values. If this doesn't contain a
// requested key, that key was not found.
map<string, Value> key_values = 1 [(gogoproto.nullable) = false];
}
// ClusterRequest requests metadata for the cluster.
message ClusterRequest {
}
// ClusterResponse contains metadata for the cluster.
message ClusterResponse {
// The unique ID used to identify this cluster.
string cluster_id = 1 [(gogoproto.customname) = "ClusterID"];
// True if diagnostics reporting is enabled for the cluster.
bool reporting_enabled = 2;
// True if enterprise features are enabled for the cluster.
bool enterprise_enabled = 3;
}
// DrainRequest instructs the receiving node to drain.
message DrainRequest {
// pre_201_marker represents a field that clients stopped using in 20.1. It's
// maintained to reject requests from such clients, since they're not setting
// other required fields.
repeated int32 pre201_marker = 1;
reserved 2;
// When true, terminates the process after the server has started draining.
// Setting both shutdown and do_drain to false causes
// the request to only operate as a probe.
// Setting do_drain to false and shutdown to true causes
// the server to shut down immediately without
// first draining.
bool shutdown = 3;
// When true, perform the drain phase. See the comment above on
// shutdown for an explanation of the interaction between the two.
// do_drain is also implied by a non-nil deprecated_probe_indicator.
bool do_drain = 4;
}
// DrainResponse is the response to a successful DrainRequest.
message DrainResponse {
// is_draining is set to true iff the server is currently draining.
// This is set to true in response to a request where do_drain
// is true; but it can also be set to true in response
// to a probe request (!shutdown && !do_drain) if another
// drain request has been issued previously or asynchronously.
bool is_draining = 2;
// drain_remaining_indicator measures, at the time of starting to
// process the corresponding drain request, how many actions to
// fully drain the node were deemed to be necessary. Some, but not
// all, of these actions may already have been carried out by the
// time this indicator is received by the client. The client should
// issue requests until this indicator first reaches zero, which
// indicates that the node is fully drained.
//
// The API contract is the following:
//
// - upon a first Drain call with do_drain set, the remaining
// indicator will have some value >=0. If >0, it indicates that
// drain is pushing state away from the node. (What this state
// precisely means is left unspecified for this field. See below
// for details.)
//
// - upon a subsequent Drain call with do_drain set, the remaining
// indicator should have reduced in value. The drain process makes a
// best effort at shedding state away from the node; hopefully, all the
// state is shed away upon the first call and the progress
// indicator can be zero as early as the second call. However,
// if there was a lot of state to shed, it is possible for
// a timeout to be encountered upon the first call. In that case, the
// second call will do some more work and return a non-zero value
// as well.
//
// - eventually, in an iterated sequence of DrainRequests with
// do_drain set, the remaining indicator should reduce to zero. At
// that point the client can conclude that no state is left to
// shed, and it should be safe to shut down the node with a
// DrainRequest with shutdown = true.
//
// Note that this field is left unpopulated (and thus remains at
// zero) for pre-20.1 nodes. A client can recognize this by
// observing is_draining to be false after a request with do_drain =
// true: the is_draining field is also left unpopulated by pre-20.1
// nodes.
uint64 drain_remaining_indicator = 3;
// drain_remaining_description is an informal (= not
// machine-parsable) string that explains the progress of the drain
// process to human eyes. This is intended for use mainly for
// troubleshooting.
//
// The field is only populated if do_drain is true in the
// request.
string drain_remaining_description = 4;
reserved 1;
}
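// The drain_remaining_indicator contract above describes an iterated
// protocol: call Drain with do_drain set until the indicator reaches zero,
// then issue a final Drain with shutdown set. A client-side sketch in Go,
// illustrative only: it assumes the AdminClient interface and message types
// generated from this service by the gRPC Go plugin.
//
//   func drainThenShutdown(ctx context.Context, c AdminClient) error {
//       for {
//           stream, err := c.Drain(ctx, &DrainRequest{DoDrain: true})
//           if err != nil {
//               return err
//           }
//           resp, err := stream.Recv()
//           if err != nil {
//               return err
//           }
//           if resp.DrainRemainingIndicator == 0 {
//               break // nothing left to shed; safe to shut down
//           }
//       }
//       // Graceful shutdown: keep draining and terminate the process.
//       _, err := c.Drain(ctx, &DrainRequest{DoDrain: true, Shutdown: true})
//       return err
//   }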
// DecommissionStatusRequest requests the decommissioning status for the
// specified nodes or, if none are specified, all nodes.
message DecommissionStatusRequest {
repeated int32 node_ids = 1 [(gogoproto.customname) = "NodeIDs",
(gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"];
}
// DecommissionRequest requests the server to set the membership status on
// all nodes specified by NodeIDs to the value of TargetMembership.
//
// If no NodeIDs are given, it targets the recipient node.
message DecommissionRequest {
repeated int32 node_ids = 1 [(gogoproto.customname) = "NodeIDs",
(gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"];
kv.kvserver.liveness.livenesspb.MembershipStatus target_membership = 2;
}
// DecommissionStatusResponse lists decommissioning statuses for a number of NodeIDs.
message DecommissionStatusResponse {
message Status {
int32 node_id = 1 [(gogoproto.customname) = "NodeID",
(gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"];
bool is_live = 2;
// The number of replicas on the node, computed by scanning meta2 ranges.
int64 replica_count = 3;
// The membership status of the given node.
kv.kvserver.liveness.livenesspb.MembershipStatus membership = 4;
bool draining = 5;
}
// Status of all affected nodes.
repeated Status status = 2 [(gogoproto.nullable) = false];
}
// SettingsRequest requests the current settings of the cluster.
message SettingsRequest {
// The array of setting names to retrieve.
// An empty keys array means "all".
repeated string keys = 1;
// Indicates whether to return unredacted setting values.
// This is opt-in so that a previous version of `cockroach zip`
// does not start reporting values when this becomes active.
// For good security, the server only obeys this after it checks
// that the logged-in user has admin privileges.
bool unredacted_values = 2;
}
// SettingsResponse is the response to SettingsRequest.
message SettingsResponse {
message Value {
string value = 1;
string type = 2;
string description = 3;
bool public = 4;
}
map<string, Value> key_values = 1 [(gogoproto.nullable) = false];
}
// HealthRequest requests a liveness or readiness check.
//
// A liveness check is triggered via ready set to false. In this mode,
// an empty response is returned immediately; that is, the caller merely
// learns that the process is running.
//
// A readiness check (ready == true) is suitable for determining whether
// user traffic should be directed at a given node, for example by a load
// balancer. In this mode, a successful response is returned only if the
// node:
//
// - is not in the process of shutting down or booting up (including
// waiting for cluster bootstrap);
// - is regarded as healthy by the cluster via the recent broadcast of
// a liveness beacon. Absent either of these conditions, an error
// code will result.
//
// API: PUBLIC
message HealthRequest {
// ready specifies whether the client wants to know whether the
// target node is ready to receive traffic. If a node is unready, an
// error will be returned.
// API: PUBLIC
bool ready = 1;
}
// HealthResponse is the response to HealthRequest. It currently does not
// contain any information.
// API: PUBLIC
message HealthResponse {
}
// LivenessRequest requests liveness data for all nodes on the cluster.
message LivenessRequest {
}
// LivenessResponse contains the liveness status of each node on the cluster.
message LivenessResponse {
repeated kv.kvserver.liveness.livenesspb.Liveness livenesses = 1 [(gogoproto.nullable) = false];
map<int32, kv.kvserver.liveness.livenesspb.NodeLivenessStatus> statuses = 2 [
(gogoproto.nullable) = false,
(gogoproto.castkey) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"
];
}
// JobsRequest requests system job information of the given status and type.
message JobsRequest {
int32 limit = 1;
string status = 2;
cockroach.sql.jobs.jobspb.Type type = 3;
}
// JobsResponse contains the job record for each matching job.
message JobsResponse {
repeated JobResponse jobs = 1 [(gogoproto.nullable) = false];
}
// JobRequest requests system job information for the given job_id.
message JobRequest {
int64 job_id = 1;
}
// JobResponse contains the job record for a job.
message JobResponse {
int64 id = 1 [(gogoproto.customname) = "ID"];
string type = 2;
string description = 3;
string statement = 16;
string username = 4;
repeated uint32 descriptor_ids = 5 [
(gogoproto.customname) = "DescriptorIDs",
(gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"
];
string status = 6;
google.protobuf.Timestamp created = 7 [(gogoproto.stdtime) = true];
google.protobuf.Timestamp started = 8 [(gogoproto.stdtime) = true];
google.protobuf.Timestamp finished = 9 [(gogoproto.stdtime) = true];
google.protobuf.Timestamp modified = 10 [(gogoproto.stdtime) = true];
float fraction_completed = 11;
string error = 12;
// highwater_timestamp is the highwater timestamp returned as a normal
// timestamp. This is appropriate for display to humans.
google.protobuf.Timestamp highwater_timestamp = 13 [(gogoproto.stdtime) = true];
// highwater_decimal is the highwater timestamp in the proprietary decimal
// form used by logical timestamps internally. This is appropriate to pass
// to a "AS OF SYSTEM TIME" SQL statement.
string highwater_decimal = 14;
string running_status = 15;
}
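// As noted on highwater_decimal above, the decimal form can be used in an
// AS OF SYSTEM TIME clause. A minimal sketch using database/sql; the table
// name and surrounding code are placeholders, and HighwaterDecimal is the
// Go field generated for highwater_decimal:
//
//   func countAsOfHighwater(db *sql.DB, resp *JobResponse) (int, error) {
//       query := fmt.Sprintf(
//           "SELECT count(*) FROM mytable AS OF SYSTEM TIME '%s'",
//           resp.HighwaterDecimal)
//       var n int
//       err := db.QueryRow(query).Scan(&n)
//       return n, err
//   }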
// LocationsRequest requests system locality location information.
message LocationsRequest {
}
// LocationsResponse contains the list of locality location records.
message LocationsResponse {
message Location {
string locality_key = 1;
string locality_value = 2;
double latitude = 3;
double longitude = 4;
}
repeated Location locations = 1 [(gogoproto.nullable) = false];
}
// RangeLogRequest requests the history of a range from the range log.
message RangeLogRequest {
// TODO(tamird): use [(gogoproto.customname) = "RangeID"] below. Need to
// figure out how to teach grpc-gateway about custom names.
// If RangeID is 0, returns range log history without filtering by range.
int64 range_id = 1;
// limit is the total number of results that are retrieved by the query. If
// this is omitted or set to 0, the default maximum number of results are
// returned. When set to > 0, at most only that number of results are
// returned. When set to < 0, an unlimited number of results are returned.
int32 limit = 2;
}
// RangeLogResponse contains a list of entries from the range log table.
message RangeLogResponse {
// To avoid porting the pretty printing of keys and descriptors to
// JavaScript, they are precomputed on the server side.
message PrettyInfo {
string updated_desc = 1;
string new_desc = 2;
string added_replica = 3;
string removed_replica = 4;
string reason = 5;
string details = 6;
}
message Event {
cockroach.kv.kvserver.storagepb.RangeLogEvent event = 1 [(gogoproto.nullable) = false];
PrettyInfo pretty_info = 2 [(gogoproto.nullable) = false];
}
reserved 1; // Previously used.
repeated Event events = 2 [(gogoproto.nullable) = false];
}
// QueryPlanRequest requests the query plans for a SQL string.
message QueryPlanRequest {
// query is the SQL query string.
string query = 1;
}
// QueryPlanResponse contains the query plans for a SQL string (currently only
// the distsql physical query plan).
message QueryPlanResponse {
string distsql_physical_query_plan = 1 [(gogoproto.customname) = "DistSQLPhysicalQueryPlan"];
}
message DataDistributionRequest {
}
message DataDistributionResponse {
message ZoneConfig {
// target is the object the zone config applies to, e.g. "DATABASE db" or
// "PARTITION north_america OF TABLE users".
string target = 1;
config.zonepb.ZoneConfig config = 2 [(gogoproto.nullable) = false];
reserved 3;
// config_sql is the SQL representation of config.
string config_sql = 4 [(gogoproto.customname) = "ConfigSQL"];
}
message TableInfo {
map<int32, int64> replica_count_by_node_id = 1 [(gogoproto.castkey) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"];
int64 zone_config_id = 2;
google.protobuf.Timestamp dropped_at = 3 [(gogoproto.stdtime) = true];
}
message DatabaseInfo {
// By table name.
map<string, TableInfo> table_info = 1 [(gogoproto.nullable) = false];
}
// By database name.
map<string, DatabaseInfo> database_info = 1 [(gogoproto.nullable) = false];
reserved 2;
// By zone name.
map<string, ZoneConfig> zone_configs = 3 [(gogoproto.nullable) = false];
}
// MetricMetadataRequest requests metadata for all metrics.
message MetricMetadataRequest {
}
// MetricMetadataResponse contains the metadata for all metrics.
message MetricMetadataResponse {
map<string, cockroach.util.metric.Metadata> metadata = 1 [(gogoproto.nullable) = false];
}
message EnqueueRangeRequest {
// The node on which the queue should process the range. If node_id is 0,
// the request will be forwarded to all other nodes.
int32 node_id = 1 [(gogoproto.customname) = "NodeID",
(gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"];
// The name of the replica queue to run the range through. Matched against
// each queue's name field. See the implementation of baseQueue for details.
string queue = 2;
// The ID of the range to run through the queue.
int32 range_id = 3 [(gogoproto.customname) = "RangeID",
(gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.RangeID"];
// If set, run the queue's process method without first checking whether the
// replica should be processed by calling shouldQueue.
bool skip_should_queue = 4;
}
message EnqueueRangeResponse {
message Details {
int32 node_id = 1 [(gogoproto.customname) = "NodeID",
(gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"];
// All trace events collected while processing the range in the queue.
repeated TraceEvent events = 2;
// The error message from the queue's processing, if any.
string error = 3;
}
repeated Details details = 1;
}
// ChartCatalogRequest requests a catalog of Admin UI charts.
message ChartCatalogRequest {
}
// ChartCatalogResponse returns a catalog of Admin UI charts useful for debugging.
message ChartCatalogResponse {
repeated cockroach.ts.catalog.ChartSection catalog = 1 [(gogoproto.nullable) = false];
}
// CARequest requests the CA cert anchoring this service.
message CARequest {
}
// CAResponse contains a PEM encoded copy of the CA cert for this service.
message CAResponse {
bytes ca_cert = 1;
}
// CertBundleRequest requests the bundle of initialization CAs for a new node.
// It provides authentication in the form of a joinToken containing a
// sharedSecret.
message CertBundleRequest {
string token_id = 1 [(gogoproto.customname) = "TokenID"];
bytes shared_secret = 2;
}
// CertBundleResponse contains a copy of all CAs needed to initialize TLS for
// a new node.
message CertBundleResponse {
bytes bundle = 1;
}
// Admin is the gRPC API for the admin UI. Through grpc-gateway, we offer
// REST-style HTTP endpoints that locally proxy to the gRPC endpoints.
service Admin {
rpc RequestCA(CARequest) returns (CAResponse) {
option (google.api.http) = {
get : "/_join/v1/ca"
};
}
rpc RequestCertBundle(CertBundleRequest) returns (CertBundleResponse) {
option (google.api.http) = {
get : "/_join/v1/requestbundle"
};
}
// URL: /_admin/v1/users
rpc Users(UsersRequest) returns (UsersResponse) {
option (google.api.http) = {
get: "/_admin/v1/users"
};
}
// URL: /_admin/v1/databases
rpc Databases(DatabasesRequest) returns (DatabasesResponse) {
option (google.api.http) = {
get: "/_admin/v1/databases"
};
}
// Example URL: /_admin/v1/databases/system
rpc DatabaseDetails(DatabaseDetailsRequest) returns (DatabaseDetailsResponse) {
option (google.api.http) = {
get: "/_admin/v1/databases/{database}"
};
}
// Example URL: /_admin/v1/databases/system/tables/ui
rpc TableDetails(TableDetailsRequest) returns (TableDetailsResponse) {
option (google.api.http) = {
get: "/_admin/v1/databases/{database}/tables/{table}"
};
}
// Example URL: /_admin/v1/databases/system/tables/ui/stats
rpc TableStats(TableStatsRequest) returns (TableStatsResponse) {
option (google.api.http) = {
get: "/_admin/v1/databases/{database}/tables/{table}/stats"
};
}
// Example URL: /_admin/v1/nontablestats
rpc NonTableStats(NonTableStatsRequest) returns (NonTableStatsResponse) {
option (google.api.http) = {
get: "/_admin/v1/nontablestats"
};
}
// Example URLs:
// - /_admin/v1/events
// - /_admin/v1/events?limit=100
// - /_admin/v1/events?type=create_table
// - /_admin/v1/events?type=create_table&limit=100
// - /_admin/v1/events?type=drop_table&target_id=4
// - /_admin/v1/events?type=drop_table&target_id=4&limit=100
rpc Events(EventsRequest) returns (EventsResponse) {
option (google.api.http) = {
get: "/_admin/v1/events"
};
}
// This requires a POST. Because of the libraries we're using, the POST body
// must be in the following format:
//
// {"key_values":
// { "key1": "base64_encoded_value1"},
// ...
// { "keyN": "base64_encoded_valueN"},
// }
//
// Note that all keys are quoted strings and that all values are base64-
// encoded.
//
// Together, SetUIData and GetUIData provide access to a "cookie jar" for the
// admin UI. The structure of the underlying data is meant to be opaque to the
// server.
rpc SetUIData(SetUIDataRequest) returns (SetUIDataResponse) {
option (google.api.http) = {
post: "/_admin/v1/uidata"
body: "*"
};
}
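// A hedged example of driving this endpoint through the REST gateway from
// Go, using only the standard library. The address and the omission of
// authentication are assumptions for illustration; real admin endpoints
// require an authenticated session.
//
//   import (
//       "bytes"
//       "encoding/base64"
//       "encoding/json"
//       "net/http"
//   )
//
//   func setUIKey(key, value string) error {
//       encoded := base64.StdEncoding.EncodeToString([]byte(value))
//       body, err := json.Marshal(map[string]map[string]string{
//           "key_values": {key: encoded},
//       })
//       if err != nil {
//           return err
//       }
//       resp, err := http.Post("http://localhost:8080/_admin/v1/uidata",
//           "application/json", bytes.NewReader(body))
//       if err != nil {
//           return err
//       }
//       return resp.Body.Close()
//   }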
// Example URLs:
// - /_admin/v1/uidata?keys=MYKEY
// - /_admin/v1/uidata?keys=MYKEY1&keys=MYKEY2
//
// Yes, it's a little odd that the query parameter is named "keys" instead of
// "key". I would've preferred that the URL parameter be named "key". However,
// it's clearer for the protobuf field to be named "keys," which makes the URL
// parameter "keys" as well.
rpc GetUIData(GetUIDataRequest) returns (GetUIDataResponse) {
option (google.api.http) = {
get: "/_admin/v1/uidata"
};
}
// Cluster returns metadata for the cluster.
rpc Cluster(ClusterRequest) returns (ClusterResponse) {
option (google.api.http) = {
get: "/_admin/v1/cluster"
};
}
// Settings returns the cluster-wide settings for the cluster.
rpc Settings(SettingsRequest) returns (SettingsResponse) {
option (google.api.http) = {
get: "/_admin/v1/settings"
};
}
// Health returns liveness for the node target of the request.
// API: PUBLIC
rpc Health(HealthRequest) returns (HealthResponse) {
option (google.api.http) = {
get: "/_admin/v1/health"
additional_bindings {get : "/health"}
};
}
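// A readiness probe against the public /health binding only needs to check
// the HTTP status code. Illustrative Go sketch; the address is an
// assumption, and ?ready=1 sets the ready field via the gRPC gateway:
//
//   func nodeIsReady() (bool, error) {
//       resp, err := http.Get("http://localhost:8080/health?ready=1")
//       if err != nil {
//           return false, err
//       }
//       defer resp.Body.Close()
//       return resp.StatusCode == http.StatusOK, nil
//   }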
// Liveness returns the liveness state of all nodes on the cluster.
rpc Liveness(LivenessRequest) returns (LivenessResponse) {
option (google.api.http) = {
get: "/_admin/v1/liveness"
};
}
// Jobs returns the job records for all jobs of the given status and type.
rpc Jobs(JobsRequest) returns (JobsResponse) {
option (google.api.http) = {
get: "/_admin/v1/jobs"
};
}
// Job returns the job record for the job of the given job_id.
rpc Job(JobRequest) returns (JobResponse) {
option (google.api.http) = {
get: "/_admin/v1/jobs/{job_id}"
};
}
// Locations returns the locality location records.
rpc Locations(LocationsRequest) returns (LocationsResponse) {
option (google.api.http) = {
get: "/_admin/v1/locations"
};
}
// QueryPlan returns the query plans for a SQL string.
rpc QueryPlan(QueryPlanRequest) returns (QueryPlanResponse) {
option (google.api.http) = {
get: "/_admin/v1/queryplan"
};
}
// Drain puts the node into the specified drain mode(s) and optionally
// instructs the process to terminate.
// We do not expose this via HTTP until we have a way to authenticate
// + authorize streaming RPC connections. See #42567.
rpc Drain(DrainRequest) returns (stream DrainResponse) {
}
// Decommission puts the node(s) into the specified decommissioning state.
// If this ever becomes exposed via HTTP, ensure that it performs
// authorization. See #42567.
rpc Decommission(DecommissionRequest) returns (DecommissionStatusResponse) {
}
// DecommissionStatus retrieves the decommissioning status of the specified nodes.
// If this ever becomes exposed via HTTP, ensure that it performs
// authorization. See #42567.
rpc DecommissionStatus(DecommissionStatusRequest) returns (DecommissionStatusResponse) {
}
// URL: /_admin/v1/rangelog
// URL: /_admin/v1/rangelog?limit=100
// URL: /_admin/v1/rangelog/1
// URL: /_admin/v1/rangelog/1?limit=100
rpc RangeLog(RangeLogRequest) returns (RangeLogResponse) {
option (google.api.http) = {
get: "/_admin/v1/rangelog"
additional_bindings {
get: "/_admin/v1/rangelog/{range_id}"
}
};
}
rpc DataDistribution(DataDistributionRequest) returns (DataDistributionResponse) {
option (google.api.http) = {
get: "/_admin/v1/data_distribution"
};
}
// URL: /_admin/v1/metricmetadata
rpc AllMetricMetadata(MetricMetadataRequest) returns (MetricMetadataResponse) {
option (google.api.http) = {
get: "/_admin/v1/metricmetadata"
};
}
// URL: /_admin/v1/chartcatalog
rpc ChartCatalog(ChartCatalogRequest) returns (ChartCatalogResponse) {
option (google.api.http) = {
get: "/_admin/v1/chartcatalog"
};
}
// EnqueueRange runs the specified range through the specified queue on the
// range's leaseholder store, returning the detailed trace and error
// information from doing so. Parameters must be provided in the body of the
// POST request.
// For example:
//
// {
// "queue": "raftlog",
// "rangeId": 10
// }
rpc EnqueueRange(EnqueueRangeRequest) returns (EnqueueRangeResponse) {
option (google.api.http) = {
post: "/_admin/v1/enqueue_range"
body : "*"
};
}
}
| pkg/server/serverpb/admin.proto | 0 | https://github.com/cockroachdb/cockroach/commit/6a1b9c891f20b32d985a451118f690de01dc6e37 | [
0.002656987402588129,
0.0002354593889322132,
0.00016225020226556808,
0.00016882660565897822,
0.0003251252928748727
] |
{
"id": 0,
"code_window": [
"\tfor i := range aggCols {\n",
"\t\taggCols[i] = uint32(numGroupCol + i)\n",
"\t}\n",
"\ttc := aggregatorTestCase{\n",
"\t\ttyps: typs,\n",
"\t\tgroupCols: groupCols,\n",
"\t\taggCols: [][]uint32{aggCols},\n",
"\t\taggFns: []execinfrapb.AggregatorSpec_Func{aggFn},\n",
"\t}\n",
"\tif distinctProb > 0 {\n",
"\t\tif !typs[0].Identical(types.Int) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\ttyps: typs,\n",
"\t\tgroupCols: groupCols,\n",
"\t\taggCols: [][]uint32{aggCols},\n",
"\t\taggFns: []execinfrapb.AggregatorSpec_Func{aggFn},\n",
"\t\tunorderedInput: agg.order == unordered,\n"
],
"file_path": "pkg/sql/colexec/aggregators_test.go",
"type": "replace",
"edit_start_line_idx": 1000
} | @import '../core/index.module';
._text-bold {
color: $colors--primary-blue-3;
font-family: $font-family--monospace;
&-light {
color: $colors--primary-blue-7;
}
}
| pkg/ui/workspaces/cluster-ui/src/highlightedText/highlightedText.module.scss | 0 | https://github.com/cockroachdb/cockroach/commit/6a1b9c891f20b32d985a451118f690de01dc6e37 | [
0.00017696249415166676,
0.00017450891027692705,
0.00017205532640218735,
0.00017450891027692705,
0.0000024535838747397065
] |
{
"id": 0,
"code_window": [
"\tfor i := range aggCols {\n",
"\t\taggCols[i] = uint32(numGroupCol + i)\n",
"\t}\n",
"\ttc := aggregatorTestCase{\n",
"\t\ttyps: typs,\n",
"\t\tgroupCols: groupCols,\n",
"\t\taggCols: [][]uint32{aggCols},\n",
"\t\taggFns: []execinfrapb.AggregatorSpec_Func{aggFn},\n",
"\t}\n",
"\tif distinctProb > 0 {\n",
"\t\tif !typs[0].Identical(types.Int) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\ttyps: typs,\n",
"\t\tgroupCols: groupCols,\n",
"\t\taggCols: [][]uint32{aggCols},\n",
"\t\taggFns: []execinfrapb.AggregatorSpec_Func{aggFn},\n",
"\t\tunorderedInput: agg.order == unordered,\n"
],
"file_path": "pkg/sql/colexec/aggregators_test.go",
"type": "replace",
"edit_start_line_idx": 1000
} | parse
ALTER DEFAULT PRIVILEGES GRANT ALL ON TABLES TO foo
----
ALTER DEFAULT PRIVILEGES GRANT ALL ON TABLES TO foo
ALTER DEFAULT PRIVILEGES GRANT ALL ON TABLES TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES GRANT ALL ON TABLES TO foo -- literals removed
ALTER DEFAULT PRIVILEGES GRANT ALL ON TABLES TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES TO foo
----
ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES TO foo
ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES TO foo -- literals removed
ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO foo, bar
----
ALTER DEFAULT PRIVILEGES GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO foo, bar
ALTER DEFAULT PRIVILEGES GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES GRANT ALL ON SEQUENCES TO foo
----
ALTER DEFAULT PRIVILEGES GRANT ALL ON SEQUENCES TO foo
ALTER DEFAULT PRIVILEGES GRANT ALL ON SEQUENCES TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES GRANT ALL ON SEQUENCES TO foo -- literals removed
ALTER DEFAULT PRIVILEGES GRANT ALL ON SEQUENCES TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES GRANT SELECT ON SEQUENCES TO foo
----
ALTER DEFAULT PRIVILEGES GRANT SELECT ON SEQUENCES TO foo
ALTER DEFAULT PRIVILEGES GRANT SELECT ON SEQUENCES TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES GRANT SELECT ON SEQUENCES TO foo -- literals removed
ALTER DEFAULT PRIVILEGES GRANT SELECT ON SEQUENCES TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES GRANT SELECT, UPDATE ON SEQUENCES TO foo, bar
----
ALTER DEFAULT PRIVILEGES GRANT SELECT, UPDATE ON SEQUENCES TO foo, bar
ALTER DEFAULT PRIVILEGES GRANT SELECT, UPDATE ON SEQUENCES TO foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES GRANT SELECT, UPDATE ON SEQUENCES TO foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES GRANT SELECT, UPDATE ON SEQUENCES TO _, _ -- identifiers removed
error
ALTER DEFAULT PRIVILEGES GRANT ALL ON FUNCTIONS TO foo
----
----
at or near "to": syntax error: unimplemented: this syntax
DETAIL: source SQL:
ALTER DEFAULT PRIVILEGES GRANT ALL ON FUNCTIONS TO foo
^
HINT: You have attempted to use a feature that is not yet implemented.
Please check the public issue tracker to check whether this problem is
already tracked. If you cannot find it there, please report the error
with details by creating a new issue.
If you would rather not post publicly, please contact us directly
using the support form.
We appreciate your feedback.
----
----
error
ALTER DEFAULT PRIVILEGES GRANT ALL ON FUNCTIONS TO foo, bar
----
----
at or near "to": syntax error: unimplemented: this syntax
DETAIL: source SQL:
ALTER DEFAULT PRIVILEGES GRANT ALL ON FUNCTIONS TO foo, bar
^
HINT: You have attempted to use a feature that is not yet implemented.
Please check the public issue tracker to check whether this problem is
already tracked. If you cannot find it there, please report the error
with details by creating a new issue.
If you would rather not post publicly, please contact us directly
using the support form.
We appreciate your feedback.
----
----
parse
ALTER DEFAULT PRIVILEGES GRANT ALL ON TYPES TO foo
----
ALTER DEFAULT PRIVILEGES GRANT ALL ON TYPES TO foo
ALTER DEFAULT PRIVILEGES GRANT ALL ON TYPES TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES GRANT ALL ON TYPES TO foo -- literals removed
ALTER DEFAULT PRIVILEGES GRANT ALL ON TYPES TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES GRANT USAGE ON TYPES TO foo, bar
----
ALTER DEFAULT PRIVILEGES GRANT USAGE ON TYPES TO foo, bar
ALTER DEFAULT PRIVILEGES GRANT USAGE ON TYPES TO foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES GRANT USAGE ON TYPES TO foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES GRANT USAGE ON TYPES TO _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES GRANT USAGE ON TYPES TO foo, bar
----
ALTER DEFAULT PRIVILEGES GRANT USAGE ON TYPES TO foo, bar
ALTER DEFAULT PRIVILEGES GRANT USAGE ON TYPES TO foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES GRANT USAGE ON TYPES TO foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES GRANT USAGE ON TYPES TO _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES GRANT ALL ON SCHEMAS TO foo
----
ALTER DEFAULT PRIVILEGES GRANT ALL ON SCHEMAS TO foo
ALTER DEFAULT PRIVILEGES GRANT ALL ON SCHEMAS TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES GRANT ALL ON SCHEMAS TO foo -- literals removed
ALTER DEFAULT PRIVILEGES GRANT ALL ON SCHEMAS TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES GRANT USAGE ON SCHEMAS TO foo, bar
----
ALTER DEFAULT PRIVILEGES GRANT USAGE ON SCHEMAS TO foo, bar
ALTER DEFAULT PRIVILEGES GRANT USAGE ON SCHEMAS TO foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES GRANT USAGE ON SCHEMAS TO foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES GRANT USAGE ON SCHEMAS TO _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES GRANT USAGE, CREATE ON SCHEMAS TO foo, bar
----
ALTER DEFAULT PRIVILEGES GRANT USAGE, CREATE ON SCHEMAS TO foo, bar
ALTER DEFAULT PRIVILEGES GRANT USAGE, CREATE ON SCHEMAS TO foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES GRANT USAGE, CREATE ON SCHEMAS TO foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES GRANT USAGE, CREATE ON SCHEMAS TO _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT ALL ON TABLES TO foo
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT ALL ON TABLES TO foo
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT ALL ON TABLES TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT ALL ON TABLES TO foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ GRANT ALL ON TABLES TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT SELECT ON TABLES TO foo
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT SELECT ON TABLES TO foo
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT SELECT ON TABLES TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT SELECT ON TABLES TO foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ GRANT SELECT ON TABLES TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO foo, bar
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO foo, bar
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT ALL ON SEQUENCES TO foo
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT ALL ON SEQUENCES TO foo
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT ALL ON SEQUENCES TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT ALL ON SEQUENCES TO foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ GRANT ALL ON SEQUENCES TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT SELECT ON SEQUENCES TO foo
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT SELECT ON SEQUENCES TO foo
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT SELECT ON SEQUENCES TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT SELECT ON SEQUENCES TO foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ GRANT SELECT ON SEQUENCES TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT SELECT, UPDATE ON SEQUENCES TO foo, bar
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT SELECT, UPDATE ON SEQUENCES TO foo, bar
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT SELECT, UPDATE ON SEQUENCES TO foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT SELECT, UPDATE ON SEQUENCES TO foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ GRANT SELECT, UPDATE ON SEQUENCES TO _, _ -- identifiers removed
error
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT ALL ON FUNCTIONS TO foo
----
----
at or near "to": syntax error: unimplemented: this syntax
DETAIL: source SQL:
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT ALL ON FUNCTIONS TO foo
^
HINT: You have attempted to use a feature that is not yet implemented.
Please check the public issue tracker to check whether this problem is
already tracked. If you cannot find it there, please report the error
with details by creating a new issue.
If you would rather not post publicly, please contact us directly
using the support form.
We appreciate your feedback.
----
----
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT ALL ON TYPES TO foo
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT ALL ON TYPES TO foo
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT ALL ON TYPES TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT ALL ON TYPES TO foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ GRANT ALL ON TYPES TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT USAGE ON TYPES TO foo, bar
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT USAGE ON TYPES TO foo, bar
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT USAGE ON TYPES TO foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT USAGE ON TYPES TO foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ GRANT USAGE ON TYPES TO _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT USAGE ON TYPES TO foo, bar
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT USAGE ON TYPES TO foo, bar
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT USAGE ON TYPES TO foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT USAGE ON TYPES TO foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ GRANT USAGE ON TYPES TO _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT ALL ON SCHEMAS TO foo
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT ALL ON SCHEMAS TO foo
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT ALL ON SCHEMAS TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT ALL ON SCHEMAS TO foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ GRANT ALL ON SCHEMAS TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT USAGE ON SCHEMAS TO foo, bar
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT USAGE ON SCHEMAS TO foo, bar
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT USAGE ON SCHEMAS TO foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT USAGE ON SCHEMAS TO foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ GRANT USAGE ON SCHEMAS TO _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT USAGE, CREATE ON SCHEMAS TO foo, bar
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT USAGE, CREATE ON SCHEMAS TO foo, bar
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT USAGE, CREATE ON SCHEMAS TO foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo GRANT USAGE, CREATE ON SCHEMAS TO foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ GRANT USAGE, CREATE ON SCHEMAS TO _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT ALL ON TABLES TO foo
----
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT ALL ON TABLES TO foo
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT ALL ON TABLES TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT ALL ON TABLES TO foo -- literals removed
ALTER DEFAULT PRIVILEGES IN SCHEMA _ GRANT ALL ON TABLES TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT SELECT ON TABLES TO foo
----
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT SELECT ON TABLES TO foo
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT SELECT ON TABLES TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT SELECT ON TABLES TO foo -- literals removed
ALTER DEFAULT PRIVILEGES IN SCHEMA _ GRANT SELECT ON TABLES TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO foo, bar
----
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO foo, bar
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES IN SCHEMA _ GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT ALL ON SEQUENCES TO foo
----
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT ALL ON SEQUENCES TO foo
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT ALL ON SEQUENCES TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT ALL ON SEQUENCES TO foo -- literals removed
ALTER DEFAULT PRIVILEGES IN SCHEMA _ GRANT ALL ON SEQUENCES TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT SELECT ON SEQUENCES TO foo
----
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT SELECT ON SEQUENCES TO foo
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT SELECT ON SEQUENCES TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT SELECT ON SEQUENCES TO foo -- literals removed
ALTER DEFAULT PRIVILEGES IN SCHEMA _ GRANT SELECT ON SEQUENCES TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT SELECT, UPDATE ON SEQUENCES TO foo, bar
----
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT SELECT, UPDATE ON SEQUENCES TO foo, bar
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT SELECT, UPDATE ON SEQUENCES TO foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT SELECT, UPDATE ON SEQUENCES TO foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES IN SCHEMA _ GRANT SELECT, UPDATE ON SEQUENCES TO _, _ -- identifiers removed
error
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT ALL ON FUNCTIONS TO foo
----
----
at or near "to": syntax error: unimplemented: this syntax
DETAIL: source SQL:
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT ALL ON FUNCTIONS TO foo
^
HINT: You have attempted to use a feature that is not yet implemented.
Please check the public issue tracker to check whether this problem is
already tracked. If you cannot find it there, please report the error
with details by creating a new issue.
If you would rather not post publicly, please contact us directly
using the support form.
We appreciate your feedback.
----
----
parse
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT ALL ON TYPES TO foo
----
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT ALL ON TYPES TO foo
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT ALL ON TYPES TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT ALL ON TYPES TO foo -- literals removed
ALTER DEFAULT PRIVILEGES IN SCHEMA _ GRANT ALL ON TYPES TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT USAGE ON TYPES TO foo, bar
----
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT USAGE ON TYPES TO foo, bar
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT USAGE ON TYPES TO foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT USAGE ON TYPES TO foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES IN SCHEMA _ GRANT USAGE ON TYPES TO _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT USAGE ON TYPES TO foo, bar
----
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT USAGE ON TYPES TO foo, bar
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT USAGE ON TYPES TO foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT USAGE ON TYPES TO foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES IN SCHEMA _ GRANT USAGE ON TYPES TO _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT ALL ON SCHEMAS TO foo
----
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT ALL ON SCHEMAS TO foo
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT ALL ON SCHEMAS TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT ALL ON SCHEMAS TO foo -- literals removed
ALTER DEFAULT PRIVILEGES IN SCHEMA _ GRANT ALL ON SCHEMAS TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT USAGE ON SCHEMAS TO foo, bar
----
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT USAGE ON SCHEMAS TO foo, bar
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT USAGE ON SCHEMAS TO foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT USAGE ON SCHEMAS TO foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES IN SCHEMA _ GRANT USAGE ON SCHEMAS TO _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT USAGE, CREATE ON SCHEMAS TO foo, bar
----
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT USAGE, CREATE ON SCHEMAS TO foo, bar
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT USAGE, CREATE ON SCHEMAS TO foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES IN SCHEMA s GRANT USAGE, CREATE ON SCHEMAS TO foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES IN SCHEMA _ GRANT USAGE, CREATE ON SCHEMAS TO _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT ALL ON TABLES TO foo
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT ALL ON TABLES TO foo
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT ALL ON TABLES TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT ALL ON TABLES TO foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ GRANT ALL ON TABLES TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT SELECT ON TABLES TO foo
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT SELECT ON TABLES TO foo
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT SELECT ON TABLES TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT SELECT ON TABLES TO foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ GRANT SELECT ON TABLES TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO foo, bar
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO foo, bar
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT ALL ON SEQUENCES TO foo
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT ALL ON SEQUENCES TO foo
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT ALL ON SEQUENCES TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT ALL ON SEQUENCES TO foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ GRANT ALL ON SEQUENCES TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT SELECT ON SEQUENCES TO foo
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT SELECT ON SEQUENCES TO foo
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT SELECT ON SEQUENCES TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT SELECT ON SEQUENCES TO foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ GRANT SELECT ON SEQUENCES TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT SELECT, UPDATE ON SEQUENCES TO foo, bar
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT SELECT, UPDATE ON SEQUENCES TO foo, bar
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT SELECT, UPDATE ON SEQUENCES TO foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT SELECT, UPDATE ON SEQUENCES TO foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ GRANT SELECT, UPDATE ON SEQUENCES TO _, _ -- identifiers removed
error
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT ALL ON FUNCTIONS TO foo
----
----
at or near "to": syntax error: unimplemented: this syntax
DETAIL: source SQL:
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT ALL ON FUNCTIONS TO foo
^
HINT: You have attempted to use a feature that is not yet implemented.
Please check the public issue tracker to check whether this problem is
already tracked. If you cannot find it there, please report the error
with details by creating a new issue.
If you would rather not post publicly, please contact us directly
using the support form.
We appreciate your feedback.
----
----
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT ALL ON TYPES TO foo
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT ALL ON TYPES TO foo
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT ALL ON TYPES TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT ALL ON TYPES TO foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ GRANT ALL ON TYPES TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT USAGE ON TYPES TO foo, bar
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT USAGE ON TYPES TO foo, bar
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT USAGE ON TYPES TO foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT USAGE ON TYPES TO foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ GRANT USAGE ON TYPES TO _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT USAGE ON TYPES TO foo, bar
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT USAGE ON TYPES TO foo, bar
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT USAGE ON TYPES TO foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT USAGE ON TYPES TO foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ GRANT USAGE ON TYPES TO _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT ALL ON SCHEMAS TO foo
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT ALL ON SCHEMAS TO foo
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT ALL ON SCHEMAS TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT ALL ON SCHEMAS TO foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ GRANT ALL ON SCHEMAS TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT USAGE ON SCHEMAS TO foo, bar
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT USAGE ON SCHEMAS TO foo, bar
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT USAGE ON SCHEMAS TO foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT USAGE ON SCHEMAS TO foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ GRANT USAGE ON SCHEMAS TO _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT USAGE, CREATE ON SCHEMAS TO foo, bar
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT USAGE, CREATE ON SCHEMAS TO foo, bar
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT USAGE, CREATE ON SCHEMAS TO foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT USAGE, CREATE ON SCHEMAS TO foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ GRANT USAGE, CREATE ON SCHEMAS TO _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES GRANT ALL ON TABLES TO foo WITH GRANT OPTION
----
ALTER DEFAULT PRIVILEGES GRANT ALL ON TABLES TO foo WITH GRANT OPTION
ALTER DEFAULT PRIVILEGES GRANT ALL ON TABLES TO foo WITH GRANT OPTION -- fully parenthesized
ALTER DEFAULT PRIVILEGES GRANT ALL ON TABLES TO foo WITH GRANT OPTION -- literals removed
ALTER DEFAULT PRIVILEGES GRANT ALL ON TABLES TO _ WITH GRANT OPTION -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT USAGE, CREATE ON SCHEMAS TO foo, bar WITH GRANT OPTION
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT USAGE, CREATE ON SCHEMAS TO foo, bar WITH GRANT OPTION
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT USAGE, CREATE ON SCHEMAS TO foo, bar WITH GRANT OPTION -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s GRANT USAGE, CREATE ON SCHEMAS TO foo, bar WITH GRANT OPTION -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ GRANT USAGE, CREATE ON SCHEMAS TO _, _ WITH GRANT OPTION -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES REVOKE ALL ON TABLES FROM foo
----
ALTER DEFAULT PRIVILEGES REVOKE ALL ON TABLES FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES REVOKE ALL ON TABLES FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES REVOKE ALL ON TABLES FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES REVOKE ALL ON TABLES FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES REVOKE SELECT ON TABLES FROM foo
----
ALTER DEFAULT PRIVILEGES REVOKE SELECT ON TABLES FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES REVOKE SELECT ON TABLES FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES REVOKE SELECT ON TABLES FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES REVOKE SELECT ON TABLES FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES REVOKE SELECT, INSERT, UPDATE, DELETE ON TABLES FROM foo, bar
----
ALTER DEFAULT PRIVILEGES REVOKE SELECT, INSERT, UPDATE, DELETE ON TABLES FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES REVOKE SELECT, INSERT, UPDATE, DELETE ON TABLES FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES REVOKE SELECT, INSERT, UPDATE, DELETE ON TABLES FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES REVOKE SELECT, INSERT, UPDATE, DELETE ON TABLES FROM _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES REVOKE ALL ON SEQUENCES FROM foo
----
ALTER DEFAULT PRIVILEGES REVOKE ALL ON SEQUENCES FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES REVOKE ALL ON SEQUENCES FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES REVOKE ALL ON SEQUENCES FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES REVOKE ALL ON SEQUENCES FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES REVOKE SELECT ON SEQUENCES FROM foo
----
ALTER DEFAULT PRIVILEGES REVOKE SELECT ON SEQUENCES FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES REVOKE SELECT ON SEQUENCES FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES REVOKE SELECT ON SEQUENCES FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES REVOKE SELECT ON SEQUENCES FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES REVOKE SELECT, UPDATE ON SEQUENCES FROM foo, bar
----
ALTER DEFAULT PRIVILEGES REVOKE SELECT, UPDATE ON SEQUENCES FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES REVOKE SELECT, UPDATE ON SEQUENCES FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES REVOKE SELECT, UPDATE ON SEQUENCES FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES REVOKE SELECT, UPDATE ON SEQUENCES FROM _, _ -- identifiers removed
error
ALTER DEFAULT PRIVILEGES REVOKE ALL ON FUNCTIONS FROM foo
----
----
at or near "from": syntax error: unimplemented: this syntax
DETAIL: source SQL:
ALTER DEFAULT PRIVILEGES REVOKE ALL ON FUNCTIONS FROM foo
^
HINT: You have attempted to use a feature that is not yet implemented.
Please check the public issue tracker to check whether this problem is
already tracked. If you cannot find it there, please report the error
with details by creating a new issue.
If you would rather not post publicly, please contact us directly
using the support form.
We appreciate your feedback.
----
----
error
ALTER DEFAULT PRIVILEGES REVOKE ALL ON FUNCTIONS FROM foo, bar
----
----
at or near "from": syntax error: unimplemented: this syntax
DETAIL: source SQL:
ALTER DEFAULT PRIVILEGES REVOKE ALL ON FUNCTIONS FROM foo, bar
^
HINT: You have attempted to use a feature that is not yet implemented.
Please check the public issue tracker to check whether this problem is
already tracked. If you cannot find it there, please report the error
with details by creating a new issue.
If you would rather not post publicly, please contact us directly
using the support form.
We appreciate your feedback.
----
----
parse
ALTER DEFAULT PRIVILEGES REVOKE ALL ON TYPES FROM foo
----
ALTER DEFAULT PRIVILEGES REVOKE ALL ON TYPES FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES REVOKE ALL ON TYPES FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES REVOKE ALL ON TYPES FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES REVOKE ALL ON TYPES FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES REVOKE USAGE ON TYPES FROM foo, bar
----
ALTER DEFAULT PRIVILEGES REVOKE USAGE ON TYPES FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES REVOKE USAGE ON TYPES FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES REVOKE USAGE ON TYPES FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES REVOKE USAGE ON TYPES FROM _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES REVOKE USAGE ON TYPES FROM foo, bar
----
ALTER DEFAULT PRIVILEGES REVOKE USAGE ON TYPES FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES REVOKE USAGE ON TYPES FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES REVOKE USAGE ON TYPES FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES REVOKE USAGE ON TYPES FROM _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES REVOKE ALL ON SCHEMAS FROM foo
----
ALTER DEFAULT PRIVILEGES REVOKE ALL ON SCHEMAS FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES REVOKE ALL ON SCHEMAS FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES REVOKE ALL ON SCHEMAS FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES REVOKE ALL ON SCHEMAS FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES REVOKE USAGE ON SCHEMAS FROM foo, bar
----
ALTER DEFAULT PRIVILEGES REVOKE USAGE ON SCHEMAS FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES REVOKE USAGE ON SCHEMAS FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES REVOKE USAGE ON SCHEMAS FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES REVOKE USAGE ON SCHEMAS FROM _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES REVOKE USAGE, CREATE ON SCHEMAS FROM foo, bar
----
ALTER DEFAULT PRIVILEGES REVOKE USAGE, CREATE ON SCHEMAS FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES REVOKE USAGE, CREATE ON SCHEMAS FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES REVOKE USAGE, CREATE ON SCHEMAS FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES REVOKE USAGE, CREATE ON SCHEMAS FROM _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE ALL ON TABLES FROM foo
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE ALL ON TABLES FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE ALL ON TABLES FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE ALL ON TABLES FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ REVOKE ALL ON TABLES FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE SELECT ON TABLES FROM foo
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE SELECT ON TABLES FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE SELECT ON TABLES FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE SELECT ON TABLES FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ REVOKE SELECT ON TABLES FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE SELECT, INSERT, UPDATE, DELETE ON TABLES FROM foo, bar
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE SELECT, INSERT, UPDATE, DELETE ON TABLES FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE SELECT, INSERT, UPDATE, DELETE ON TABLES FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE SELECT, INSERT, UPDATE, DELETE ON TABLES FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ REVOKE SELECT, INSERT, UPDATE, DELETE ON TABLES FROM _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE ALL ON SEQUENCES FROM foo
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE ALL ON SEQUENCES FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE ALL ON SEQUENCES FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE ALL ON SEQUENCES FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ REVOKE ALL ON SEQUENCES FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE SELECT ON SEQUENCES FROM foo
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE SELECT ON SEQUENCES FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE SELECT ON SEQUENCES FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE SELECT ON SEQUENCES FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ REVOKE SELECT ON SEQUENCES FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE SELECT, UPDATE ON SEQUENCES FROM foo, bar
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE SELECT, UPDATE ON SEQUENCES FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE SELECT, UPDATE ON SEQUENCES FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE SELECT, UPDATE ON SEQUENCES FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ REVOKE SELECT, UPDATE ON SEQUENCES FROM _, _ -- identifiers removed
error
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE ALL ON FUNCTIONS FROM foo
----
----
at or near "from": syntax error: unimplemented: this syntax
DETAIL: source SQL:
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE ALL ON FUNCTIONS FROM foo
^
HINT: You have attempted to use a feature that is not yet implemented.
Please check the public issue tracker to check whether this problem is
already tracked. If you cannot find it there, please report the error
with details by creating a new issue.
If you would rather not post publicly, please contact us directly
using the support form.
We appreciate your feedback.
----
----
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE ALL ON TYPES FROM foo
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE ALL ON TYPES FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE ALL ON TYPES FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE ALL ON TYPES FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ REVOKE ALL ON TYPES FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE USAGE ON TYPES FROM foo, bar
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE USAGE ON TYPES FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE USAGE ON TYPES FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE USAGE ON TYPES FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ REVOKE USAGE ON TYPES FROM _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE USAGE ON TYPES FROM foo, bar
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE USAGE ON TYPES FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE USAGE ON TYPES FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE USAGE ON TYPES FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ REVOKE USAGE ON TYPES FROM _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE ALL ON SCHEMAS FROM foo
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE ALL ON SCHEMAS FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE ALL ON SCHEMAS FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE ALL ON SCHEMAS FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ REVOKE ALL ON SCHEMAS FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE USAGE ON SCHEMAS FROM foo, bar
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE USAGE ON SCHEMAS FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE USAGE ON SCHEMAS FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE USAGE ON SCHEMAS FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ REVOKE USAGE ON SCHEMAS FROM _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE USAGE, CREATE ON SCHEMAS FROM foo, bar
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE USAGE, CREATE ON SCHEMAS FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE USAGE, CREATE ON SCHEMAS FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo REVOKE USAGE, CREATE ON SCHEMAS FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ REVOKE USAGE, CREATE ON SCHEMAS FROM _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE ALL ON TABLES FROM foo
----
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE ALL ON TABLES FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE ALL ON TABLES FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE ALL ON TABLES FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES IN SCHEMA _ REVOKE ALL ON TABLES FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE SELECT ON TABLES FROM foo
----
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE SELECT ON TABLES FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE SELECT ON TABLES FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE SELECT ON TABLES FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES IN SCHEMA _ REVOKE SELECT ON TABLES FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE SELECT, INSERT, UPDATE, DELETE ON TABLES FROM foo, bar
----
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE SELECT, INSERT, UPDATE, DELETE ON TABLES FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE SELECT, INSERT, UPDATE, DELETE ON TABLES FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE SELECT, INSERT, UPDATE, DELETE ON TABLES FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES IN SCHEMA _ REVOKE SELECT, INSERT, UPDATE, DELETE ON TABLES FROM _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE ALL ON SEQUENCES FROM foo
----
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE ALL ON SEQUENCES FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE ALL ON SEQUENCES FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE ALL ON SEQUENCES FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES IN SCHEMA _ REVOKE ALL ON SEQUENCES FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE SELECT ON SEQUENCES FROM foo
----
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE SELECT ON SEQUENCES FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE SELECT ON SEQUENCES FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE SELECT ON SEQUENCES FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES IN SCHEMA _ REVOKE SELECT ON SEQUENCES FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE SELECT, UPDATE ON SEQUENCES FROM foo, bar
----
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE SELECT, UPDATE ON SEQUENCES FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE SELECT, UPDATE ON SEQUENCES FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE SELECT, UPDATE ON SEQUENCES FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES IN SCHEMA _ REVOKE SELECT, UPDATE ON SEQUENCES FROM _, _ -- identifiers removed
error
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE ALL ON FUNCTIONS FROM foo
----
----
at or near "from": syntax error: unimplemented: this syntax
DETAIL: source SQL:
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE ALL ON FUNCTIONS FROM foo
^
HINT: You have attempted to use a feature that is not yet implemented.
Please check the public issue tracker to check whether this problem is
already tracked. If you cannot find it there, please report the error
with details by creating a new issue.
If you would rather not post publicly, please contact us directly
using the support form.
We appreciate your feedback.
----
----
parse
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE ALL ON TYPES FROM foo
----
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE ALL ON TYPES FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE ALL ON TYPES FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE ALL ON TYPES FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES IN SCHEMA _ REVOKE ALL ON TYPES FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE USAGE ON TYPES FROM foo, bar
----
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE USAGE ON TYPES FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE USAGE ON TYPES FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE USAGE ON TYPES FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES IN SCHEMA _ REVOKE USAGE ON TYPES FROM _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE USAGE ON TYPES FROM foo, bar
----
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE USAGE ON TYPES FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE USAGE ON TYPES FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE USAGE ON TYPES FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES IN SCHEMA _ REVOKE USAGE ON TYPES FROM _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE ALL ON SCHEMAS FROM foo
----
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE ALL ON SCHEMAS FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE ALL ON SCHEMAS FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE ALL ON SCHEMAS FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES IN SCHEMA _ REVOKE ALL ON SCHEMAS FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE USAGE ON SCHEMAS FROM foo, bar
----
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE USAGE ON SCHEMAS FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE USAGE ON SCHEMAS FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE USAGE ON SCHEMAS FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES IN SCHEMA _ REVOKE USAGE ON SCHEMAS FROM _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE USAGE, CREATE ON SCHEMAS FROM foo, bar
----
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE USAGE, CREATE ON SCHEMAS FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE USAGE, CREATE ON SCHEMAS FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES IN SCHEMA s REVOKE USAGE, CREATE ON SCHEMAS FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES IN SCHEMA _ REVOKE USAGE, CREATE ON SCHEMAS FROM _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE ALL ON TABLES FROM foo
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE ALL ON TABLES FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE ALL ON TABLES FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE ALL ON TABLES FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ REVOKE ALL ON TABLES FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE SELECT ON TABLES FROM foo
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE SELECT ON TABLES FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE SELECT ON TABLES FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE SELECT ON TABLES FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ REVOKE SELECT ON TABLES FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE SELECT, INSERT, UPDATE, DELETE ON TABLES FROM foo, bar
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE SELECT, INSERT, UPDATE, DELETE ON TABLES FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE SELECT, INSERT, UPDATE, DELETE ON TABLES FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE SELECT, INSERT, UPDATE, DELETE ON TABLES FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ REVOKE SELECT, INSERT, UPDATE, DELETE ON TABLES FROM _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE ALL ON SEQUENCES FROM foo
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE ALL ON SEQUENCES FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE ALL ON SEQUENCES FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE ALL ON SEQUENCES FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ REVOKE ALL ON SEQUENCES FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE SELECT ON SEQUENCES FROM foo
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE SELECT ON SEQUENCES FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE SELECT ON SEQUENCES FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE SELECT ON SEQUENCES FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ REVOKE SELECT ON SEQUENCES FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE SELECT, UPDATE ON SEQUENCES FROM foo, bar
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE SELECT, UPDATE ON SEQUENCES FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE SELECT, UPDATE ON SEQUENCES FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE SELECT, UPDATE ON SEQUENCES FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ REVOKE SELECT, UPDATE ON SEQUENCES FROM _, _ -- identifiers removed
error
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE ALL ON FUNCTIONS FROM foo
----
----
at or near "from": syntax error: unimplemented: this syntax
DETAIL: source SQL:
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE ALL ON FUNCTIONS FROM foo
^
HINT: You have attempted to use a feature that is not yet implemented.
Please check the public issue tracker to check whether this problem is
already tracked. If you cannot find it there, please report the error
with details by creating a new issue.
If you would rather not post publicly, please contact us directly
using the support form.
We appreciate your feedback.
----
----
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE ALL ON TYPES FROM foo
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE ALL ON TYPES FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE ALL ON TYPES FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE ALL ON TYPES FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ REVOKE ALL ON TYPES FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE USAGE ON TYPES FROM foo, bar
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE USAGE ON TYPES FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE USAGE ON TYPES FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE USAGE ON TYPES FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ REVOKE USAGE ON TYPES FROM _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE USAGE ON TYPES FROM foo, bar
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE USAGE ON TYPES FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE USAGE ON TYPES FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE USAGE ON TYPES FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ REVOKE USAGE ON TYPES FROM _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE ALL ON SCHEMAS FROM foo
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE ALL ON SCHEMAS FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE ALL ON SCHEMAS FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE ALL ON SCHEMAS FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ REVOKE ALL ON SCHEMAS FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE USAGE ON SCHEMAS FROM foo, bar
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE USAGE ON SCHEMAS FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE USAGE ON SCHEMAS FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE USAGE ON SCHEMAS FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ REVOKE USAGE ON SCHEMAS FROM _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE USAGE, CREATE ON SCHEMAS FROM foo, bar
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE USAGE, CREATE ON SCHEMAS FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE USAGE, CREATE ON SCHEMAS FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE USAGE, CREATE ON SCHEMAS FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ REVOKE USAGE, CREATE ON SCHEMAS FROM _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES REVOKE GRANT OPTION FOR ALL ON TABLES FROM foo
----
ALTER DEFAULT PRIVILEGES REVOKE GRANT OPTION FOR ALL ON TABLES FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES REVOKE GRANT OPTION FOR ALL ON TABLES FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES REVOKE GRANT OPTION FOR ALL ON TABLES FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES REVOKE GRANT OPTION FOR ALL ON TABLES FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE GRANT OPTION FOR USAGE, CREATE ON SCHEMAS FROM foo, bar
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE GRANT OPTION FOR USAGE, CREATE ON SCHEMAS FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE GRANT OPTION FOR USAGE, CREATE ON SCHEMAS FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo IN SCHEMA s REVOKE GRANT OPTION FOR USAGE, CREATE ON SCHEMAS FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _ IN SCHEMA _ REVOKE GRANT OPTION FOR USAGE, CREATE ON SCHEMAS FROM _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo, bar GRANT ALL ON TABLES TO FOO
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo, bar GRANT ALL ON TABLES TO foo -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo, bar GRANT ALL ON TABLES TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo, bar GRANT ALL ON TABLES TO foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _, _ GRANT ALL ON TABLES TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES IN SCHEMA s1, s2 GRANT ALL ON TABLES TO FOO
----
ALTER DEFAULT PRIVILEGES IN SCHEMA s1, s2 GRANT ALL ON TABLES TO foo -- normalized!
ALTER DEFAULT PRIVILEGES IN SCHEMA s1, s2 GRANT ALL ON TABLES TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES IN SCHEMA s1, s2 GRANT ALL ON TABLES TO foo -- literals removed
ALTER DEFAULT PRIVILEGES IN SCHEMA _, _ GRANT ALL ON TABLES TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ROLE foo, bar IN SCHEMA s1, s2 GRANT ALL ON TABLES TO FOO
----
ALTER DEFAULT PRIVILEGES FOR ROLE foo, bar IN SCHEMA s1, s2 GRANT ALL ON TABLES TO foo -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE foo, bar IN SCHEMA s1, s2 GRANT ALL ON TABLES TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE foo, bar IN SCHEMA s1, s2 GRANT ALL ON TABLES TO foo -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _, _ IN SCHEMA _, _ GRANT ALL ON TABLES TO _ -- identifiers removed
# ALTER DEFAULT PRIVILEGES should be case insensitive for role names.
parse
ALTER DEFAULT PRIVILEGES FOR ROLE "roLeA", roleB GRANT SELECT ON TABLES TO TestUser2, tEstusEr3
----
ALTER DEFAULT PRIVILEGES FOR ROLE "roLeA", roleb GRANT SELECT ON TABLES TO testuser2, testuser3 -- normalized!
ALTER DEFAULT PRIVILEGES FOR ROLE "roLeA", roleb GRANT SELECT ON TABLES TO testuser2, testuser3 -- fully parenthesized
ALTER DEFAULT PRIVILEGES FOR ROLE "roLeA", roleb GRANT SELECT ON TABLES TO testuser2, testuser3 -- literals removed
ALTER DEFAULT PRIVILEGES FOR ROLE _, _ GRANT SELECT ON TABLES TO _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ALL ROLES GRANT SELECT ON TABLES TO foo
----
ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES TO foo -- normalized!
ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES TO foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES TO foo -- literals removed
ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES TO _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ALL ROLES GRANT SELECT ON TABLES TO foo,bar
----
ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES TO foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES TO foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES TO foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES TO _, _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ALL ROLES REVOKE SELECT ON TABLES FROM foo
----
ALTER DEFAULT PRIVILEGES REVOKE SELECT ON TABLES FROM foo -- normalized!
ALTER DEFAULT PRIVILEGES REVOKE SELECT ON TABLES FROM foo -- fully parenthesized
ALTER DEFAULT PRIVILEGES REVOKE SELECT ON TABLES FROM foo -- literals removed
ALTER DEFAULT PRIVILEGES REVOKE SELECT ON TABLES FROM _ -- identifiers removed
parse
ALTER DEFAULT PRIVILEGES FOR ALL ROLES REVOKE SELECT ON TABLES FROM foo, bar
----
ALTER DEFAULT PRIVILEGES REVOKE SELECT ON TABLES FROM foo, bar -- normalized!
ALTER DEFAULT PRIVILEGES REVOKE SELECT ON TABLES FROM foo, bar -- fully parenthesized
ALTER DEFAULT PRIVILEGES REVOKE SELECT ON TABLES FROM foo, bar -- literals removed
ALTER DEFAULT PRIVILEGES REVOKE SELECT ON TABLES FROM _, _ -- identifiers removed
| pkg/sql/parser/testdata/alter_default_privileges | 0 | https://github.com/cockroachdb/cockroach/commit/6a1b9c891f20b32d985a451118f690de01dc6e37 | [
0.0001962234528036788,
0.00016739538114052266,
0.00015991649706847966,
0.0001665844611125067,
0.000005581304321822245
] |
{
"id": 1,
"code_window": [
"\t\tfor i := range vals {\n",
"\t\t\tvals[i] = vals[i] % distinctModulo\n",
"\t\t}\n",
"\t}\n",
"\tif agg.order == partial {\n",
"\t\ttc.unorderedInput = false\n",
"\t\ttc.orderedCols = []uint32{0}\n",
"\t}\n",
"\trequire.NoError(b, tc.init())\n",
"\tconstructors, constArguments, outputTypes, err := colexecagg.ProcessAggregations(\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/colexec/aggregators_test.go",
"type": "replace",
"edit_start_line_idx": 1017
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colexec
import (
"context"
"fmt"
"math"
"testing"
"github.com/cockroachdb/apd/v2"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/col/coldatatestutils"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecagg"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexectestutils"
"github.com/cockroachdb/cockroach/pkg/sql/colexecerror"
"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/util/duration"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
"github.com/cockroachdb/cockroach/pkg/util/timeofday"
"github.com/stretchr/testify/require"
)
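// aggregatorTestCase declaratively describes a single aggregation scenario
// shared by the tests and benchmarks in this file: the typed input tuples, the
// grouping and aggregation columns, the aggregate functions to apply, and the
// expected output. init must be called to materialize spec before use.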
type aggregatorTestCase struct {
name string
typs []*types.T
input colexectestutils.Tuples
groupCols []uint32
aggCols [][]uint32
aggFns []execinfrapb.AggregatorSpec_Func
expected colexectestutils.Tuples
constArguments [][]execinfrapb.Expression
// spec will be populated during init().
spec *execinfrapb.AggregatorSpec
aggDistinct []bool
aggFilter []int
unorderedInput bool
orderedCols []uint32
// convToDecimal will convert any float64s to apd.Decimals. If a string is
// encountered, a best effort is made to convert that string to an
// apd.Decimal.
convToDecimal bool
}
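// ordering describes how the input is sorted on its grouping columns for a
// given aggregator under test: fully ordered, ordered on only a prefix of the
// grouping columns (partial), or not ordered at all (unordered).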
type ordering int64
const (
ordered ordering = iota
partial
unordered
)
// aggType is a helper struct that allows tests to test both the ordered and
// hash aggregators at the same time.
type aggType struct {
new func(*colexecagg.NewAggregatorArgs) (colexecop.ResettableOperator, error)
name string
order ordering
}
var aggTypesWithPartial = []aggType{
{
// This is a wrapper around NewHashAggregator so its signature is
// compatible with NewOrderedAggregator.
new: func(args *colexecagg.NewAggregatorArgs) (colexecop.ResettableOperator, error) {
return NewHashAggregator(args, nil /* newSpillingQueueArgs */, testAllocator, math.MaxInt64)
},
name: "hash",
order: unordered,
},
{
new: NewOrderedAggregator,
name: "ordered",
order: ordered,
},
{
// This is a wrapper around NewHashAggregator so its signature is
// compatible with NewOrderedAggregator.
new: func(args *colexecagg.NewAggregatorArgs) (colexecop.ResettableOperator, error) {
return NewHashAggregator(args, nil /* newSpillingQueueArgs */, testAllocator, math.MaxInt64)
},
name: "hash-partial-order",
order: partial,
},
}
var aggTypes = aggTypesWithPartial[:1]
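// init optionally converts float64 values in the input and expected tuples to
// apd.Decimals and materializes tc.spec (including the required output
// ordering for ordered inputs) from the declarative fields above.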
func (tc *aggregatorTestCase) init() error {
if tc.convToDecimal {
for _, tuples := range []colexectestutils.Tuples{tc.input, tc.expected} {
for _, tuple := range tuples {
for i, e := range tuple {
switch v := e.(type) {
case float64:
d := &apd.Decimal{}
d, err := d.SetFloat64(v)
if err != nil {
return err
}
tuple[i] = *d
case string:
d := &apd.Decimal{}
d, _, err := d.SetString(v)
if err != nil {
// If there was an error converting the string to decimal, just
// leave the datum as is.
continue
}
tuple[i] = *d
}
}
}
}
}
aggregations := make([]execinfrapb.AggregatorSpec_Aggregation, len(tc.aggFns))
for i, aggFn := range tc.aggFns {
aggregations[i].Func = aggFn
aggregations[i].ColIdx = tc.aggCols[i]
if tc.constArguments != nil {
aggregations[i].Arguments = tc.constArguments[i]
}
if tc.aggDistinct != nil {
aggregations[i].Distinct = tc.aggDistinct[i]
}
if tc.aggFilter != nil && tc.aggFilter[i] != tree.NoColumnIdx {
filterColIdx := uint32(tc.aggFilter[i])
aggregations[i].FilterColIdx = &filterColIdx
}
}
tc.spec = &execinfrapb.AggregatorSpec{
GroupCols: tc.groupCols,
Aggregations: aggregations,
}
if !tc.unorderedInput {
var outputOrderCols []uint32
if len(tc.orderedCols) == 0 {
outputOrderCols = tc.spec.GroupCols
} else {
outputOrderCols = tc.orderedCols
tc.spec.OrderedGroupCols = tc.orderedCols
}
// If input grouping columns have an ordering, then we'll require the
// output to also have the same ordering.
outputOrdering := execinfrapb.Ordering{Columns: make([]execinfrapb.Ordering_Column, len(outputOrderCols))}
for i, col := range outputOrderCols {
outputOrdering.Columns[i].ColIdx = col
}
tc.spec.OutputOrdering = outputOrdering
}
return nil
}
var aggregatorsTestCases = []aggregatorTestCase{
{
name: "OneTuple",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{0, 1},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 1},
},
},
{
name: "OneGroup",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{0, 1},
{0, 1},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 2},
},
},
{
name: "MultiGroup",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{0, 1},
{0, 0},
{0, 1},
{1, 4},
{2, 5},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 2},
{1, 4},
{2, 5},
},
},
{
name: "CarryBetweenInputBatches",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{0, 1},
{0, 2},
{0, 3},
{1, 4},
{1, 5},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 6},
{1, 9},
},
},
{
name: "CarryBetweenOutputBatches",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{0, 1},
{0, 2},
{0, 3},
{0, 4},
{1, 5},
{2, 6},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 10},
{1, 5},
{2, 6},
},
},
{
name: "CarryBetweenInputAndOutputBatches",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{0, 1},
{0, 1},
{1, 2},
{2, 3},
{2, 3},
{3, 4},
{3, 4},
{4, 5},
{5, 6},
{6, 7},
{7, 8},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 2},
{1, 2},
{2, 6},
{3, 8},
{4, 5},
{5, 6},
{6, 7},
{7, 8},
},
},
{
name: "NoGroupingCols",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{0, 1},
{0, 2},
{0, 3},
{0, 4},
},
groupCols: []uint32{},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 10},
},
},
{
name: "UnorderedWithNullsInGroupingCol",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{nil, 1},
{4, 42},
{nil, 2},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{nil, 3},
{4, 42},
},
unorderedInput: true,
},
{
name: "CountRows",
typs: types.OneIntCol,
input: colexectestutils.Tuples{
{1},
{2},
{1},
{nil},
{3},
{1},
{3},
{4},
{1},
{nil},
{2},
{4},
{2},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.CountRows,
},
expected: colexectestutils.Tuples{
{nil, 2},
{1, 4},
{2, 3},
{3, 2},
{4, 2},
},
unorderedInput: true,
},
{
name: "OutputOrder",
typs: types.ThreeIntCols,
input: colexectestutils.Tuples{
{0, 1, 2},
{0, 1, 2},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {2}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 4, 2},
},
},
{
name: "SumMultiType",
typs: []*types.T{types.Int, types.Int, types.Decimal},
input: colexectestutils.Tuples{
{0, 1, 1.3},
{0, 1, 1.6},
{0, 1, 0.5},
{1, 1, 1.2},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {2}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.Sum,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 3.4, 3},
{1, 1.2, 1},
},
convToDecimal: true,
},
{
name: "AvgSumSingleInputBatch",
typs: []*types.T{types.Int, types.Decimal},
input: colexectestutils.Tuples{
{0, 1.1},
{0, 1.2},
{0, 2.3},
{1, 6.21},
{1, 2.43},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.Avg,
execinfrapb.Sum,
},
expected: colexectestutils.Tuples{
{0, "1.5333333333333333333", 4.6},
{1, 4.32, 8.64},
},
convToDecimal: true,
},
{
name: "BoolAndOrBatch",
typs: []*types.T{types.Int, types.Bool},
input: colexectestutils.Tuples{
{0, true},
{1, false},
{2, true},
{2, false},
{3, true},
{3, true},
{4, false},
{4, false},
{5, false},
{5, nil},
{6, nil},
{6, true},
{7, nil},
{7, false},
{7, true},
{8, nil},
{8, nil},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.BoolAnd,
execinfrapb.BoolOr,
},
expected: colexectestutils.Tuples{
{0, true, true},
{1, false, false},
{2, false, true},
{3, true, true},
{4, false, false},
{5, false, false},
{6, true, true},
{7, false, true},
{8, nil, nil},
},
},
{
name: "MultiGroupColsWithPointerTypes",
typs: []*types.T{types.Int, types.Decimal, types.Bytes, types.Decimal},
input: colexectestutils.Tuples{
{2, 1.0, "1.0", 2.0},
{2, 1.0, "1.0", 4.0},
{2, 2.0, "2.0", 6.0},
},
groupCols: []uint32{0, 1, 2},
aggCols: [][]uint32{{0}, {1}, {2}, {3}, {3}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.AnyNotNull,
execinfrapb.AnyNotNull,
execinfrapb.Min,
execinfrapb.Sum,
},
expected: colexectestutils.Tuples{
{2, 1.0, "1.0", 2.0, 6.0},
{2, 2.0, "2.0", 6.0, 6.0},
},
},
{
name: "GroupOnTimeTZColumns",
typs: []*types.T{types.TimeTZ, types.Int},
input: colexectestutils.Tuples{
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 0), -1},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 1), 1},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 1), 2},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 2), 10},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 2), 11},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 3), 100},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 3), 101},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 4), 102},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 0), -1},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 1), 3},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 2), 21},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 3), 201},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 4), 102},
},
},
{
name: "AVG on all types",
typs: []*types.T{types.Int, types.Int2, types.Int4, types.Int, types.Decimal, types.Float, types.Interval},
input: colexectestutils.Tuples{
{0, nil, 1, 1, 1.0, 1.0, duration.MakeDuration(1, 1, 1)},
{0, 1, nil, 2, 2.0, 2.0, duration.MakeDuration(2, 2, 2)},
{0, 2, 2, nil, 3.0, 3.0, duration.MakeDuration(3, 3, 3)},
{0, 3, 3, 3, nil, 4.0, duration.MakeDuration(4, 4, 4)},
{0, 4, 4, 4, 4.0, nil, duration.MakeDuration(5, 5, 5)},
{0, 5, 5, 5, 5.0, 5.0, nil},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}, {2}, {3}, {4}, {5}, {6}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.Avg,
execinfrapb.Avg,
execinfrapb.Avg,
execinfrapb.Avg,
execinfrapb.Avg,
execinfrapb.Avg,
},
expected: colexectestutils.Tuples{
{0, 3.0, 3.0, 3.0, 3.0, 3.0, duration.MakeDuration(3, 3, 3)},
},
},
{
name: "ConcatAgg",
typs: []*types.T{types.Int, types.Bytes},
input: colexectestutils.Tuples{
{1, "1"},
{1, "2"},
{1, "3"},
{2, nil},
{2, "1"},
{2, "2"},
{3, "1"},
{3, nil},
{3, "2"},
{4, nil},
{4, nil},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.ConcatAgg,
},
expected: colexectestutils.Tuples{
{1, "123"},
{2, "12"},
{3, "12"},
{4, nil},
},
},
{
name: "All",
typs: []*types.T{types.Int, types.Decimal, types.Int, types.Bool, types.Bytes},
input: colexectestutils.Tuples{
{0, 3.1, 2, true, "zero"},
{0, 1.1, 3, false, "zero"},
{1, 1.1, 1, false, "one"},
{1, 4.1, 0, false, "one"},
{2, 1.1, 1, true, "two"},
{3, 4.1, 0, false, "three"},
{3, 5.1, 0, true, "three"},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {}, {1}, {1}, {1}, {2}, {2}, {2}, {3}, {3}, {4}, {4}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.CountRows,
execinfrapb.Avg,
execinfrapb.Count,
execinfrapb.Sum,
execinfrapb.SumInt,
execinfrapb.Min,
execinfrapb.Max,
execinfrapb.BoolAnd,
execinfrapb.BoolOr,
execinfrapb.AnyNotNull,
execinfrapb.ConcatAgg,
},
expected: colexectestutils.Tuples{
{0, 2, 2.1, 2, 4.2, 5, 2, 3, false, true, "zero", "zerozero"},
{1, 2, 2.6, 2, 5.2, 1, 0, 1, false, false, "one", "oneone"},
{2, 1, 1.1, 1, 1.1, 1, 1, 1, true, true, "two", "two"},
{3, 2, 4.6, 2, 9.2, 0, 0, 0, false, true, "three", "threethree"},
},
convToDecimal: true,
},
{
name: "NullHandling",
typs: []*types.T{types.Int, types.Decimal, types.Int, types.Bool, types.Bytes},
input: colexectestutils.Tuples{
{nil, 1.1, 4, true, "a"},
{0, nil, nil, nil, nil},
{0, 3.1, 5, nil, "b"},
{1, nil, nil, nil, nil},
{1, nil, nil, false, nil},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {}, {1}, {1}, {1}, {1}, {2}, {2}, {2}, {3}, {3}, {4}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.CountRows,
execinfrapb.AnyNotNull,
execinfrapb.Count,
execinfrapb.Sum,
execinfrapb.Avg,
execinfrapb.SumInt,
execinfrapb.Min,
execinfrapb.Max,
execinfrapb.BoolAnd,
execinfrapb.BoolOr,
execinfrapb.ConcatAgg,
},
expected: colexectestutils.Tuples{
{nil, 1, 1.1, 1, 1.1, 1.1, 4, 4, 4, true, true, "a"},
{0, 2, 3.1, 1, 3.1, 3.1, 5, 5, 5, nil, nil, "b"},
{1, 2, nil, 0, nil, nil, nil, nil, nil, false, false, nil},
},
convToDecimal: true,
},
{
name: "DistinctAggregation",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{0, 1},
{0, 2},
{0, 2},
{0, nil},
{0, 1},
{0, nil},
{1, 1},
{1, 2},
{1, 2},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}, {1}, {1}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.Count,
execinfrapb.Count,
execinfrapb.SumInt,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 4, 2, 6, 3},
{1, 3, 2, 5, 3},
},
aggDistinct: []bool{false, false, true, false, true},
},
{
name: "FilteringAggregation",
typs: []*types.T{types.Int, types.Int, types.Bool},
input: colexectestutils.Tuples{
{0, 1, false},
{0, 2, true},
{0, 2, true},
{0, nil, nil},
{0, 1, nil},
{0, nil, true},
{1, 1, true},
{1, 2, nil},
{1, 2, true},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.CountRows,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 3, 4},
{1, 2, 3},
},
aggFilter: []int{tree.NoColumnIdx, 2, 2},
},
{
name: "AllGroupsFilteredOut",
typs: []*types.T{types.Int, types.Int, types.Bool},
input: colexectestutils.Tuples{
{0, 1, false},
{0, nil, nil},
{0, 2, false},
{1, 1, true},
{1, 2, nil},
{1, 2, true},
{2, 1, false},
{2, nil, nil},
{2, 2, nil},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.CountRows,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 0, nil},
{1, 2, 3},
{2, 0, nil},
},
aggFilter: []int{tree.NoColumnIdx, 2, 2},
},
{
name: "DistinctFilteringAggregation",
typs: []*types.T{types.Int, types.Int, types.Bool},
input: colexectestutils.Tuples{
{0, 1, false},
{0, 2, true},
{0, 2, true},
{0, nil, nil},
{0, 1, nil},
{0, nil, true},
{1, 1, true},
{1, 2, nil},
{1, 2, true},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}, {1}, {1}, {1}, {1}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.Count,
execinfrapb.Count,
execinfrapb.Count,
execinfrapb.SumInt,
execinfrapb.SumInt,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 2, 2, 1, 4, 3, 2},
{1, 2, 2, 2, 3, 3, 3},
},
aggDistinct: []bool{false, false, true, true, false, true, true},
aggFilter: []int{tree.NoColumnIdx, 2, tree.NoColumnIdx, 2, 2, tree.NoColumnIdx, 2},
},
}
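// init materializes the AggregatorSpec of every static test case above before
// any test runs; a failure here indicates a malformed test case.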
func init() {
for i := range aggregatorsTestCases {
if err := aggregatorsTestCases[i].init(); err != nil {
colexecerror.InternalError(err)
}
}
}
func TestAggregators(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
evalCtx := tree.MakeTestingEvalContext(cluster.MakeTestingClusterSettings())
defer evalCtx.Stop(context.Background())
ctx := context.Background()
for _, tc := range aggregatorsTestCases {
constructors, constArguments, outputTypes, err := colexecagg.ProcessAggregations(
&evalCtx, nil /* semaCtx */, tc.spec.Aggregations, tc.typs,
)
require.NoError(t, err)
for _, agg := range aggTypes {
if tc.unorderedInput && agg.order == ordered {
// This test case has unordered input, so we skip ordered
// aggregator.
continue
}
if agg.order == ordered && tc.aggFilter != nil {
// Filtering aggregation is only supported with hash aggregator.
continue
}
log.Infof(ctx, "%s/%s", tc.name, agg.name)
verifier := colexectestutils.OrderedVerifier
if tc.unorderedInput {
verifier = colexectestutils.UnorderedVerifier
}
colexectestutils.RunTestsWithTyps(t, testAllocator, []colexectestutils.Tuples{tc.input}, [][]*types.T{tc.typs}, tc.expected, verifier,
func(input []colexecop.Operator) (colexecop.Operator, error) {
return agg.new(&colexecagg.NewAggregatorArgs{
Allocator: testAllocator,
MemAccount: testMemAcc,
Input: input[0],
InputTypes: tc.typs,
Spec: tc.spec,
EvalCtx: &evalCtx,
Constructors: constructors,
ConstArguments: constArguments,
OutputTypes: outputTypes,
})
})
}
}
}
func TestAggregatorRandom(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
evalCtx := tree.MakeTestingEvalContext(cluster.MakeTestingClusterSettings())
defer evalCtx.Stop(context.Background())
// This test aggregates random inputs, keeping track of the expected results
// to make sure the aggregations are correct.
rng, _ := randutil.NewTestRand()
for _, groupSize := range []int{1, 2, coldata.BatchSize() / 4, coldata.BatchSize() / 2} {
if groupSize == 0 {
// We might be varying coldata.BatchSize() so that when it is divided by
			// 4, groupSize is 0. We want to skip such a configuration.
continue
}
for _, numInputBatches := range []int{1, 2, 64} {
for _, hasNulls := range []bool{true, false} {
for _, agg := range aggTypesWithPartial {
log.Infof(context.Background(), "%s/groupSize=%d/numInputBatches=%d/hasNulls=%t", agg.name, groupSize, numInputBatches, hasNulls)
nTuples := coldata.BatchSize() * numInputBatches
typs := []*types.T{types.Int, types.Float}
cols := []coldata.Vec{
testAllocator.NewMemColumn(typs[0], nTuples),
testAllocator.NewMemColumn(typs[1], nTuples),
}
if agg.order == partial {
typs = append(typs, types.Int)
cols = append(cols, testAllocator.NewMemColumn(typs[2], nTuples))
}
groups, aggCol, aggColNulls := cols[0].Int64(), cols[1].Float64(), cols[1].Nulls()
expectedTuples := colexectestutils.Tuples{}
var expRowCounts, expCounts []int64
var expSums, expMins, expMaxs []float64
// SUM, MIN, MAX, and AVG aggregators can output null.
var expNulls []bool
curGroup := -1
for i := range groups {
if i%groupSize == 0 {
if curGroup != -1 {
if expNulls[curGroup] {
expectedTuples = append(expectedTuples, colexectestutils.Tuple{
expRowCounts[curGroup], expCounts[curGroup], nil, nil, nil, nil,
})
} else {
expectedTuples = append(expectedTuples, colexectestutils.Tuple{
expRowCounts[curGroup], expCounts[curGroup], expSums[curGroup], expMins[curGroup], expMaxs[curGroup], expSums[curGroup] / float64(expCounts[curGroup]),
})
}
}
expRowCounts = append(expRowCounts, 0)
expCounts = append(expCounts, 0)
expSums = append(expSums, 0)
expMins = append(expMins, 2048)
expMaxs = append(expMaxs, -2048)
expNulls = append(expNulls, true)
curGroup++
}
// Keep the inputs small so they are a realistic size. Using a
// large range is not realistic and makes decimal operations
// slower.
aggCol[i] = 2048 * (rng.Float64() - 0.5)
// NULL values contribute to the row count, so we're updating
// the row counts outside of the if block.
expRowCounts[curGroup]++
if hasNulls && rng.Float64() < nullProbability {
aggColNulls.SetNull(i)
} else {
expNulls[curGroup] = false
expCounts[curGroup]++
expSums[curGroup] += aggCol[i]
expMins[curGroup] = min64(aggCol[i], expMins[curGroup])
expMaxs[curGroup] = max64(aggCol[i], expMaxs[curGroup])
}
groups[i] = int64(curGroup)
}
// Add result for last group.
if expNulls[curGroup] {
expectedTuples = append(expectedTuples, colexectestutils.Tuple{
expRowCounts[curGroup], expCounts[curGroup], nil, nil, nil, nil,
})
} else {
expectedTuples = append(expectedTuples, colexectestutils.Tuple{
expRowCounts[curGroup], expCounts[curGroup], expSums[curGroup], expMins[curGroup], expMaxs[curGroup], expSums[curGroup] / float64(expCounts[curGroup]),
})
}
source := colexectestutils.NewChunkingBatchSource(testAllocator, typs, cols, nTuples)
tc := aggregatorTestCase{
typs: typs,
groupCols: []uint32{0},
aggCols: [][]uint32{{}, {1}, {1}, {1}, {1}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.CountRows,
execinfrapb.Count,
execinfrapb.Sum,
execinfrapb.Min,
execinfrapb.Max,
execinfrapb.Avg,
},
}
if agg.order == partial {
tc.groupCols = []uint32{0, 2}
tc.orderedCols = []uint32{0}
}
require.NoError(t, tc.init())
constructors, constArguments, outputTypes, err := colexecagg.ProcessAggregations(
&evalCtx, nil /* semaCtx */, tc.spec.Aggregations, tc.typs,
)
require.NoError(t, err)
a, err := agg.new(&colexecagg.NewAggregatorArgs{
Allocator: testAllocator,
MemAccount: testMemAcc,
Input: source,
InputTypes: tc.typs,
Spec: tc.spec,
EvalCtx: &evalCtx,
Constructors: constructors,
ConstArguments: constArguments,
OutputTypes: outputTypes,
})
if err != nil {
t.Fatal(err)
}
a.Init(context.Background())
testOutput := colexectestutils.NewOpTestOutput(a, expectedTuples)
if agg.order == ordered {
err = testOutput.Verify()
} else if agg.order == partial {
err = testOutput.VerifyPartialOrder()
} else {
err = testOutput.VerifyAnyOrder()
}
if err != nil {
t.Fatal(err)
}
}
}
}
}
}
// benchmarkAggregateFunction runs aggregator microbenchmarks. numGroupCol is
// the number of grouping columns. groupSize is the number of tuples to target
// in each distinct aggregation group. chunkSize is the number of tuples to
// target in each distinct partially ordered group column, and is intended for
// use with partial order. Limit is the number of rows to retrieve from the
// aggregation function before ending the microbenchmark.
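//
// As a purely illustrative sketch (the row and group counts here are made up,
// not taken from the benchmarks below), a single hash-aggregator run could be
// requested as:
//
//	benchmarkAggregateFunction(b, aggTypes[0], execinfrapb.SumInt,
//		[]*types.T{types.Int}, 1 /* numGroupCol */, 32 /* groupSize */,
//		0 /* distinctProb */, 4096 /* numInputRows */,
//		0 /* chunkSize */, 0 /* limit */)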
func benchmarkAggregateFunction(
b *testing.B,
agg aggType,
aggFn execinfrapb.AggregatorSpec_Func,
aggInputTypes []*types.T,
numGroupCol int,
groupSize int,
distinctProb float64,
numInputRows int,
chunkSize int,
limit int,
) {
defer log.Scope(b).Close(b)
if groupSize > numInputRows {
// In this case all tuples will be part of the same group, and we have
		// likely already benchmarked such a scenario with this value of
// numInputRows, so we short-circuit.
return
}
if numGroupCol < 1 {
// We should always have at least one group column.
return
}
if agg.order == partial {
if chunkSize > numInputRows || groupSize > chunkSize {
return
}
}
rng, _ := randutil.NewTestRand()
ctx := context.Background()
evalCtx := tree.MakeTestingEvalContext(cluster.MakeTestingClusterSettings())
defer evalCtx.Stop(ctx)
aggMemAcc := evalCtx.Mon.MakeBoundAccount()
defer aggMemAcc.Close(ctx)
evalCtx.SingleDatumAggMemAccount = &aggMemAcc
const bytesFixedLength = 8
typs := []*types.T{types.Int}
groupCols := []uint32{0}
for g := 1; g < numGroupCol; g++ {
typs = append(typs, types.Int)
groupCols = append(groupCols, uint32(g))
}
typs = append(typs, aggInputTypes...)
cols := make([]coldata.Vec, len(typs))
for i := range typs {
cols[i] = testAllocator.NewMemColumn(typs[i], numInputRows)
}
groups := cols[0].Int64()
if agg.order == ordered {
curGroup := -1
for i := 0; i < numInputRows; i++ {
if i%groupSize == 0 {
curGroup++
}
groups[i] = int64(curGroup)
}
} else if agg.order == unordered {
numGroups := numInputRows / groupSize
for i := 0; i < numInputRows; i++ {
groups[i] = int64(rng.Intn(numGroups))
}
} else {
// partial order.
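		// Column 0 is filled with monotonically increasing chunk IDs (the
		// ordered prefix of the grouping columns), while column 1 gets random
		// group IDs, so each chunk contains roughly chunkSize/groupSize
		// distinct groups.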
chunks := cols[0].Int64()
groups = cols[1].Int64()
curChunk := -1
numGroups := chunkSize / groupSize
for i := 0; i < numInputRows; i++ {
if i%chunkSize == 0 {
curChunk++
}
chunks[i] = int64(curChunk)
groups[i] = int64(rng.Intn(numGroups))
}
}
for _, col := range cols[numGroupCol:] {
coldatatestutils.RandomVec(coldatatestutils.RandomVecArgs{
Rand: rng,
Vec: col,
N: numInputRows,
NullProbability: 0,
BytesFixedLength: bytesFixedLength,
})
}
if aggFn == execinfrapb.SumInt {
// Integer summation of random Int64 values can lead
		// to overflow, and we will panic. To work around it, we
// restrict the range of values.
vals := cols[numGroupCol].Int64()
for i := range vals {
vals[i] = vals[i] % 1024
}
}
source := colexectestutils.NewChunkingBatchSource(testAllocator, typs, cols, numInputRows)
aggCols := make([]uint32, len(aggInputTypes))
for i := range aggCols {
aggCols[i] = uint32(numGroupCol + i)
}
tc := aggregatorTestCase{
typs: typs,
groupCols: groupCols,
aggCols: [][]uint32{aggCols},
aggFns: []execinfrapb.AggregatorSpec_Func{aggFn},
}
if distinctProb > 0 {
if !typs[0].Identical(types.Int) {
skip.IgnoreLint(b, "benchmarking distinct aggregation is supported only on an INT argument")
}
tc.aggDistinct = []bool{true}
distinctModulo := int64(1.0 / distinctProb)
vals := cols[1].Int64()
for i := range vals {
vals[i] = vals[i] % distinctModulo
}
}
if agg.order == partial {
tc.unorderedInput = false
tc.orderedCols = []uint32{0}
}
require.NoError(b, tc.init())
constructors, constArguments, outputTypes, err := colexecagg.ProcessAggregations(
&evalCtx, nil /* semaCtx */, tc.spec.Aggregations, tc.typs,
)
require.NoError(b, err)
fName := execinfrapb.AggregatorSpec_Func_name[int32(aggFn)]
// Only count the aggregation columns.
var argumentsSize int
if len(aggInputTypes) > 0 {
for _, typ := range aggInputTypes {
if typ.Identical(types.Bool) {
argumentsSize++
} else {
argumentsSize += 8
}
}
} else {
// For COUNT_ROWS we'll just use 8 bytes.
argumentsSize = 8
}
var inputTypesString string
switch len(aggInputTypes) {
case 1:
		// Override the string so that the name of the benchmark is the same
// as in pre-20.2 releases (which allows us to compare against old
// numbers).
inputTypesString = aggInputTypes[0].String()
default:
inputTypesString = fmt.Sprintf("%s", aggInputTypes)
}
distinctProbString := ""
if distinctProb > 0 {
distinctProbString = fmt.Sprintf("/distinctProb=%.2f", distinctProb)
}
b.Run(fmt.Sprintf(
"%s/%s/%s/groupSize=%d%s/numInputRows=%d",
fName, agg.name, inputTypesString, groupSize, distinctProbString, numInputRows),
func(b *testing.B) {
b.SetBytes(int64(argumentsSize * numInputRows))
b.ResetTimer()
for i := 0; i < b.N; i++ {
a, err := agg.new(&colexecagg.NewAggregatorArgs{
Allocator: testAllocator,
MemAccount: testMemAcc,
Input: source,
InputTypes: tc.typs,
Spec: tc.spec,
EvalCtx: &evalCtx,
Constructors: constructors,
ConstArguments: constArguments,
OutputTypes: outputTypes,
})
if err != nil {
b.Fatal(err)
}
a.Init(ctx)
// Exhaust aggregator until all batches have been read or limit, if
// non-zero, is reached.
tupleCount := 0
for b := a.Next(); b.Length() != 0; b = a.Next() {
tupleCount += b.Length()
if limit > 0 && tupleCount >= limit {
break
}
}
if err = a.(colexecop.Closer).Close(); err != nil {
b.Fatal(err)
}
source.Reset(ctx)
}
},
)
}
// BenchmarkAggregator runs the benchmark of both aggregators with diverse data
// source parameters but using a single aggregate function. The goal of this
// benchmark is measuring the performance of the aggregators themselves
// depending on the parameters of the input.
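//
// Assuming a stock Go toolchain, a run limited to this benchmark would look
// something like:
//
//	go test ./pkg/sql/colexec -run=^$ -bench=BenchmarkAggregator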
func BenchmarkAggregator(b *testing.B) {
aggFn := execinfrapb.Min
numRows := []int{1, 32, coldata.BatchSize(), 32 * coldata.BatchSize(), 1024 * coldata.BatchSize()}
groupSizes := []int{1, 2, 32, 128, coldata.BatchSize()}
if testing.Short() {
numRows = []int{32, 32 * coldata.BatchSize()}
groupSizes = []int{1, coldata.BatchSize()}
}
for _, agg := range aggTypes {
for _, numInputRows := range numRows {
for _, groupSize := range groupSizes {
benchmarkAggregateFunction(
b, agg, aggFn, []*types.T{types.Int}, 1, /* numGroupCol */
groupSize, 0 /* distinctProb */, numInputRows,
0 /* chunkSize */, 0 /* limit */)
}
}
}
}
// BenchmarkAllOptimizedAggregateFunctions runs the benchmark of all optimized
// aggregate functions in 4 configurations (hash vs ordered, and small groups
// vs big groups). Such configurations were chosen since they provide good
// enough signal on the speeds of aggregate functions. For more diverse
// configurations look at BenchmarkAggregator.
func BenchmarkAllOptimizedAggregateFunctions(b *testing.B) {
var numInputRows = 32 * coldata.BatchSize()
numFnsToRun := len(execinfrapb.AggregatorSpec_Func_name)
if testing.Short() {
numFnsToRun = 1
}
for aggFnNumber := 0; aggFnNumber < numFnsToRun; aggFnNumber++ {
aggFn := execinfrapb.AggregatorSpec_Func(aggFnNumber)
if !colexecagg.IsAggOptimized(aggFn) {
continue
}
for _, agg := range aggTypes {
var aggInputTypes []*types.T
switch aggFn {
case execinfrapb.BoolAnd, execinfrapb.BoolOr:
aggInputTypes = []*types.T{types.Bool}
case execinfrapb.ConcatAgg:
aggInputTypes = []*types.T{types.Bytes}
case execinfrapb.CountRows:
default:
aggInputTypes = []*types.T{types.Int}
}
for _, groupSize := range []int{1, coldata.BatchSize()} {
benchmarkAggregateFunction(b, agg, aggFn, aggInputTypes,
1 /* numGroupCol */, groupSize,
0 /* distinctProb */, numInputRows,
0 /* chunkSize */, 0 /* limit */)
}
}
}
}
func BenchmarkDistinctAggregation(b *testing.B) {
aggFn := execinfrapb.Count
for _, agg := range aggTypes {
for _, numInputRows := range []int{32, 32 * coldata.BatchSize()} {
for _, groupSize := range []int{1, 2, 32, 128, coldata.BatchSize()} {
for _, distinctProb := range []float64{0.01, 0.1, 1.0} {
distinctModulo := int(1.0 / distinctProb)
if (groupSize == 1 && distinctProb != 1.0) || float64(groupSize)/float64(distinctModulo) < 0.1 {
					// We have such a combination of groupSize and distinctProb
					// parameters that we will be very unlikely to satisfy it
					// (for example, with groupSize=1 and distinctProb=0.01,
					// every value will be distinct within the group), so we
					// skip such a configuration.
continue
}
benchmarkAggregateFunction(b, agg, aggFn, []*types.T{types.Int},
1 /* numGroupCol */, groupSize,
					distinctProb, numInputRows,
0 /* chunkSize */, 0 /* limit */)
}
}
}
}
}
func min64(a, b float64) float64 {
if a < b {
return a
}
return b
}
func max64(a, b float64) float64 {
if a > b {
return a
}
return b
}
| pkg/sql/colexec/aggregators_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/6a1b9c891f20b32d985a451118f690de01dc6e37 | [
0.998202919960022,
0.050414010882377625,
0.00016270506603177637,
0.00018623197684064507,
0.20807036757469177
] |
{
"id": 1,
"code_window": [
"\t\tfor i := range vals {\n",
"\t\t\tvals[i] = vals[i] % distinctModulo\n",
"\t\t}\n",
"\t}\n",
"\tif agg.order == partial {\n",
"\t\ttc.unorderedInput = false\n",
"\t\ttc.orderedCols = []uint32{0}\n",
"\t}\n",
"\trequire.NoError(b, tc.init())\n",
"\tconstructors, constArguments, outputTypes, err := colexecagg.ProcessAggregations(\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/colexec/aggregators_test.go",
"type": "replace",
"edit_start_line_idx": 1017
} | exec-ddl
CREATE TABLE a (k INT PRIMARY KEY, i INT, f FLOAT, s STRING, j JSON)
----
exec-ddl
CREATE VIEW av AS SELECT k, i, s FROM a
----
build
SELECT * FROM av
----
project
├── columns: k:1!null i:2 s:4
└── scan a
└── columns: k:1!null i:2 f:3 s:4 j:5 crdb_internal_mvcc_timestamp:6 tableoid:7
build
SELECT av.i, s, t.public.av.s AS s2 FROM t.av
----
project
├── columns: i:2 s:4 s2:4
└── project
├── columns: k:1!null i:2 s:4
└── scan a
└── columns: k:1!null i:2 f:3 s:4 j:5 crdb_internal_mvcc_timestamp:6 tableoid:7
# Self view join (multiple references to view).
build
SELECT av.k, av2.s FROM av, av AS av2 WHERE av.k=av2.k
----
project
├── columns: k:1!null s:11
└── select
├── columns: k:1!null i:2 s:4 k:8!null i:9 s:11
├── inner-join (cross)
│ ├── columns: k:1!null i:2 s:4 k:8!null i:9 s:11
│ ├── project
│ │ ├── columns: k:1!null i:2 s:4
│ │ └── scan a
│ │ └── columns: k:1!null i:2 f:3 s:4 j:5 crdb_internal_mvcc_timestamp:6 tableoid:7
│ ├── project
│ │ ├── columns: k:8!null i:9 s:11
│ │ └── scan a
│ │ └── columns: k:8!null i:9 f:10 s:11 j:12 crdb_internal_mvcc_timestamp:13 tableoid:14
│ └── filters (true)
└── filters
└── k:1 = k:8
# View with aliased column names, filter, and ORDER BY.
exec-ddl
CREATE VIEW av2 (x, y) AS SELECT k, f FROM a WHERE i=10 ORDER BY s
----
# Result is not ordered.
build
SELECT * FROM av2
----
project
├── columns: x:1!null y:3
└── project
├── columns: k:1!null f:3 s:4
└── select
├── columns: k:1!null i:2!null f:3 s:4 j:5 crdb_internal_mvcc_timestamp:6 tableoid:7
├── scan a
│ └── columns: k:1!null i:2 f:3 s:4 j:5 crdb_internal_mvcc_timestamp:6 tableoid:7
└── filters
└── i:2 = 10
# Sort used by group by because of presence of ARRAY_AGG.
build
SELECT array_agg(y) FROM av2
----
scalar-group-by
├── columns: array_agg:8
├── internal-ordering: +4
├── sort
│ ├── columns: f:3 s:4
│ ├── ordering: +4
│ └── project
│ ├── columns: f:3 s:4
│ └── project
│ ├── columns: k:1!null f:3 s:4
│ └── select
│ ├── columns: k:1!null i:2!null f:3 s:4 j:5 crdb_internal_mvcc_timestamp:6 tableoid:7
│ ├── scan a
│ │ └── columns: k:1!null i:2 f:3 s:4 j:5 crdb_internal_mvcc_timestamp:6 tableoid:7
│ └── filters
│ └── i:2 = 10
└── aggregations
└── array-agg [as=array_agg:8]
└── f:3
# Verify that an outer table is visible from a subquery that uses
# a view (#46180).
exec-ddl
CREATE VIEW v AS SELECT x FROM (VALUES (1), (2)) AS foo(x);
----
build
SELECT (SELECT x FROM v WHERE x=t.a) FROM (VALUES (3), (4)) AS t(a);
----
project
├── columns: x:3
├── values
│ ├── columns: column1:1!null
│ ├── (3,)
│ └── (4,)
└── projections
└── subquery [as=x:3]
└── max1-row
├── columns: column1:2!null
└── select
├── columns: column1:2!null
├── values
│ ├── columns: column1:2!null
│ ├── (1,)
│ └── (2,)
└── filters
└── column1:2 = column1:1
| pkg/sql/opt/optbuilder/testdata/view | 0 | https://github.com/cockroachdb/cockroach/commit/6a1b9c891f20b32d985a451118f690de01dc6e37 | [
0.00019582289678510278,
0.00017713361012283713,
0.000171013773069717,
0.00017645099433138967,
0.000006109452442615293
] |
{
"id": 1,
"code_window": [
"\t\tfor i := range vals {\n",
"\t\t\tvals[i] = vals[i] % distinctModulo\n",
"\t\t}\n",
"\t}\n",
"\tif agg.order == partial {\n",
"\t\ttc.unorderedInput = false\n",
"\t\ttc.orderedCols = []uint32{0}\n",
"\t}\n",
"\trequire.NoError(b, tc.init())\n",
"\tconstructors, constArguments, outputTypes, err := colexecagg.ProcessAggregations(\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/colexec/aggregators_test.go",
"type": "replace",
"edit_start_line_idx": 1017
} | load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "cliccl",
srcs = [
"cliccl.go",
"debug.go",
"debug_backup.go",
"demo.go",
"start.go",
],
importpath = "github.com/cockroachdb/cockroach/pkg/ccl/cliccl",
visibility = ["//visibility:public"],
deps = [
"//pkg/base",
"//pkg/blobs",
"//pkg/build",
"//pkg/ccl/backupccl",
"//pkg/ccl/baseccl",
"//pkg/ccl/cliccl/cliflagsccl",
"//pkg/ccl/storageccl",
"//pkg/ccl/storageccl/engineccl/enginepbccl:enginepbccl_go_proto",
"//pkg/ccl/workloadccl/cliccl",
"//pkg/cli",
"//pkg/cli/clierrorplus",
"//pkg/cli/cliflags",
"//pkg/cli/clisqlexec",
"//pkg/cli/democluster",
"//pkg/cloud",
"//pkg/cloud/nodelocal",
"//pkg/keys",
"//pkg/roachpb:with-mocks",
"//pkg/security",
"//pkg/server",
"//pkg/settings/cluster",
"//pkg/sql/catalog",
"//pkg/sql/catalog/catconstants",
"//pkg/sql/catalog/colinfo",
"//pkg/sql/catalog/descpb",
"//pkg/sql/catalog/tabledesc",
"//pkg/sql/row",
"//pkg/sql/rowenc",
"//pkg/sql/sem/tree",
"//pkg/storage",
"//pkg/storage/enginepb",
"//pkg/util",
"//pkg/util/envutil",
"//pkg/util/hlc",
"//pkg/util/humanizeutil",
"//pkg/util/log",
"//pkg/util/protoutil",
"//pkg/util/stop",
"//pkg/util/timeutil",
"//pkg/util/timeutil/pgdate",
"//pkg/util/uuid",
"@com_github_cockroachdb_apd_v2//:apd",
"@com_github_cockroachdb_errors//:errors",
"@com_github_cockroachdb_errors//oserror",
"@com_github_spf13_cobra//:cobra",
],
)
go_test(
name = "cliccl_test",
size = "medium",
srcs = [
"debug_backup_test.go",
"main_test.go",
],
embed = [":cliccl"],
deps = [
"//pkg/base",
"//pkg/build",
"//pkg/ccl/backupccl",
"//pkg/ccl/utilccl",
"//pkg/cli",
"//pkg/cli/clisqlexec",
"//pkg/server",
"//pkg/testutils",
"//pkg/testutils/serverutils",
"//pkg/testutils/sqlutils",
"//pkg/util/hlc",
"//pkg/util/leaktest",
"//pkg/util/log",
"//pkg/util/timeutil",
"@com_github_stretchr_testify//require",
],
)
| pkg/ccl/cliccl/BUILD.bazel | 0 | https://github.com/cockroachdb/cockroach/commit/6a1b9c891f20b32d985a451118f690de01dc6e37 | [
0.00017769633268471807,
0.0001728581846691668,
0.00016754199168644845,
0.0001731233496684581,
0.000003144957418044214
] |
{
"id": 1,
"code_window": [
"\t\tfor i := range vals {\n",
"\t\t\tvals[i] = vals[i] % distinctModulo\n",
"\t\t}\n",
"\t}\n",
"\tif agg.order == partial {\n",
"\t\ttc.unorderedInput = false\n",
"\t\ttc.orderedCols = []uint32{0}\n",
"\t}\n",
"\trequire.NoError(b, tc.init())\n",
"\tconstructors, constArguments, outputTypes, err := colexecagg.ProcessAggregations(\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/sql/colexec/aggregators_test.go",
"type": "replace",
"edit_start_line_idx": 1017
} | <?php
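// kill prints the given message and terminates the script with a failure exit code.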
function kill($msg) {
echo($msg);
exit(1);
}
$dbconn = pg_connect('')
or kill('Could not connect: ' . pg_last_error());
$result = pg_query_params('SELECT 1, 2 > $1, $1', [intval($argv[1])])
or kill('Query failed: ' . pg_last_error());
$arr = pg_fetch_row($result);
($arr === ['1', 'f', '3']) or kill('Unexpected: ' . print_r($arr, true));
$dbh = new PDO('pgsql:','root', null, array(PDO::ATTR_ERRMODE => PDO::ERRMODE_EXCEPTION));
$dbh->exec('CREATE database bank');
$dbh->exec('CREATE table bank.accounts (id INT PRIMARY KEY, balance INT)');
$dbh->exec('INSERT INTO bank.accounts (id, balance) VALUES (1, 1000), (2, 250)');
$dbh->beginTransaction();
$stmt = $dbh->prepare('UPDATE bank.accounts SET balance = balance + :deposit WHERE id=:account');
$stmt->execute(array('account' => 1, 'deposit' => 10));
$stmt->execute(array('account' => 2, 'deposit' => -10));
$dbh->commit();
// Regression test for #59007.
$stmt = $dbh->prepare("insert into a_table (id, a) select ?, ?, ?, ? returning id");
$stmt->bindValue(1, 'ed66e7c0-5c39-11eb-8992-89bd28f48e75');
$stmt->bindValue(2, 'bugging_a');
$stmt->bindValue(3, 'bugging_b');
try {
$stmt->execute();
assert(false, "expected exception in execute");
} catch (Exception $e) {
  assert(strpos($e->getMessage(), "expected 4 arguments, got 3"));
}
| pkg/acceptance/testdata/php/test.php | 0 | https://github.com/cockroachdb/cockroach/commit/6a1b9c891f20b32d985a451118f690de01dc6e37 | [
0.0001743066677590832,
0.00017289764946326613,
0.0001707662595435977,
0.0001732588279992342,
0.000001329457518295385
] |
{
"id": 2,
"code_window": [
"\t\t\t\t\t\t\t// wrap it with a noop operator. It is ok for the\n",
"\t\t\t\t\t\t\t// purposes of this benchmark.\n",
"\t\t\t\t\t\t\treturn colexecop.NewNoop(op), err\n",
"\t\t\t\t\t\t},\n",
"\t\t\t\t\t\tname: fmt.Sprintf(\"spilled=%t\", spillForced),\n",
"\t\t\t\t\t},\n",
"\t\t\t\t\taggFn, []*types.T{types.Int}, 1 /* numGroupCol */, groupSize,\n",
"\t\t\t\t\t0 /* distinctProb */, numInputRows, 0 /* chunkSize */, 0 /* limit */)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\t\tname: fmt.Sprintf(\"spilled=%t\", spillForced),\n",
"\t\t\t\t\t\torder: unordered,\n"
],
"file_path": "pkg/sql/colexec/external_hash_aggregator_test.go",
"type": "replace",
"edit_start_line_idx": 220
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colexec
import (
"context"
"fmt"
"testing"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/colcontainer"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecagg"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecargs"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexectestutils"
"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/testutils/colcontainerutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
"github.com/marusama/semaphore"
"github.com/stretchr/testify/require"
)
func TestExternalHashAggregator(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
st := cluster.MakeTestingClusterSettings()
evalCtx := tree.MakeTestingEvalContext(st)
defer evalCtx.Stop(ctx)
flowCtx := &execinfra.FlowCtx{
EvalCtx: &evalCtx,
Cfg: &execinfra.ServerConfig{
Settings: st,
},
DiskMonitor: testDiskMonitor,
}
queueCfg, cleanup := colcontainerutils.NewTestingDiskQueueCfg(t, true /* inMem */)
defer cleanup()
var (
accounts []*mon.BoundAccount
monitors []*mon.BytesMonitor
)
rng, _ := randutil.NewTestRand()
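	// Pick a random number of forced repartitions (0-4); it is plumbed into the
	// TestingKnobs below so that different runs exercise different repartitioning
	// behavior of the external aggregator.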
numForcedRepartitions := rng.Intn(5)
for _, cfg := range []struct {
diskSpillingEnabled bool
spillForced bool
memoryLimitBytes int64
}{
{
diskSpillingEnabled: true,
spillForced: true,
},
{
diskSpillingEnabled: true,
spillForced: false,
},
{
diskSpillingEnabled: false,
},
{
diskSpillingEnabled: true,
spillForced: false,
memoryLimitBytes: hashAggregatorAllocSize * sizeOfAggBucket,
},
{
diskSpillingEnabled: true,
spillForced: false,
memoryLimitBytes: hashAggregatorAllocSize * sizeOfAggBucket * 2,
},
} {
HashAggregationDiskSpillingEnabled.Override(ctx, &flowCtx.Cfg.Settings.SV, cfg.diskSpillingEnabled)
flowCtx.Cfg.TestingKnobs.ForceDiskSpill = cfg.spillForced
flowCtx.Cfg.TestingKnobs.MemoryLimitBytes = cfg.memoryLimitBytes
for _, tc := range append(aggregatorsTestCases, hashAggregatorTestCases...) {
if len(tc.groupCols) == 0 {
// If there are no grouping columns, then the ordered
// aggregator is planned.
continue
}
if tc.aggFilter != nil {
// Filtering aggregation is not supported with the ordered
// aggregation which is required for the external hash
// aggregator in the fallback strategy.
continue
}
log.Infof(ctx, "diskSpillingEnabled=%t/spillForced=%t/memoryLimitBytes=%d/numRepartitions=%d/%s", cfg.diskSpillingEnabled, cfg.spillForced, cfg.memoryLimitBytes, numForcedRepartitions, tc.name)
constructors, constArguments, outputTypes, err := colexecagg.ProcessAggregations(
&evalCtx, nil /* semaCtx */, tc.spec.Aggregations, tc.typs,
)
require.NoError(t, err)
verifier := colexectestutils.OrderedVerifier
if tc.unorderedInput {
verifier = colexectestutils.UnorderedVerifier
} else if len(tc.orderedCols) > 0 {
verifier = colexectestutils.PartialOrderedVerifier
}
var numExpectedClosers int
if cfg.diskSpillingEnabled {
// The external sorter and the disk spiller should be added
// as Closers (the latter is responsible for closing the
// in-memory hash aggregator as well as the external one).
numExpectedClosers = 2
if len(tc.spec.OutputOrdering.Columns) > 0 {
// When the output ordering is required, we also plan
// another external sort.
numExpectedClosers++
}
} else {
// Only the in-memory hash aggregator should be added.
numExpectedClosers = 1
}
var semsToCheck []semaphore.Semaphore
colexectestutils.RunTestsWithTyps(t, testAllocator, []colexectestutils.Tuples{tc.input}, [][]*types.T{tc.typs}, tc.expected, verifier, func(input []colexecop.Operator) (colexecop.Operator, error) {
sem := colexecop.NewTestingSemaphore(ehaNumRequiredFDs)
semsToCheck = append(semsToCheck, sem)
op, accs, mons, closers, err := createExternalHashAggregator(
ctx, flowCtx, &colexecagg.NewAggregatorArgs{
Allocator: testAllocator,
MemAccount: testMemAcc,
Input: input[0],
InputTypes: tc.typs,
Spec: tc.spec,
EvalCtx: &evalCtx,
Constructors: constructors,
ConstArguments: constArguments,
OutputTypes: outputTypes,
},
queueCfg, sem, numForcedRepartitions,
)
accounts = append(accounts, accs...)
monitors = append(monitors, mons...)
require.Equal(t, numExpectedClosers, len(closers))
if !cfg.diskSpillingEnabled {
// Sanity check that indeed only the in-memory hash
// aggregator was created.
_, isHashAgg := MaybeUnwrapInvariantsChecker(op).(*hashAggregator)
require.True(t, isHashAgg)
}
return op, err
})
for i, sem := range semsToCheck {
require.Equal(t, 0, sem.GetCount(), "sem still reports open FDs at index %d", i)
}
}
}
for _, acc := range accounts {
acc.Close(ctx)
}
for _, mon := range monitors {
mon.Stop(ctx)
}
}
func BenchmarkExternalHashAggregator(b *testing.B) {
defer leaktest.AfterTest(b)()
defer log.Scope(b).Close(b)
ctx := context.Background()
st := cluster.MakeTestingClusterSettings()
evalCtx := tree.MakeTestingEvalContext(st)
defer evalCtx.Stop(ctx)
flowCtx := &execinfra.FlowCtx{
EvalCtx: &evalCtx,
Cfg: &execinfra.ServerConfig{
Settings: st,
},
DiskMonitor: testDiskMonitor,
}
var (
memAccounts []*mon.BoundAccount
memMonitors []*mon.BytesMonitor
)
queueCfg, cleanup := colcontainerutils.NewTestingDiskQueueCfg(b, false /* inMem */)
defer cleanup()
aggFn := execinfrapb.Min
numRows := []int{coldata.BatchSize(), 64 * coldata.BatchSize(), 4096 * coldata.BatchSize()}
groupSizes := []int{1, 2, 32, 128, coldata.BatchSize()}
if testing.Short() {
numRows = []int{64 * coldata.BatchSize()}
groupSizes = []int{1, coldata.BatchSize()}
}
for _, spillForced := range []bool{false, true} {
flowCtx.Cfg.TestingKnobs.ForceDiskSpill = spillForced
for _, numInputRows := range numRows {
for _, groupSize := range groupSizes {
benchmarkAggregateFunction(
b, aggType{
new: func(args *colexecagg.NewAggregatorArgs) (colexecop.ResettableOperator, error) {
op, accs, mons, _, err := createExternalHashAggregator(
ctx, flowCtx, args, queueCfg,
&colexecop.TestingSemaphore{}, 0, /* numForcedRepartitions */
)
memAccounts = append(memAccounts, accs...)
memMonitors = append(memMonitors, mons...)
// The hash-based partitioner is not a
// ResettableOperator, so in order to not change the
// signatures of the aggregator constructors, we
// wrap it with a noop operator. It is ok for the
// purposes of this benchmark.
return colexecop.NewNoop(op), err
},
name: fmt.Sprintf("spilled=%t", spillForced),
},
aggFn, []*types.T{types.Int}, 1 /* numGroupCol */, groupSize,
0 /* distinctProb */, numInputRows, 0 /* chunkSize */, 0 /* limit */)
}
}
}
for _, account := range memAccounts {
account.Close(ctx)
}
for _, monitor := range memMonitors {
monitor.Stop(ctx)
}
}
// createExternalHashAggregator is a helper function that instantiates a
// disk-backed hash aggregator. It returns an operator and an error as well as
// memory monitors and memory accounts that will need to be closed once the
// caller is done with the operator.
func createExternalHashAggregator(
ctx context.Context,
flowCtx *execinfra.FlowCtx,
newAggArgs *colexecagg.NewAggregatorArgs,
diskQueueCfg colcontainer.DiskQueueCfg,
testingSemaphore semaphore.Semaphore,
numForcedRepartitions int,
) (colexecop.Operator, []*mon.BoundAccount, []*mon.BytesMonitor, []colexecop.Closer, error) {
spec := &execinfrapb.ProcessorSpec{
Input: []execinfrapb.InputSyncSpec{{ColumnTypes: newAggArgs.InputTypes}},
Core: execinfrapb.ProcessorCoreUnion{
Aggregator: newAggArgs.Spec,
},
Post: execinfrapb.PostProcessSpec{},
ResultTypes: newAggArgs.OutputTypes,
}
args := &colexecargs.NewColOperatorArgs{
Spec: spec,
Inputs: []colexecargs.OpWithMetaInfo{{Root: newAggArgs.Input}},
StreamingMemAccount: testMemAcc,
DiskQueueCfg: diskQueueCfg,
FDSemaphore: testingSemaphore,
}
args.TestingKnobs.NumForcedRepartitions = numForcedRepartitions
result, err := colexecargs.TestNewColOperator(ctx, flowCtx, args)
return result.Root, result.OpAccounts, result.OpMonitors, result.ToClose, err
}
| pkg/sql/colexec/external_hash_aggregator_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/6a1b9c891f20b32d985a451118f690de01dc6e37 | [
0.9720882177352905,
0.04072928428649902,
0.00016311345098074526,
0.00020291961845941842,
0.1837487518787384
] |
{
"id": 2,
"code_window": [
"\t\t\t\t\t\t\t// wrap it with a noop operator. It is ok for the\n",
"\t\t\t\t\t\t\t// purposes of this benchmark.\n",
"\t\t\t\t\t\t\treturn colexecop.NewNoop(op), err\n",
"\t\t\t\t\t\t},\n",
"\t\t\t\t\t\tname: fmt.Sprintf(\"spilled=%t\", spillForced),\n",
"\t\t\t\t\t},\n",
"\t\t\t\t\taggFn, []*types.T{types.Int}, 1 /* numGroupCol */, groupSize,\n",
"\t\t\t\t\t0 /* distinctProb */, numInputRows, 0 /* chunkSize */, 0 /* limit */)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\t\tname: fmt.Sprintf(\"spilled=%t\", spillForced),\n",
"\t\t\t\t\t\torder: unordered,\n"
],
"file_path": "pkg/sql/colexec/external_hash_aggregator_test.go",
"type": "replace",
"edit_start_line_idx": 220
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package row
import (
"bytes"
"context"
"sort"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/unique"
"github.com/cockroachdb/errors"
)
// Updater abstracts the key/value operations for updating table rows.
type Updater struct {
Helper rowHelper
DeleteHelper *rowHelper
FetchCols []catalog.Column
// FetchColIDtoRowIndex must be kept in sync with FetchCols.
FetchColIDtoRowIndex catalog.TableColMap
UpdateCols []catalog.Column
UpdateColIDtoRowIndex catalog.TableColMap
primaryKeyColChange bool
// rd and ri are used when the update this Updater is created for modifies
// the primary key of the table. In that case, rows must be deleted and
// re-added instead of merely updated, since the keys are changing.
rd Deleter
ri Inserter
// For allocation avoidance.
marshaled []roachpb.Value
newValues []tree.Datum
key roachpb.Key
valueBuf []byte
value roachpb.Value
oldIndexEntries [][]rowenc.IndexEntry
newIndexEntries [][]rowenc.IndexEntry
}
type rowUpdaterType int
const (
// UpdaterDefault indicates that an Updater should update everything
// about a row, including secondary indexes.
UpdaterDefault rowUpdaterType = 0
// UpdaterOnlyColumns indicates that an Updater should only update the
// columns of a row and not the secondary indexes.
UpdaterOnlyColumns rowUpdaterType = 1
)
// MakeUpdater creates a Updater for the given table.
//
// UpdateCols are the columns being updated and correspond to the updateValues
// that will be passed to UpdateRow.
//
// The returned Updater contains a FetchCols field that defines the
// expectation of which values are passed as oldValues to UpdateRow.
// requestedCols must be non-nil and define the schema that determines
// FetchCols.
func MakeUpdater(
ctx context.Context,
txn *kv.Txn,
codec keys.SQLCodec,
tableDesc catalog.TableDescriptor,
updateCols []catalog.Column,
requestedCols []catalog.Column,
updateType rowUpdaterType,
alloc *rowenc.DatumAlloc,
sv *settings.Values,
internal bool,
metrics *Metrics,
) (Updater, error) {
if requestedCols == nil {
return Updater{}, errors.AssertionFailedf("requestedCols is nil in MakeUpdater")
}
updateColIDtoRowIndex := ColIDtoRowIndexFromCols(updateCols)
var primaryIndexCols catalog.TableColSet
for i := 0; i < tableDesc.GetPrimaryIndex().NumKeyColumns(); i++ {
colID := tableDesc.GetPrimaryIndex().GetKeyColumnID(i)
primaryIndexCols.Add(colID)
}
var primaryKeyColChange bool
for _, c := range updateCols {
if primaryIndexCols.Contains(c.GetID()) {
primaryKeyColChange = true
break
}
}
// needsUpdate returns true if the given index may need to be updated for
// the current UPDATE mutation.
needsUpdate := func(index catalog.Index) bool {
// If the UPDATE is set to only update columns and not secondary
// indexes, return false.
if updateType == UpdaterOnlyColumns {
return false
}
// If the primary key changed, we need to update all secondary indexes.
if primaryKeyColChange {
return true
}
// If the index is a partial index, an update may be required even if
// the indexed columns aren't changing. For example, an index entry must
// be added when an update to a non-indexed column causes a row to
// satisfy the partial index predicate when it did not before.
// TODO(mgartner): needsUpdate does not need to return true for every
// partial index. A partial index will never require updating if neither
// its indexed columns nor the columns referenced in its predicate
// expression are changing.
if index.IsPartial() {
return true
}
colIDs := index.CollectKeyColumnIDs()
colIDs.UnionWith(index.CollectSecondaryStoredColumnIDs())
colIDs.UnionWith(index.CollectKeySuffixColumnIDs())
for _, colID := range colIDs.Ordered() {
if _, ok := updateColIDtoRowIndex.Get(colID); ok {
return true
}
}
return false
}
includeIndexes := make([]catalog.Index, 0, len(tableDesc.WritableNonPrimaryIndexes()))
var deleteOnlyIndexes []catalog.Index
for _, index := range tableDesc.DeletableNonPrimaryIndexes() {
if !needsUpdate(index) {
continue
}
if !index.DeleteOnly() {
includeIndexes = append(includeIndexes, index)
} else {
if deleteOnlyIndexes == nil {
// Allocate at most once.
deleteOnlyIndexes = make([]catalog.Index, 0, len(tableDesc.DeleteOnlyNonPrimaryIndexes()))
}
deleteOnlyIndexes = append(deleteOnlyIndexes, index)
}
}
var deleteOnlyHelper *rowHelper
if len(deleteOnlyIndexes) > 0 {
rh := newRowHelper(codec, tableDesc, deleteOnlyIndexes, sv, internal, metrics)
deleteOnlyHelper = &rh
}
ru := Updater{
Helper: newRowHelper(codec, tableDesc, includeIndexes, sv, internal, metrics),
DeleteHelper: deleteOnlyHelper,
FetchCols: requestedCols,
FetchColIDtoRowIndex: ColIDtoRowIndexFromCols(requestedCols),
UpdateCols: updateCols,
UpdateColIDtoRowIndex: updateColIDtoRowIndex,
primaryKeyColChange: primaryKeyColChange,
marshaled: make([]roachpb.Value, len(updateCols)),
oldIndexEntries: make([][]rowenc.IndexEntry, len(includeIndexes)),
newIndexEntries: make([][]rowenc.IndexEntry, len(includeIndexes)),
}
if primaryKeyColChange {
// These fields are only used when the primary key is changing.
var err error
ru.rd = MakeDeleter(codec, tableDesc, requestedCols, sv, internal, metrics)
if ru.ri, err = MakeInserter(
ctx, txn, codec, tableDesc, requestedCols, alloc, sv, internal, metrics,
); err != nil {
return Updater{}, err
}
}
// If we are fetching from specific families, we might get
// less columns than in the table. So we cannot assign this to
// have length len(tableCols).
ru.newValues = make(tree.Datums, len(ru.FetchCols))
return ru, nil
}
// UpdateRow adds to the batch the kv operations necessary to update a table row
// with the given values.
//
// The row corresponding to oldValues is updated with the ones in updateValues.
// Note that updateValues only contains the ones that are changing.
//
// The return value is only good until the next call to UpdateRow.
func (ru *Updater) UpdateRow(
ctx context.Context,
batch *kv.Batch,
oldValues []tree.Datum,
updateValues []tree.Datum,
pm PartialIndexUpdateHelper,
traceKV bool,
) ([]tree.Datum, error) {
if len(oldValues) != len(ru.FetchCols) {
return nil, errors.Errorf("got %d values but expected %d", len(oldValues), len(ru.FetchCols))
}
if len(updateValues) != len(ru.UpdateCols) {
return nil, errors.Errorf("got %d values but expected %d", len(updateValues), len(ru.UpdateCols))
}
primaryIndexKey, err := ru.Helper.encodePrimaryIndex(ru.FetchColIDtoRowIndex, oldValues)
if err != nil {
return nil, err
}
var deleteOldSecondaryIndexEntries []rowenc.IndexEntry
if ru.DeleteHelper != nil {
// We want to include empty k/v pairs because we want
// to delete all k/v's for this row. By setting includeEmpty
// to true, we will get a k/v pair for each family in the row,
// which will guarantee that we delete all the k/v's in this row.
// N.B. that setting includeEmpty to true will sometimes cause
// deletes of keys that aren't present. We choose to make this
// compromise in order to avoid having to read all values of
// the row that is being updated.
_, deleteOldSecondaryIndexEntries, err = ru.DeleteHelper.encodeIndexes(
ru.FetchColIDtoRowIndex, oldValues, pm.IgnoreForDel, true /* includeEmpty */)
if err != nil {
return nil, err
}
}
// Check that the new value types match the column types. This needs to
// happen before index encoding because certain datum types (i.e. tuple)
// cannot be used as index values.
for i, val := range updateValues {
if ru.marshaled[i], err = rowenc.MarshalColumnValue(ru.UpdateCols[i], val); err != nil {
return nil, err
}
}
// Update the row values.
copy(ru.newValues, oldValues)
for i, updateCol := range ru.UpdateCols {
idx, ok := ru.FetchColIDtoRowIndex.Get(updateCol.GetID())
if !ok {
return nil, errors.AssertionFailedf("update column without a corresponding fetch column")
}
ru.newValues[idx] = updateValues[i]
}
rowPrimaryKeyChanged := false
if ru.primaryKeyColChange {
var newPrimaryIndexKey []byte
newPrimaryIndexKey, err =
ru.Helper.encodePrimaryIndex(ru.FetchColIDtoRowIndex, ru.newValues)
if err != nil {
return nil, err
}
rowPrimaryKeyChanged = !bytes.Equal(primaryIndexKey, newPrimaryIndexKey)
}
for i, index := range ru.Helper.Indexes {
// We don't want to insert any empty k/v's, so set includeEmpty to false.
// Consider the following case:
// TABLE t (
// x INT PRIMARY KEY, y INT, z INT, w INT,
// INDEX (y) STORING (z, w),
// FAMILY (x), FAMILY (y), FAMILY (z), FAMILY (w)
//)
// If we are to perform an update on row (1, 2, 3, NULL), the k/v pair
// for index i that encodes column w would have an empty value because w
// is null and the sole resident of that family. We want to ensure that
// we don't insert empty k/v pairs during the process of the update, so
// set includeEmpty to false while generating the old and new index
// entries.
//
// Also, we don't build entries for old and new values if the index
		// exists in pm.IgnoreForDel and pm.IgnoreForPut, respectively.
// Index IDs in these sets indicate that old and new values for the row
// do not satisfy a partial index's predicate expression.
if pm.IgnoreForDel.Contains(int(index.GetID())) {
ru.oldIndexEntries[i] = nil
} else {
ru.oldIndexEntries[i], err = rowenc.EncodeSecondaryIndex(
ru.Helper.Codec,
ru.Helper.TableDesc,
index,
ru.FetchColIDtoRowIndex,
oldValues,
false, /* includeEmpty */
)
if err != nil {
return nil, err
}
}
if pm.IgnoreForPut.Contains(int(index.GetID())) {
ru.newIndexEntries[i] = nil
} else {
ru.newIndexEntries[i], err = rowenc.EncodeSecondaryIndex(
ru.Helper.Codec,
ru.Helper.TableDesc,
index,
ru.FetchColIDtoRowIndex,
ru.newValues,
false, /* includeEmpty */
)
if err != nil {
return nil, err
}
}
if ru.Helper.Indexes[i].GetType() == descpb.IndexDescriptor_INVERTED {
// Deduplicate the keys we're adding and removing if we're updating an
// inverted index. For example, imagine a table with an inverted index on j:
//
// a | j
// --+----------------
// 1 | {"foo": "bar"}
//
// If we update the json value to be {"foo": "bar", "baz": "qux"}, we don't
// want to delete the /foo/bar key and re-add it, that would be wasted work.
// So, we are going to remove keys from both the new and old index entry
// array if they're identical.
newIndexEntries := ru.newIndexEntries[i]
oldIndexEntries := ru.oldIndexEntries[i]
sort.Slice(oldIndexEntries, func(i, j int) bool {
return compareIndexEntries(oldIndexEntries[i], oldIndexEntries[j]) < 0
})
sort.Slice(newIndexEntries, func(i, j int) bool {
return compareIndexEntries(newIndexEntries[i], newIndexEntries[j]) < 0
})
oldLen, newLen := unique.UniquifyAcrossSlices(
oldIndexEntries, newIndexEntries,
func(l, r int) int {
return compareIndexEntries(oldIndexEntries[l], newIndexEntries[r])
},
func(i, j int) {
oldIndexEntries[i] = oldIndexEntries[j]
},
func(i, j int) {
newIndexEntries[i] = newIndexEntries[j]
})
ru.oldIndexEntries[i] = oldIndexEntries[:oldLen]
ru.newIndexEntries[i] = newIndexEntries[:newLen]
}
}
if rowPrimaryKeyChanged {
if err := ru.rd.DeleteRow(ctx, batch, oldValues, pm, traceKV); err != nil {
return nil, err
}
if err := ru.ri.InsertRow(
ctx, batch, ru.newValues, pm, false /* ignoreConflicts */, traceKV,
); err != nil {
return nil, err
}
return ru.newValues, nil
}
// Add the new values.
ru.valueBuf, err = prepareInsertOrUpdateBatch(ctx, batch,
&ru.Helper, primaryIndexKey, ru.FetchCols,
ru.newValues, ru.FetchColIDtoRowIndex,
ru.marshaled, ru.UpdateColIDtoRowIndex,
&ru.key, &ru.value, ru.valueBuf, insertPutFn, true /* overwrite */, traceKV)
if err != nil {
return nil, err
}
// Update secondary indexes.
// We're iterating through all of the indexes, which should have corresponding entries
// in the new and old values.
for i, index := range ru.Helper.Indexes {
if index.GetType() == descpb.IndexDescriptor_FORWARD {
oldIdx, newIdx := 0, 0
oldEntries, newEntries := ru.oldIndexEntries[i], ru.newIndexEntries[i]
// The index entries for a particular index are stored in
// family sorted order. We use this fact to update rows.
// The algorithm to update a row using the old k/v pairs
// for the row and the new k/v pairs for the row is very
// similar to the algorithm to merge two sorted lists.
// We move in lock step through the entries, and potentially
// update k/v's that belong to the same family.
// If we are in the case where there exists a family's k/v
// in the old entries but not the new entries, we need to
// delete that k/v. If we are in the case where a family's
// k/v exists in the new index entries, then we need to just
// insert that new k/v.
for oldIdx < len(oldEntries) && newIdx < len(newEntries) {
oldEntry, newEntry := &oldEntries[oldIdx], &newEntries[newIdx]
if oldEntry.Family == newEntry.Family {
// If the families are equal, then check if the keys have changed. If so, delete the old key.
// Then, issue a CPut for the new value of the key if the value has changed.
// Because the indexes will always have a k/v for family 0, it suffices to only
// add foreign key checks in this case, because we are guaranteed to enter here.
oldIdx++
newIdx++
var expValue []byte
if !bytes.Equal(oldEntry.Key, newEntry.Key) {
if traceKV {
log.VEventf(ctx, 2, "Del %s", keys.PrettyPrint(ru.Helper.secIndexValDirs[i], oldEntry.Key))
}
batch.Del(oldEntry.Key)
} else if !newEntry.Value.EqualTagAndData(oldEntry.Value) {
expValue = oldEntry.Value.TagAndDataBytes()
} else {
continue
}
if traceKV {
k := keys.PrettyPrint(ru.Helper.secIndexValDirs[i], newEntry.Key)
v := newEntry.Value.PrettyPrint()
if expValue != nil {
log.VEventf(ctx, 2, "CPut %s -> %v (replacing %v, if exists)", k, v, expValue)
} else {
log.VEventf(ctx, 2, "CPut %s -> %v (expecting does not exist)", k, v)
}
}
batch.CPutAllowingIfNotExists(newEntry.Key, &newEntry.Value, expValue)
} else if oldEntry.Family < newEntry.Family {
if oldEntry.Family == descpb.FamilyID(0) {
return nil, errors.AssertionFailedf(
"index entry for family 0 for table %s, index %s was not generated",
ru.Helper.TableDesc.GetName(), index.GetName(),
)
}
// In this case, the index has a k/v for a family that does not exist in
// the new set of k/v's for the row. So, we need to delete the old k/v.
if traceKV {
log.VEventf(ctx, 2, "Del %s", keys.PrettyPrint(ru.Helper.secIndexValDirs[i], oldEntry.Key))
}
batch.Del(oldEntry.Key)
oldIdx++
} else {
if newEntry.Family == descpb.FamilyID(0) {
return nil, errors.AssertionFailedf(
"index entry for family 0 for table %s, index %s was not generated",
ru.Helper.TableDesc.GetName(), index.GetName(),
)
}
// In this case, the index now has a k/v that did not exist in the
// old row, so we should expect to not see a value for the new
// key, and put the new key in place.
if traceKV {
k := keys.PrettyPrint(ru.Helper.secIndexValDirs[i], newEntry.Key)
v := newEntry.Value.PrettyPrint()
log.VEventf(ctx, 2, "CPut %s -> %v (expecting does not exist)", k, v)
}
batch.CPut(newEntry.Key, &newEntry.Value, nil)
newIdx++
}
}
for oldIdx < len(oldEntries) {
// Delete any remaining old entries that are not matched by new
// entries in this row because 1) the family does not exist in
// the new set of k/v's or 2) the index is a partial index and
// the new row values do not match the partial index predicate.
oldEntry := &oldEntries[oldIdx]
if traceKV {
log.VEventf(ctx, 2, "Del %s", keys.PrettyPrint(ru.Helper.secIndexValDirs[i], oldEntry.Key))
}
batch.Del(oldEntry.Key)
oldIdx++
}
for newIdx < len(newEntries) {
				// Insert any remaining new entries that are not present in the
				// old row because 1) the family does not exist in the old set
				// of k/v's or 2) the index is a partial index and the old row
				// values do not match the partial index predicate.
newEntry := &newEntries[newIdx]
if traceKV {
k := keys.PrettyPrint(ru.Helper.secIndexValDirs[i], newEntry.Key)
v := newEntry.Value.PrettyPrint()
log.VEventf(ctx, 2, "CPut %s -> %v (expecting does not exist)", k, v)
}
batch.CPut(newEntry.Key, &newEntry.Value, nil)
newIdx++
}
} else {
// Remove all inverted index entries, and re-add them.
for j := range ru.oldIndexEntries[i] {
if traceKV {
log.VEventf(ctx, 2, "Del %s", ru.oldIndexEntries[i][j].Key)
}
batch.Del(ru.oldIndexEntries[i][j].Key)
}
putFn := insertInvertedPutFn
// We're adding all of the inverted index entries from the row being updated.
for j := range ru.newIndexEntries[i] {
putFn(ctx, batch, &ru.newIndexEntries[i][j].Key, &ru.newIndexEntries[i][j].Value, traceKV)
}
}
}
	// We're deleting indexes in a delete-only state. We're bounding this by the
	// number of indexes because inverted indexes will be handled separately.
if ru.DeleteHelper != nil {
for _, deletedSecondaryIndexEntry := range deleteOldSecondaryIndexEntries {
if traceKV {
log.VEventf(ctx, 2, "Del %s", deletedSecondaryIndexEntry.Key)
}
batch.Del(deletedSecondaryIndexEntry.Key)
}
}
return ru.newValues, nil
}
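// compareIndexEntries orders two index entries by key and, when the keys are
// equal, by their raw value bytes.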
func compareIndexEntries(left, right rowenc.IndexEntry) int {
cmp := bytes.Compare(left.Key, right.Key)
if cmp != 0 {
return cmp
}
return bytes.Compare(left.Value.RawBytes, right.Value.RawBytes)
}
// IsColumnOnlyUpdate returns true if this Updater is only updating column
// data (in contrast to updating the primary key or other indexes).
func (ru *Updater) IsColumnOnlyUpdate() bool {
// TODO(dan): This is used in the schema change backfill to assert that it was
// configured correctly and will not be doing things it shouldn't. This is an
// unfortunate bleeding of responsibility and indicates the abstraction could
// be improved. Specifically, Updater currently has two responsibilities
// (computing which indexes need to be updated and mapping sql rows to k/v
// operations) and these should be split.
return !ru.primaryKeyColChange && ru.DeleteHelper == nil && len(ru.Helper.Indexes) == 0
}
| pkg/sql/row/updater.go | 0 | https://github.com/cockroachdb/cockroach/commit/6a1b9c891f20b32d985a451118f690de01dc6e37 | [
0.00018435569654684514,
0.00017059115634765476,
0.00015628786059096456,
0.00017124514852184802,
0.000005632324246107601
] |
{
"id": 2,
"code_window": [
"\t\t\t\t\t\t\t// wrap it with a noop operator. It is ok for the\n",
"\t\t\t\t\t\t\t// purposes of this benchmark.\n",
"\t\t\t\t\t\t\treturn colexecop.NewNoop(op), err\n",
"\t\t\t\t\t\t},\n",
"\t\t\t\t\t\tname: fmt.Sprintf(\"spilled=%t\", spillForced),\n",
"\t\t\t\t\t},\n",
"\t\t\t\t\taggFn, []*types.T{types.Int}, 1 /* numGroupCol */, groupSize,\n",
"\t\t\t\t\t0 /* distinctProb */, numInputRows, 0 /* chunkSize */, 0 /* limit */)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\t\tname: fmt.Sprintf(\"spilled=%t\", spillForced),\n",
"\t\t\t\t\t\torder: unordered,\n"
],
"file_path": "pkg/sql/colexec/external_hash_aggregator_test.go",
"type": "replace",
"edit_start_line_idx": 220
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package clisqlclient
import (
"database/sql/driver"
"io"
"os"
"time"
"github.com/cockroachdb/errors"
)
// StmtDiagBundleInfo contains information about a statement diagnostics bundle
// that was collected.
type StmtDiagBundleInfo struct {
ID int64
// Statement is the SQL statement fingerprint.
Statement string
CollectedAt time.Time
}
// StmtDiagListBundles retrieves information about all available statement
// diagnostics bundles.
func StmtDiagListBundles(conn Conn) ([]StmtDiagBundleInfo, error) {
result, err := stmtDiagListBundlesInternal(conn)
if err != nil {
return nil, errors.Wrap(
err, "failed to retrieve statement diagnostics bundles",
)
}
return result, nil
}
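// stmtDiagListBundlesInternal queries system.statement_diagnostics for
// successfully collected bundles, newest first.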
func stmtDiagListBundlesInternal(conn Conn) ([]StmtDiagBundleInfo, error) {
rows, err := conn.Query(
`SELECT id, statement_fingerprint, collected_at
FROM system.statement_diagnostics
WHERE error IS NULL
ORDER BY collected_at DESC`,
nil, /* args */
)
if err != nil {
return nil, err
}
var result []StmtDiagBundleInfo
vals := make([]driver.Value, 3)
for {
if err := rows.Next(vals); err == io.EOF {
break
} else if err != nil {
return nil, err
}
info := StmtDiagBundleInfo{
ID: vals[0].(int64),
Statement: vals[1].(string),
CollectedAt: vals[2].(time.Time),
}
result = append(result, info)
}
if err := rows.Close(); err != nil {
return nil, err
}
return result, nil
}
// StmtDiagActivationRequest contains information about a statement diagnostics
// activation request.
type StmtDiagActivationRequest struct {
ID int64
// Statement is the SQL statement fingerprint.
Statement string
RequestedAt time.Time
}
// StmtDiagListOutstandingRequests retrieves outstanding statement diagnostics
// activation requests.
func StmtDiagListOutstandingRequests(conn Conn) ([]StmtDiagActivationRequest, error) {
result, err := stmtDiagListOutstandingRequestsInternal(conn)
if err != nil {
return nil, errors.Wrap(
err, "failed to retrieve outstanding statement diagnostics activation requests",
)
}
return result, nil
}
func stmtDiagListOutstandingRequestsInternal(conn Conn) ([]StmtDiagActivationRequest, error) {
rows, err := conn.Query(
`SELECT id, statement_fingerprint, requested_at
FROM system.statement_diagnostics_requests
WHERE NOT completed
ORDER BY requested_at DESC`,
nil, /* args */
)
if err != nil {
return nil, err
}
var result []StmtDiagActivationRequest
vals := make([]driver.Value, 3)
for {
if err := rows.Next(vals); err == io.EOF {
break
} else if err != nil {
return nil, err
}
info := StmtDiagActivationRequest{
ID: vals[0].(int64),
Statement: vals[1].(string),
RequestedAt: vals[2].(time.Time),
}
result = append(result, info)
}
if err := rows.Close(); err != nil {
return nil, err
}
return result, nil
}
// StmtDiagDownloadBundle downloads the bundle with the given ID to a file.
func StmtDiagDownloadBundle(conn Conn, id int64, filename string) error {
if err := stmtDiagDownloadBundleInternal(conn, id, filename); err != nil {
return errors.Wrapf(
err, "failed to download statement diagnostics bundle %d to '%s'", id, filename,
)
}
return nil
}
func stmtDiagDownloadBundleInternal(conn Conn, id int64, filename string) error {
// Retrieve the chunk IDs; these are stored in an INT ARRAY column.
rows, err := conn.Query(
"SELECT unnest(bundle_chunks) FROM system.statement_diagnostics WHERE id = $1",
[]driver.Value{id},
)
if err != nil {
return err
}
var chunkIDs []int64
vals := make([]driver.Value, 1)
for {
if err := rows.Next(vals); err == io.EOF {
break
} else if err != nil {
return err
}
chunkIDs = append(chunkIDs, vals[0].(int64))
}
if err := rows.Close(); err != nil {
return err
}
if len(chunkIDs) == 0 {
return errors.Newf("no statement diagnostics bundle with ID %d", id)
}
// Create the file and write out the chunks.
out, err := os.Create(filename)
if err != nil {
return err
}
for _, chunkID := range chunkIDs {
data, err := conn.QueryRow(
"SELECT data FROM system.statement_bundle_chunks WHERE id = $1",
[]driver.Value{chunkID},
)
if err != nil {
_ = out.Close()
return err
}
if _, err := out.Write(data[0].([]byte)); err != nil {
_ = out.Close()
return err
}
}
return out.Close()
}
// StmtDiagDeleteBundle deletes a statement diagnostics bundle.
func StmtDiagDeleteBundle(conn Conn, id int64) error {
_, err := conn.QueryRow(
"SELECT 1 FROM system.statement_diagnostics WHERE id = $1",
[]driver.Value{id},
)
if err != nil {
if err == io.EOF {
return errors.Newf("no statement diagnostics bundle with ID %d", id)
}
return err
}
return conn.ExecTxn(func(conn TxBoundConn) error {
// Delete the request metadata.
if err := conn.Exec(
"DELETE FROM system.statement_diagnostics_requests WHERE statement_diagnostics_id = $1",
[]driver.Value{id},
); err != nil {
return err
}
// Delete the bundle chunks.
if err := conn.Exec(
`DELETE FROM system.statement_bundle_chunks
WHERE id IN (
SELECT unnest(bundle_chunks) FROM system.statement_diagnostics WHERE id = $1
)`,
[]driver.Value{id},
); err != nil {
return err
}
// Finally, delete the diagnostics entry.
return conn.Exec(
"DELETE FROM system.statement_diagnostics WHERE id = $1",
[]driver.Value{id},
)
})
}
// StmtDiagDeleteAllBundles deletes all statement diagnostics bundles.
func StmtDiagDeleteAllBundles(conn Conn) error {
return conn.ExecTxn(func(conn TxBoundConn) error {
// Delete the request metadata.
if err := conn.Exec(
"DELETE FROM system.statement_diagnostics_requests WHERE completed",
nil,
); err != nil {
return err
}
// Delete all bundle chunks.
if err := conn.Exec(
`DELETE FROM system.statement_bundle_chunks WHERE true`,
nil,
); err != nil {
return err
}
// Finally, delete the diagnostics entry.
return conn.Exec(
"DELETE FROM system.statement_diagnostics WHERE true",
nil,
)
})
}
// StmtDiagCancelOutstandingRequest deletes an outstanding statement diagnostics
// activation request.
func StmtDiagCancelOutstandingRequest(conn Conn, id int64) error {
_, err := conn.QueryRow(
"DELETE FROM system.statement_diagnostics_requests WHERE id = $1 RETURNING id",
[]driver.Value{id},
)
if err != nil {
if err == io.EOF {
return errors.Newf("no outstanding activation request with ID %d", id)
}
return err
}
return nil
}
// StmtDiagCancelAllOutstandingRequests deletes all outstanding statement
// diagnostics activation requests.
func StmtDiagCancelAllOutstandingRequests(conn Conn) error {
return conn.Exec(
"DELETE FROM system.statement_diagnostics_requests WHERE NOT completed",
nil,
)
}
| pkg/cli/clisqlclient/statement_diag.go | 0 | https://github.com/cockroachdb/cockroach/commit/6a1b9c891f20b32d985a451118f690de01dc6e37 | [
0.007679580245167017,
0.0009548780508339405,
0.00016244183643721044,
0.00017241996829397976,
0.0016767523484304547
] |
{
"id": 2,
"code_window": [
"\t\t\t\t\t\t\t// wrap it with a noop operator. It is ok for the\n",
"\t\t\t\t\t\t\t// purposes of this benchmark.\n",
"\t\t\t\t\t\t\treturn colexecop.NewNoop(op), err\n",
"\t\t\t\t\t\t},\n",
"\t\t\t\t\t\tname: fmt.Sprintf(\"spilled=%t\", spillForced),\n",
"\t\t\t\t\t},\n",
"\t\t\t\t\taggFn, []*types.T{types.Int}, 1 /* numGroupCol */, groupSize,\n",
"\t\t\t\t\t0 /* distinctProb */, numInputRows, 0 /* chunkSize */, 0 /* limit */)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\t\tname: fmt.Sprintf(\"spilled=%t\", spillForced),\n",
"\t\t\t\t\t\torder: unordered,\n"
],
"file_path": "pkg/sql/colexec/external_hash_aggregator_test.go",
"type": "replace",
"edit_start_line_idx": 220
} | ## CockroachDB Projects
Suitable for | Project | Resources
-------------|---------|------------
For new developers | [Create a to-do app using CockroachDB and a language/ORM of your choice](https://github.com/cockroachdb/cockroachdb-todo-apps) | [How to contribute to the to-do apps repository](https://github.com/cockroachdb/cockroachdb-todo-apps#how-to-contribute-to-this-repository)
For Go developers | Work on CockroachDB code: [List of good first issues](https://github.com/cockroachdb/cockroach/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) | [Your first CockroachDB PR](https://wiki.crdb.io/wiki/spaces/CRDB/pages/181633464/Your+first+CockroachDB+PR)
For Kubernetes enthusiasts | Work on the Kubernetes Operator: [List of good first issues](https://github.com/cockroachdb/cockroach-operator/labels/good%20first%20issue) | [Your first CockroachDB PR](https://wiki.crdb.io/wiki/spaces/CRDB/pages/181633464/Your+first+CockroachDB+PR)
For tech writers and docs enthusiasts | Help improve CockroachDB docs: [List of good first issues](https://github.com/cockroachdb/docs/issues?q=is%3Aopen+is%3Aissue+label%3Agood-first-issue) | [Docs contribution guide](https://github.com/cockroachdb/docs/wiki#using-github-desktop)
## Contributor Guidelines
Our contributor guidelines are available on [the public wiki at **wiki.crdb.io**](https://wiki.crdb.io/wiki/spaces/CRDB/pages/73204033/Contributing+to+CockroachDB).
At this location, we share our team guidelines and knowledge base
regarding:
- repository layout
- how to build from source
- how to organize your code change
- commenting guidelines
- commit message guidelines
- code style guidelines
- how to write and run tests
- how to write release notes
- how to submit a change for review
- how to use continuous integration (CI)
- how to troubleshoot certain issues
as well as many other practical topics.
## Don’t Forget to Join our Community
Join our [Community Slack](https://go.crdb.dev/p/slack) (there's a dedicated #contributors channel!) to ask questions, discuss your ideas, or connect with other contributors.
Please follow the guidelines outlined in our [Code of Conduct](https://docs.google.com/document/d/1_BB3IrsAVglDNPy37Z6KQlii_c3fYETFlWMMBUpbY1M/edit#) to help us make the CockroachDB community a welcoming and helpful place for everyone.
| CONTRIBUTING.md | 0 | https://github.com/cockroachdb/cockroach/commit/6a1b9c891f20b32d985a451118f690de01dc6e37 | [
0.00017510561156086624,
0.00017108276369981468,
0.00016253588546533138,
0.0001733448007144034,
0.0000049999653128907084
] |
{
"id": 0,
"code_window": [
"// AttachedVolume represents a volume that is attached to a node.\n",
"type AttachedVolume struct {\n",
"\toperationexecutor.AttachedVolume\n",
"\n",
"\t// MountedByNode indicates that this volume has been been mounted by the\n",
"\t// node and is unsafe to detach.\n",
"\t// The value is set and unset by SetVolumeMountedByNode(...).\n",
"\tMountedByNode bool\n",
"\n",
"\t// DetachRequestedTime is used to capture the desire to detach this volume.\n",
"\t// When the volume is newly created this value is set to time zero.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// MountedByNode indicates that this volume has been mounted by the node and\n",
"\t// is unsafe to detach.\n"
],
"file_path": "pkg/controller/volume/attachdetach/cache/actual_state_of_world.go",
"type": "replace",
"edit_start_line_idx": 133
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package cache implements data structures used by the attach/detach controller
to keep track of volumes, the nodes they are attached to, and the pods that
reference them.
*/
package cache
import (
"fmt"
"sync"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// ActualStateOfWorld defines a set of thread-safe operations supported on
// the attach/detach controller's actual state of the world cache.
// This cache contains volumes->nodes i.e. a set of all volumes and the nodes
// the attach/detach controller believes are successfully attached.
// Note: This is distinct from the ActualStateOfWorld implemented by the kubelet
// volume manager. They both keep track of different objects. This contains
// attach/detach controller specific state.
type ActualStateOfWorld interface {
// ActualStateOfWorld must implement the methods required to allow
// operationexecutor to interact with it.
operationexecutor.ActualStateOfWorldAttacherUpdater
// AddVolumeNode adds the given volume and node to the underlying store
// indicating the specified volume is attached to the specified node.
// A unique volume name is generated from the volumeSpec and returned on
// success.
// If volumeSpec is not an attachable volume plugin, an error is returned.
// If no volume with the name volumeName exists in the store, the volume is
// added.
	// If no node with the name nodeName exists in the list of attached nodes for
// the specified volume, the node is added.
AddVolumeNode(uniqueName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) (v1.UniqueVolumeName, error)
// SetVolumeMountedByNode sets the MountedByNode value for the given volume
// and node. When set to true the mounted parameter indicates the volume
// is mounted by the given node, indicating it may not be safe to detach.
// If the forceUnmount is set to true the MountedByNode value would be reset
// to false even it was not set yet (this is required during a controller
// crash recovery).
// If no volume with the name volumeName exists in the store, an error is
// returned.
// If no node with the name nodeName exists in list of attached nodes for
// the specified volume, an error is returned.
SetVolumeMountedByNode(volumeName v1.UniqueVolumeName, nodeName types.NodeName, mounted bool) error
// SetNodeStatusUpdateNeeded sets statusUpdateNeeded for the specified
// node to true indicating the AttachedVolume field in the Node's Status
// object needs to be updated by the node updater again.
// If the specified node does not exist in the nodesToUpdateStatusFor list,
// log the error and return
SetNodeStatusUpdateNeeded(nodeName types.NodeName)
// ResetDetachRequestTime resets the detachRequestTime to 0 which indicates there is no detach
// request any more for the volume
ResetDetachRequestTime(volumeName v1.UniqueVolumeName, nodeName types.NodeName)
// SetDetachRequestTime sets the detachRequestedTime to current time if this is no
// previous request (the previous detachRequestedTime is zero) and return the time elapsed
// since last request
SetDetachRequestTime(volumeName v1.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error)
// DeleteVolumeNode removes the given volume and node from the underlying
// store indicating the specified volume is no longer attached to the
// specified node.
// If the volume/node combo does not exist, this is a no-op.
// If after deleting the node, the specified volume contains no other child
// nodes, the volume is also deleted.
DeleteVolumeNode(volumeName v1.UniqueVolumeName, nodeName types.NodeName)
// VolumeNodeExists returns true if the specified volume/node combo exists
// in the underlying store indicating the specified volume is attached to
// the specified node.
VolumeNodeExists(volumeName v1.UniqueVolumeName, nodeName types.NodeName) bool
// GetAttachedVolumes generates and returns a list of volumes/node pairs
// reflecting which volumes are attached to which nodes based on the
// current actual state of the world.
GetAttachedVolumes() []AttachedVolume
// GetAttachedVolumes generates and returns a list of volumes attached to
// the specified node reflecting which volumes are attached to that node
// based on the current actual state of the world.
GetAttachedVolumesForNode(nodeName types.NodeName) []AttachedVolume
GetAttachedVolumesPerNode() map[types.NodeName][]operationexecutor.AttachedVolume
// GetNodesForVolume returns the nodes on which the volume is attached
GetNodesForVolume(volumeName v1.UniqueVolumeName) []types.NodeName
// GetVolumesToReportAttached returns a map containing the set of nodes for
// which the VolumesAttached Status field in the Node API object should be
// updated. The key in this map is the name of the node to update and the
// value is list of volumes that should be reported as attached (note that
// this may differ from the actual list of attached volumes for the node
// since volumes should be removed from this list as soon a detach operation
// is considered, before the detach operation is triggered).
GetVolumesToReportAttached() map[types.NodeName][]v1.AttachedVolume
// GetNodesToUpdateStatusFor returns the map of nodeNames to nodeToUpdateStatusFor
GetNodesToUpdateStatusFor() map[types.NodeName]nodeToUpdateStatusFor
}
// AttachedVolume represents a volume that is attached to a node.
type AttachedVolume struct {
operationexecutor.AttachedVolume
	// MountedByNode indicates that this volume has been mounted by the
// node and is unsafe to detach.
// The value is set and unset by SetVolumeMountedByNode(...).
MountedByNode bool
// DetachRequestedTime is used to capture the desire to detach this volume.
// When the volume is newly created this value is set to time zero.
// It is set to current time, when SetDetachRequestTime(...) is called, if it
	// was previously set to zero (otherwise its value remains the same).
// It is reset to zero on ResetDetachRequestTime(...) calls.
DetachRequestedTime time.Time
}
// NewActualStateOfWorld returns a new instance of ActualStateOfWorld.
func NewActualStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) ActualStateOfWorld {
return &actualStateOfWorld{
attachedVolumes: make(map[v1.UniqueVolumeName]attachedVolume),
nodesToUpdateStatusFor: make(map[types.NodeName]nodeToUpdateStatusFor),
volumePluginMgr: volumePluginMgr,
}
}
type actualStateOfWorld struct {
// attachedVolumes is a map containing the set of volumes the attach/detach
// controller believes to be successfully attached to the nodes it is
// managing. The key in this map is the name of the volume and the value is
// an object containing more information about the attached volume.
attachedVolumes map[v1.UniqueVolumeName]attachedVolume
// nodesToUpdateStatusFor is a map containing the set of nodes for which to
// update the VolumesAttached Status field. The key in this map is the name
// of the node and the value is an object containing more information about
// the node (including the list of volumes to report attached).
nodesToUpdateStatusFor map[types.NodeName]nodeToUpdateStatusFor
// volumePluginMgr is the volume plugin manager used to create volume
// plugin objects.
volumePluginMgr *volume.VolumePluginMgr
sync.RWMutex
}
// The volume object represents a volume the attach/detach controller
// believes to be successfully attached to a node it is managing.
type attachedVolume struct {
// volumeName contains the unique identifier for this volume.
volumeName v1.UniqueVolumeName
// spec is the volume spec containing the specification for this volume.
// Used to generate the volume plugin object, and passed to attach/detach
// methods.
spec *volume.Spec
// nodesAttachedTo is a map containing the set of nodes this volume has
// successfully been attached to. The key in this map is the name of the
// node and the value is a node object containing more information about
// the node.
nodesAttachedTo map[types.NodeName]nodeAttachedTo
// devicePath contains the path on the node where the volume is attached
devicePath string
}
// The nodeAttachedTo object represents a node that has volumes attached to it.
type nodeAttachedTo struct {
// nodeName contains the name of this node.
nodeName types.NodeName
// mountedByNode indicates that this node/volume combo is mounted by the
// node and is unsafe to detach
mountedByNode bool
// number of times SetVolumeMountedByNode has been called to set the value
// of mountedByNode to true. This is used to prevent mountedByNode from
// being reset during the period between attach and mount when volumesInUse
// status for the node may not be set.
mountedByNodeSetCount uint
// detachRequestedTime used to capture the desire to detach this volume
detachRequestedTime time.Time
}
// nodeToUpdateStatusFor is an object that reflects a node that has one or more
// volumes attached. It keeps track of the volumes that should be reported as
// attached in the Node's Status API object.
type nodeToUpdateStatusFor struct {
// nodeName contains the name of this node.
nodeName types.NodeName
// statusUpdateNeeded indicates that the value of the VolumesAttached field
// in the Node's Status API object should be updated. This should be set to
// true whenever a volume is added or deleted from
// volumesToReportAsAttached. It should be reset whenever the status is
// updated.
statusUpdateNeeded bool
// volumesToReportAsAttached is the list of volumes that should be reported
// as attached in the Node's status (note that this may differ from the
// actual list of attached volumes since volumes should be removed from this
// list as soon as a detach operation is considered, before the detach
// operation is triggered).
volumesToReportAsAttached map[v1.UniqueVolumeName]v1.UniqueVolumeName
}
func (asw *actualStateOfWorld) MarkVolumeAsAttached(
uniqueName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) error {
_, err := asw.AddVolumeNode(uniqueName, volumeSpec, nodeName, devicePath)
return err
}
func (asw *actualStateOfWorld) MarkVolumeAsDetached(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
asw.DeleteVolumeNode(volumeName, nodeName)
}
func (asw *actualStateOfWorld) RemoveVolumeFromReportAsAttached(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) error {
asw.Lock()
defer asw.Unlock()
return asw.removeVolumeFromReportAsAttached(volumeName, nodeName)
}
func (asw *actualStateOfWorld) AddVolumeToReportAsAttached(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
asw.Lock()
defer asw.Unlock()
asw.addVolumeToReportAsAttached(volumeName, nodeName)
}
func (asw *actualStateOfWorld) AddVolumeNode(
uniqueName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) (v1.UniqueVolumeName, error) {
asw.Lock()
defer asw.Unlock()
var volumeName v1.UniqueVolumeName
if volumeSpec != nil {
attachableVolumePlugin, err := asw.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
if err != nil || attachableVolumePlugin == nil {
return "", fmt.Errorf(
"failed to get AttachablePlugin from volumeSpec for volume %q err=%v",
volumeSpec.Name(),
err)
}
volumeName, err = volumehelper.GetUniqueVolumeNameFromSpec(
attachableVolumePlugin, volumeSpec)
if err != nil {
return "", fmt.Errorf(
"failed to GetUniqueVolumeNameFromSpec for volumeSpec %q err=%v",
volumeSpec.Name(),
err)
}
} else {
// volumeSpec is nil
// This happens only on controller startup when reading the volumes from node
// status; if the pods using the volume have been removed and are unreachable
// the volumes should be detached immediately and the spec is not needed
volumeName = uniqueName
}
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
volumeObj = attachedVolume{
volumeName: volumeName,
spec: volumeSpec,
nodesAttachedTo: make(map[types.NodeName]nodeAttachedTo),
devicePath: devicePath,
}
} else {
// If the volume object already exists, its stored information may be out of date.
// Update its fields except the set of nodes attached to the volume.
volumeObj.devicePath = devicePath
volumeObj.spec = volumeSpec
glog.V(2).Infof("Volume %q is already added to attachedVolume list to node %q, update device path %q",
volumeName,
nodeName,
devicePath)
}
asw.attachedVolumes[volumeName] = volumeObj
_, nodeExists := volumeObj.nodesAttachedTo[nodeName]
if !nodeExists {
// Create object if it doesn't exist.
volumeObj.nodesAttachedTo[nodeName] = nodeAttachedTo{
nodeName: nodeName,
mountedByNode: true, // Assume mounted, until proven otherwise
mountedByNodeSetCount: 0,
detachRequestedTime: time.Time{},
}
} else {
glog.V(5).Infof("Volume %q is already added to attachedVolume list to the node %q",
volumeName,
nodeName)
}
asw.addVolumeToReportAsAttached(volumeName, nodeName)
return volumeName, nil
}
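// A hedged sketch of the two AddVolumeNode paths (names are illustrative):
//
//	// Normal path: a spec is available, so the unique name is derived from it
//	// and the uniqueName argument is ignored.
//	name, err := asw.AddVolumeNode("", volumeSpec, "node-1", "/dev/sdc")
//	// Controller-startup path: only the name from Node.Status is known, so a
//	// nil spec is passed and the given unique name is used as-is.
//	name, err = asw.AddVolumeNode("kubernetes.io/fake-plugin/vol-2", nil, "node-1", "/dev/sdd")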
func (asw *actualStateOfWorld) SetVolumeMountedByNode(
volumeName v1.UniqueVolumeName, nodeName types.NodeName, mounted bool) error {
asw.Lock()
defer asw.Unlock()
volumeObj, nodeObj, err := asw.getNodeAndVolume(volumeName, nodeName)
if err != nil {
return fmt.Errorf("Failed to SetVolumeMountedByNode with error: %v", err)
}
if mounted {
// Increment set count
nodeObj.mountedByNodeSetCount = nodeObj.mountedByNodeSetCount + 1
}
nodeObj.mountedByNode = mounted
volumeObj.nodesAttachedTo[nodeName] = nodeObj
glog.V(4).Infof("SetVolumeMountedByNode volume %v to the node %q mounted %t",
volumeName,
nodeName,
mounted)
return nil
}
func (asw *actualStateOfWorld) ResetDetachRequestTime(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
asw.Lock()
defer asw.Unlock()
volumeObj, nodeObj, err := asw.getNodeAndVolume(volumeName, nodeName)
if err != nil {
glog.Errorf("Failed to ResetDetachRequestTime with error: %v", err)
return
}
nodeObj.detachRequestedTime = time.Time{}
volumeObj.nodesAttachedTo[nodeName] = nodeObj
}
func (asw *actualStateOfWorld) SetDetachRequestTime(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error) {
asw.Lock()
defer asw.Unlock()
volumeObj, nodeObj, err := asw.getNodeAndVolume(volumeName, nodeName)
if err != nil {
return 0, fmt.Errorf("Failed to set detach request time with error: %v", err)
}
// If there is no previous detach request, set it to the current time
if nodeObj.detachRequestedTime.IsZero() {
nodeObj.detachRequestedTime = time.Now()
volumeObj.nodesAttachedTo[nodeName] = nodeObj
glog.V(4).Infof("Set detach request time to current time for volume %v on node %q",
volumeName,
nodeName)
}
return time.Since(nodeObj.detachRequestedTime), nil
}
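// Hedged sketch of how a reconciler-style caller might use the detach timer
// (the threshold below is illustrative, not a value taken from this package):
//
//	elapsed, err := asw.SetDetachRequestTime(volName, "node-1") // first call starts the clock
//	if err == nil && elapsed > maxWaitForUnmount {
//		// the volume has been pending detach long enough; proceed with a forced detach
//	}
//	asw.ResetDetachRequestTime(volName, "node-1") // once the detach succeeds or is abandoned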
// Get the volume and node object from actual state of world
// This is an internal function and caller should acquire and release the lock
//
// Note that this returns disconnected objects, so if you change the volume object you must set it back with
// `asw.attachedVolumes[volumeName]=volumeObj`.
//
// If you change the node object you must use `volumeObj.nodesAttachedTo[nodeName] = nodeObj`
// This is correct, because if volumeObj is empty this function returns an error, and nodesAttachedTo
// map is a reference type, and thus mutating the copy changes the original map.
func (asw *actualStateOfWorld) getNodeAndVolume(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) (attachedVolume, nodeAttachedTo, error) {
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if volumeExists {
nodeObj, nodeExists := volumeObj.nodesAttachedTo[nodeName]
if nodeExists {
return volumeObj, nodeObj, nil
}
}
return attachedVolume{}, nodeAttachedTo{}, fmt.Errorf("volume %v is no longer attached to the node %q",
volumeName,
nodeName)
}
// Remove the volumeName from the node's volumesToReportAsAttached list
// This is an internal function and caller should acquire and release the lock
func (asw *actualStateOfWorld) removeVolumeFromReportAsAttached(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) error {
nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]
if nodeToUpdateExists {
_, nodeToUpdateVolumeExists :=
nodeToUpdate.volumesToReportAsAttached[volumeName]
if nodeToUpdateVolumeExists {
nodeToUpdate.statusUpdateNeeded = true
delete(nodeToUpdate.volumesToReportAsAttached, volumeName)
asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
return nil
}
}
return fmt.Errorf("volume %q does not exist in volumesToReportAsAttached list or node %q does not exist in nodesToUpdateStatusFor list",
volumeName,
nodeName)
}
// Add the volumeName to the node's volumesToReportAsAttached list
// This is an internal function and caller should acquire and release the lock
func (asw *actualStateOfWorld) addVolumeToReportAsAttached(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
// In case the volume/node entry is no longer in attachedVolume list, skip the rest
if _, _, err := asw.getNodeAndVolume(volumeName, nodeName); err != nil {
glog.V(4).Infof("Volume %q is no longer attached to node %q", volumeName, nodeName)
return
}
nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]
if !nodeToUpdateExists {
// Create object if it doesn't exist
nodeToUpdate = nodeToUpdateStatusFor{
nodeName: nodeName,
statusUpdateNeeded: true,
volumesToReportAsAttached: make(map[v1.UniqueVolumeName]v1.UniqueVolumeName),
}
asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
glog.V(4).Infof("Add new node %q to nodesToUpdateStatusFor", nodeName)
}
_, nodeToUpdateVolumeExists :=
nodeToUpdate.volumesToReportAsAttached[volumeName]
if !nodeToUpdateVolumeExists {
nodeToUpdate.statusUpdateNeeded = true
nodeToUpdate.volumesToReportAsAttached[volumeName] = volumeName
asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
glog.V(4).Infof("Report volume %q as attached to node %q", volumeName, nodeName)
}
}
// Update the flag statusUpdateNeeded to indicate whether node status is already updated or
// needs to be updated again by the node status updater.
// If the specified node does not exist in the nodesToUpdateStatusFor list, an error is returned
// This is an internal function and caller should acquire and release the lock
func (asw *actualStateOfWorld) updateNodeStatusUpdateNeeded(nodeName types.NodeName, needed bool) error {
nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]
if !nodeToUpdateExists {
// should not happen
errMsg := fmt.Sprintf("Failed to set statusUpdateNeeded to needed %t because nodeName=%q does not exist",
needed, nodeName)
return fmt.Errorf(errMsg)
}
nodeToUpdate.statusUpdateNeeded = needed
asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
return nil
}
func (asw *actualStateOfWorld) SetNodeStatusUpdateNeeded(nodeName types.NodeName) {
asw.Lock()
defer asw.Unlock()
if err := asw.updateNodeStatusUpdateNeeded(nodeName, true); err != nil {
glog.Warningf("Failed to update statusUpdateNeeded field in actual state of world: %v", err)
}
}
func (asw *actualStateOfWorld) DeleteVolumeNode(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
asw.Lock()
defer asw.Unlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
return
}
_, nodeExists := volumeObj.nodesAttachedTo[nodeName]
if nodeExists {
delete(asw.attachedVolumes[volumeName].nodesAttachedTo, nodeName)
}
if len(volumeObj.nodesAttachedTo) == 0 {
delete(asw.attachedVolumes, volumeName)
}
// Remove volume from volumes to report as attached
asw.removeVolumeFromReportAsAttached(volumeName, nodeName)
}
func (asw *actualStateOfWorld) VolumeNodeExists(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) bool {
asw.RLock()
defer asw.RUnlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if volumeExists {
if _, nodeExists := volumeObj.nodesAttachedTo[nodeName]; nodeExists {
return true
}
}
return false
}
func (asw *actualStateOfWorld) GetAttachedVolumes() []AttachedVolume {
asw.RLock()
defer asw.RUnlock()
attachedVolumes := make([]AttachedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
for _, volumeObj := range asw.attachedVolumes {
for _, nodeObj := range volumeObj.nodesAttachedTo {
attachedVolumes = append(
attachedVolumes,
getAttachedVolume(&volumeObj, &nodeObj))
}
}
return attachedVolumes
}
func (asw *actualStateOfWorld) GetAttachedVolumesForNode(
nodeName types.NodeName) []AttachedVolume {
asw.RLock()
defer asw.RUnlock()
attachedVolumes := make(
[]AttachedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
for _, volumeObj := range asw.attachedVolumes {
for actualNodeName, nodeObj := range volumeObj.nodesAttachedTo {
if actualNodeName == nodeName {
attachedVolumes = append(
attachedVolumes,
getAttachedVolume(&volumeObj, &nodeObj))
}
}
}
return attachedVolumes
}
func (asw *actualStateOfWorld) GetAttachedVolumesPerNode() map[types.NodeName][]operationexecutor.AttachedVolume {
asw.RLock()
defer asw.RUnlock()
attachedVolumesPerNode := make(map[types.NodeName][]operationexecutor.AttachedVolume)
for _, volumeObj := range asw.attachedVolumes {
for nodeName, nodeObj := range volumeObj.nodesAttachedTo {
volumes := attachedVolumesPerNode[nodeName]
volumes = append(volumes, getAttachedVolume(&volumeObj, &nodeObj).AttachedVolume)
attachedVolumesPerNode[nodeName] = volumes
}
}
return attachedVolumesPerNode
}
func (asw *actualStateOfWorld) GetNodesForVolume(volumeName v1.UniqueVolumeName) []types.NodeName {
asw.RLock()
defer asw.RUnlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists || len(volumeObj.nodesAttachedTo) == 0 {
return []types.NodeName{}
}
nodes := []types.NodeName{}
for k := range volumeObj.nodesAttachedTo {
nodes = append(nodes, k)
}
return nodes
}
func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[types.NodeName][]v1.AttachedVolume {
asw.RLock()
defer asw.RUnlock()
volumesToReportAttached := make(map[types.NodeName][]v1.AttachedVolume)
for nodeName, nodeToUpdateObj := range asw.nodesToUpdateStatusFor {
if nodeToUpdateObj.statusUpdateNeeded {
attachedVolumes := make(
[]v1.AttachedVolume,
len(nodeToUpdateObj.volumesToReportAsAttached) /* len */)
i := 0
for _, volume := range nodeToUpdateObj.volumesToReportAsAttached {
attachedVolumes[i] = v1.AttachedVolume{
Name: volume,
DevicePath: asw.attachedVolumes[volume].devicePath,
}
i++
}
volumesToReportAttached[nodeToUpdateObj.nodeName] = attachedVolumes
}
// When GetVolumesToReportAttached is called by node status updater, the current status
// of this node will be updated, so set the flag statusUpdateNeeded to false indicating
// the current status is already updated.
if err := asw.updateNodeStatusUpdateNeeded(nodeName, false); err != nil {
glog.Errorf("Failed to update statusUpdateNeeded field when getting volumes: %v", err)
}
}
return volumesToReportAttached
}
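// Hedged sketch of the intended consumer, a node status updater:
//
//	for nodeName, attached := range asw.GetVolumesToReportAttached() {
//		// Patch node.Status.VolumesAttached with `attached`; if the update fails,
//		// call asw.SetNodeStatusUpdateNeeded(nodeName) so the entry is retried.
//	}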
func (asw *actualStateOfWorld) GetNodesToUpdateStatusFor() map[types.NodeName]nodeToUpdateStatusFor {
return asw.nodesToUpdateStatusFor
}
func getAttachedVolume(
attachedVolume *attachedVolume,
nodeAttachedTo *nodeAttachedTo) AttachedVolume {
return AttachedVolume{
AttachedVolume: operationexecutor.AttachedVolume{
VolumeName: attachedVolume.volumeName,
VolumeSpec: attachedVolume.spec,
NodeName: nodeAttachedTo.nodeName,
DevicePath: attachedVolume.devicePath,
PluginIsAttachable: true,
},
MountedByNode: nodeAttachedTo.mountedByNode,
DetachRequestedTime: nodeAttachedTo.detachRequestedTime}
}
| pkg/controller/volume/attachdetach/cache/actual_state_of_world.go | 1 | https://github.com/kubernetes/kubernetes/commit/2e0abfa29f5c61e35677f164ca9694f93da61689 | [
0.9960959553718567,
0.35041436553001404,
0.0001658927067182958,
0.05257829651236534,
0.41833576560020447
] |
{
"id": 0,
"code_window": [
"// AttachedVolume represents a volume that is attached to a node.\n",
"type AttachedVolume struct {\n",
"\toperationexecutor.AttachedVolume\n",
"\n",
"\t// MountedByNode indicates that this volume has been been mounted by the\n",
"\t// node and is unsafe to detach.\n",
"\t// The value is set and unset by SetVolumeMountedByNode(...).\n",
"\tMountedByNode bool\n",
"\n",
"\t// DetachRequestedTime is used to capture the desire to detach this volume.\n",
"\t// When the volume is newly created this value is set to time zero.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// MountedByNode indicates that this volume has been mounted by the node and\n",
"\t// is unsafe to detach.\n"
],
"file_path": "pkg/controller/volume/attachdetach/cache/actual_state_of_world.go",
"type": "replace",
"edit_start_line_idx": 133
} | This file is autogenerated, but we've stopped checking such files into the
repository to reduce the need for rebases. Please run hack/generate-docs.sh to
populate this file.
| docs/yaml/kubectl/kubectl_port-forward.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/2e0abfa29f5c61e35677f164ca9694f93da61689 | [
0.0001739876897772774,
0.0001739876897772774,
0.0001739876897772774,
0.0001739876897772774,
0
] |
{
"id": 0,
"code_window": [
"// AttachedVolume represents a volume that is attached to a node.\n",
"type AttachedVolume struct {\n",
"\toperationexecutor.AttachedVolume\n",
"\n",
"\t// MountedByNode indicates that this volume has been been mounted by the\n",
"\t// node and is unsafe to detach.\n",
"\t// The value is set and unset by SetVolumeMountedByNode(...).\n",
"\tMountedByNode bool\n",
"\n",
"\t// DetachRequestedTime is used to capture the desire to detach this volume.\n",
"\t// When the volume is newly created this value is set to time zero.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// MountedByNode indicates that this volume has been mounted by the node and\n",
"\t// is unsafe to detach.\n"
],
"file_path": "pkg/controller/volume/attachdetach/cache/actual_state_of_world.go",
"type": "replace",
"edit_start_line_idx": 133
} | approvers:
- thockin
reviewers:
- sig-network-reviewers
| pkg/proxy/apis/kubeproxyconfig/OWNERS | 0 | https://github.com/kubernetes/kubernetes/commit/2e0abfa29f5c61e35677f164ca9694f93da61689 | [
0.00017333855794277042,
0.00017333855794277042,
0.00017333855794277042,
0.00017333855794277042,
0
] |
{
"id": 0,
"code_window": [
"// AttachedVolume represents a volume that is attached to a node.\n",
"type AttachedVolume struct {\n",
"\toperationexecutor.AttachedVolume\n",
"\n",
"\t// MountedByNode indicates that this volume has been been mounted by the\n",
"\t// node and is unsafe to detach.\n",
"\t// The value is set and unset by SetVolumeMountedByNode(...).\n",
"\tMountedByNode bool\n",
"\n",
"\t// DetachRequestedTime is used to capture the desire to detach this volume.\n",
"\t// When the volume is newly created this value is set to time zero.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// MountedByNode indicates that this volume has been mounted by the node and\n",
"\t// is unsafe to detach.\n"
],
"file_path": "pkg/controller/volume/attachdetach/cache/actual_state_of_world.go",
"type": "replace",
"edit_start_line_idx": 133
} | package client
import (
"bytes"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"time"
"github.com/influxdata/influxdb/models"
)
// HTTPConfig is the config data needed to create an HTTP Client
type HTTPConfig struct {
// Addr should be of the form "http://host:port"
// or "http://[ipv6-host%zone]:port".
Addr string
// Username is the influxdb username, optional
Username string
// Password is the influxdb password, optional
Password string
// UserAgent is the http User Agent, defaults to "InfluxDBClient"
UserAgent string
// Timeout for influxdb writes, defaults to no timeout
Timeout time.Duration
// InsecureSkipVerify gets passed to the http client, if true, it will
// skip https certificate verification. Defaults to false
InsecureSkipVerify bool
// TLSConfig allows the user to set their own TLS config for the HTTP
// Client. If set, this option overrides InsecureSkipVerify.
TLSConfig *tls.Config
}
// BatchPointsConfig is the config data needed to create an instance of the BatchPoints struct
type BatchPointsConfig struct {
// Precision is the write precision of the points, defaults to "ns"
Precision string
// Database is the database to write points to
Database string
// RetentionPolicy is the retention policy of the points
RetentionPolicy string
// Write consistency is the number of servers required to confirm write
WriteConsistency string
}
// Client is a client interface for writing & querying the database
type Client interface {
// Ping checks the status of the cluster, and will always return 0 time and no
// error for UDP clients
Ping(timeout time.Duration) (time.Duration, string, error)
// Write takes a BatchPoints object and writes all Points to InfluxDB.
Write(bp BatchPoints) error
// Query makes an InfluxDB Query on the database. This will fail if using
// the UDP client.
Query(q Query) (*Response, error)
// Close releases any resources a Client may be using.
Close() error
}
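// Illustrative usage sketch (the address, database, and measurement values are
// assumed for the example):
//
//	c, err := NewHTTPClient(HTTPConfig{Addr: "http://localhost:8086"})
//	if err != nil {
//		// handle the error
//	}
//	defer c.Close()
//	bp, _ := NewBatchPoints(BatchPointsConfig{Database: "mydb", Precision: "s"})
//	pt, _ := NewPoint("cpu_load", map[string]string{"host": "server01"},
//		map[string]interface{}{"value": 0.64}, time.Now())
//	bp.AddPoint(pt)
//	if err := c.Write(bp); err != nil {
//		// handle the error
//	}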
// NewHTTPClient returns a new Client from the provided config.
// Client is safe for concurrent use by multiple goroutines.
func NewHTTPClient(conf HTTPConfig) (Client, error) {
if conf.UserAgent == "" {
conf.UserAgent = "InfluxDBClient"
}
u, err := url.Parse(conf.Addr)
if err != nil {
return nil, err
} else if u.Scheme != "http" && u.Scheme != "https" {
m := fmt.Sprintf("Unsupported protocol scheme: %s, your address"+
" must start with http:// or https://", u.Scheme)
return nil, errors.New(m)
}
tr := &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: conf.InsecureSkipVerify,
},
}
if conf.TLSConfig != nil {
tr.TLSClientConfig = conf.TLSConfig
}
return &client{
url: *u,
username: conf.Username,
password: conf.Password,
useragent: conf.UserAgent,
httpClient: &http.Client{
Timeout: conf.Timeout,
Transport: tr,
},
transport: tr,
}, nil
}
// Ping will check to see if the server is up with an optional timeout on waiting for leader.
// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
func (c *client) Ping(timeout time.Duration) (time.Duration, string, error) {
now := time.Now()
u := c.url
u.Path = "ping"
req, err := http.NewRequest("GET", u.String(), nil)
if err != nil {
return 0, "", err
}
req.Header.Set("User-Agent", c.useragent)
if c.username != "" {
req.SetBasicAuth(c.username, c.password)
}
if timeout > 0 {
params := req.URL.Query()
params.Set("wait_for_leader", fmt.Sprintf("%.0fs", timeout.Seconds()))
req.URL.RawQuery = params.Encode()
}
resp, err := c.httpClient.Do(req)
if err != nil {
return 0, "", err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return 0, "", err
}
if resp.StatusCode != http.StatusNoContent {
var err = fmt.Errorf(string(body))
return 0, "", err
}
version := resp.Header.Get("X-Influxdb-Version")
return time.Since(now), version, nil
}
// Close releases the client's resources.
func (c *client) Close() error {
c.transport.CloseIdleConnections()
return nil
}
// client is safe for concurrent use as the fields are all read-only
// once the client is instantiated.
type client struct {
// N.B - if url.UserInfo is accessed in future modifications to the
// methods on client, you will need to synchronise access to url.
url url.URL
username string
password string
useragent string
httpClient *http.Client
transport *http.Transport
}
// BatchPoints is an interface into a batched grouping of points to write into
// InfluxDB together. BatchPoints is NOT thread-safe, you must create a separate
// batch for each goroutine.
type BatchPoints interface {
// AddPoint adds the given point to the Batch of points
AddPoint(p *Point)
// AddPoints adds the given points to the Batch of points
AddPoints(ps []*Point)
// Points lists the points in the Batch
Points() []*Point
// Precision returns the currently set precision of this Batch
Precision() string
// SetPrecision sets the precision of this batch.
SetPrecision(s string) error
// Database returns the currently set database of this Batch
Database() string
// SetDatabase sets the database of this Batch
SetDatabase(s string)
// WriteConsistency returns the currently set write consistency of this Batch
WriteConsistency() string
// SetWriteConsistency sets the write consistency of this Batch
SetWriteConsistency(s string)
// RetentionPolicy returns the currently set retention policy of this Batch
RetentionPolicy() string
// SetRetentionPolicy sets the retention policy of this Batch
SetRetentionPolicy(s string)
}
// NewBatchPoints returns a BatchPoints interface based on the given config.
func NewBatchPoints(conf BatchPointsConfig) (BatchPoints, error) {
if conf.Precision == "" {
conf.Precision = "ns"
}
if _, err := time.ParseDuration("1" + conf.Precision); err != nil {
return nil, err
}
bp := &batchpoints{
database: conf.Database,
precision: conf.Precision,
retentionPolicy: conf.RetentionPolicy,
writeConsistency: conf.WriteConsistency,
}
return bp, nil
}
type batchpoints struct {
points []*Point
database string
precision string
retentionPolicy string
writeConsistency string
}
func (bp *batchpoints) AddPoint(p *Point) {
bp.points = append(bp.points, p)
}
func (bp *batchpoints) AddPoints(ps []*Point) {
bp.points = append(bp.points, ps...)
}
func (bp *batchpoints) Points() []*Point {
return bp.points
}
func (bp *batchpoints) Precision() string {
return bp.precision
}
func (bp *batchpoints) Database() string {
return bp.database
}
func (bp *batchpoints) WriteConsistency() string {
return bp.writeConsistency
}
func (bp *batchpoints) RetentionPolicy() string {
return bp.retentionPolicy
}
func (bp *batchpoints) SetPrecision(p string) error {
if _, err := time.ParseDuration("1" + p); err != nil {
return err
}
bp.precision = p
return nil
}
func (bp *batchpoints) SetDatabase(db string) {
bp.database = db
}
func (bp *batchpoints) SetWriteConsistency(wc string) {
bp.writeConsistency = wc
}
func (bp *batchpoints) SetRetentionPolicy(rp string) {
bp.retentionPolicy = rp
}
// Point represents a single data point
type Point struct {
pt models.Point
}
// NewPoint returns a point with the given timestamp. If a timestamp is not
// given, then data is sent to the database without a timestamp, in which case
// the server will assign local time upon reception. NOTE: it is recommended to
// send data with a timestamp.
func NewPoint(
name string,
tags map[string]string,
fields map[string]interface{},
t ...time.Time,
) (*Point, error) {
var T time.Time
if len(t) > 0 {
T = t[0]
}
pt, err := models.NewPoint(name, models.NewTags(tags), fields, T)
if err != nil {
return nil, err
}
return &Point{
pt: pt,
}, nil
}
// String returns a line-protocol string of the Point
func (p *Point) String() string {
return p.pt.String()
}
// PrecisionString returns a line-protocol string of the Point, at precision
func (p *Point) PrecisionString(precison string) string {
return p.pt.PrecisionString(precison)
}
// Name returns the measurement name of the point
func (p *Point) Name() string {
return p.pt.Name()
}
// Tags returns the tags associated with the point
func (p *Point) Tags() map[string]string {
return p.pt.Tags().Map()
}
// Time return the timestamp for the point
func (p *Point) Time() time.Time {
return p.pt.Time()
}
// UnixNano returns the unix nano time of the point
func (p *Point) UnixNano() int64 {
return p.pt.UnixNano()
}
// Fields returns the fields for the point
func (p *Point) Fields() map[string]interface{} {
return p.pt.Fields()
}
// NewPointFrom returns a point from the provided models.Point.
func NewPointFrom(pt models.Point) *Point {
return &Point{pt: pt}
}
func (c *client) Write(bp BatchPoints) error {
var b bytes.Buffer
for _, p := range bp.Points() {
if _, err := b.WriteString(p.pt.PrecisionString(bp.Precision())); err != nil {
return err
}
if err := b.WriteByte('\n'); err != nil {
return err
}
}
u := c.url
u.Path = "write"
req, err := http.NewRequest("POST", u.String(), &b)
if err != nil {
return err
}
req.Header.Set("Content-Type", "")
req.Header.Set("User-Agent", c.useragent)
if c.username != "" {
req.SetBasicAuth(c.username, c.password)
}
params := req.URL.Query()
params.Set("db", bp.Database())
params.Set("rp", bp.RetentionPolicy())
params.Set("precision", bp.Precision())
params.Set("consistency", bp.WriteConsistency())
req.URL.RawQuery = params.Encode()
resp, err := c.httpClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
var err = fmt.Errorf(string(body))
return err
}
return nil
}
// Query defines a query to send to the server
type Query struct {
Command string
Database string
Precision string
}
// NewQuery returns a query object
// database and precision strings can be empty strings if they are not needed
// for the query.
func NewQuery(command, database, precision string) Query {
return Query{
Command: command,
Database: database,
Precision: precision,
}
}
// Response represents a list of statement results.
type Response struct {
Results []Result
Err string `json:"error,omitempty"`
}
// Error returns the first error from any statement.
// Returns nil if no errors occurred on any statements.
func (r *Response) Error() error {
if r.Err != "" {
return fmt.Errorf(r.Err)
}
for _, result := range r.Results {
if result.Err != "" {
return fmt.Errorf(result.Err)
}
}
return nil
}
// Message represents a user message.
type Message struct {
Level string
Text string
}
// Result represents a resultset returned from a single statement.
type Result struct {
Series []models.Row
Messages []*Message
Err string `json:"error,omitempty"`
}
// Query sends a command to the server and returns the Response
func (c *client) Query(q Query) (*Response, error) {
u := c.url
u.Path = "query"
req, err := http.NewRequest("POST", u.String(), nil)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "")
req.Header.Set("User-Agent", c.useragent)
if c.username != "" {
req.SetBasicAuth(c.username, c.password)
}
params := req.URL.Query()
params.Set("q", q.Command)
params.Set("db", q.Database)
if q.Precision != "" {
params.Set("epoch", q.Precision)
}
req.URL.RawQuery = params.Encode()
resp, err := c.httpClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
var response Response
dec := json.NewDecoder(resp.Body)
dec.UseNumber()
decErr := dec.Decode(&response)
// ignore this error if we got an invalid status code
if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK {
decErr = nil
}
// If we got a valid decode error, send that back
if decErr != nil {
return nil, fmt.Errorf("unable to decode json: received status code %d err: %s", resp.StatusCode, decErr)
}
// If we don't have an error in our json response, and didn't get statusOK
// then send back an error
if resp.StatusCode != http.StatusOK && response.Error() == nil {
return &response, fmt.Errorf("received status code %d from server",
resp.StatusCode)
}
return &response, nil
}
| vendor/github.com/influxdata/influxdb/client/v2/client.go | 0 | https://github.com/kubernetes/kubernetes/commit/2e0abfa29f5c61e35677f164ca9694f93da61689 | [
0.0015418820548802614,
0.00024830360780470073,
0.0001602640695637092,
0.00016958480409812182,
0.0002254453138448298
] |
{
"id": 1,
"code_window": [
"// This is an internal function and caller should acquire and release the lock\n",
"func (asw *actualStateOfWorld) updateNodeStatusUpdateNeeded(nodeName types.NodeName, needed bool) error {\n",
"\tnodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]\n",
"\tif !nodeToUpdateExists {\n",
"\t\t// should not happen\n",
"\t\terrMsg := fmt.Sprintf(\"Failed to set statusUpdateNeeded to needed %t because nodeName=%q does not exist\",\n",
"\t\t\tneeded, nodeName)\n",
"\t\treturn fmt.Errorf(errMsg)\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\terrMsg := fmt.Sprintf(\"Failed to set statusUpdateNeeded to needed %t, because nodeName=%q does not exist\",\n"
],
"file_path": "pkg/controller/volume/attachdetach/cache/actual_state_of_world.go",
"type": "replace",
"edit_start_line_idx": 475
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package cache implements data structures used by the attach/detach controller
to keep track of volumes, the nodes they are attached to, and the pods that
reference them.
*/
package cache
import (
"fmt"
"sync"
"k8s.io/api/core/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
"k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// DesiredStateOfWorld defines a set of thread-safe operations supported on
// the attach/detach controller's desired state of the world cache.
// This cache contains nodes->volumes->pods where nodes are all the nodes
// managed by the attach/detach controller, volumes are all the volumes that
// should be attached to the specified node, and pods are the pods that
// reference the volume and are scheduled to that node.
// Note: This is distinct from the DesiredStateOfWorld implemented by the
// kubelet volume manager. They both keep track of different objects. This
// contains attach/detach controller specific state.
type DesiredStateOfWorld interface {
// AddNode adds the given node to the list of nodes managed by the attach/
// detach controller.
// If the node already exists this is a no-op.
// keepTerminatedPodVolumes is a property of the node that determines
// if for terminated pods volumes should be mounted and attached.
AddNode(nodeName k8stypes.NodeName, keepTerminatedPodVolumes bool)
// AddPod adds the given pod to the list of pods that reference the
// specified volume and is scheduled to the specified node.
// A unique volumeName is generated from the volumeSpec and returned on
// success.
// If the pod already exists under the specified volume, this is a no-op.
// If volumeSpec is not an attachable volume plugin, an error is returned.
// If no volume with the name volumeName exists in the list of volumes that
// should be attached to the specified node, the volume is implicitly added.
// If no node with the name nodeName exists in list of nodes managed by the
// attach/detach controller, an error is returned.
AddPod(podName types.UniquePodName, pod *v1.Pod, volumeSpec *volume.Spec, nodeName k8stypes.NodeName) (v1.UniqueVolumeName, error)
// DeleteNode removes the given node from the list of nodes managed by the
// attach/detach controller.
// If the node does not exist this is a no-op.
// If the node exists but has 1 or more child volumes, an error is returned.
DeleteNode(nodeName k8stypes.NodeName) error
// DeletePod removes the given pod from the list of pods that reference the
// specified volume and are scheduled to the specified node.
// If no pod exists in the list of pods that reference the specified volume
// and are scheduled to the specified node, this is a no-op.
// If a node with the name nodeName does not exist in the list of nodes
// managed by the attach/detach controller, this is a no-op.
// If no volume with the name volumeName exists in the list of managed
// volumes under the specified node, this is a no-op.
// If after deleting the pod, the specified volume contains no other child
// pods, the volume is also deleted.
DeletePod(podName types.UniquePodName, volumeName v1.UniqueVolumeName, nodeName k8stypes.NodeName)
// NodeExists returns true if the node with the specified name exists in
// the list of nodes managed by the attach/detach controller.
NodeExists(nodeName k8stypes.NodeName) bool
// VolumeExists returns true if the volume with the specified name exists
// in the list of volumes that should be attached to the specified node by
// the attach detach controller.
VolumeExists(volumeName v1.UniqueVolumeName, nodeName k8stypes.NodeName) bool
// GetVolumesToAttach generates and returns a list of volumes to attach
// and the nodes they should be attached to based on the current desired
// state of the world.
GetVolumesToAttach() []VolumeToAttach
// GetPodToAdd generates and returns a map of pods based on the current desired
// state of world
GetPodToAdd() map[types.UniquePodName]PodToAdd
// GetKeepTerminatedPodVolumesForNode determines if node wants volumes to be
// mounted and attached for terminated pods
GetKeepTerminatedPodVolumesForNode(k8stypes.NodeName) bool
// Mark multiattach error as reported to prevent spamming multiple
// events for same error
SetMultiAttachError(v1.UniqueVolumeName, k8stypes.NodeName)
// GetVolumePodsOnNodes returns the list of pods ("namespace/name") that require
// the given volume on the given nodes.
GetVolumePodsOnNodes(nodes []k8stypes.NodeName, volumeName v1.UniqueVolumeName) []*v1.Pod
}
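// Illustrative sketch (hypothetical pod and spec values) of how the desired
// state populator might feed this cache and how the reconciler consumes it:
//
//	dsw := NewDesiredStateOfWorld(volumePluginMgr)
//	dsw.AddNode("node-1", false /* keepTerminatedPodVolumes */)
//	volName, err := dsw.AddPod(podName, pod, volumeSpec, "node-1")
//	if err != nil {
//		// handle the error
//	}
//	// The reconciler then acts on dsw.GetVolumesToAttach() and, when a pod goes
//	// away, the populator calls dsw.DeletePod(podName, volName, "node-1").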
// VolumeToAttach represents a volume that should be attached to a node.
type VolumeToAttach struct {
operationexecutor.VolumeToAttach
}
// PodToAdd represents a pod that references the underlying volume and is
// scheduled to the underlying node.
type PodToAdd struct {
// pod contains the api object of pod
Pod *v1.Pod
// volumeName contains the unique identifier for this volume.
VolumeName v1.UniqueVolumeName
// nodeName contains the name of this node.
NodeName k8stypes.NodeName
}
// NewDesiredStateOfWorld returns a new instance of DesiredStateOfWorld.
func NewDesiredStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) DesiredStateOfWorld {
return &desiredStateOfWorld{
nodesManaged: make(map[k8stypes.NodeName]nodeManaged),
volumePluginMgr: volumePluginMgr,
}
}
type desiredStateOfWorld struct {
// nodesManaged is a map containing the set of nodes managed by the attach/
// detach controller. The key in this map is the name of the node and the
// value is a node object containing more information about the node.
nodesManaged map[k8stypes.NodeName]nodeManaged
// volumePluginMgr is the volume plugin manager used to create volume
// plugin objects.
volumePluginMgr *volume.VolumePluginMgr
sync.RWMutex
}
// nodeManaged represents a node that is being managed by the attach/detach
// controller.
type nodeManaged struct {
// nodeName contains the name of this node.
nodeName k8stypes.NodeName
// volumesToAttach is a map containing the set of volumes that should be
// attached to this node. The key in the map is the name of the volume and
// the value is a volumeToAttach object containing more information about the volume.
volumesToAttach map[v1.UniqueVolumeName]volumeToAttach
// keepTerminatedPodVolumes determines whether, for terminated pods on this
// node, volumes should be kept mounted and attached.
keepTerminatedPodVolumes bool
}
// The volumeToAttach object represents a volume that should be attached to a node.
type volumeToAttach struct {
// multiAttachErrorReported indicates whether the multi-attach error has been reported for the given volume.
// It is used to prevent the error from being reported more than once for a given volume.
multiAttachErrorReported bool
// volumeName contains the unique identifier for this volume.
volumeName v1.UniqueVolumeName
// spec is the volume spec containing the specification for this volume.
// Used to generate the volume plugin object, and passed to attach/detach
// methods.
spec *volume.Spec
// scheduledPods is a map containing the set of pods that reference this
// volume and are scheduled to the underlying node. The key in the map is
// the name of the pod and the value is a pod object containing more
// information about the pod.
scheduledPods map[types.UniquePodName]pod
}
// The pod represents a pod that references the underlying volume and is
// scheduled to the underlying node.
type pod struct {
// podName contains the unique identifier for this pod
podName types.UniquePodName
// pod object contains the api object of pod
podObj *v1.Pod
}
func (dsw *desiredStateOfWorld) AddNode(nodeName k8stypes.NodeName, keepTerminatedPodVolumes bool) {
dsw.Lock()
defer dsw.Unlock()
if _, nodeExists := dsw.nodesManaged[nodeName]; !nodeExists {
dsw.nodesManaged[nodeName] = nodeManaged{
nodeName: nodeName,
volumesToAttach: make(map[v1.UniqueVolumeName]volumeToAttach),
keepTerminatedPodVolumes: keepTerminatedPodVolumes,
}
}
}
func (dsw *desiredStateOfWorld) AddPod(
podName types.UniquePodName,
podToAdd *v1.Pod,
volumeSpec *volume.Spec,
nodeName k8stypes.NodeName) (v1.UniqueVolumeName, error) {
dsw.Lock()
defer dsw.Unlock()
nodeObj, nodeExists := dsw.nodesManaged[nodeName]
if !nodeExists {
return "", fmt.Errorf(
"no node with the name %q exists in the list of managed nodes",
nodeName)
}
attachableVolumePlugin, err := dsw.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
if err != nil || attachableVolumePlugin == nil {
return "", fmt.Errorf(
"failed to get AttachablePlugin from volumeSpec for volume %q err=%v",
volumeSpec.Name(),
err)
}
volumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(
attachableVolumePlugin, volumeSpec)
if err != nil {
return "", fmt.Errorf(
"failed to GetUniqueVolumeNameFromSpec for volumeSpec %q err=%v",
volumeSpec.Name(),
err)
}
volumeObj, volumeExists := nodeObj.volumesToAttach[volumeName]
if !volumeExists {
volumeObj = volumeToAttach{
multiAttachErrorReported: false,
volumeName: volumeName,
spec: volumeSpec,
scheduledPods: make(map[types.UniquePodName]pod),
}
dsw.nodesManaged[nodeName].volumesToAttach[volumeName] = volumeObj
}
if _, podExists := volumeObj.scheduledPods[podName]; !podExists {
dsw.nodesManaged[nodeName].volumesToAttach[volumeName].scheduledPods[podName] =
pod{
podName: podName,
podObj: podToAdd,
}
}
return volumeName, nil
}
func (dsw *desiredStateOfWorld) DeleteNode(nodeName k8stypes.NodeName) error {
dsw.Lock()
defer dsw.Unlock()
nodeObj, nodeExists := dsw.nodesManaged[nodeName]
if !nodeExists {
return nil
}
if len(nodeObj.volumesToAttach) > 0 {
return fmt.Errorf(
"failed to delete node %q from list of nodes managed by attach/detach controller--the node still contains %v volumes in its list of volumes to attach",
nodeName,
len(nodeObj.volumesToAttach))
}
delete(
dsw.nodesManaged,
nodeName)
return nil
}
func (dsw *desiredStateOfWorld) DeletePod(
podName types.UniquePodName,
volumeName v1.UniqueVolumeName,
nodeName k8stypes.NodeName) {
dsw.Lock()
defer dsw.Unlock()
nodeObj, nodeExists := dsw.nodesManaged[nodeName]
if !nodeExists {
return
}
volumeObj, volumeExists := nodeObj.volumesToAttach[volumeName]
if !volumeExists {
return
}
if _, podExists := volumeObj.scheduledPods[podName]; !podExists {
return
}
delete(
dsw.nodesManaged[nodeName].volumesToAttach[volumeName].scheduledPods,
podName)
if len(volumeObj.scheduledPods) == 0 {
delete(
dsw.nodesManaged[nodeName].volumesToAttach,
volumeName)
}
}
func (dsw *desiredStateOfWorld) NodeExists(nodeName k8stypes.NodeName) bool {
dsw.RLock()
defer dsw.RUnlock()
_, nodeExists := dsw.nodesManaged[nodeName]
return nodeExists
}
func (dsw *desiredStateOfWorld) VolumeExists(
volumeName v1.UniqueVolumeName, nodeName k8stypes.NodeName) bool {
dsw.RLock()
defer dsw.RUnlock()
nodeObj, nodeExists := dsw.nodesManaged[nodeName]
if nodeExists {
if _, volumeExists := nodeObj.volumesToAttach[volumeName]; volumeExists {
return true
}
}
return false
}
func (dsw *desiredStateOfWorld) SetMultiAttachError(
volumeName v1.UniqueVolumeName,
nodeName k8stypes.NodeName) {
dsw.Lock()
defer dsw.Unlock()
nodeObj, nodeExists := dsw.nodesManaged[nodeName]
if nodeExists {
if volumeObj, volumeExists := nodeObj.volumesToAttach[volumeName]; volumeExists {
volumeObj.multiAttachErrorReported = true
dsw.nodesManaged[nodeName].volumesToAttach[volumeName] = volumeObj
}
}
}
// GetKeepTerminatedPodVolumesForNode determines if node wants volumes to be
// mounted and attached for terminated pods
func (dsw *desiredStateOfWorld) GetKeepTerminatedPodVolumesForNode(nodeName k8stypes.NodeName) bool {
dsw.RLock()
defer dsw.RUnlock()
if nodeName == "" {
return false
}
if node, ok := dsw.nodesManaged[nodeName]; ok {
return node.keepTerminatedPodVolumes
}
return false
}
func (dsw *desiredStateOfWorld) GetVolumesToAttach() []VolumeToAttach {
dsw.RLock()
defer dsw.RUnlock()
volumesToAttach := make([]VolumeToAttach, 0 /* len */, len(dsw.nodesManaged) /* cap */)
for nodeName, nodeObj := range dsw.nodesManaged {
for volumeName, volumeObj := range nodeObj.volumesToAttach {
volumesToAttach = append(volumesToAttach,
VolumeToAttach{
VolumeToAttach: operationexecutor.VolumeToAttach{
MultiAttachErrorReported: volumeObj.multiAttachErrorReported,
VolumeName: volumeName,
VolumeSpec: volumeObj.spec,
NodeName: nodeName,
ScheduledPods: getPodsFromMap(volumeObj.scheduledPods),
}})
}
}
return volumesToAttach
}
// Construct a list of v1.Pod objects from the given pod map
func getPodsFromMap(podMap map[types.UniquePodName]pod) []*v1.Pod {
pods := make([]*v1.Pod, 0, len(podMap))
for _, pod := range podMap {
pods = append(pods, pod.podObj)
}
return pods
}
func (dsw *desiredStateOfWorld) GetPodToAdd() map[types.UniquePodName]PodToAdd {
dsw.RLock()
defer dsw.RUnlock()
pods := make(map[types.UniquePodName]PodToAdd)
for nodeName, nodeObj := range dsw.nodesManaged {
for volumeName, volumeObj := range nodeObj.volumesToAttach {
for podUID, pod := range volumeObj.scheduledPods {
pods[podUID] = PodToAdd{
Pod: pod.podObj,
VolumeName: volumeName,
NodeName: nodeName,
}
}
}
}
return pods
}
func (dsw *desiredStateOfWorld) GetVolumePodsOnNodes(nodes []k8stypes.NodeName, volumeName v1.UniqueVolumeName) []*v1.Pod {
dsw.RLock()
defer dsw.RUnlock()
pods := []*v1.Pod{}
for _, nodeName := range nodes {
node, ok := dsw.nodesManaged[nodeName]
if !ok {
continue
}
volume, ok := node.volumesToAttach[volumeName]
if !ok {
continue
}
for _, pod := range volume.scheduledPods {
pods = append(pods, pod.podObj)
}
}
return pods
}
| pkg/controller/volume/attachdetach/cache/desired_state_of_world.go | 1 | https://github.com/kubernetes/kubernetes/commit/2e0abfa29f5c61e35677f164ca9694f93da61689 | [
0.006815893109887838,
0.001319659175351262,
0.00016382578178308904,
0.00039792072493582964,
0.0016279725823551416
] |
{
"id": 1,
"code_window": [
"// This is an internal function and caller should acquire and release the lock\n",
"func (asw *actualStateOfWorld) updateNodeStatusUpdateNeeded(nodeName types.NodeName, needed bool) error {\n",
"\tnodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]\n",
"\tif !nodeToUpdateExists {\n",
"\t\t// should not happen\n",
"\t\terrMsg := fmt.Sprintf(\"Failed to set statusUpdateNeeded to needed %t because nodeName=%q does not exist\",\n",
"\t\t\tneeded, nodeName)\n",
"\t\treturn fmt.Errorf(errMsg)\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\terrMsg := fmt.Sprintf(\"Failed to set statusUpdateNeeded to needed %t, because nodeName=%q does not exist\",\n"
],
"file_path": "pkg/controller/volume/attachdetach/cache/actual_state_of_world.go",
"type": "replace",
"edit_start_line_idx": 475
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
. "github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
)
var _ = Describe("[sig-api-machinery] ConfigMap", func() {
f := framework.NewDefaultFramework("configmap")
/*
Testname: configmap-in-env-field
Description: Make sure config map value can be used as an environment
variable in the container (on container.env field)
*/
framework.ConformanceIt("should be consumable via environment variable ", func() {
name := "configmap-test-" + string(uuid.NewUUID())
configMap := newConfigMap(f, name)
By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "env-test",
Image: busyboxImage,
Command: []string{"sh", "-c", "env"},
Env: []v1.EnvVar{
{
Name: "CONFIG_DATA_1",
ValueFrom: &v1.EnvVarSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "data-1",
},
},
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("consume configMaps", pod, 0, []string{
"CONFIG_DATA_1=value-1",
})
})
/*
Testname: configmap-envfrom-field
Description: Make sure config map value can be used as a source for
environment variables in the container (on container.envFrom field)
*/
framework.ConformanceIt("should be consumable via the environment ", func() {
name := "configmap-test-" + string(uuid.NewUUID())
configMap := newEnvFromConfigMap(f, name)
By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "env-test",
Image: busyboxImage,
Command: []string{"sh", "-c", "env"},
EnvFrom: []v1.EnvFromSource{
{
ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: name}},
},
{
Prefix: "p_",
ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: name}},
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("consume configMaps", pod, 0, []string{
"data_1=value-1", "data_2=value-2", "data_3=value-3",
"p_data_1=value-1", "p_data_2=value-2", "p_data_3=value-3",
})
})
})
func newEnvFromConfigMap(f *framework.Framework, name string) *v1.ConfigMap {
return &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},
Data: map[string]string{
"data_1": "value-1",
"data_2": "value-2",
"data_3": "value-3",
},
}
}
| test/e2e/common/configmap.go | 0 | https://github.com/kubernetes/kubernetes/commit/2e0abfa29f5c61e35677f164ca9694f93da61689 | [
0.00017898700025398284,
0.00017183975432999432,
0.00016651109035592526,
0.00017124632722698152,
0.0000033201977203134447
] |
{
"id": 1,
"code_window": [
"// This is an internal function and caller should acquire and release the lock\n",
"func (asw *actualStateOfWorld) updateNodeStatusUpdateNeeded(nodeName types.NodeName, needed bool) error {\n",
"\tnodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]\n",
"\tif !nodeToUpdateExists {\n",
"\t\t// should not happen\n",
"\t\terrMsg := fmt.Sprintf(\"Failed to set statusUpdateNeeded to needed %t because nodeName=%q does not exist\",\n",
"\t\t\tneeded, nodeName)\n",
"\t\treturn fmt.Errorf(errMsg)\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\terrMsg := fmt.Sprintf(\"Failed to set statusUpdateNeeded to needed %t, because nodeName=%q does not exist\",\n"
],
"file_path": "pkg/controller/volume/attachdetach/cache/actual_state_of_world.go",
"type": "replace",
"edit_start_line_idx": 475
} | package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"expansion_generated.go",
"localsubjectaccessreview.go",
"selfsubjectaccessreview.go",
"selfsubjectrulesreview.go",
"subjectaccessreview.go",
],
importpath = "k8s.io/client-go/listers/authorization/v1",
deps = [
"//vendor/k8s.io/api/authorization/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
| staging/src/k8s.io/client-go/listers/authorization/v1/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/2e0abfa29f5c61e35677f164ca9694f93da61689 | [
0.00017612453666515648,
0.00017519306857138872,
0.0001739123836159706,
0.00017536766245029867,
8.158971240845858e-7
] |
{
"id": 1,
"code_window": [
"// This is an internal function and caller should acquire and release the lock\n",
"func (asw *actualStateOfWorld) updateNodeStatusUpdateNeeded(nodeName types.NodeName, needed bool) error {\n",
"\tnodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]\n",
"\tif !nodeToUpdateExists {\n",
"\t\t// should not happen\n",
"\t\terrMsg := fmt.Sprintf(\"Failed to set statusUpdateNeeded to needed %t because nodeName=%q does not exist\",\n",
"\t\t\tneeded, nodeName)\n",
"\t\treturn fmt.Errorf(errMsg)\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\terrMsg := fmt.Sprintf(\"Failed to set statusUpdateNeeded to needed %t, because nodeName=%q does not exist\",\n"
],
"file_path": "pkg/controller/volume/attachdetach/cache/actual_state_of_world.go",
"type": "replace",
"edit_start_line_idx": 475
} | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This package has the automatically generated clientset.
package clientset
| staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/doc.go | 0 | https://github.com/kubernetes/kubernetes/commit/2e0abfa29f5c61e35677f164ca9694f93da61689 | [
0.00017914647469297051,
0.00017895884229801595,
0.00017877122445497662,
0.00017895884229801595,
1.87625118996948e-7
] |
{
"id": 2,
"code_window": [
"// managed by the attach/detach controller, volumes are all the volumes that\n",
"// should be attached to the specified node, and pods are the pods that\n",
"// reference the volume and are scheduled to that node.\n",
"// Note: This is distinct from the DesiredStateOfWorld implemented by the\n",
"// kubelet volume manager. The both keep track of different objects. This\n",
"// contains attach/detach controller specific state.\n",
"type DesiredStateOfWorld interface {\n",
"\t// AddNode adds the given node to the list of nodes managed by the attach/\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// kubelet volume manager. They both keep track of different objects. This\n"
],
"file_path": "pkg/controller/volume/attachdetach/cache/desired_state_of_world.go",
"type": "replace",
"edit_start_line_idx": 42
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package cache implements data structures used by the attach/detach controller
to keep track of volumes, the nodes they are attached to, and the pods that
reference them.
*/
package cache
import (
"fmt"
"sync"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// ActualStateOfWorld defines a set of thread-safe operations supported on
// the attach/detach controller's actual state of the world cache.
// This cache contains volumes->nodes i.e. a set of all volumes and the nodes
// the attach/detach controller believes are successfully attached.
// Note: This is distinct from the ActualStateOfWorld implemented by the kubelet
// volume manager. They both keep track of different objects. This contains
// attach/detach controller specific state.
type ActualStateOfWorld interface {
// ActualStateOfWorld must implement the methods required to allow
// operationexecutor to interact with it.
operationexecutor.ActualStateOfWorldAttacherUpdater
// AddVolumeNode adds the given volume and node to the underlying store
// indicating the specified volume is attached to the specified node.
// A unique volume name is generated from the volumeSpec and returned on
// success.
// If volumeSpec is not an attachable volume plugin, an error is returned.
// If no volume with the name volumeName exists in the store, the volume is
// added.
// If no node with the name nodeName exists in list of attached nodes for
// the specified volume, the node is added.
AddVolumeNode(uniqueName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) (v1.UniqueVolumeName, error)
// SetVolumeMountedByNode sets the MountedByNode value for the given volume
// and node. When set to true the mounted parameter indicates the volume
// is mounted by the given node, indicating it may not be safe to detach.
// If the forceUnmount is set to true the MountedByNode value would be reset
// to false even it was not set yet (this is required during a controller
// crash recovery).
// If no volume with the name volumeName exists in the store, an error is
// returned.
// If no node with the name nodeName exists in list of attached nodes for
// the specified volume, an error is returned.
SetVolumeMountedByNode(volumeName v1.UniqueVolumeName, nodeName types.NodeName, mounted bool) error
// SetNodeStatusUpdateNeeded sets statusUpdateNeeded for the specified
// node to true indicating the AttachedVolume field in the Node's Status
// object needs to be updated by the node updater again.
// If the specified node does not exist in the nodesToUpdateStatusFor list,
// log the error and return
SetNodeStatusUpdateNeeded(nodeName types.NodeName)
// ResetDetachRequestTime resets the detachRequestTime to 0 which indicates there is no detach
// request any more for the volume
ResetDetachRequestTime(volumeName v1.UniqueVolumeName, nodeName types.NodeName)
// SetDetachRequestTime sets the detachRequestedTime to current time if this is no
// previous request (the previous detachRequestedTime is zero) and return the time elapsed
// since last request
SetDetachRequestTime(volumeName v1.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error)
// DeleteVolumeNode removes the given volume and node from the underlying
// store indicating the specified volume is no longer attached to the
// specified node.
// If the volume/node combo does not exist, this is a no-op.
// If after deleting the node, the specified volume contains no other child
// nodes, the volume is also deleted.
DeleteVolumeNode(volumeName v1.UniqueVolumeName, nodeName types.NodeName)
// VolumeNodeExists returns true if the specified volume/node combo exists
// in the underlying store indicating the specified volume is attached to
// the specified node.
VolumeNodeExists(volumeName v1.UniqueVolumeName, nodeName types.NodeName) bool
// GetAttachedVolumes generates and returns a list of volumes/node pairs
// reflecting which volumes are attached to which nodes based on the
// current actual state of the world.
GetAttachedVolumes() []AttachedVolume
// GetAttachedVolumes generates and returns a list of volumes attached to
// the specified node reflecting which volumes are attached to that node
// based on the current actual state of the world.
GetAttachedVolumesForNode(nodeName types.NodeName) []AttachedVolume
GetAttachedVolumesPerNode() map[types.NodeName][]operationexecutor.AttachedVolume
// GetNodesForVolume returns the nodes on which the volume is attached
GetNodesForVolume(volumeName v1.UniqueVolumeName) []types.NodeName
// GetVolumesToReportAttached returns a map containing the set of nodes for
// which the VolumesAttached Status field in the Node API object should be
// updated. The key in this map is the name of the node to update and the
// value is list of volumes that should be reported as attached (note that
// this may differ from the actual list of attached volumes for the node
// since volumes should be removed from this list as soon a detach operation
// is considered, before the detach operation is triggered).
GetVolumesToReportAttached() map[types.NodeName][]v1.AttachedVolume
// GetNodesToUpdateStatusFor returns the map of nodeNames to nodeToUpdateStatusFor
GetNodesToUpdateStatusFor() map[types.NodeName]nodeToUpdateStatusFor
}
// AttachedVolume represents a volume that is attached to a node.
type AttachedVolume struct {
operationexecutor.AttachedVolume
	// MountedByNode indicates that this volume has been mounted by the
// node and is unsafe to detach.
// The value is set and unset by SetVolumeMountedByNode(...).
MountedByNode bool
// DetachRequestedTime is used to capture the desire to detach this volume.
// When the volume is newly created this value is set to time zero.
	// It is set to the current time when SetDetachRequestTime(...) is called, if it
	// was previously set to zero (otherwise its value remains the same).
// It is reset to zero on ResetDetachRequestTime(...) calls.
DetachRequestedTime time.Time
}
// NewActualStateOfWorld returns a new instance of ActualStateOfWorld.
func NewActualStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) ActualStateOfWorld {
return &actualStateOfWorld{
attachedVolumes: make(map[v1.UniqueVolumeName]attachedVolume),
nodesToUpdateStatusFor: make(map[types.NodeName]nodeToUpdateStatusFor),
volumePluginMgr: volumePluginMgr,
}
}
type actualStateOfWorld struct {
// attachedVolumes is a map containing the set of volumes the attach/detach
// controller believes to be successfully attached to the nodes it is
// managing. The key in this map is the name of the volume and the value is
// an object containing more information about the attached volume.
attachedVolumes map[v1.UniqueVolumeName]attachedVolume
// nodesToUpdateStatusFor is a map containing the set of nodes for which to
// update the VolumesAttached Status field. The key in this map is the name
// of the node and the value is an object containing more information about
// the node (including the list of volumes to report attached).
nodesToUpdateStatusFor map[types.NodeName]nodeToUpdateStatusFor
// volumePluginMgr is the volume plugin manager used to create volume
// plugin objects.
volumePluginMgr *volume.VolumePluginMgr
sync.RWMutex
}
// The volume object represents a volume the attach/detach controller
// believes to be successfully attached to a node it is managing.
type attachedVolume struct {
// volumeName contains the unique identifier for this volume.
volumeName v1.UniqueVolumeName
// spec is the volume spec containing the specification for this volume.
// Used to generate the volume plugin object, and passed to attach/detach
// methods.
spec *volume.Spec
// nodesAttachedTo is a map containing the set of nodes this volume has
// successfully been attached to. The key in this map is the name of the
// node and the value is a node object containing more information about
// the node.
nodesAttachedTo map[types.NodeName]nodeAttachedTo
// devicePath contains the path on the node where the volume is attached
devicePath string
}
// The nodeAttachedTo object represents a node that has volumes attached to it.
type nodeAttachedTo struct {
// nodeName contains the name of this node.
nodeName types.NodeName
// mountedByNode indicates that this node/volume combo is mounted by the
// node and is unsafe to detach
mountedByNode bool
// number of times SetVolumeMountedByNode has been called to set the value
// of mountedByNode to true. This is used to prevent mountedByNode from
// being reset during the period between attach and mount when volumesInUse
// status for the node may not be set.
mountedByNodeSetCount uint
// detachRequestedTime used to capture the desire to detach this volume
detachRequestedTime time.Time
}
// nodeToUpdateStatusFor is an object that reflects a node that has one or more
// volume attached. It keeps track of the volumes that should be reported as
// attached in the Node's Status API object.
type nodeToUpdateStatusFor struct {
// nodeName contains the name of this node.
nodeName types.NodeName
// statusUpdateNeeded indicates that the value of the VolumesAttached field
// in the Node's Status API object should be updated. This should be set to
// true whenever a volume is added or deleted from
// volumesToReportAsAttached. It should be reset whenever the status is
// updated.
statusUpdateNeeded bool
// volumesToReportAsAttached is the list of volumes that should be reported
// as attached in the Node's status (note that this may differ from the
// actual list of attached volumes since volumes should be removed from this
// list as soon a detach operation is considered, before the detach
// operation is triggered).
volumesToReportAsAttached map[v1.UniqueVolumeName]v1.UniqueVolumeName
}
func (asw *actualStateOfWorld) MarkVolumeAsAttached(
uniqueName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) error {
_, err := asw.AddVolumeNode(uniqueName, volumeSpec, nodeName, devicePath)
return err
}
func (asw *actualStateOfWorld) MarkVolumeAsDetached(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
asw.DeleteVolumeNode(volumeName, nodeName)
}
func (asw *actualStateOfWorld) RemoveVolumeFromReportAsAttached(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) error {
asw.Lock()
defer asw.Unlock()
return asw.removeVolumeFromReportAsAttached(volumeName, nodeName)
}
func (asw *actualStateOfWorld) AddVolumeToReportAsAttached(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
asw.Lock()
defer asw.Unlock()
asw.addVolumeToReportAsAttached(volumeName, nodeName)
}
func (asw *actualStateOfWorld) AddVolumeNode(
uniqueName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) (v1.UniqueVolumeName, error) {
asw.Lock()
defer asw.Unlock()
var volumeName v1.UniqueVolumeName
if volumeSpec != nil {
attachableVolumePlugin, err := asw.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
if err != nil || attachableVolumePlugin == nil {
return "", fmt.Errorf(
"failed to get AttachablePlugin from volumeSpec for volume %q err=%v",
volumeSpec.Name(),
err)
}
volumeName, err = volumehelper.GetUniqueVolumeNameFromSpec(
attachableVolumePlugin, volumeSpec)
if err != nil {
return "", fmt.Errorf(
"failed to GetUniqueVolumeNameFromSpec for volumeSpec %q err=%v",
volumeSpec.Name(),
err)
}
} else {
// volumeSpec is nil
// This happens only on controller startup when reading the volumes from node
// status; if the pods using the volume have been removed and are unreachable
// the volumes should be detached immediately and the spec is not needed
volumeName = uniqueName
}
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
volumeObj = attachedVolume{
volumeName: volumeName,
spec: volumeSpec,
nodesAttachedTo: make(map[types.NodeName]nodeAttachedTo),
devicePath: devicePath,
}
} else {
// If volume object already exists, it indicates that the information would be out of date.
// Update the fields for volume object except the nodes attached to the volumes.
volumeObj.devicePath = devicePath
volumeObj.spec = volumeSpec
glog.V(2).Infof("Volume %q is already added to attachedVolume list to node %q, update device path %q",
volumeName,
nodeName,
devicePath)
}
asw.attachedVolumes[volumeName] = volumeObj
_, nodeExists := volumeObj.nodesAttachedTo[nodeName]
if !nodeExists {
// Create object if it doesn't exist.
volumeObj.nodesAttachedTo[nodeName] = nodeAttachedTo{
nodeName: nodeName,
mountedByNode: true, // Assume mounted, until proven otherwise
mountedByNodeSetCount: 0,
detachRequestedTime: time.Time{},
}
} else {
glog.V(5).Infof("Volume %q is already added to attachedVolume list to the node %q",
volumeName,
nodeName)
}
asw.addVolumeToReportAsAttached(volumeName, nodeName)
return volumeName, nil
}
func (asw *actualStateOfWorld) SetVolumeMountedByNode(
volumeName v1.UniqueVolumeName, nodeName types.NodeName, mounted bool) error {
asw.Lock()
defer asw.Unlock()
volumeObj, nodeObj, err := asw.getNodeAndVolume(volumeName, nodeName)
if err != nil {
return fmt.Errorf("Failed to SetVolumeMountedByNode with error: %v", err)
}
if mounted {
// Increment set count
nodeObj.mountedByNodeSetCount = nodeObj.mountedByNodeSetCount + 1
}
nodeObj.mountedByNode = mounted
volumeObj.nodesAttachedTo[nodeName] = nodeObj
glog.V(4).Infof("SetVolumeMountedByNode volume %v to the node %q mounted %t",
volumeName,
nodeName,
mounted)
return nil
}
func (asw *actualStateOfWorld) ResetDetachRequestTime(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
asw.Lock()
defer asw.Unlock()
volumeObj, nodeObj, err := asw.getNodeAndVolume(volumeName, nodeName)
if err != nil {
glog.Errorf("Failed to ResetDetachRequestTime with error: %v", err)
return
}
nodeObj.detachRequestedTime = time.Time{}
volumeObj.nodesAttachedTo[nodeName] = nodeObj
}
func (asw *actualStateOfWorld) SetDetachRequestTime(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error) {
asw.Lock()
defer asw.Unlock()
volumeObj, nodeObj, err := asw.getNodeAndVolume(volumeName, nodeName)
if err != nil {
return 0, fmt.Errorf("Failed to set detach request time with error: %v", err)
}
// If there is no previous detach request, set it to the current time
if nodeObj.detachRequestedTime.IsZero() {
nodeObj.detachRequestedTime = time.Now()
volumeObj.nodesAttachedTo[nodeName] = nodeObj
glog.V(4).Infof("Set detach request time to current time for volume %v on node %q",
volumeName,
nodeName)
}
return time.Since(nodeObj.detachRequestedTime), nil
}
// Get the volume and node object from actual state of world
// This is an internal function and caller should acquire and release the lock
//
// Note that this returns disconnected objects, so if you change the volume object you must set it back with
// `asw.attachedVolumes[volumeName]=volumeObj`.
//
// If you change the node object you must use `volumeObj.nodesAttachedTo[nodeName] = nodeObj`
// This is correct, because if volumeObj is empty this function returns an error, and nodesAttachedTo
// map is a reference type, and thus mutating the copy changes the original map.
func (asw *actualStateOfWorld) getNodeAndVolume(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) (attachedVolume, nodeAttachedTo, error) {
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if volumeExists {
nodeObj, nodeExists := volumeObj.nodesAttachedTo[nodeName]
if nodeExists {
return volumeObj, nodeObj, nil
}
}
return attachedVolume{}, nodeAttachedTo{}, fmt.Errorf("volume %v is no longer attached to the node %q",
volumeName,
nodeName)
}
// Remove the volumeName from the node's volumesToReportAsAttached list
// This is an internal function and caller should acquire and release the lock
func (asw *actualStateOfWorld) removeVolumeFromReportAsAttached(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) error {
nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]
if nodeToUpdateExists {
_, nodeToUpdateVolumeExists :=
nodeToUpdate.volumesToReportAsAttached[volumeName]
if nodeToUpdateVolumeExists {
nodeToUpdate.statusUpdateNeeded = true
delete(nodeToUpdate.volumesToReportAsAttached, volumeName)
asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
return nil
}
}
return fmt.Errorf("volume %q does not exist in volumesToReportAsAttached list or node %q does not exist in nodesToUpdateStatusFor list",
volumeName,
nodeName)
}
// Add the volumeName to the node's volumesToReportAsAttached list
// This is an internal function and caller should acquire and release the lock
func (asw *actualStateOfWorld) addVolumeToReportAsAttached(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
// In case the volume/node entry is no longer in attachedVolume list, skip the rest
if _, _, err := asw.getNodeAndVolume(volumeName, nodeName); err != nil {
glog.V(4).Infof("Volume %q is no longer attached to node %q", volumeName, nodeName)
return
}
nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]
if !nodeToUpdateExists {
// Create object if it doesn't exist
nodeToUpdate = nodeToUpdateStatusFor{
nodeName: nodeName,
statusUpdateNeeded: true,
volumesToReportAsAttached: make(map[v1.UniqueVolumeName]v1.UniqueVolumeName),
}
asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
glog.V(4).Infof("Add new node %q to nodesToUpdateStatusFor", nodeName)
}
_, nodeToUpdateVolumeExists :=
nodeToUpdate.volumesToReportAsAttached[volumeName]
if !nodeToUpdateVolumeExists {
nodeToUpdate.statusUpdateNeeded = true
nodeToUpdate.volumesToReportAsAttached[volumeName] = volumeName
asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
glog.V(4).Infof("Report volume %q as attached to node %q", volumeName, nodeName)
}
}
// Update the flag statusUpdateNeeded to indicate whether node status is already updated or
// needs to be updated again by the node status updater.
// If the specified node does not exist in the nodesToUpdateStatusFor list, log the error and return
// This is an internal function and caller should acquire and release the lock
func (asw *actualStateOfWorld) updateNodeStatusUpdateNeeded(nodeName types.NodeName, needed bool) error {
nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]
if !nodeToUpdateExists {
// should not happen
errMsg := fmt.Sprintf("Failed to set statusUpdateNeeded to needed %t because nodeName=%q does not exist",
needed, nodeName)
return fmt.Errorf(errMsg)
}
nodeToUpdate.statusUpdateNeeded = needed
asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
return nil
}
func (asw *actualStateOfWorld) SetNodeStatusUpdateNeeded(nodeName types.NodeName) {
asw.Lock()
defer asw.Unlock()
if err := asw.updateNodeStatusUpdateNeeded(nodeName, true); err != nil {
glog.Warningf("Failed to update statusUpdateNeeded field in actual state of world: %v", err)
}
}
func (asw *actualStateOfWorld) DeleteVolumeNode(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
asw.Lock()
defer asw.Unlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists {
return
}
_, nodeExists := volumeObj.nodesAttachedTo[nodeName]
if nodeExists {
delete(asw.attachedVolumes[volumeName].nodesAttachedTo, nodeName)
}
if len(volumeObj.nodesAttachedTo) == 0 {
delete(asw.attachedVolumes, volumeName)
}
// Remove volume from volumes to report as attached
asw.removeVolumeFromReportAsAttached(volumeName, nodeName)
}
func (asw *actualStateOfWorld) VolumeNodeExists(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) bool {
asw.RLock()
defer asw.RUnlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if volumeExists {
if _, nodeExists := volumeObj.nodesAttachedTo[nodeName]; nodeExists {
return true
}
}
return false
}
func (asw *actualStateOfWorld) GetAttachedVolumes() []AttachedVolume {
asw.RLock()
defer asw.RUnlock()
attachedVolumes := make([]AttachedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
for _, volumeObj := range asw.attachedVolumes {
for _, nodeObj := range volumeObj.nodesAttachedTo {
attachedVolumes = append(
attachedVolumes,
getAttachedVolume(&volumeObj, &nodeObj))
}
}
return attachedVolumes
}
func (asw *actualStateOfWorld) GetAttachedVolumesForNode(
nodeName types.NodeName) []AttachedVolume {
asw.RLock()
defer asw.RUnlock()
attachedVolumes := make(
[]AttachedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
for _, volumeObj := range asw.attachedVolumes {
for actualNodeName, nodeObj := range volumeObj.nodesAttachedTo {
if actualNodeName == nodeName {
attachedVolumes = append(
attachedVolumes,
getAttachedVolume(&volumeObj, &nodeObj))
}
}
}
return attachedVolumes
}
func (asw *actualStateOfWorld) GetAttachedVolumesPerNode() map[types.NodeName][]operationexecutor.AttachedVolume {
asw.RLock()
defer asw.RUnlock()
attachedVolumesPerNode := make(map[types.NodeName][]operationexecutor.AttachedVolume)
for _, volumeObj := range asw.attachedVolumes {
for nodeName, nodeObj := range volumeObj.nodesAttachedTo {
volumes := attachedVolumesPerNode[nodeName]
volumes = append(volumes, getAttachedVolume(&volumeObj, &nodeObj).AttachedVolume)
attachedVolumesPerNode[nodeName] = volumes
}
}
return attachedVolumesPerNode
}
func (asw *actualStateOfWorld) GetNodesForVolume(volumeName v1.UniqueVolumeName) []types.NodeName {
asw.RLock()
defer asw.RUnlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if !volumeExists || len(volumeObj.nodesAttachedTo) == 0 {
return []types.NodeName{}
}
nodes := []types.NodeName{}
for k := range volumeObj.nodesAttachedTo {
nodes = append(nodes, k)
}
return nodes
}
func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[types.NodeName][]v1.AttachedVolume {
asw.RLock()
defer asw.RUnlock()
volumesToReportAttached := make(map[types.NodeName][]v1.AttachedVolume)
for nodeName, nodeToUpdateObj := range asw.nodesToUpdateStatusFor {
if nodeToUpdateObj.statusUpdateNeeded {
attachedVolumes := make(
[]v1.AttachedVolume,
len(nodeToUpdateObj.volumesToReportAsAttached) /* len */)
i := 0
for _, volume := range nodeToUpdateObj.volumesToReportAsAttached {
attachedVolumes[i] = v1.AttachedVolume{
Name: volume,
DevicePath: asw.attachedVolumes[volume].devicePath,
}
i++
}
volumesToReportAttached[nodeToUpdateObj.nodeName] = attachedVolumes
}
// When GetVolumesToReportAttached is called by node status updater, the current status
// of this node will be updated, so set the flag statusUpdateNeeded to false indicating
// the current status is already updated.
if err := asw.updateNodeStatusUpdateNeeded(nodeName, false); err != nil {
glog.Errorf("Failed to update statusUpdateNeeded field when getting volumes: %v", err)
}
}
return volumesToReportAttached
}
func (asw *actualStateOfWorld) GetNodesToUpdateStatusFor() map[types.NodeName]nodeToUpdateStatusFor {
return asw.nodesToUpdateStatusFor
}
func getAttachedVolume(
attachedVolume *attachedVolume,
nodeAttachedTo *nodeAttachedTo) AttachedVolume {
return AttachedVolume{
AttachedVolume: operationexecutor.AttachedVolume{
VolumeName: attachedVolume.volumeName,
VolumeSpec: attachedVolume.spec,
NodeName: nodeAttachedTo.nodeName,
DevicePath: attachedVolume.devicePath,
PluginIsAttachable: true,
},
MountedByNode: nodeAttachedTo.mountedByNode,
DetachRequestedTime: nodeAttachedTo.detachRequestedTime}
}
| pkg/controller/volume/attachdetach/cache/actual_state_of_world.go | 1 | https://github.com/kubernetes/kubernetes/commit/2e0abfa29f5c61e35677f164ca9694f93da61689 | [
0.15458862483501434,
0.008346579037606716,
0.00016534488531760871,
0.0005808718269690871,
0.026873769238591194
] |
{
"id": 2,
"code_window": [
"// managed by the attach/detach controller, volumes are all the volumes that\n",
"// should be attached to the specified node, and pods are the pods that\n",
"// reference the volume and are scheduled to that node.\n",
"// Note: This is distinct from the DesiredStateOfWorld implemented by the\n",
"// kubelet volume manager. The both keep track of different objects. This\n",
"// contains attach/detach controller specific state.\n",
"type DesiredStateOfWorld interface {\n",
"\t// AddNode adds the given node to the list of nodes managed by the attach/\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// kubelet volume manager. They both keep track of different objects. This\n"
],
"file_path": "pkg/controller/volume/attachdetach/cache/desired_state_of_world.go",
"type": "replace",
"edit_start_line_idx": 42
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"alert.go",
"fingerprinting.go",
"fnv.go",
"labels.go",
"labelset.go",
"metric.go",
"model.go",
"signature.go",
"silence.go",
"time.go",
"value.go",
],
importpath = "github.com/prometheus/common/model",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
| vendor/github.com/prometheus/common/model/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/2e0abfa29f5c61e35677f164ca9694f93da61689 | [
0.00017264873895328492,
0.00017047971778083593,
0.00016746036999393255,
0.00017090488108806312,
0.0000021034911696915515
] |
{
"id": 2,
"code_window": [
"// managed by the attach/detach controller, volumes are all the volumes that\n",
"// should be attached to the specified node, and pods are the pods that\n",
"// reference the volume and are scheduled to that node.\n",
"// Note: This is distinct from the DesiredStateOfWorld implemented by the\n",
"// kubelet volume manager. The both keep track of different objects. This\n",
"// contains attach/detach controller specific state.\n",
"type DesiredStateOfWorld interface {\n",
"\t// AddNode adds the given node to the list of nodes managed by the attach/\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// kubelet volume manager. They both keep track of different objects. This\n"
],
"file_path": "pkg/controller/volume/attachdetach/cache/desired_state_of_world.go",
"type": "replace",
"edit_start_line_idx": 42
} | reviewers:
- sttts
| staging/src/k8s.io/apiserver/pkg/endpoints/request/OWNERS | 0 | https://github.com/kubernetes/kubernetes/commit/2e0abfa29f5c61e35677f164ca9694f93da61689 | [
0.00016563222743570805,
0.00016563222743570805,
0.00016563222743570805,
0.00016563222743570805,
0
] |
{
"id": 2,
"code_window": [
"// managed by the attach/detach controller, volumes are all the volumes that\n",
"// should be attached to the specified node, and pods are the pods that\n",
"// reference the volume and are scheduled to that node.\n",
"// Note: This is distinct from the DesiredStateOfWorld implemented by the\n",
"// kubelet volume manager. The both keep track of different objects. This\n",
"// contains attach/detach controller specific state.\n",
"type DesiredStateOfWorld interface {\n",
"\t// AddNode adds the given node to the list of nodes managed by the attach/\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"// kubelet volume manager. They both keep track of different objects. This\n"
],
"file_path": "pkg/controller/volume/attachdetach/cache/desired_state_of_world.go",
"type": "replace",
"edit_start_line_idx": 42
} | #!/bin/bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -x
NODE_INSTANCE_PREFIX=${NODE_INSTANCE_PREFIX:-"${INSTANCE_PREFIX}-node"}
source "${KUBE_ROOT}/cluster/gce/util.sh"
| cluster/kubernetes-anywhere/util.sh | 0 | https://github.com/kubernetes/kubernetes/commit/2e0abfa29f5c61e35677f164ca9694f93da61689 | [
0.0002681270125322044,
0.00020569917978718877,
0.00017366546671837568,
0.00017530506011098623,
0.00004414821887621656
] |
{
"id": 3,
"code_window": [
"\t// detach controller.\n",
"\t// If the node already exists this is a no-op.\n",
"\t// keepTerminatedPodVolumes is a property of the node that determines\n",
"\t// if for terminated pods volumes should be mounted and attached.\n",
"\tAddNode(nodeName k8stypes.NodeName, keepTerminatedPodVolumes bool)\n",
"\n",
"\t// AddPod adds the given pod to the list of pods that reference the\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// if volumes should be mounted and attached for terminated pods.\n"
],
"file_path": "pkg/controller/volume/attachdetach/cache/desired_state_of_world.go",
"type": "replace",
"edit_start_line_idx": 49
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package cache implements data structures used by the attach/detach controller
to keep track of volumes, the nodes they are attached to, and the pods that
reference them.
*/
package cache
import (
"fmt"
"sync"
"k8s.io/api/core/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
"k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// DesiredStateOfWorld defines a set of thread-safe operations supported on
// the attach/detach controller's desired state of the world cache.
// This cache contains nodes->volumes->pods where nodes are all the nodes
// managed by the attach/detach controller, volumes are all the volumes that
// should be attached to the specified node, and pods are the pods that
// reference the volume and are scheduled to that node.
// Note: This is distinct from the DesiredStateOfWorld implemented by the
// kubelet volume manager. The both keep track of different objects. This
// contains attach/detach controller specific state.
type DesiredStateOfWorld interface {
// AddNode adds the given node to the list of nodes managed by the attach/
// detach controller.
// If the node already exists this is a no-op.
// keepTerminatedPodVolumes is a property of the node that determines
// if for terminated pods volumes should be mounted and attached.
AddNode(nodeName k8stypes.NodeName, keepTerminatedPodVolumes bool)
// AddPod adds the given pod to the list of pods that reference the
// specified volume and is scheduled to the specified node.
// A unique volumeName is generated from the volumeSpec and returned on
// success.
// If the pod already exists under the specified volume, this is a no-op.
// If volumeSpec is not an attachable volume plugin, an error is returned.
// If no volume with the name volumeName exists in the list of volumes that
// should be attached to the specified node, the volume is implicitly added.
// If no node with the name nodeName exists in list of nodes managed by the
// attach/detach attached controller, an error is returned.
AddPod(podName types.UniquePodName, pod *v1.Pod, volumeSpec *volume.Spec, nodeName k8stypes.NodeName) (v1.UniqueVolumeName, error)
// DeleteNode removes the given node from the list of nodes managed by the
// attach/detach controller.
// If the node does not exist this is a no-op.
// If the node exists but has 1 or more child volumes, an error is returned.
DeleteNode(nodeName k8stypes.NodeName) error
// DeletePod removes the given pod from the list of pods that reference the
// specified volume and are scheduled to the specified node.
// If no pod exists in the list of pods that reference the specified volume
// and are scheduled to the specified node, this is a no-op.
// If a node with the name nodeName does not exist in the list of nodes
	// managed by the attach/detach controller, this is a no-op.
// If no volume with the name volumeName exists in the list of managed
// volumes under the specified node, this is a no-op.
// If after deleting the pod, the specified volume contains no other child
// pods, the volume is also deleted.
DeletePod(podName types.UniquePodName, volumeName v1.UniqueVolumeName, nodeName k8stypes.NodeName)
// NodeExists returns true if the node with the specified name exists in
// the list of nodes managed by the attach/detach controller.
NodeExists(nodeName k8stypes.NodeName) bool
// VolumeExists returns true if the volume with the specified name exists
// in the list of volumes that should be attached to the specified node by
// the attach detach controller.
VolumeExists(volumeName v1.UniqueVolumeName, nodeName k8stypes.NodeName) bool
// GetVolumesToAttach generates and returns a list of volumes to attach
// and the nodes they should be attached to based on the current desired
// state of the world.
GetVolumesToAttach() []VolumeToAttach
// GetPodToAdd generates and returns a map of pods based on the current desired
// state of world
GetPodToAdd() map[types.UniquePodName]PodToAdd
// GetKeepTerminatedPodVolumesForNode determines if node wants volumes to be
// mounted and attached for terminated pods
GetKeepTerminatedPodVolumesForNode(k8stypes.NodeName) bool
// Mark multiattach error as reported to prevent spamming multiple
// events for same error
SetMultiAttachError(v1.UniqueVolumeName, k8stypes.NodeName)
	// GetVolumePodsOnNodes returns the list of pods ("namespace/name") that require
	// the given volume on the given nodes.
GetVolumePodsOnNodes(nodes []k8stypes.NodeName, volumeName v1.UniqueVolumeName) []*v1.Pod
}
// VolumeToAttach represents a volume that should be attached to a node.
type VolumeToAttach struct {
operationexecutor.VolumeToAttach
}
// PodToAdd represents a pod that references the underlying volume and is
// scheduled to the underlying node.
type PodToAdd struct {
// pod contains the api object of pod
Pod *v1.Pod
// volumeName contains the unique identifier for this volume.
VolumeName v1.UniqueVolumeName
// nodeName contains the name of this node.
NodeName k8stypes.NodeName
}
// NewDesiredStateOfWorld returns a new instance of DesiredStateOfWorld.
func NewDesiredStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) DesiredStateOfWorld {
return &desiredStateOfWorld{
nodesManaged: make(map[k8stypes.NodeName]nodeManaged),
volumePluginMgr: volumePluginMgr,
}
}
type desiredStateOfWorld struct {
// nodesManaged is a map containing the set of nodes managed by the attach/
// detach controller. The key in this map is the name of the node and the
// value is a node object containing more information about the node.
nodesManaged map[k8stypes.NodeName]nodeManaged
// volumePluginMgr is the volume plugin manager used to create volume
// plugin objects.
volumePluginMgr *volume.VolumePluginMgr
sync.RWMutex
}
// nodeManaged represents a node that is being managed by the attach/detach
// controller.
type nodeManaged struct {
// nodeName contains the name of this node.
nodeName k8stypes.NodeName
// volumesToAttach is a map containing the set of volumes that should be
// attached to this node. The key in the map is the name of the volume and
// the value is a volumeToAttach object containing more information about the volume.
volumesToAttach map[v1.UniqueVolumeName]volumeToAttach
	// keepTerminatedPodVolumes determines if, for terminated pods on this node,
	// volumes should be kept mounted and attached.
keepTerminatedPodVolumes bool
}
// The volumeToAttach object represents a volume that should be attached to a node.
type volumeToAttach struct {
// multiAttachErrorReported indicates whether the multi-attach error has been reported for the given volume.
// It is used to to prevent reporting the error from being reported more than once for a given volume.
multiAttachErrorReported bool
// volumeName contains the unique identifier for this volume.
volumeName v1.UniqueVolumeName
// spec is the volume spec containing the specification for this volume.
// Used to generate the volume plugin object, and passed to attach/detach
// methods.
spec *volume.Spec
// scheduledPods is a map containing the set of pods that reference this
// volume and are scheduled to the underlying node. The key in the map is
// the name of the pod and the value is a pod object containing more
// information about the pod.
scheduledPods map[types.UniquePodName]pod
}
// The pod represents a pod that references the underlying volume and is
// scheduled to the underlying node.
type pod struct {
// podName contains the unique identifier for this pod
podName types.UniquePodName
// pod object contains the api object of pod
podObj *v1.Pod
}
func (dsw *desiredStateOfWorld) AddNode(nodeName k8stypes.NodeName, keepTerminatedPodVolumes bool) {
dsw.Lock()
defer dsw.Unlock()
if _, nodeExists := dsw.nodesManaged[nodeName]; !nodeExists {
dsw.nodesManaged[nodeName] = nodeManaged{
nodeName: nodeName,
volumesToAttach: make(map[v1.UniqueVolumeName]volumeToAttach),
keepTerminatedPodVolumes: keepTerminatedPodVolumes,
}
}
}
func (dsw *desiredStateOfWorld) AddPod(
podName types.UniquePodName,
podToAdd *v1.Pod,
volumeSpec *volume.Spec,
nodeName k8stypes.NodeName) (v1.UniqueVolumeName, error) {
dsw.Lock()
defer dsw.Unlock()
nodeObj, nodeExists := dsw.nodesManaged[nodeName]
if !nodeExists {
return "", fmt.Errorf(
"no node with the name %q exists in the list of managed nodes",
nodeName)
}
attachableVolumePlugin, err := dsw.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
if err != nil || attachableVolumePlugin == nil {
return "", fmt.Errorf(
"failed to get AttachablePlugin from volumeSpec for volume %q err=%v",
volumeSpec.Name(),
err)
}
volumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(
attachableVolumePlugin, volumeSpec)
if err != nil {
return "", fmt.Errorf(
"failed to GetUniqueVolumeNameFromSpec for volumeSpec %q err=%v",
volumeSpec.Name(),
err)
}
volumeObj, volumeExists := nodeObj.volumesToAttach[volumeName]
if !volumeExists {
volumeObj = volumeToAttach{
multiAttachErrorReported: false,
volumeName: volumeName,
spec: volumeSpec,
scheduledPods: make(map[types.UniquePodName]pod),
}
dsw.nodesManaged[nodeName].volumesToAttach[volumeName] = volumeObj
}
if _, podExists := volumeObj.scheduledPods[podName]; !podExists {
dsw.nodesManaged[nodeName].volumesToAttach[volumeName].scheduledPods[podName] =
pod{
podName: podName,
podObj: podToAdd,
}
}
return volumeName, nil
}
func (dsw *desiredStateOfWorld) DeleteNode(nodeName k8stypes.NodeName) error {
dsw.Lock()
defer dsw.Unlock()
nodeObj, nodeExists := dsw.nodesManaged[nodeName]
if !nodeExists {
return nil
}
if len(nodeObj.volumesToAttach) > 0 {
return fmt.Errorf(
"failed to delete node %q from list of nodes managed by attach/detach controller--the node still contains %v volumes in its list of volumes to attach",
nodeName,
len(nodeObj.volumesToAttach))
}
delete(
dsw.nodesManaged,
nodeName)
return nil
}
func (dsw *desiredStateOfWorld) DeletePod(
podName types.UniquePodName,
volumeName v1.UniqueVolumeName,
nodeName k8stypes.NodeName) {
dsw.Lock()
defer dsw.Unlock()
nodeObj, nodeExists := dsw.nodesManaged[nodeName]
if !nodeExists {
return
}
volumeObj, volumeExists := nodeObj.volumesToAttach[volumeName]
if !volumeExists {
return
}
if _, podExists := volumeObj.scheduledPods[podName]; !podExists {
return
}
delete(
dsw.nodesManaged[nodeName].volumesToAttach[volumeName].scheduledPods,
podName)
if len(volumeObj.scheduledPods) == 0 {
delete(
dsw.nodesManaged[nodeName].volumesToAttach,
volumeName)
}
}
func (dsw *desiredStateOfWorld) NodeExists(nodeName k8stypes.NodeName) bool {
dsw.RLock()
defer dsw.RUnlock()
_, nodeExists := dsw.nodesManaged[nodeName]
return nodeExists
}
func (dsw *desiredStateOfWorld) VolumeExists(
volumeName v1.UniqueVolumeName, nodeName k8stypes.NodeName) bool {
dsw.RLock()
defer dsw.RUnlock()
nodeObj, nodeExists := dsw.nodesManaged[nodeName]
if nodeExists {
if _, volumeExists := nodeObj.volumesToAttach[volumeName]; volumeExists {
return true
}
}
return false
}
func (dsw *desiredStateOfWorld) SetMultiAttachError(
volumeName v1.UniqueVolumeName,
nodeName k8stypes.NodeName) {
dsw.Lock()
defer dsw.Unlock()
nodeObj, nodeExists := dsw.nodesManaged[nodeName]
if nodeExists {
if volumeObj, volumeExists := nodeObj.volumesToAttach[volumeName]; volumeExists {
volumeObj.multiAttachErrorReported = true
dsw.nodesManaged[nodeName].volumesToAttach[volumeName] = volumeObj
}
}
}
// GetKeepTerminatedPodVolumesForNode determines if node wants volumes to be
// mounted and attached for terminated pods
func (dsw *desiredStateOfWorld) GetKeepTerminatedPodVolumesForNode(nodeName k8stypes.NodeName) bool {
dsw.RLock()
defer dsw.RUnlock()
if nodeName == "" {
return false
}
if node, ok := dsw.nodesManaged[nodeName]; ok {
return node.keepTerminatedPodVolumes
}
return false
}
func (dsw *desiredStateOfWorld) GetVolumesToAttach() []VolumeToAttach {
dsw.RLock()
defer dsw.RUnlock()
volumesToAttach := make([]VolumeToAttach, 0 /* len */, len(dsw.nodesManaged) /* cap */)
for nodeName, nodeObj := range dsw.nodesManaged {
for volumeName, volumeObj := range nodeObj.volumesToAttach {
volumesToAttach = append(volumesToAttach,
VolumeToAttach{
VolumeToAttach: operationexecutor.VolumeToAttach{
MultiAttachErrorReported: volumeObj.multiAttachErrorReported,
VolumeName: volumeName,
VolumeSpec: volumeObj.spec,
NodeName: nodeName,
ScheduledPods: getPodsFromMap(volumeObj.scheduledPods),
}})
}
}
return volumesToAttach
}
// Construct a list of v1.Pod objects from the given pod map
func getPodsFromMap(podMap map[types.UniquePodName]pod) []*v1.Pod {
pods := make([]*v1.Pod, 0, len(podMap))
for _, pod := range podMap {
pods = append(pods, pod.podObj)
}
return pods
}
func (dsw *desiredStateOfWorld) GetPodToAdd() map[types.UniquePodName]PodToAdd {
dsw.RLock()
defer dsw.RUnlock()
pods := make(map[types.UniquePodName]PodToAdd)
for nodeName, nodeObj := range dsw.nodesManaged {
for volumeName, volumeObj := range nodeObj.volumesToAttach {
for podUID, pod := range volumeObj.scheduledPods {
pods[podUID] = PodToAdd{
Pod: pod.podObj,
VolumeName: volumeName,
NodeName: nodeName,
}
}
}
}
return pods
}
func (dsw *desiredStateOfWorld) GetVolumePodsOnNodes(nodes []k8stypes.NodeName, volumeName v1.UniqueVolumeName) []*v1.Pod {
dsw.RLock()
defer dsw.RUnlock()
pods := []*v1.Pod{}
for _, nodeName := range nodes {
node, ok := dsw.nodesManaged[nodeName]
if !ok {
continue
}
volume, ok := node.volumesToAttach[volumeName]
if !ok {
continue
}
for _, pod := range volume.scheduledPods {
pods = append(pods, pod.podObj)
}
}
return pods
}
| pkg/controller/volume/attachdetach/cache/desired_state_of_world.go | 1 | https://github.com/kubernetes/kubernetes/commit/2e0abfa29f5c61e35677f164ca9694f93da61689 | [
0.993480920791626,
0.2728038728237152,
0.00016226428851950914,
0.012145379558205605,
0.40155982971191406
] |
{
"id": 3,
"code_window": [
"\t// detach controller.\n",
"\t// If the node already exists this is a no-op.\n",
"\t// keepTerminatedPodVolumes is a property of the node that determines\n",
"\t// if for terminated pods volumes should be mounted and attached.\n",
"\tAddNode(nodeName k8stypes.NodeName, keepTerminatedPodVolumes bool)\n",
"\n",
"\t// AddPod adds the given pod to the list of pods that reference the\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// if volumes should be mounted and attached for terminated pods.\n"
],
"file_path": "pkg/controller/volume/attachdetach/cache/desired_state_of_world.go",
"type": "replace",
"edit_start_line_idx": 49
} | // +build !ignore_autogenerated
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
package podtolerationrestriction
import (
runtime "k8s.io/apimachinery/pkg/runtime"
core "k8s.io/kubernetes/pkg/apis/core"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Configuration) DeepCopyInto(out *Configuration) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Default != nil {
in, out := &in.Default, &out.Default
*out = make([]core.Toleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Whitelist != nil {
in, out := &in.Whitelist, &out.Whitelist
*out = make([]core.Toleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Configuration.
func (in *Configuration) DeepCopy() *Configuration {
if in == nil {
return nil
}
out := new(Configuration)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Configuration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
| plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/zz_generated.deepcopy.go | 0 | https://github.com/kubernetes/kubernetes/commit/2e0abfa29f5c61e35677f164ca9694f93da61689 | [
0.00017774835578165948,
0.00017111389024648815,
0.00016579878865741193,
0.00017039297381415963,
0.000004090538368473062
] |
{
"id": 3,
"code_window": [
"\t// detach controller.\n",
"\t// If the node already exists this is a no-op.\n",
"\t// keepTerminatedPodVolumes is a property of the node that determines\n",
"\t// if for terminated pods volumes should be mounted and attached.\n",
"\tAddNode(nodeName k8stypes.NodeName, keepTerminatedPodVolumes bool)\n",
"\n",
"\t// AddPod adds the given pod to the list of pods that reference the\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// if volumes should be mounted and attached for terminated pods.\n"
],
"file_path": "pkg/controller/volume/attachdetach/cache/desired_state_of_world.go",
"type": "replace",
"edit_start_line_idx": 49
} | ignore:
- "output_tests/.*"
| vendor/github.com/json-iterator/go/.codecov.yml | 0 | https://github.com/kubernetes/kubernetes/commit/2e0abfa29f5c61e35677f164ca9694f93da61689 | [
0.0001763434847816825,
0.0001763434847816825,
0.0001763434847816825,
0.0001763434847816825,
0
] |
{
"id": 3,
"code_window": [
"\t// detach controller.\n",
"\t// If the node already exists this is a no-op.\n",
"\t// keepTerminatedPodVolumes is a property of the node that determines\n",
"\t// if for terminated pods volumes should be mounted and attached.\n",
"\tAddNode(nodeName k8stypes.NodeName, keepTerminatedPodVolumes bool)\n",
"\n",
"\t// AddPod adds the given pod to the list of pods that reference the\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// if volumes should be mounted and attached for terminated pods.\n"
],
"file_path": "pkg/controller/volume/attachdetach/cache/desired_state_of_world.go",
"type": "replace",
"edit_start_line_idx": 49
} | package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["storage.go"],
importpath = "k8s.io/kubernetes/pkg/registry/rbac/clusterrole/policybased",
deps = [
"//pkg/apis/core/helper:go_default_library",
"//pkg/apis/rbac:go_default_library",
"//pkg/registry/rbac:go_default_library",
"//pkg/registry/rbac/validation:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
"//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
| pkg/registry/rbac/clusterrole/policybased/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/2e0abfa29f5c61e35677f164ca9694f93da61689 | [
0.00017644642503000796,
0.00017496399232186377,
0.00017316675803158432,
0.00017512138583697379,
0.0000012667371720453957
] |
{
"id": 4,
"code_window": [
"// The volumeToAttach object represents a volume that should be attached to a node.\n",
"type volumeToAttach struct {\n",
"\t// multiAttachErrorReported indicates whether the multi-attach error has been reported for the given volume.\n",
"\t// It is used to to prevent reporting the error from being reported more than once for a given volume.\n",
"\tmultiAttachErrorReported bool\n",
"\n",
"\t// volumeName contains the unique identifier for this volume.\n",
"\tvolumeName v1.UniqueVolumeName\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// It is used to prevent reporting the error from being reported more than once for a given volume.\n"
],
"file_path": "pkg/controller/volume/attachdetach/cache/desired_state_of_world.go",
"type": "replace",
"edit_start_line_idx": 169
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package cache implements data structures used by the attach/detach controller
to keep track of volumes, the nodes they are attached to, and the pods that
reference them.
*/
package cache
import (
"fmt"
"sync"
"k8s.io/api/core/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
"k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// DesiredStateOfWorld defines a set of thread-safe operations supported on
// the attach/detach controller's desired state of the world cache.
// This cache contains nodes->volumes->pods where nodes are all the nodes
// managed by the attach/detach controller, volumes are all the volumes that
// should be attached to the specified node, and pods are the pods that
// reference the volume and are scheduled to that node.
// Note: This is distinct from the DesiredStateOfWorld implemented by the
// kubelet volume manager. The both keep track of different objects. This
// contains attach/detach controller specific state.
type DesiredStateOfWorld interface {
// AddNode adds the given node to the list of nodes managed by the attach/
// detach controller.
// If the node already exists this is a no-op.
// keepTerminatedPodVolumes is a property of the node that determines
// if for terminated pods volumes should be mounted and attached.
AddNode(nodeName k8stypes.NodeName, keepTerminatedPodVolumes bool)
// AddPod adds the given pod to the list of pods that reference the
// specified volume and is scheduled to the specified node.
// A unique volumeName is generated from the volumeSpec and returned on
// success.
// If the pod already exists under the specified volume, this is a no-op.
// If volumeSpec is not an attachable volume plugin, an error is returned.
// If no volume with the name volumeName exists in the list of volumes that
// should be attached to the specified node, the volume is implicitly added.
// If no node with the name nodeName exists in list of nodes managed by the
// attach/detach attached controller, an error is returned.
AddPod(podName types.UniquePodName, pod *v1.Pod, volumeSpec *volume.Spec, nodeName k8stypes.NodeName) (v1.UniqueVolumeName, error)
// DeleteNode removes the given node from the list of nodes managed by the
// attach/detach controller.
// If the node does not exist this is a no-op.
// If the node exists but has 1 or more child volumes, an error is returned.
DeleteNode(nodeName k8stypes.NodeName) error
// DeletePod removes the given pod from the list of pods that reference the
// specified volume and are scheduled to the specified node.
// If no pod exists in the list of pods that reference the specified volume
// and are scheduled to the specified node, this is a no-op.
// If a node with the name nodeName does not exist in the list of nodes
	// managed by the attach/detach controller, this is a no-op.
// If no volume with the name volumeName exists in the list of managed
// volumes under the specified node, this is a no-op.
// If after deleting the pod, the specified volume contains no other child
// pods, the volume is also deleted.
DeletePod(podName types.UniquePodName, volumeName v1.UniqueVolumeName, nodeName k8stypes.NodeName)
// NodeExists returns true if the node with the specified name exists in
// the list of nodes managed by the attach/detach controller.
NodeExists(nodeName k8stypes.NodeName) bool
// VolumeExists returns true if the volume with the specified name exists
// in the list of volumes that should be attached to the specified node by
// the attach detach controller.
VolumeExists(volumeName v1.UniqueVolumeName, nodeName k8stypes.NodeName) bool
// GetVolumesToAttach generates and returns a list of volumes to attach
// and the nodes they should be attached to based on the current desired
// state of the world.
GetVolumesToAttach() []VolumeToAttach
// GetPodToAdd generates and returns a map of pods based on the current desired
// state of world
GetPodToAdd() map[types.UniquePodName]PodToAdd
// GetKeepTerminatedPodVolumesForNode determines if node wants volumes to be
// mounted and attached for terminated pods
GetKeepTerminatedPodVolumesForNode(k8stypes.NodeName) bool
// Mark multiattach error as reported to prevent spamming multiple
// events for same error
SetMultiAttachError(v1.UniqueVolumeName, k8stypes.NodeName)
	// GetVolumePodsOnNodes returns the list of pods ("namespace/name") that require
	// the given volume on the given nodes.
GetVolumePodsOnNodes(nodes []k8stypes.NodeName, volumeName v1.UniqueVolumeName) []*v1.Pod
}
// VolumeToAttach represents a volume that should be attached to a node.
type VolumeToAttach struct {
operationexecutor.VolumeToAttach
}
// PodToAdd represents a pod that references the underlying volume and is
// scheduled to the underlying node.
type PodToAdd struct {
// pod contains the api object of pod
Pod *v1.Pod
// volumeName contains the unique identifier for this volume.
VolumeName v1.UniqueVolumeName
// nodeName contains the name of this node.
NodeName k8stypes.NodeName
}
// NewDesiredStateOfWorld returns a new instance of DesiredStateOfWorld.
func NewDesiredStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) DesiredStateOfWorld {
return &desiredStateOfWorld{
nodesManaged: make(map[k8stypes.NodeName]nodeManaged),
volumePluginMgr: volumePluginMgr,
}
}
type desiredStateOfWorld struct {
// nodesManaged is a map containing the set of nodes managed by the attach/
// detach controller. The key in this map is the name of the node and the
// value is a node object containing more information about the node.
nodesManaged map[k8stypes.NodeName]nodeManaged
// volumePluginMgr is the volume plugin manager used to create volume
// plugin objects.
volumePluginMgr *volume.VolumePluginMgr
sync.RWMutex
}
// nodeManaged represents a node that is being managed by the attach/detach
// controller.
type nodeManaged struct {
// nodeName contains the name of this node.
nodeName k8stypes.NodeName
// volumesToAttach is a map containing the set of volumes that should be
// attached to this node. The key in the map is the name of the volume and
// the value is a volumeToAttach object containing more information about the volume.
volumesToAttach map[v1.UniqueVolumeName]volumeToAttach
// keepTerminatedPodVolumes determines whether volumes for terminated pods on
// this node should be kept mounted and attached.
keepTerminatedPodVolumes bool
}
// The volumeToAttach object represents a volume that should be attached to a node.
type volumeToAttach struct {
// multiAttachErrorReported indicates whether the multi-attach error has been reported for the given volume.
// It is used to to prevent reporting the error from being reported more than once for a given volume.
multiAttachErrorReported bool
// volumeName contains the unique identifier for this volume.
volumeName v1.UniqueVolumeName
// spec is the volume spec containing the specification for this volume.
// Used to generate the volume plugin object, and passed to attach/detach
// methods.
spec *volume.Spec
// scheduledPods is a map containing the set of pods that reference this
// volume and are scheduled to the underlying node. The key in the map is
// the name of the pod and the value is a pod object containing more
// information about the pod.
scheduledPods map[types.UniquePodName]pod
}
// The pod represents a pod that references the underlying volume and is
// scheduled to the underlying node.
type pod struct {
// podName contains the unique identifier for this pod
podName types.UniquePodName
// pod object contains the api object of pod
podObj *v1.Pod
}
func (dsw *desiredStateOfWorld) AddNode(nodeName k8stypes.NodeName, keepTerminatedPodVolumes bool) {
dsw.Lock()
defer dsw.Unlock()
if _, nodeExists := dsw.nodesManaged[nodeName]; !nodeExists {
dsw.nodesManaged[nodeName] = nodeManaged{
nodeName: nodeName,
volumesToAttach: make(map[v1.UniqueVolumeName]volumeToAttach),
keepTerminatedPodVolumes: keepTerminatedPodVolumes,
}
}
}
func (dsw *desiredStateOfWorld) AddPod(
podName types.UniquePodName,
podToAdd *v1.Pod,
volumeSpec *volume.Spec,
nodeName k8stypes.NodeName) (v1.UniqueVolumeName, error) {
dsw.Lock()
defer dsw.Unlock()
nodeObj, nodeExists := dsw.nodesManaged[nodeName]
if !nodeExists {
return "", fmt.Errorf(
"no node with the name %q exists in the list of managed nodes",
nodeName)
}
attachableVolumePlugin, err := dsw.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
if err != nil || attachableVolumePlugin == nil {
return "", fmt.Errorf(
"failed to get AttachablePlugin from volumeSpec for volume %q err=%v",
volumeSpec.Name(),
err)
}
volumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(
attachableVolumePlugin, volumeSpec)
if err != nil {
return "", fmt.Errorf(
"failed to GetUniqueVolumeNameFromSpec for volumeSpec %q err=%v",
volumeSpec.Name(),
err)
}
volumeObj, volumeExists := nodeObj.volumesToAttach[volumeName]
if !volumeExists {
volumeObj = volumeToAttach{
multiAttachErrorReported: false,
volumeName: volumeName,
spec: volumeSpec,
scheduledPods: make(map[types.UniquePodName]pod),
}
dsw.nodesManaged[nodeName].volumesToAttach[volumeName] = volumeObj
}
if _, podExists := volumeObj.scheduledPods[podName]; !podExists {
dsw.nodesManaged[nodeName].volumesToAttach[volumeName].scheduledPods[podName] =
pod{
podName: podName,
podObj: podToAdd,
}
}
return volumeName, nil
}
func (dsw *desiredStateOfWorld) DeleteNode(nodeName k8stypes.NodeName) error {
dsw.Lock()
defer dsw.Unlock()
nodeObj, nodeExists := dsw.nodesManaged[nodeName]
if !nodeExists {
return nil
}
if len(nodeObj.volumesToAttach) > 0 {
return fmt.Errorf(
"failed to delete node %q from list of nodes managed by attach/detach controller--the node still contains %v volumes in its list of volumes to attach",
nodeName,
len(nodeObj.volumesToAttach))
}
delete(
dsw.nodesManaged,
nodeName)
return nil
}
func (dsw *desiredStateOfWorld) DeletePod(
podName types.UniquePodName,
volumeName v1.UniqueVolumeName,
nodeName k8stypes.NodeName) {
dsw.Lock()
defer dsw.Unlock()
nodeObj, nodeExists := dsw.nodesManaged[nodeName]
if !nodeExists {
return
}
volumeObj, volumeExists := nodeObj.volumesToAttach[volumeName]
if !volumeExists {
return
}
if _, podExists := volumeObj.scheduledPods[podName]; !podExists {
return
}
delete(
dsw.nodesManaged[nodeName].volumesToAttach[volumeName].scheduledPods,
podName)
if len(volumeObj.scheduledPods) == 0 {
delete(
dsw.nodesManaged[nodeName].volumesToAttach,
volumeName)
}
}
func (dsw *desiredStateOfWorld) NodeExists(nodeName k8stypes.NodeName) bool {
dsw.RLock()
defer dsw.RUnlock()
_, nodeExists := dsw.nodesManaged[nodeName]
return nodeExists
}
func (dsw *desiredStateOfWorld) VolumeExists(
volumeName v1.UniqueVolumeName, nodeName k8stypes.NodeName) bool {
dsw.RLock()
defer dsw.RUnlock()
nodeObj, nodeExists := dsw.nodesManaged[nodeName]
if nodeExists {
if _, volumeExists := nodeObj.volumesToAttach[volumeName]; volumeExists {
return true
}
}
return false
}
func (dsw *desiredStateOfWorld) SetMultiAttachError(
volumeName v1.UniqueVolumeName,
nodeName k8stypes.NodeName) {
dsw.Lock()
defer dsw.Unlock()
nodeObj, nodeExists := dsw.nodesManaged[nodeName]
if nodeExists {
if volumeObj, volumeExists := nodeObj.volumesToAttach[volumeName]; volumeExists {
volumeObj.multiAttachErrorReported = true
dsw.nodesManaged[nodeName].volumesToAttach[volumeName] = volumeObj
}
}
}
// GetKeepTerminatedPodVolumesForNode determines if node wants volumes to be
// mounted and attached for terminated pods
func (dsw *desiredStateOfWorld) GetKeepTerminatedPodVolumesForNode(nodeName k8stypes.NodeName) bool {
dsw.RLock()
defer dsw.RUnlock()
if nodeName == "" {
return false
}
if node, ok := dsw.nodesManaged[nodeName]; ok {
return node.keepTerminatedPodVolumes
}
return false
}
func (dsw *desiredStateOfWorld) GetVolumesToAttach() []VolumeToAttach {
dsw.RLock()
defer dsw.RUnlock()
volumesToAttach := make([]VolumeToAttach, 0 /* len */, len(dsw.nodesManaged) /* cap */)
for nodeName, nodeObj := range dsw.nodesManaged {
for volumeName, volumeObj := range nodeObj.volumesToAttach {
volumesToAttach = append(volumesToAttach,
VolumeToAttach{
VolumeToAttach: operationexecutor.VolumeToAttach{
MultiAttachErrorReported: volumeObj.multiAttachErrorReported,
VolumeName: volumeName,
VolumeSpec: volumeObj.spec,
NodeName: nodeName,
ScheduledPods: getPodsFromMap(volumeObj.scheduledPods),
}})
}
}
return volumesToAttach
}
// Construct a list of v1.Pod objects from the given pod map
func getPodsFromMap(podMap map[types.UniquePodName]pod) []*v1.Pod {
pods := make([]*v1.Pod, 0, len(podMap))
for _, pod := range podMap {
pods = append(pods, pod.podObj)
}
return pods
}
func (dsw *desiredStateOfWorld) GetPodToAdd() map[types.UniquePodName]PodToAdd {
dsw.RLock()
defer dsw.RUnlock()
pods := make(map[types.UniquePodName]PodToAdd)
for nodeName, nodeObj := range dsw.nodesManaged {
for volumeName, volumeObj := range nodeObj.volumesToAttach {
for podUID, pod := range volumeObj.scheduledPods {
pods[podUID] = PodToAdd{
Pod: pod.podObj,
VolumeName: volumeName,
NodeName: nodeName,
}
}
}
}
return pods
}
func (dsw *desiredStateOfWorld) GetVolumePodsOnNodes(nodes []k8stypes.NodeName, volumeName v1.UniqueVolumeName) []*v1.Pod {
dsw.RLock()
defer dsw.RUnlock()
pods := []*v1.Pod{}
for _, nodeName := range nodes {
node, ok := dsw.nodesManaged[nodeName]
if !ok {
continue
}
volume, ok := node.volumesToAttach[volumeName]
if !ok {
continue
}
for _, pod := range volume.scheduledPods {
pods = append(pods, pod.podObj)
}
}
return pods
}
| pkg/controller/volume/attachdetach/cache/desired_state_of_world.go | 1 | https://github.com/kubernetes/kubernetes/commit/2e0abfa29f5c61e35677f164ca9694f93da61689 | [
0.9990203380584717,
0.44592511653900146,
0.00016252690693363547,
0.035261157900094986,
0.48115819692611694
] |
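Each record in this dump follows the same flattened layout as the one that ends here: a JSON hunk (the code window, one keep/replace label per window line, the replacement lines, the target file path, the edit type, and the start index), then the full text of a candidate file, the candidate's path, a 0/1 relevance label, the source commit URL, and five dependency scores. The Go sketch below shows one way such a record could be modeled and its hunk decoded; the type names and the Record wrapper are illustrative assumptions, and only the JSON keys come from the data above.

package main

import (
	"encoding/json"
	"fmt"
)

// Hunk mirrors the JSON object that opens each record. The json tags follow
// the keys visible in the records above; the Go names are assumptions.
type Hunk struct {
	ID               int      `json:"id"`
	CodeWindow       []string `json:"code_window"`
	Labels           []string `json:"labels"` // "keep" or "replace", one per window line
	AfterEdit        []string `json:"after_edit"`
	FilePath         string   `json:"file_path"`
	Type             string   `json:"type"`
	EditStartLineIdx int      `json:"edit_start_line_idx"`
}

// Record pairs a hunk with one candidate file plus the trailing metadata
// columns (path, label, commit URL, dependency scores).
type Record struct {
	Hunk            Hunk
	File            string
	FilePath        string
	Label           int // 1 = the hunk's true file, 0 = a distractor
	CommitURL       string
	DependencyScore [5]float64
}

func main() {
	raw := `{"id": 4, "code_window": ["a\n", "b\n"], "labels": ["keep", "replace"],
	         "after_edit": ["c\n"], "file_path": "x.go", "type": "replace",
	         "edit_start_line_idx": 169}`
	var h Hunk
	if err := json.Unmarshal([]byte(raw), &h); err != nil {
		panic(err)
	}
	fmt.Println(h.FilePath, h.EditStartLineIdx, len(h.CodeWindow))
}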
{
"id": 4,
"code_window": [
"// The volumeToAttach object represents a volume that should be attached to a node.\n",
"type volumeToAttach struct {\n",
"\t// multiAttachErrorReported indicates whether the multi-attach error has been reported for the given volume.\n",
"\t// It is used to to prevent reporting the error from being reported more than once for a given volume.\n",
"\tmultiAttachErrorReported bool\n",
"\n",
"\t// volumeName contains the unique identifier for this volume.\n",
"\tvolumeName v1.UniqueVolumeName\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// It is used to prevent reporting the error from being reported more than once for a given volume.\n"
],
"file_path": "pkg/controller/volume/attachdetach/cache/desired_state_of_world.go",
"type": "replace",
"edit_start_line_idx": 169
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["dedent.go"],
importpath = "github.com/renstrom/dedent",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
| vendor/github.com/renstrom/dedent/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/2e0abfa29f5c61e35677f164ca9694f93da61689 | [
0.00017487634613644332,
0.00017215228581335396,
0.00016748113557696342,
0.00017409937572665513,
0.000003318197741464246
] |
{
"id": 4,
"code_window": [
"// The volumeToAttach object represents a volume that should be attached to a node.\n",
"type volumeToAttach struct {\n",
"\t// multiAttachErrorReported indicates whether the multi-attach error has been reported for the given volume.\n",
"\t// It is used to to prevent reporting the error from being reported more than once for a given volume.\n",
"\tmultiAttachErrorReported bool\n",
"\n",
"\t// volumeName contains the unique identifier for this volume.\n",
"\tvolumeName v1.UniqueVolumeName\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// It is used to prevent reporting the error from being reported more than once for a given volume.\n"
],
"file_path": "pkg/controller/volume/attachdetach/cache/desired_state_of_world.go",
"type": "replace",
"edit_start_line_idx": 169
} | reviewers:
- gmarek
- shyamjvs
- wojtek-t
approvers:
- gmarek
- shyamjvs
- wojtek-t
| test/kubemark/OWNERS | 0 | https://github.com/kubernetes/kubernetes/commit/2e0abfa29f5c61e35677f164ca9694f93da61689 | [
0.00017021011444739997,
0.00017021011444739997,
0.00017021011444739997,
0.00017021011444739997,
0
] |
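The hunk in the record above (id 4) pairs each code_window line with a label and supplies after_edit lines for the replaced span. The sketch below shows how the edited window could be reconstructed; it assumes that each contiguous run of "replace"-labeled lines is substituted by the after_edit block (the records shown here contain a single such run), which is an inferred reading of the format rather than a documented rule.

package main

import "fmt"

// applyWindow rebuilds the edited code window from the per-line labels:
// "keep" lines are copied through, and the first line of each contiguous
// run of "replace" lines triggers insertion of the after_edit block.
func applyWindow(window, labels, afterEdit []string) []string {
	var out []string
	for i, line := range window {
		switch labels[i] {
		case "keep":
			out = append(out, line)
		case "replace":
			if i == 0 || labels[i-1] != "replace" {
				out = append(out, afterEdit...)
			}
			// later lines of the same replaced run are dropped
		}
	}
	return out
}

func main() {
	// Window, labels, and after_edit taken from the hunk shown above.
	window := []string{
		"type volumeToAttach struct {\n",
		"\t// It is used to to prevent reporting the error from being reported more than once for a given volume.\n",
		"\tmultiAttachErrorReported bool\n",
	}
	labels := []string{"keep", "replace", "keep"}
	afterEdit := []string{
		"\t// It is used to prevent reporting the error from being reported more than once for a given volume.\n",
	}
	for _, l := range applyWindow(window, labels, afterEdit) {
		fmt.Print(l)
	}
}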
{
"id": 4,
"code_window": [
"// The volumeToAttach object represents a volume that should be attached to a node.\n",
"type volumeToAttach struct {\n",
"\t// multiAttachErrorReported indicates whether the multi-attach error has been reported for the given volume.\n",
"\t// It is used to to prevent reporting the error from being reported more than once for a given volume.\n",
"\tmultiAttachErrorReported bool\n",
"\n",
"\t// volumeName contains the unique identifier for this volume.\n",
"\tvolumeName v1.UniqueVolumeName\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// It is used to prevent reporting the error from being reported more than once for a given volume.\n"
],
"file_path": "pkg/controller/volume/attachdetach/cache/desired_state_of_world.go",
"type": "replace",
"edit_start_line_idx": 169
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package topology
import (
"reflect"
"testing"
cadvisorapi "github.com/google/cadvisor/info/v1"
)
func Test_Discover(t *testing.T) {
tests := []struct {
name string
args *cadvisorapi.MachineInfo
want *CPUTopology
wantErr bool
}{
{
name: "FailNumCores",
args: &cadvisorapi.MachineInfo{
NumCores: 0,
},
want: &CPUTopology{},
wantErr: true,
},
{
name: "OneSocketHT",
args: &cadvisorapi.MachineInfo{
NumCores: 8,
Topology: []cadvisorapi.Node{
{Id: 0,
Cores: []cadvisorapi.Core{
{Id: 0, Threads: []int{0, 4}},
{Id: 1, Threads: []int{1, 5}},
{Id: 2, Threads: []int{2, 6}},
{Id: 3, Threads: []int{3, 7}},
},
},
},
},
want: &CPUTopology{
NumCPUs: 8,
NumSockets: 1,
NumCores: 4,
CPUDetails: map[int]CPUInfo{
0: {CoreID: 0, SocketID: 0},
1: {CoreID: 1, SocketID: 0},
2: {CoreID: 2, SocketID: 0},
3: {CoreID: 3, SocketID: 0},
4: {CoreID: 0, SocketID: 0},
5: {CoreID: 1, SocketID: 0},
6: {CoreID: 2, SocketID: 0},
7: {CoreID: 3, SocketID: 0},
},
},
wantErr: false,
},
{
name: "DualSocketNoHT",
args: &cadvisorapi.MachineInfo{
NumCores: 4,
Topology: []cadvisorapi.Node{
{Id: 0,
Cores: []cadvisorapi.Core{
{Id: 0, Threads: []int{0}},
{Id: 2, Threads: []int{2}},
},
},
{Id: 1,
Cores: []cadvisorapi.Core{
{Id: 1, Threads: []int{1}},
{Id: 3, Threads: []int{3}},
},
},
},
},
want: &CPUTopology{
NumCPUs: 4,
NumSockets: 2,
NumCores: 4,
CPUDetails: map[int]CPUInfo{
0: {CoreID: 0, SocketID: 0},
1: {CoreID: 1, SocketID: 1},
2: {CoreID: 2, SocketID: 0},
3: {CoreID: 3, SocketID: 1},
},
},
wantErr: false,
},
{
name: "DualSocketHT - non unique Core'ID's",
args: &cadvisorapi.MachineInfo{
NumCores: 12,
Topology: []cadvisorapi.Node{
{Id: 0,
Cores: []cadvisorapi.Core{
{Id: 0, Threads: []int{0, 6}},
{Id: 1, Threads: []int{1, 7}},
{Id: 2, Threads: []int{2, 8}},
},
},
{Id: 1,
Cores: []cadvisorapi.Core{
{Id: 0, Threads: []int{3, 9}},
{Id: 1, Threads: []int{4, 10}},
{Id: 2, Threads: []int{5, 11}},
},
},
},
},
want: &CPUTopology{
NumCPUs: 12,
NumSockets: 2,
NumCores: 6,
CPUDetails: map[int]CPUInfo{
0: {CoreID: 0, SocketID: 0},
1: {CoreID: 1, SocketID: 0},
2: {CoreID: 2, SocketID: 0},
3: {CoreID: 3, SocketID: 1},
4: {CoreID: 4, SocketID: 1},
5: {CoreID: 5, SocketID: 1},
6: {CoreID: 0, SocketID: 0},
7: {CoreID: 1, SocketID: 0},
8: {CoreID: 2, SocketID: 0},
9: {CoreID: 3, SocketID: 1},
10: {CoreID: 4, SocketID: 1},
11: {CoreID: 5, SocketID: 1},
},
},
wantErr: false,
},
{
name: "OneSocketHT fail",
args: &cadvisorapi.MachineInfo{
NumCores: 8,
Topology: []cadvisorapi.Node{
{Id: 0,
Cores: []cadvisorapi.Core{
{Id: 0, Threads: []int{0, 4}},
{Id: 1, Threads: []int{1, 5}},
{Id: 2, Threads: []int{2, 2}}, // Wrong case - should fail here
{Id: 3, Threads: []int{3, 7}},
},
},
},
},
want: &CPUTopology{},
wantErr: true,
},
{
name: "OneSocketHT fail",
args: &cadvisorapi.MachineInfo{
NumCores: 8,
Topology: []cadvisorapi.Node{
{Id: 0,
Cores: []cadvisorapi.Core{
{Id: 0, Threads: []int{0, 4}},
{Id: 1, Threads: []int{1, 5}},
{Id: 2, Threads: []int{2, 6}},
{Id: 3, Threads: []int{}}, // Wrong case - should fail here
},
},
},
},
want: &CPUTopology{},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := Discover(tt.args)
if err != nil {
if tt.wantErr {
t.Logf("Discover() expected error = %v", err)
} else {
t.Errorf("Discover() error = %v, wantErr %v", err, tt.wantErr)
}
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("Discover() = %v, want %v", got, tt.want)
}
})
}
}
| pkg/kubelet/cm/cpumanager/topology/topology_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/2e0abfa29f5c61e35677f164ca9694f93da61689 | [
0.00021011936769355088,
0.00017162862059194595,
0.00016650903853587806,
0.0001692891528364271,
0.000009031963600136805
] |
{
"id": 5,
"code_window": [
"\n",
"\tvolumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(\n",
"\t\tattachableVolumePlugin, volumeSpec)\n",
"\tif err != nil {\n",
"\t\treturn \"\", fmt.Errorf(\n",
"\t\t\t\"failed to GetUniqueVolumeNameFromSpec for volumeSpec %q err=%v\",\n",
"\t\t\tvolumeSpec.Name(),\n",
"\t\t\terr)\n",
"\t}\n",
"\n",
"\tvolumeObj, volumeExists := nodeObj.volumesToAttach[volumeName]\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\"failed to get UniqueVolumeName from volumeSpec for plugin=%q and volume=%q err=%v\",\n",
"\t\t\tattachableVolumePlugin.GetPluginName(),\n"
],
"file_path": "pkg/controller/volume/attachdetach/cache/desired_state_of_world.go",
"type": "replace",
"edit_start_line_idx": 237
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package cache implements data structures used by the attach/detach controller
to keep track of volumes, the nodes they are attached to, and the pods that
reference them.
*/
package cache
import (
"fmt"
"sync"
"k8s.io/api/core/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
"k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// DesiredStateOfWorld defines a set of thread-safe operations supported on
// the attach/detach controller's desired state of the world cache.
// This cache contains nodes->volumes->pods where nodes are all the nodes
// managed by the attach/detach controller, volumes are all the volumes that
// should be attached to the specified node, and pods are the pods that
// reference the volume and are scheduled to that node.
// Note: This is distinct from the DesiredStateOfWorld implemented by the
// kubelet volume manager. They both keep track of different objects. This
// contains attach/detach controller specific state.
type DesiredStateOfWorld interface {
// AddNode adds the given node to the list of nodes managed by the attach/
// detach controller.
// If the node already exists this is a no-op.
// keepTerminatedPodVolumes is a property of the node that determines
// if for terminated pods volumes should be mounted and attached.
AddNode(nodeName k8stypes.NodeName, keepTerminatedPodVolumes bool)
// AddPod adds the given pod to the list of pods that reference the
// specified volume and is scheduled to the specified node.
// A unique volumeName is generated from the volumeSpec and returned on
// success.
// If the pod already exists under the specified volume, this is a no-op.
// If volumeSpec is not an attachable volume plugin, an error is returned.
// If no volume with the name volumeName exists in the list of volumes that
// should be attached to the specified node, the volume is implicitly added.
// If no node with the name nodeName exists in list of nodes managed by the
// attach/detach controller, an error is returned.
AddPod(podName types.UniquePodName, pod *v1.Pod, volumeSpec *volume.Spec, nodeName k8stypes.NodeName) (v1.UniqueVolumeName, error)
// DeleteNode removes the given node from the list of nodes managed by the
// attach/detach controller.
// If the node does not exist this is a no-op.
// If the node exists but has 1 or more child volumes, an error is returned.
DeleteNode(nodeName k8stypes.NodeName) error
// DeletePod removes the given pod from the list of pods that reference the
// specified volume and are scheduled to the specified node.
// If no pod exists in the list of pods that reference the specified volume
// and are scheduled to the specified node, this is a no-op.
// If a node with the name nodeName does not exist in the list of nodes
// managed by the attach/detach controller, this is a no-op.
// If no volume with the name volumeName exists in the list of managed
// volumes under the specified node, this is a no-op.
// If after deleting the pod, the specified volume contains no other child
// pods, the volume is also deleted.
DeletePod(podName types.UniquePodName, volumeName v1.UniqueVolumeName, nodeName k8stypes.NodeName)
// NodeExists returns true if the node with the specified name exists in
// the list of nodes managed by the attach/detach controller.
NodeExists(nodeName k8stypes.NodeName) bool
// VolumeExists returns true if the volume with the specified name exists
// in the list of volumes that should be attached to the specified node by
// the attach detach controller.
VolumeExists(volumeName v1.UniqueVolumeName, nodeName k8stypes.NodeName) bool
// GetVolumesToAttach generates and returns a list of volumes to attach
// and the nodes they should be attached to based on the current desired
// state of the world.
GetVolumesToAttach() []VolumeToAttach
// GetPodToAdd generates and returns a map of pods based on the current desired
// state of world
GetPodToAdd() map[types.UniquePodName]PodToAdd
// GetKeepTerminatedPodVolumesForNode determines if node wants volumes to be
// mounted and attached for terminated pods
GetKeepTerminatedPodVolumesForNode(k8stypes.NodeName) bool
// Mark multiattach error as reported to prevent spamming multiple
// events for same error
SetMultiAttachError(v1.UniqueVolumeName, k8stypes.NodeName)
// GetVolumePodsOnNodes returns the list of pods ("namespace/name") that
// require the given volume on the given nodes.
GetVolumePodsOnNodes(nodes []k8stypes.NodeName, volumeName v1.UniqueVolumeName) []*v1.Pod
}
// VolumeToAttach represents a volume that should be attached to a node.
type VolumeToAttach struct {
operationexecutor.VolumeToAttach
}
// PodToAdd represents a pod that references the underlying volume and is
// scheduled to the underlying node.
type PodToAdd struct {
// pod contains the api object of pod
Pod *v1.Pod
// volumeName contains the unique identifier for this volume.
VolumeName v1.UniqueVolumeName
// nodeName contains the name of this node.
NodeName k8stypes.NodeName
}
// NewDesiredStateOfWorld returns a new instance of DesiredStateOfWorld.
func NewDesiredStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) DesiredStateOfWorld {
return &desiredStateOfWorld{
nodesManaged: make(map[k8stypes.NodeName]nodeManaged),
volumePluginMgr: volumePluginMgr,
}
}
type desiredStateOfWorld struct {
// nodesManaged is a map containing the set of nodes managed by the attach/
// detach controller. The key in this map is the name of the node and the
// value is a node object containing more information about the node.
nodesManaged map[k8stypes.NodeName]nodeManaged
// volumePluginMgr is the volume plugin manager used to create volume
// plugin objects.
volumePluginMgr *volume.VolumePluginMgr
sync.RWMutex
}
// nodeManaged represents a node that is being managed by the attach/detach
// controller.
type nodeManaged struct {
// nodeName contains the name of this node.
nodeName k8stypes.NodeName
// volumesToAttach is a map containing the set of volumes that should be
// attached to this node. The key in the map is the name of the volume and
// the value is a volumeToAttach object containing more information about the volume.
volumesToAttach map[v1.UniqueVolumeName]volumeToAttach
// keepTerminatedPodVolumes determines whether volumes for terminated pods on
// this node should be kept mounted and attached.
keepTerminatedPodVolumes bool
}
// The volumeToAttach object represents a volume that should be attached to a node.
type volumeToAttach struct {
// multiAttachErrorReported indicates whether the multi-attach error has been reported for the given volume.
// It is used to to prevent reporting the error from being reported more than once for a given volume.
multiAttachErrorReported bool
// volumeName contains the unique identifier for this volume.
volumeName v1.UniqueVolumeName
// spec is the volume spec containing the specification for this volume.
// Used to generate the volume plugin object, and passed to attach/detach
// methods.
spec *volume.Spec
// scheduledPods is a map containing the set of pods that reference this
// volume and are scheduled to the underlying node. The key in the map is
// the name of the pod and the value is a pod object containing more
// information about the pod.
scheduledPods map[types.UniquePodName]pod
}
// The pod represents a pod that references the underlying volume and is
// scheduled to the underlying node.
type pod struct {
// podName contains the unique identifier for this pod
podName types.UniquePodName
// pod object contains the api object of pod
podObj *v1.Pod
}
func (dsw *desiredStateOfWorld) AddNode(nodeName k8stypes.NodeName, keepTerminatedPodVolumes bool) {
dsw.Lock()
defer dsw.Unlock()
if _, nodeExists := dsw.nodesManaged[nodeName]; !nodeExists {
dsw.nodesManaged[nodeName] = nodeManaged{
nodeName: nodeName,
volumesToAttach: make(map[v1.UniqueVolumeName]volumeToAttach),
keepTerminatedPodVolumes: keepTerminatedPodVolumes,
}
}
}
func (dsw *desiredStateOfWorld) AddPod(
podName types.UniquePodName,
podToAdd *v1.Pod,
volumeSpec *volume.Spec,
nodeName k8stypes.NodeName) (v1.UniqueVolumeName, error) {
dsw.Lock()
defer dsw.Unlock()
nodeObj, nodeExists := dsw.nodesManaged[nodeName]
if !nodeExists {
return "", fmt.Errorf(
"no node with the name %q exists in the list of managed nodes",
nodeName)
}
attachableVolumePlugin, err := dsw.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
if err != nil || attachableVolumePlugin == nil {
return "", fmt.Errorf(
"failed to get AttachablePlugin from volumeSpec for volume %q err=%v",
volumeSpec.Name(),
err)
}
volumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(
attachableVolumePlugin, volumeSpec)
if err != nil {
return "", fmt.Errorf(
"failed to GetUniqueVolumeNameFromSpec for volumeSpec %q err=%v",
volumeSpec.Name(),
err)
}
volumeObj, volumeExists := nodeObj.volumesToAttach[volumeName]
if !volumeExists {
volumeObj = volumeToAttach{
multiAttachErrorReported: false,
volumeName: volumeName,
spec: volumeSpec,
scheduledPods: make(map[types.UniquePodName]pod),
}
dsw.nodesManaged[nodeName].volumesToAttach[volumeName] = volumeObj
}
if _, podExists := volumeObj.scheduledPods[podName]; !podExists {
dsw.nodesManaged[nodeName].volumesToAttach[volumeName].scheduledPods[podName] =
pod{
podName: podName,
podObj: podToAdd,
}
}
return volumeName, nil
}
func (dsw *desiredStateOfWorld) DeleteNode(nodeName k8stypes.NodeName) error {
dsw.Lock()
defer dsw.Unlock()
nodeObj, nodeExists := dsw.nodesManaged[nodeName]
if !nodeExists {
return nil
}
if len(nodeObj.volumesToAttach) > 0 {
return fmt.Errorf(
"failed to delete node %q from list of nodes managed by attach/detach controller--the node still contains %v volumes in its list of volumes to attach",
nodeName,
len(nodeObj.volumesToAttach))
}
delete(
dsw.nodesManaged,
nodeName)
return nil
}
func (dsw *desiredStateOfWorld) DeletePod(
podName types.UniquePodName,
volumeName v1.UniqueVolumeName,
nodeName k8stypes.NodeName) {
dsw.Lock()
defer dsw.Unlock()
nodeObj, nodeExists := dsw.nodesManaged[nodeName]
if !nodeExists {
return
}
volumeObj, volumeExists := nodeObj.volumesToAttach[volumeName]
if !volumeExists {
return
}
if _, podExists := volumeObj.scheduledPods[podName]; !podExists {
return
}
delete(
dsw.nodesManaged[nodeName].volumesToAttach[volumeName].scheduledPods,
podName)
if len(volumeObj.scheduledPods) == 0 {
delete(
dsw.nodesManaged[nodeName].volumesToAttach,
volumeName)
}
}
func (dsw *desiredStateOfWorld) NodeExists(nodeName k8stypes.NodeName) bool {
dsw.RLock()
defer dsw.RUnlock()
_, nodeExists := dsw.nodesManaged[nodeName]
return nodeExists
}
func (dsw *desiredStateOfWorld) VolumeExists(
volumeName v1.UniqueVolumeName, nodeName k8stypes.NodeName) bool {
dsw.RLock()
defer dsw.RUnlock()
nodeObj, nodeExists := dsw.nodesManaged[nodeName]
if nodeExists {
if _, volumeExists := nodeObj.volumesToAttach[volumeName]; volumeExists {
return true
}
}
return false
}
func (dsw *desiredStateOfWorld) SetMultiAttachError(
volumeName v1.UniqueVolumeName,
nodeName k8stypes.NodeName) {
dsw.Lock()
defer dsw.Unlock()
nodeObj, nodeExists := dsw.nodesManaged[nodeName]
if nodeExists {
if volumeObj, volumeExists := nodeObj.volumesToAttach[volumeName]; volumeExists {
volumeObj.multiAttachErrorReported = true
dsw.nodesManaged[nodeName].volumesToAttach[volumeName] = volumeObj
}
}
}
// GetKeepTerminatedPodVolumesForNode determines if node wants volumes to be
// mounted and attached for terminated pods
func (dsw *desiredStateOfWorld) GetKeepTerminatedPodVolumesForNode(nodeName k8stypes.NodeName) bool {
dsw.RLock()
defer dsw.RUnlock()
if nodeName == "" {
return false
}
if node, ok := dsw.nodesManaged[nodeName]; ok {
return node.keepTerminatedPodVolumes
}
return false
}
func (dsw *desiredStateOfWorld) GetVolumesToAttach() []VolumeToAttach {
dsw.RLock()
defer dsw.RUnlock()
volumesToAttach := make([]VolumeToAttach, 0 /* len */, len(dsw.nodesManaged) /* cap */)
for nodeName, nodeObj := range dsw.nodesManaged {
for volumeName, volumeObj := range nodeObj.volumesToAttach {
volumesToAttach = append(volumesToAttach,
VolumeToAttach{
VolumeToAttach: operationexecutor.VolumeToAttach{
MultiAttachErrorReported: volumeObj.multiAttachErrorReported,
VolumeName: volumeName,
VolumeSpec: volumeObj.spec,
NodeName: nodeName,
ScheduledPods: getPodsFromMap(volumeObj.scheduledPods),
}})
}
}
return volumesToAttach
}
// Construct a list of v1.Pod objects from the given pod map
func getPodsFromMap(podMap map[types.UniquePodName]pod) []*v1.Pod {
pods := make([]*v1.Pod, 0, len(podMap))
for _, pod := range podMap {
pods = append(pods, pod.podObj)
}
return pods
}
func (dsw *desiredStateOfWorld) GetPodToAdd() map[types.UniquePodName]PodToAdd {
dsw.RLock()
defer dsw.RUnlock()
pods := make(map[types.UniquePodName]PodToAdd)
for nodeName, nodeObj := range dsw.nodesManaged {
for volumeName, volumeObj := range nodeObj.volumesToAttach {
for podUID, pod := range volumeObj.scheduledPods {
pods[podUID] = PodToAdd{
Pod: pod.podObj,
VolumeName: volumeName,
NodeName: nodeName,
}
}
}
}
return pods
}
func (dsw *desiredStateOfWorld) GetVolumePodsOnNodes(nodes []k8stypes.NodeName, volumeName v1.UniqueVolumeName) []*v1.Pod {
dsw.RLock()
defer dsw.RUnlock()
pods := []*v1.Pod{}
for _, nodeName := range nodes {
node, ok := dsw.nodesManaged[nodeName]
if !ok {
continue
}
volume, ok := node.volumesToAttach[volumeName]
if !ok {
continue
}
for _, pod := range volume.scheduledPods {
pods = append(pods, pod.podObj)
}
}
return pods
}
| pkg/controller/volume/attachdetach/cache/desired_state_of_world.go | 1 | https://github.com/kubernetes/kubernetes/commit/2e0abfa29f5c61e35677f164ca9694f93da61689 | [
0.9986674785614014,
0.14737100899219513,
0.00016373395919799805,
0.00431592995300889,
0.33919280767440796
] |
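The desired_state_of_world.go file in the record above keeps its state as nested maps (node -> volume -> scheduled pods) behind a single embedded RWMutex, and DeletePod drops a volume entry once its last referencing pod is removed. The stand-alone sketch below mirrors that shape with plain strings in place of the Kubernetes types so it compiles without the kubernetes/kubernetes tree; it is a simplified illustration, not the controller's implementation.

package main

import (
	"fmt"
	"sync"
)

// cache mirrors the nodes -> volumes -> pods shape used above, with one
// RWMutex guarding the whole structure.
type cache struct {
	sync.RWMutex
	nodes map[string]map[string]map[string]struct{} // node -> volume -> pod set
}

func newCache() *cache {
	return &cache{nodes: map[string]map[string]map[string]struct{}{}}
}

func (c *cache) AddNode(node string) {
	c.Lock()
	defer c.Unlock()
	if _, ok := c.nodes[node]; !ok {
		c.nodes[node] = map[string]map[string]struct{}{}
	}
}

func (c *cache) AddPod(pod, volume, node string) error {
	c.Lock()
	defer c.Unlock()
	vols, ok := c.nodes[node]
	if !ok {
		return fmt.Errorf("node %q is not managed", node)
	}
	if _, ok := vols[volume]; !ok {
		vols[volume] = map[string]struct{}{}
	}
	vols[volume][pod] = struct{}{}
	return nil
}

// DeletePod removes the pod and, like the real implementation, drops the
// volume entry once its last referencing pod is gone.
func (c *cache) DeletePod(pod, volume, node string) {
	c.Lock()
	defer c.Unlock()
	vols, ok := c.nodes[node]
	if !ok {
		return
	}
	pods, ok := vols[volume]
	if !ok {
		return
	}
	delete(pods, pod)
	if len(pods) == 0 {
		delete(vols, volume)
	}
}

func main() {
	c := newCache()
	c.AddNode("node1")
	_ = c.AddPod("pod-a", "vol-1", "node1")
	c.DeletePod("pod-a", "vol-1", "node1")
	fmt.Println(len(c.nodes["node1"])) // 0: volume removed with its last pod
}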
{
"id": 5,
"code_window": [
"\n",
"\tvolumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(\n",
"\t\tattachableVolumePlugin, volumeSpec)\n",
"\tif err != nil {\n",
"\t\treturn \"\", fmt.Errorf(\n",
"\t\t\t\"failed to GetUniqueVolumeNameFromSpec for volumeSpec %q err=%v\",\n",
"\t\t\tvolumeSpec.Name(),\n",
"\t\t\terr)\n",
"\t}\n",
"\n",
"\tvolumeObj, volumeExists := nodeObj.volumesToAttach[volumeName]\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\"failed to get UniqueVolumeName from volumeSpec for plugin=%q and volume=%q err=%v\",\n",
"\t\t\tattachableVolumePlugin.GetPluginName(),\n"
],
"file_path": "pkg/controller/volume/attachdetach/cache/desired_state_of_world.go",
"type": "replace",
"edit_start_line_idx": 237
} | /*
Copyright YEAR The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
| staging/src/k8s.io/apiextensions-apiserver/hack/boilerplate.go.txt | 0 | https://github.com/kubernetes/kubernetes/commit/2e0abfa29f5c61e35677f164ca9694f93da61689 | [
0.00017968278552871197,
0.00017967814346775413,
0.0001796735159587115,
0.00017967814346775413,
4.63479077339457e-9
] |
{
"id": 5,
"code_window": [
"\n",
"\tvolumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(\n",
"\t\tattachableVolumePlugin, volumeSpec)\n",
"\tif err != nil {\n",
"\t\treturn \"\", fmt.Errorf(\n",
"\t\t\t\"failed to GetUniqueVolumeNameFromSpec for volumeSpec %q err=%v\",\n",
"\t\t\tvolumeSpec.Name(),\n",
"\t\t\terr)\n",
"\t}\n",
"\n",
"\tvolumeObj, volumeExists := nodeObj.volumesToAttach[volumeName]\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\"failed to get UniqueVolumeName from volumeSpec for plugin=%q and volume=%q err=%v\",\n",
"\t\t\tattachableVolumePlugin.GetPluginName(),\n"
],
"file_path": "pkg/controller/volume/attachdetach/cache/desired_state_of_world.go",
"type": "replace",
"edit_start_line_idx": 237
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"position.go",
"token.go",
],
importpath = "github.com/hashicorp/hcl/json/token",
visibility = ["//visibility:public"],
deps = ["//vendor/github.com/hashicorp/hcl/hcl/token:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
| vendor/github.com/hashicorp/hcl/json/token/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/2e0abfa29f5c61e35677f164ca9694f93da61689 | [
0.00017827487317845225,
0.0001776412973413244,
0.00017710277461446822,
0.00017754627333488315,
4.832027684642526e-7
] |
{
"id": 5,
"code_window": [
"\n",
"\tvolumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(\n",
"\t\tattachableVolumePlugin, volumeSpec)\n",
"\tif err != nil {\n",
"\t\treturn \"\", fmt.Errorf(\n",
"\t\t\t\"failed to GetUniqueVolumeNameFromSpec for volumeSpec %q err=%v\",\n",
"\t\t\tvolumeSpec.Name(),\n",
"\t\t\terr)\n",
"\t}\n",
"\n",
"\tvolumeObj, volumeExists := nodeObj.volumesToAttach[volumeName]\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\"failed to get UniqueVolumeName from volumeSpec for plugin=%q and volume=%q err=%v\",\n",
"\t\t\tattachableVolumePlugin.GetPluginName(),\n"
],
"file_path": "pkg/controller/volume/attachdetach/cache/desired_state_of_world.go",
"type": "replace",
"edit_start_line_idx": 237
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package store
import (
"fmt"
"regexp"
)
const (
keyMaxLength = 250
keyCharFmt string = "[A-Za-z0-9]"
keyExtCharFmt string = "[-A-Za-z0-9_.]"
qualifiedKeyFmt string = "(" + keyCharFmt + keyExtCharFmt + "*)?" + keyCharFmt
)
var (
// Key must consist of alphanumeric characters, '-', '_' or '.', and must start
// and end with an alphanumeric character.
keyRegex = regexp.MustCompile("^" + qualifiedKeyFmt + "$")
// ErrKeyNotFound is the error returned if key is not found in Store.
ErrKeyNotFound = fmt.Errorf("key is not found")
)
// Store provides the interface for storing keyed data.
// Store must be thread-safe
type Store interface {
// key must contain one or more characters in [A-Za-z0-9]
// Write writes data with key.
Write(key string, data []byte) error
// Read retrieves data with key
// Read must return ErrKeyNotFound if key is not found.
Read(key string) ([]byte, error)
// Delete deletes data by key
// Delete must not return error if key does not exist
Delete(key string) error
// List lists all existing keys.
List() ([]string, error)
}
// ValidateKey returns an error if the given key does not meet the requirement
// of the key format and length.
func ValidateKey(key string) error {
if len(key) <= keyMaxLength && keyRegex.MatchString(key) {
return nil
}
return fmt.Errorf("invalid key: %q", key)
}
| pkg/kubelet/util/store/store.go | 0 | https://github.com/kubernetes/kubernetes/commit/2e0abfa29f5c61e35677f164ca9694f93da61689 | [
0.00017967575695365667,
0.0001720507862046361,
0.00016428896924480796,
0.00017199003195855767,
0.0000057389420362596866
] |
{
"id": 0,
"code_window": [
"\tif agg.order == partial {\n",
"\t\tif chunkSize > numInputRows || groupSize > chunkSize {\n",
"\t\t\treturn\n",
"\t\t}\n",
"\t}\n",
"\trng, _ := randutil.NewTestRand()\n",
"\tctx := context.Background()\n",
"\tevalCtx := eval.MakeTestingEvalContext(cluster.MakeTestingClusterSettings())\n",
"\tdefer evalCtx.Stop(ctx)\n",
"\taggMemAcc := evalCtx.TestingMon.MakeBoundAccount()\n",
"\tdefer aggMemAcc.Close(ctx)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\trng := randutil.NewTestRandWithSeed(17)\n"
],
"file_path": "pkg/sql/colexec/aggregators_test.go",
"type": "replace",
"edit_start_line_idx": 1000
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colexec
import (
"context"
"fmt"
"testing"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecbase"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexectestutils"
"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
"github.com/stretchr/testify/require"
)
type andOrTestCase struct {
tuples []colexectestutils.Tuple
expected []colexectestutils.Tuple
skipAllNullsInjection bool
}
var (
andTestCases []andOrTestCase
orTestCases []andOrTestCase
)
func init() {
andTestCases = []andOrTestCase{
// All variations of pairs separately first.
{
tuples: colexectestutils.Tuples{{false, true}},
expected: colexectestutils.Tuples{{false}},
},
{
tuples: colexectestutils.Tuples{{false, nil}},
expected: colexectestutils.Tuples{{false}},
},
{
tuples: colexectestutils.Tuples{{false, false}},
expected: colexectestutils.Tuples{{false}},
},
{
tuples: colexectestutils.Tuples{{true, true}},
expected: colexectestutils.Tuples{{true}},
},
{
tuples: colexectestutils.Tuples{{true, false}},
expected: colexectestutils.Tuples{{false}},
},
{
tuples: colexectestutils.Tuples{{true, nil}},
expected: colexectestutils.Tuples{{nil}},
// The case of {nil, nil} is explicitly tested below.
skipAllNullsInjection: true,
},
{
tuples: colexectestutils.Tuples{{nil, true}},
expected: colexectestutils.Tuples{{nil}},
// The case of {nil, nil} is explicitly tested below.
skipAllNullsInjection: true,
},
{
tuples: colexectestutils.Tuples{{nil, false}},
expected: colexectestutils.Tuples{{false}},
},
{
tuples: colexectestutils.Tuples{{nil, nil}},
expected: colexectestutils.Tuples{{nil}},
},
// Now all variations of pairs combined together to make sure that nothing
// funky going on with multiple tuples.
{
tuples: colexectestutils.Tuples{
{false, true}, {false, nil}, {false, false},
{true, true}, {true, false}, {true, nil},
{nil, true}, {nil, false}, {nil, nil},
},
expected: colexectestutils.Tuples{
{false}, {false}, {false},
{true}, {false}, {nil},
{nil}, {false}, {nil},
},
},
}
orTestCases = []andOrTestCase{
// All variations of pairs separately first.
{
tuples: colexectestutils.Tuples{{false, true}},
expected: colexectestutils.Tuples{{true}},
},
{
tuples: colexectestutils.Tuples{{false, nil}},
expected: colexectestutils.Tuples{{nil}},
// The case of {nil, nil} is explicitly tested below.
skipAllNullsInjection: true,
},
{
tuples: colexectestutils.Tuples{{false, false}},
expected: colexectestutils.Tuples{{false}},
},
{
tuples: colexectestutils.Tuples{{true, true}},
expected: colexectestutils.Tuples{{true}},
},
{
tuples: colexectestutils.Tuples{{true, false}},
expected: colexectestutils.Tuples{{true}},
},
{
tuples: colexectestutils.Tuples{{true, nil}},
expected: colexectestutils.Tuples{{true}},
},
{
tuples: colexectestutils.Tuples{{nil, true}},
expected: colexectestutils.Tuples{{true}},
},
{
tuples: colexectestutils.Tuples{{nil, false}},
expected: colexectestutils.Tuples{{nil}},
// The case of {nil, nil} is explicitly tested below.
skipAllNullsInjection: true,
},
{
tuples: colexectestutils.Tuples{{nil, nil}},
expected: colexectestutils.Tuples{{nil}},
},
// Now all variations of pairs combined together to make sure that nothing
// funky going on with multiple tuples.
{
tuples: colexectestutils.Tuples{
{false, true}, {false, nil}, {false, false},
{true, true}, {true, false}, {true, nil},
{nil, true}, {nil, false}, {nil, nil},
},
expected: colexectestutils.Tuples{
{true}, {nil}, {false},
{true}, {true}, {true},
{true}, {nil}, {nil},
},
},
}
}
func TestAndOrOps(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
st := cluster.MakeTestingClusterSettings()
evalCtx := eval.MakeTestingEvalContext(st)
defer evalCtx.Stop(ctx)
flowCtx := &execinfra.FlowCtx{
EvalCtx: &evalCtx,
Mon: evalCtx.TestingMon,
Cfg: &execinfra.ServerConfig{
Settings: st,
},
}
for _, test := range []struct {
operation string
cases []andOrTestCase
}{
{
operation: "AND",
cases: andTestCases,
},
{
operation: "OR",
cases: orTestCases,
},
} {
t.Run(test.operation, func(t *testing.T) {
for _, tc := range test.cases {
var runner colexectestutils.TestRunner
if tc.skipAllNullsInjection {
// We're omitting all nulls injection test. See comments for each such
// test case.
runner = colexectestutils.RunTestsWithoutAllNullsInjection
} else {
runner = colexectestutils.RunTestsWithTyps
}
runner(
t,
testAllocator,
[]colexectestutils.Tuples{tc.tuples},
[][]*types.T{{types.Bool, types.Bool}},
tc.expected,
colexectestutils.OrderedVerifier,
func(input []colexecop.Operator) (colexecop.Operator, error) {
projOp, err := colexectestutils.CreateTestProjectingOperator(
ctx, flowCtx, input[0], []*types.T{types.Bool, types.Bool},
fmt.Sprintf("@1 %s @2", test.operation), testMemAcc,
)
if err != nil {
return nil, err
}
// We will project out the first two columns in order
// to have test cases be less verbose.
return colexecbase.NewSimpleProjectOp(projOp, 3 /* numInputCols */, []uint32{2}), nil
})
}
})
}
}
func benchmarkLogicalProjOp(
b *testing.B, operation string, useSelectionVector bool, hasNulls bool,
) {
ctx := context.Background()
st := cluster.MakeTestingClusterSettings()
evalCtx := eval.MakeTestingEvalContext(st)
defer evalCtx.Stop(ctx)
flowCtx := &execinfra.FlowCtx{
EvalCtx: &evalCtx,
Mon: evalCtx.TestingMon,
Cfg: &execinfra.ServerConfig{
Settings: st,
},
}
rng, _ := randutil.NewTestRand()
batch := testAllocator.NewMemBatchWithMaxCapacity([]*types.T{types.Bool, types.Bool})
col1 := batch.ColVec(0).Bool()
col2 := batch.ColVec(0).Bool()
for i := 0; i < coldata.BatchSize(); i++ {
col1[i] = rng.Float64() < 0.5
col2[i] = rng.Float64() < 0.5
}
if hasNulls {
nulls1 := batch.ColVec(0).Nulls()
nulls2 := batch.ColVec(0).Nulls()
for i := 0; i < coldata.BatchSize(); i++ {
if rng.Float64() < 0.1 {
nulls1.SetNull(i)
}
if rng.Float64() < 0.1 {
nulls2.SetNull(i)
}
}
}
batch.SetLength(coldata.BatchSize())
if useSelectionVector {
batch.SetSelection(true)
sel := batch.Selection()
for i := 0; i < coldata.BatchSize(); i++ {
sel[i] = i
}
}
typs := []*types.T{types.Bool, types.Bool}
input := colexecop.NewRepeatableBatchSource(testAllocator, batch, typs)
logicalProjOp, err := colexectestutils.CreateTestProjectingOperator(
ctx, flowCtx, input, typs, fmt.Sprintf("@1 %s @2", operation), testMemAcc,
)
require.NoError(b, err)
logicalProjOp.Init(ctx)
b.SetBytes(int64(8 * coldata.BatchSize()))
for i := 0; i < b.N; i++ {
logicalProjOp.Next()
}
}
func BenchmarkLogicalProjOp(b *testing.B) {
for _, operation := range []string{"AND", "OR"} {
for _, useSel := range []bool{true, false} {
for _, hasNulls := range []bool{true, false} {
b.Run(fmt.Sprintf("%s,useSel=%t,hasNulls=%t", operation, useSel, hasNulls), func(b *testing.B) {
benchmarkLogicalProjOp(b, operation, useSel, hasNulls)
})
}
}
}
}
| pkg/sql/colexec/and_or_projection_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/ada419810cf861594978bc5e997e1be096a7d35e | [
0.9922055602073669,
0.10044915974140167,
0.00016587451682426035,
0.00017531892808619887,
0.2944004535675049
] |
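The aggregator hunk repeated around this record (id 0) swaps randutil.NewTestRand() for randutil.NewTestRandWithSeed(17), pinning the benchmark's random input so runs are comparable. The sketch below shows the same idea with the standard library only; the CockroachDB randutil helpers and benchmark harness are not reproduced here, and the package and function names are placeholders.

package bench

import (
	"math/rand"
	"testing"
)

// BenchmarkSumFixedSeed builds its random input from a fixed seed so every
// run of the benchmark measures work over identical data.
func BenchmarkSumFixedSeed(b *testing.B) {
	rng := rand.New(rand.NewSource(17)) // fixed seed: same inputs every run
	data := make([]int64, 1024)
	for i := range data {
		data[i] = rng.Int63()
	}
	b.ResetTimer()
	var sum int64
	for i := 0; i < b.N; i++ {
		sum = 0
		for _, v := range data {
			sum += v
		}
	}
	_ = sum
}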
{
"id": 0,
"code_window": [
"\tif agg.order == partial {\n",
"\t\tif chunkSize > numInputRows || groupSize > chunkSize {\n",
"\t\t\treturn\n",
"\t\t}\n",
"\t}\n",
"\trng, _ := randutil.NewTestRand()\n",
"\tctx := context.Background()\n",
"\tevalCtx := eval.MakeTestingEvalContext(cluster.MakeTestingClusterSettings())\n",
"\tdefer evalCtx.Stop(ctx)\n",
"\taggMemAcc := evalCtx.TestingMon.MakeBoundAccount()\n",
"\tdefer aggMemAcc.Close(ctx)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\trng := randutil.NewTestRandWithSeed(17)\n"
],
"file_path": "pkg/sql/colexec/aggregators_test.go",
"type": "replace",
"edit_start_line_idx": 1000
} | exec-ddl
CREATE TABLE kv (k INT PRIMARY KEY, v INT)
----
exec-ddl
CREATE TABLE abcd (
a INT,
b INT,
c INT,
d INT,
INDEX (a,b) STORING (c),
INDEX (c,b,a) STORING (d),
INDEX (d,c,a),
INDEX (d,c,b)
)
----
exec-ddl
CREATE TABLE xyz (
x INT PRIMARY KEY,
y INT,
z INT
)
----
placeholder-fast-path
SELECT * FROM kv WHERE k = $1
----
placeholder-scan kv
├── columns: k:1!null v:2
├── cardinality: [0 - 1]
├── has-placeholder
├── stats: [rows=1, distinct(1)=1, null(1)=0]
├── key: ()
├── fd: ()-->(1,2)
└── span
└── $1
placeholder-fast-path
SELECT * FROM kv WHERE k = $1 FOR UPDATE
----
placeholder-scan kv
├── columns: k:1!null v:2
├── locking: for-update
├── cardinality: [0 - 1]
├── volatile, has-placeholder
├── stats: [rows=1, distinct(1)=1, null(1)=0]
├── key: ()
├── fd: ()-->(1,2)
└── span
└── $1
placeholder-fast-path
SELECT k FROM kv WHERE k = $1
----
placeholder-scan kv
├── columns: k:1!null
├── cardinality: [0 - 1]
├── has-placeholder
├── stats: [rows=1, distinct(1)=1, null(1)=0]
├── key: ()
├── fd: ()-->(1)
└── span
└── $1
placeholder-fast-path
SELECT k FROM kv WHERE k IN ($1)
----
placeholder-scan kv
├── columns: k:1!null
├── cardinality: [0 - 1]
├── has-placeholder
├── stats: [rows=1, distinct(1)=1, null(1)=0]
├── key: ()
├── fd: ()-->(1)
└── span
└── $1
placeholder-fast-path
SELECT v FROM kv WHERE k = $1
----
placeholder-scan kv
├── columns: v:2
├── cardinality: [0 - 1]
├── has-placeholder
├── stats: [rows=1]
├── key: ()
├── fd: ()-->(2)
└── span
└── $1
# Fast path not available when we're projecting a new expression.
placeholder-fast-path
SELECT v+1 FROM kv WHERE k = $1
----
no fast path
# The fast path should not kick in because the estimated row count is too high.
placeholder-fast-path
SELECT a, b, c FROM abcd WHERE a=$1 AND b=$2
----
no fast path
# Now inject statistics so that the estimated row count is small.
exec-ddl
ALTER TABLE abcd INJECT STATISTICS '[
{
"columns": ["a"],
"created_at": "2018-05-01 1:00:00.00000+00:00",
"row_count": 10,
"distinct_count": 5
}
]'
----
# The fast path should now kick in.
placeholder-fast-path
SELECT a, b, c FROM abcd WHERE a=$1 AND b=$2
----
placeholder-scan abcd@abcd_a_b_idx
├── columns: a:1!null b:2!null c:3
├── has-placeholder
├── stats: [rows=1.1, distinct(1)=1.1, null(1)=0, distinct(2)=1, null(2)=0]
├── fd: ()-->(1,2)
└── span
├── $1
└── $2
placeholder-fast-path
SELECT a, b, c FROM abcd WHERE b=$1 AND a=$2
----
placeholder-scan abcd@abcd_a_b_idx
├── columns: a:1!null b:2!null c:3
├── has-placeholder
├── stats: [rows=1.1, distinct(1)=1.1, null(1)=0, distinct(2)=1, null(2)=0]
├── fd: ()-->(1,2)
└── span
├── $2
└── $1
# One constant value, one placeholder.
placeholder-fast-path
SELECT a, b, c FROM abcd WHERE a=0 AND b=$1
----
placeholder-scan abcd@abcd_a_b_idx
├── columns: a:1!null b:2!null c:3
├── has-placeholder
├── stats: [rows=0.666, distinct(1)=0.666, null(1)=0, distinct(2)=0.666, null(2)=0, distinct(1,2)=0.666, null(1,2)=0]
├── fd: ()-->(1,2)
└── span
├── 0
└── $1
placeholder-fast-path
SELECT a, b, c FROM abcd WHERE a=$1 AND b=0
----
placeholder-scan abcd@abcd_a_b_idx
├── columns: a:1!null b:2!null c:3
├── has-placeholder
├── stats: [rows=3.3, distinct(1)=3.3, null(1)=0, distinct(2)=1, null(2)=0]
├── fd: ()-->(1,2)
└── span
├── $1
└── 0
# Constant folding is allowed (for immutable operators).
placeholder-fast-path
SELECT a, b, c FROM abcd WHERE a=1+2 AND b=$1
----
placeholder-scan abcd@abcd_a_b_idx
├── columns: a:1!null b:2!null c:3
├── has-placeholder
├── stats: [rows=0.666, distinct(1)=0.666, null(1)=0, distinct(2)=0.666, null(2)=0, distinct(1,2)=0.666, null(1,2)=0]
├── fd: ()-->(1,2)
└── span
├── 3
└── $1
placeholder-fast-path
SELECT a, b, c FROM abcd WHERE a=fnv32a('foo') AND b=$1
----
placeholder-scan abcd@abcd_a_b_idx
├── columns: a:1!null b:2!null c:3
├── has-placeholder
├── stats: [rows=0.666, distinct(1)=0.666, null(1)=0, distinct(2)=0.666, null(2)=0, distinct(1,2)=0.666, null(1,2)=0]
├── fd: ()-->(1,2)
└── span
├── 2851307223
└── $1
# Fast path not available when value is not constant-foldable.
placeholder-fast-path
SELECT a, b, c FROM abcd WHERE a=now()::string::int AND b=$1
----
no fast path
placeholder-fast-path
SELECT a, b, c FROM abcd WHERE a=0 AND b=$1+1
----
no fast path
# Fast path not available when we have an ordering requirement.
placeholder-fast-path
SELECT a, b, c FROM abcd WHERE a=$1 AND b=$2 ORDER BY c
----
no fast path
# Fast path not available when we have a limit.
placeholder-fast-path
SELECT a, b, c FROM abcd WHERE a=$1 AND b=$2 LIMIT 1
----
no fast path
# Fast path not available when index is not covering.
placeholder-fast-path
SELECT a, b, c, d FROM abcd WHERE a=$1 AND b=$2
----
no fast path
# Fast path not available when two indexes are possible.
placeholder-fast-path
SELECT d FROM abcd WHERE d=$1 AND c=$2
----
no fast path
# Now we have only one covering index.
placeholder-fast-path
SELECT a, d FROM abcd WHERE d=$1 AND c=$2
----
placeholder-scan abcd@abcd_d_c_a_idx
├── columns: a:1 d:4!null
├── has-placeholder
├── stats: [rows=1.0989]
├── fd: ()-->(4)
└── span
├── $1
└── $2
exec-ddl
CREATE TABLE kj (
k INT PRIMARY KEY,
j JSON,
INVERTED INDEX(j)
)
----
# Verify that we don't incorrectly use an inverted index.
placeholder-fast-path
SELECT j FROM kj WHERE j = '{"foo": "bar"}'::JSON
----
no fast path
exec-ddl
CREATE TABLE partial1 (
k INT PRIMARY KEY,
a INT,
b INT,
c INT,
INDEX partial_ab(a, b) WHERE (c = 0),
INDEX cab(c, a, b),
INDEX pseudo_ab(a, b) WHERE (1 = 1)
)
----
# The fast path is conditional on having a small estimated row count. Inject
# statistics so that we don't have to worry about this aspect in tests.
exec-ddl
ALTER TABLE partial1 INJECT STATISTICS '[
{
"columns": ["k"],
"created_at": "2018-05-01 1:00:00.00000+00:00",
"row_count": 10,
"distinct_count": 10
}
]'
----
# Make sure the fast path doesn't choose the cab index, getting in the way of
# using partial_ab (which might be the better index when the placeholder is 0).
placeholder-fast-path
SELECT a, b FROM partial1 WHERE c = $1
----
no fast path
# Ok to ignore the partial index when the filters don't involve predicate
# columns; and, ok to use a pseudo-partial index.
placeholder-fast-path
SELECT a, b FROM partial1 WHERE a = $1
----
placeholder-scan partial1@pseudo_ab,partial
├── columns: a:2!null b:3
├── has-placeholder
├── stats: [rows=3.3, distinct(2)=1, null(2)=0]
├── fd: ()-->(2)
└── span
└── $1
# Regression test for #64765 - we cannot constrain both columns.
placeholder-fast-path
SELECT * FROM xyz WHERE x = $1 AND y = $2
----
no fast path
# Regression test for #81315. Do not use the placeholder fast path
# if the types do not match.
exec-ddl
CREATE TABLE t_dec (a DECIMAL NOT NULL PRIMARY KEY, b INT);
----
# TODO(rytaft): We may be able to use the placeholder fast path for
# this case if we add logic similar to UnifyComparisonTypes.
placeholder-fast-path
SELECT * FROM t_dec WHERE a = $1::INT8;
----
no fast path
placeholder-fast-path
SELECT * FROM t_dec WHERE a = $1;
----
placeholder-scan t_dec
├── columns: a:1!null b:2
├── cardinality: [0 - 1]
├── immutable, has-placeholder
├── stats: [rows=1, distinct(1)=1, null(1)=0]
├── key: ()
├── fd: ()-->(1,2)
└── span
└── $1
| pkg/sql/opt/xform/testdata/placeholder-fast-path/scan | 0 | https://github.com/cockroachdb/cockroach/commit/ada419810cf861594978bc5e997e1be096a7d35e | [
0.0002552511286921799,
0.00017967485473491251,
0.00017037185898516327,
0.00017602161096874624,
0.000017162445146823302
] |
{
"id": 0,
"code_window": [
"\tif agg.order == partial {\n",
"\t\tif chunkSize > numInputRows || groupSize > chunkSize {\n",
"\t\t\treturn\n",
"\t\t}\n",
"\t}\n",
"\trng, _ := randutil.NewTestRand()\n",
"\tctx := context.Background()\n",
"\tevalCtx := eval.MakeTestingEvalContext(cluster.MakeTestingClusterSettings())\n",
"\tdefer evalCtx.Stop(ctx)\n",
"\taggMemAcc := evalCtx.TestingMon.MakeBoundAccount()\n",
"\tdefer aggMemAcc.Close(ctx)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\trng := randutil.NewTestRandWithSeed(17)\n"
],
"file_path": "pkg/sql/colexec/aggregators_test.go",
"type": "replace",
"edit_start_line_idx": 1000
} | #! /usr/bin/env expect -f
source [file join [file dirname $argv0] common.tcl]
set certs_dir "/certs"
set home "/home/roach"
set ::env(COCKROACH_INSECURE) "false"
set ::env(COCKROACH_HOST) "localhost"
proc start_secure_server {argv certs_dir extra} {
report "BEGIN START SECURE SERVER"
system "$argv start-single-node --host=localhost --socket-dir=. --certs-dir=$certs_dir --pid-file=server_pid -s=path=logs/db --background $extra >>expect-cmd.log 2>&1;
$argv sql --certs-dir=$certs_dir -e 'select 1'"
report "END START SECURE SERVER"
}
start_secure_server $argv $certs_dir ""
spawn /bin/bash
send "PS1=':''/# '\r"
set prompt ":/# "
eexpect $prompt
send "$argv sql --no-line-editor --certs-dir=$certs_dir\r"
eexpect root@
start_test "Test setting password"
send "drop user if exists myuser;\r"
eexpect "DROP ROLE"
eexpect root@
eexpect "/defaultdb>"
# NB: the user cannot change their own password unless
# they have the createlogin and createrole options.
send "create user myuser with createrole createlogin;\r"
eexpect "CREATE ROLE"
eexpect root@
eexpect "/defaultdb>"
send "\\password myuser\r"
eexpect "Enter password: "
send "123\r"
eexpect "Enter it again: "
send "123\r"
eexpect "ALTER ROLE"
eexpect root@
# check SQL injection
send "\\password a;b\r"
eexpect "Enter password: "
send "123\r"
eexpect "Enter it again: "
send "123\r"
eexpect "ERROR: role/user \"a;b\" does not exist"
eexpect root@
send "\\password myuser\r"
eexpect "Enter password: "
send "123\r"
eexpect "Enter it again: "
send "124\r"
eexpect "passwords didn't match"
eexpect root@
eexpect "/defaultdb>"
send "\\q\r"
eexpect $prompt
end_test
start_test "Test connect to crdb with password"
send "$argv sql --no-line-editor --certs-dir=$certs_dir --user=myuser\r"
eexpect "Enter password:"
send "123\r"
eexpect myuser@
end_test
start_test "Test change own password"
send "\\password\r"
eexpect "Enter password: "
send "124\r"
eexpect "Enter it again: "
send "124\r"
eexpect "ALTER ROLE"
eexpect myuser@
end_test
send "\\q\r"
eexpect $prompt
start_test "Test connect to crdb with new own password"
send "$argv sql --no-line-editor --certs-dir=$certs_dir --user=myuser\r"
eexpect "Enter password:"
send "124\r"
eexpect myuser@
end_test
send "\\q\r"
eexpect $prompt
start_test "Log in with wrong password"
send "$argv sql --no-line-editor --certs-dir=$certs_dir --user=myuser\r"
eexpect "Enter password:"
send "125\r"
eexpect "password authentication failed"
eexpect $prompt
end_test
start_test "Log in using pgpass file"
system "echo 'localhost:*:*:myuser:124' > $home/.pgpass"
send "$argv sql --no-line-editor --certs-dir=$certs_dir --user=myuser\r"
eexpect myuser@
eexpect "defaultdb>"
send "\\q\r"
eexpect $prompt
system "rm $home/.pgpass"
end_test
start_test "Log in using custom pgpass file"
system "echo 'localhost:*:*:myuser:125' > $home/.pgpass"
system "echo 'localhost:*:*:myuser:124' > $home/my_pgpass"
send "export PGPASSFILE=$home/my_pgpass\r"
send "$argv sql --no-line-editor --certs-dir=$certs_dir --user=myuser\r"
eexpect myuser@
eexpect "defaultdb>"
send "\\q\r"
eexpect $prompt
system "rm $home/.pgpass"
system "rm $home/my_pgpass"
send "unset PGPASSFILE\r"
end_test
start_test "Log in using pgservicefile and custom pgpass"
send "export PGDATABASE=postgres\r"
system "echo 'localhost:*:*:myuser:124' > $home/my_pgpass"
system "echo '
# servicefile should override environment variables
\[myservice\]
host=localhost
port=26257
dbname=defaultdb
user=myuser
passfile=$home/my_pgpass
' > $home/.pg_service.conf"
send "$argv sql --no-line-editor --url='postgres://myuser@localhost?service=myservice&sslrootcert=$certs_dir/ca.crt'\r"
eexpect myuser@
eexpect "defaultdb>"
send "\\q\r"
send "unset PGDATABASE\r"
system "rm $home/.pg_service.conf"
system "rm $home/my_pgpass"
eexpect $prompt
end_test
start_test "Log in using custom pgservicefile with default root cert"
system "mkdir $home/.postgresql/ && cp $certs_dir/ca.crt $home/.postgresql/root.crt"
system "echo '
\[myservice\]
host=localhost
port=26257
dbname=postgres
user=myuser
password=124
' > $home/my_pg_service.conf"
send "$argv sql --url='postgres://myuser@localhost?service=myservice&servicefile=$home/my_pg_service.conf'\r"
eexpect myuser@
eexpect "postgres>"
send "\\q\r"
system "rm $home/my_pg_service.conf"
eexpect $prompt
end_test
send "exit 0\r"
eexpect eof
stop_server $argv
| pkg/cli/interactive_tests/test_password.tcl | 0 | https://github.com/cockroachdb/cockroach/commit/ada419810cf861594978bc5e997e1be096a7d35e | [
0.00017742611817084253,
0.0001734385296003893,
0.0001664171286392957,
0.00017468055011704564,
0.0000029943714707769686
] |
{
"id": 0,
"code_window": [
"\tif agg.order == partial {\n",
"\t\tif chunkSize > numInputRows || groupSize > chunkSize {\n",
"\t\t\treturn\n",
"\t\t}\n",
"\t}\n",
"\trng, _ := randutil.NewTestRand()\n",
"\tctx := context.Background()\n",
"\tevalCtx := eval.MakeTestingEvalContext(cluster.MakeTestingClusterSettings())\n",
"\tdefer evalCtx.Stop(ctx)\n",
"\taggMemAcc := evalCtx.TestingMon.MakeBoundAccount()\n",
"\tdefer aggMemAcc.Close(ctx)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\trng := randutil.NewTestRandWithSeed(17)\n"
],
"file_path": "pkg/sql/colexec/aggregators_test.go",
"type": "replace",
"edit_start_line_idx": 1000
} | load("//build/bazelutil/unused_checker:unused.bzl", "get_x_data")
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
load("//pkg/testutils/buildutil:buildutil.bzl", "disallowed_imports_test")
go_library(
name = "json",
srcs = [
"config.go",
"contains.go",
"contains_testers.go",
"encode.go",
"encoded.go",
"iterator.go",
"jentry.go",
"json.go",
"parser.go",
"random.go",
"tables.go",
],
importpath = "github.com/cockroachdb/cockroach/pkg/util/json",
visibility = ["//visibility:public"],
deps = [
"//pkg/geo",
"//pkg/geo/geopb",
"//pkg/keysbase",
"//pkg/sql/inverted",
"//pkg/sql/pgwire/pgcode",
"//pkg/sql/pgwire/pgerror",
"//pkg/util",
"//pkg/util/buildutil",
"//pkg/util/encoding",
"//pkg/util/intsets",
"//pkg/util/json/tokenizer",
"//pkg/util/randutil",
"//pkg/util/syncutil",
"//pkg/util/unique",
"@com_github_cockroachdb_apd_v3//:apd",
"@com_github_cockroachdb_errors//:errors",
],
)
go_test(
name = "json_test",
size = "small",
srcs = [
"encode_test.go",
"json_test.go",
],
args = ["-test.timeout=55s"],
data = glob(["testdata/**"]),
embed = [":json"],
deps = [
"//pkg/sql/inverted",
"//pkg/sql/pgwire/pgerror",
"//pkg/testutils/datapathutils",
"//pkg/util/encoding",
"//pkg/util/randutil",
"//pkg/util/timeutil",
"//pkg/util/unique",
"@com_github_cockroachdb_apd_v3//:apd",
"@com_github_cockroachdb_redact//:redact",
"@com_github_stretchr_testify//require",
],
)
disallowed_imports_test(
"json",
["//pkg/roachpb"],
)
get_x_data(name = "get_x_data")
| pkg/util/json/BUILD.bazel | 0 | https://github.com/cockroachdb/cockroach/commit/ada419810cf861594978bc5e997e1be096a7d35e | [
0.00017733528511598706,
0.00017575564561411738,
0.000174104847246781,
0.0001759322185534984,
9.603622856957372e-7
] |
{
"id": 1,
"code_window": [
"\t\tCfg: &execinfra.ServerConfig{\n",
"\t\t\tSettings: st,\n",
"\t\t},\n",
"\t}\n",
"\trng, _ := randutil.NewTestRand()\n",
"\n",
"\tbatch := testAllocator.NewMemBatchWithMaxCapacity([]*types.T{types.Bool, types.Bool})\n",
"\tcol1 := batch.ColVec(0).Bool()\n",
"\tcol2 := batch.ColVec(0).Bool()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\trng := randutil.NewTestRandWithSeed(91)\n"
],
"file_path": "pkg/sql/colexec/and_or_projection_test.go",
"type": "replace",
"edit_start_line_idx": 236
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colexec
import (
"context"
"fmt"
"math"
"testing"
"github.com/cockroachdb/apd/v3"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/col/coldatatestutils"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecagg"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexectestutils"
"github.com/cockroachdb/cockroach/pkg/sql/colexecerror"
"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/util/duration"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
"github.com/cockroachdb/cockroach/pkg/util/timeofday"
"github.com/stretchr/testify/require"
)
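// aggregatorTestCase describes a single aggregation scenario: the input tuples,
// the grouping and aggregation columns, and the expected output tuples.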
type aggregatorTestCase struct {
name string
typs []*types.T
input colexectestutils.Tuples
groupCols []uint32
aggCols [][]uint32
aggFns []execinfrapb.AggregatorSpec_Func
expected colexectestutils.Tuples
constArguments [][]execinfrapb.Expression
// spec will be populated during init().
spec *execinfrapb.AggregatorSpec
aggDistinct []bool
aggFilter []int
unorderedInput bool
orderedCols []uint32
// convToDecimal will convert any float64s to apd.Decimals. If a string is
// encountered, a best effort is made to convert that string to an
// apd.Decimal.
convToDecimal bool
}
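// ordering describes the ordering guarantee of the input on its grouping
// columns (fully ordered, partially ordered, or unordered).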
type ordering int64
const (
ordered ordering = iota
partial
unordered
)
// aggType is a helper struct that allows tests to test both the ordered and
// hash aggregators at the same time.
type aggType struct {
new func(context.Context, *colexecagg.NewAggregatorArgs) colexecop.ResettableOperator
name string
order ordering
}
var aggTypesWithPartial = []aggType{
{
// This is a wrapper around NewHashAggregator so its signature is
// compatible with NewOrderedAggregator.
new: func(ctx context.Context, args *colexecagg.NewAggregatorArgs) colexecop.ResettableOperator {
return NewHashAggregator(
ctx,
&colexecagg.NewHashAggregatorArgs{
NewAggregatorArgs: args,
HashTableAllocator: testAllocator,
OutputUnlimitedAllocator: testAllocator,
MaxOutputBatchMemSize: math.MaxInt64,
},
nil, /* newSpillingQueueArgs */
)
},
name: "hash",
order: unordered,
},
{
new: NewOrderedAggregator,
name: "ordered",
order: ordered,
},
{
// This is a wrapper around NewHashAggregator so its signature is
// compatible with NewOrderedAggregator.
new: func(ctx context.Context, args *colexecagg.NewAggregatorArgs) colexecop.ResettableOperator {
return NewHashAggregator(
ctx,
&colexecagg.NewHashAggregatorArgs{
NewAggregatorArgs: args,
HashTableAllocator: testAllocator,
OutputUnlimitedAllocator: testAllocator,
MaxOutputBatchMemSize: math.MaxInt64,
},
nil, /* newSpillingQueueArgs */
)
},
name: "hash-partial-order",
order: partial,
},
}
var aggTypes = aggTypesWithPartial[:2]
func (tc *aggregatorTestCase) init() error {
if tc.convToDecimal {
for _, tuples := range []colexectestutils.Tuples{tc.input, tc.expected} {
for _, tuple := range tuples {
for i, e := range tuple {
switch v := e.(type) {
case float64:
d := &apd.Decimal{}
d, err := d.SetFloat64(v)
if err != nil {
return err
}
tuple[i] = *d
case string:
d := &apd.Decimal{}
d, _, err := d.SetString(v)
if err != nil {
// If there was an error converting the string to decimal, just
// leave the datum as is.
continue
}
tuple[i] = *d
}
}
}
}
}
aggregations := make([]execinfrapb.AggregatorSpec_Aggregation, len(tc.aggFns))
for i, aggFn := range tc.aggFns {
aggregations[i].Func = aggFn
aggregations[i].ColIdx = tc.aggCols[i]
if tc.constArguments != nil {
aggregations[i].Arguments = tc.constArguments[i]
}
if tc.aggDistinct != nil {
aggregations[i].Distinct = tc.aggDistinct[i]
}
if tc.aggFilter != nil && tc.aggFilter[i] != tree.NoColumnIdx {
filterColIdx := uint32(tc.aggFilter[i])
aggregations[i].FilterColIdx = &filterColIdx
}
}
tc.spec = &execinfrapb.AggregatorSpec{
GroupCols: tc.groupCols,
Aggregations: aggregations,
}
if !tc.unorderedInput {
var outputOrderCols []uint32
if len(tc.orderedCols) == 0 {
outputOrderCols = tc.spec.GroupCols
} else {
outputOrderCols = tc.orderedCols
tc.spec.OrderedGroupCols = tc.orderedCols
}
// If input grouping columns have an ordering, then we'll require the
// output to also have the same ordering.
outputOrdering := execinfrapb.Ordering{Columns: make([]execinfrapb.Ordering_Column, len(outputOrderCols))}
for i, col := range outputOrderCols {
outputOrdering.Columns[i].ColIdx = col
}
tc.spec.OutputOrdering = outputOrdering
}
return nil
}
var aggregatorsTestCases = []aggregatorTestCase{
{
name: "OneTuple",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{0, 1},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 1},
},
},
{
name: "OneGroup",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{0, 1},
{0, 1},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 2},
},
},
{
name: "MultiGroup",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{0, 1},
{0, 0},
{0, 1},
{1, 4},
{2, 5},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 2},
{1, 4},
{2, 5},
},
},
{
name: "CarryBetweenInputBatches",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{0, 1},
{0, 2},
{0, 3},
{1, 4},
{1, 5},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 6},
{1, 9},
},
},
{
name: "CarryBetweenOutputBatches",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{0, 1},
{0, 2},
{0, 3},
{0, 4},
{1, 5},
{2, 6},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 10},
{1, 5},
{2, 6},
},
},
{
name: "CarryBetweenInputAndOutputBatches",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{0, 1},
{0, 1},
{1, 2},
{2, 3},
{2, 3},
{3, 4},
{3, 4},
{4, 5},
{5, 6},
{6, 7},
{7, 8},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 2},
{1, 2},
{2, 6},
{3, 8},
{4, 5},
{5, 6},
{6, 7},
{7, 8},
},
},
{
name: "NoGroupingCols",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{0, 1},
{0, 2},
{0, 3},
{0, 4},
},
groupCols: []uint32{},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 10},
},
},
{
name: "UnorderedWithNullsInGroupingCol",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{nil, 1},
{4, 42},
{nil, 2},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{nil, 3},
{4, 42},
},
unorderedInput: true,
},
{
name: "CountRows",
typs: types.OneIntCol,
input: colexectestutils.Tuples{
{1},
{2},
{1},
{nil},
{3},
{1},
{3},
{4},
{1},
{nil},
{2},
{4},
{2},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.CountRows,
},
expected: colexectestutils.Tuples{
{nil, 2},
{1, 4},
{2, 3},
{3, 2},
{4, 2},
},
unorderedInput: true,
},
{
name: "OutputOrder",
typs: types.ThreeIntCols,
input: colexectestutils.Tuples{
{0, 1, 2},
{0, 1, 2},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {2}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 4, 2},
},
},
{
name: "SumMultiType",
typs: []*types.T{types.Int, types.Int, types.Decimal},
input: colexectestutils.Tuples{
{0, 1, 1.3},
{0, 1, 1.6},
{0, 1, 0.5},
{1, 1, 1.2},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {2}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.Sum,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 3.4, 3},
{1, 1.2, 1},
},
convToDecimal: true,
},
{
name: "AvgSumSingleInputBatch",
typs: []*types.T{types.Int, types.Decimal},
input: colexectestutils.Tuples{
{0, 1.1},
{0, 1.2},
{0, 2.3},
{1, 6.21},
{1, 2.43},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.Avg,
execinfrapb.Sum,
},
expected: colexectestutils.Tuples{
{0, "1.5333333333333333333", 4.6},
{1, "4.3200000000000000000", 8.64},
},
convToDecimal: true,
},
{
name: "BoolAndOrBatch",
typs: []*types.T{types.Int, types.Bool},
input: colexectestutils.Tuples{
{0, true},
{1, false},
{2, true},
{2, false},
{3, true},
{3, true},
{4, false},
{4, false},
{5, false},
{5, nil},
{6, nil},
{6, true},
{7, nil},
{7, false},
{7, true},
{8, nil},
{8, nil},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.BoolAnd,
execinfrapb.BoolOr,
},
expected: colexectestutils.Tuples{
{0, true, true},
{1, false, false},
{2, false, true},
{3, true, true},
{4, false, false},
{5, false, false},
{6, true, true},
{7, false, true},
{8, nil, nil},
},
},
{
name: "MultiGroupColsWithPointerTypes",
typs: []*types.T{types.Int, types.Decimal, types.Bytes, types.Decimal},
input: colexectestutils.Tuples{
{2, 1.0, "1.0", 2.0},
{2, 1.0, "1.0", 4.0},
{2, 2.0, "2.0", 6.0},
},
groupCols: []uint32{0, 1, 2},
aggCols: [][]uint32{{0}, {1}, {2}, {3}, {3}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.AnyNotNull,
execinfrapb.AnyNotNull,
execinfrapb.Min,
execinfrapb.Sum,
},
expected: colexectestutils.Tuples{
{2, 1.0, "1.0", 2.0, 6.0},
{2, 2.0, "2.0", 6.0, 6.0},
},
},
{
name: "GroupOnTimeTZColumns",
typs: []*types.T{types.TimeTZ, types.Int},
input: colexectestutils.Tuples{
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 0), -1},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 1), 1},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 1), 2},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 2), 10},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 2), 11},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 3), 100},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 3), 101},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 4), 102},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 0), -1},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 1), 3},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 2), 21},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 3), 201},
{tree.NewDTimeTZFromOffset(timeofday.FromInt(0), 4), 102},
},
},
{
name: "AVG on all types",
typs: []*types.T{types.Int, types.Int2, types.Int4, types.Int, types.Decimal, types.Float, types.Interval},
input: colexectestutils.Tuples{
{0, nil, 1, 1, 1.0, 1.0, duration.MakeDuration(1, 1, 1)},
{0, 1, nil, 2, 2.0, 2.0, duration.MakeDuration(2, 2, 2)},
{0, 2, 2, nil, 3.0, 3.0, duration.MakeDuration(3, 3, 3)},
{0, 3, 3, 3, nil, 4.0, duration.MakeDuration(4, 4, 4)},
{0, 4, 4, 4, 4.0, nil, duration.MakeDuration(5, 5, 5)},
{0, 5, 5, 5, 5.0, 5.0, nil},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}, {2}, {3}, {4}, {5}, {6}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.Avg,
execinfrapb.Avg,
execinfrapb.Avg,
execinfrapb.Avg,
execinfrapb.Avg,
execinfrapb.Avg,
},
expected: colexectestutils.Tuples{
{0, 3.0, 3.0, 3.0, 3.0, 3.0, duration.MakeDuration(3, 3, 3)},
},
},
{
name: "ConcatAgg",
typs: []*types.T{types.Int, types.Bytes},
input: colexectestutils.Tuples{
{1, "1"},
{1, "2"},
{1, "3"},
{2, nil},
{2, "1"},
{2, "2"},
{3, "1"},
{3, nil},
{3, "2"},
{4, nil},
{4, nil},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.ConcatAgg,
},
expected: colexectestutils.Tuples{
{1, "123"},
{2, "12"},
{3, "12"},
{4, nil},
},
},
{
name: "All",
typs: []*types.T{types.Int, types.Decimal, types.Int, types.Bool, types.Bytes},
input: colexectestutils.Tuples{
{0, 3.1, 2, true, "zero"},
{0, 1.1, 3, false, "zero"},
{1, 1.1, 1, false, "one"},
{1, 4.1, 0, false, "one"},
{2, 1.1, 1, true, "two"},
{3, 4.1, 0, false, "three"},
{3, 5.1, 0, true, "three"},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {}, {1}, {1}, {1}, {2}, {2}, {2}, {3}, {3}, {4}, {4}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.CountRows,
execinfrapb.Avg,
execinfrapb.Count,
execinfrapb.Sum,
execinfrapb.SumInt,
execinfrapb.Min,
execinfrapb.Max,
execinfrapb.BoolAnd,
execinfrapb.BoolOr,
execinfrapb.AnyNotNull,
execinfrapb.ConcatAgg,
},
expected: colexectestutils.Tuples{
{0, 2, "2.1000000000000000000", 2, 4.2, 5, 2, 3, false, true, "zero", "zerozero"},
{1, 2, "2.6000000000000000000", 2, 5.2, 1, 0, 1, false, false, "one", "oneone"},
{2, 1, "1.1000000000000000000", 1, 1.1, 1, 1, 1, true, true, "two", "two"},
{3, 2, "4.6000000000000000000", 2, 9.2, 0, 0, 0, false, true, "three", "threethree"},
},
convToDecimal: true,
},
{
name: "NullHandling",
typs: []*types.T{types.Int, types.Decimal, types.Int, types.Bool, types.Bytes},
input: colexectestutils.Tuples{
{nil, 1.1, 4, true, "a"},
{0, nil, nil, nil, nil},
{0, 3.1, 5, nil, "b"},
{1, nil, nil, nil, nil},
{1, nil, nil, false, nil},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {}, {1}, {1}, {1}, {1}, {2}, {2}, {2}, {3}, {3}, {4}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.CountRows,
execinfrapb.AnyNotNull,
execinfrapb.Count,
execinfrapb.Sum,
execinfrapb.Avg,
execinfrapb.SumInt,
execinfrapb.Min,
execinfrapb.Max,
execinfrapb.BoolAnd,
execinfrapb.BoolOr,
execinfrapb.ConcatAgg,
},
expected: colexectestutils.Tuples{
{nil, 1, 1.1, 1, 1.1, "1.1000000000000000000", 4, 4, 4, true, true, "a"},
{0, 2, 3.1, 1, 3.1, "3.1000000000000000000", 5, 5, 5, nil, nil, "b"},
{1, 2, nil, 0, nil, nil, nil, nil, nil, false, false, nil},
},
convToDecimal: true,
},
{
name: "DistinctAggregation",
typs: types.TwoIntCols,
input: colexectestutils.Tuples{
{0, 1},
{0, 2},
{0, 2},
{0, nil},
{0, 1},
{0, nil},
{1, 1},
{1, 2},
{1, 2},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}, {1}, {1}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.Count,
execinfrapb.Count,
execinfrapb.SumInt,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 4, 2, 6, 3},
{1, 3, 2, 5, 3},
},
aggDistinct: []bool{false, false, true, false, true},
},
{
name: "FilteringAggregation",
typs: []*types.T{types.Int, types.Int, types.Bool},
input: colexectestutils.Tuples{
{0, 1, false},
{0, 2, true},
{0, 2, true},
{0, nil, nil},
{0, 1, nil},
{0, nil, true},
{1, 1, true},
{1, 2, nil},
{1, 2, true},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.CountRows,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 3, 4},
{1, 2, 3},
},
aggFilter: []int{tree.NoColumnIdx, 2, 2},
},
{
name: "AllGroupsFilteredOut",
typs: []*types.T{types.Int, types.Int, types.Bool},
input: colexectestutils.Tuples{
{0, 1, false},
{0, nil, nil},
{0, 2, false},
{1, 1, true},
{1, 2, nil},
{1, 2, true},
{2, 1, false},
{2, nil, nil},
{2, 2, nil},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.CountRows,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 0, nil},
{1, 2, 3},
{2, 0, nil},
},
aggFilter: []int{tree.NoColumnIdx, 2, 2},
},
{
name: "DistinctFilteringAggregation",
typs: []*types.T{types.Int, types.Int, types.Bool},
input: colexectestutils.Tuples{
{0, 1, false},
{0, 2, true},
{0, 2, true},
{0, nil, nil},
{0, 1, nil},
{0, nil, true},
{1, 1, true},
{1, 2, nil},
{1, 2, true},
},
groupCols: []uint32{0},
aggCols: [][]uint32{{0}, {1}, {1}, {1}, {1}, {1}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.AnyNotNull,
execinfrapb.Count,
execinfrapb.Count,
execinfrapb.Count,
execinfrapb.SumInt,
execinfrapb.SumInt,
execinfrapb.SumInt,
},
expected: colexectestutils.Tuples{
{0, 2, 2, 1, 4, 3, 2},
{1, 2, 2, 2, 3, 3, 3},
},
aggDistinct: []bool{false, false, true, true, false, true, true},
aggFilter: []int{tree.NoColumnIdx, 2, tree.NoColumnIdx, 2, 2, tree.NoColumnIdx, 2},
},
}
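// init pre-computes the AggregatorSpec for each static test case above and
// fails loudly if any test case is malformed.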
func init() {
for i := range aggregatorsTestCases {
if err := aggregatorsTestCases[i].init(); err != nil {
colexecerror.InternalError(err)
}
}
}
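// TestAggregators runs every static test case against each compatible
// aggregator type (hash and ordered) and verifies the produced tuples.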
func TestAggregators(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
evalCtx := eval.MakeTestingEvalContext(cluster.MakeTestingClusterSettings())
defer evalCtx.Stop(context.Background())
ctx := context.Background()
for _, tc := range aggregatorsTestCases {
constructors, constArguments, outputTypes, err := colexecagg.ProcessAggregations(
ctx, &evalCtx, nil /* semaCtx */, tc.spec.Aggregations, tc.typs,
)
require.NoError(t, err)
for _, agg := range aggTypes {
if tc.unorderedInput && agg.order == ordered {
// This test case has unordered input, so we skip ordered
// aggregator.
continue
}
if agg.order == ordered && tc.aggFilter != nil {
// Filtering aggregation is only supported with hash aggregator.
continue
}
log.Infof(ctx, "%s/%s", tc.name, agg.name)
verifier := colexectestutils.OrderedVerifier
if tc.unorderedInput {
verifier = colexectestutils.UnorderedVerifier
}
colexectestutils.RunTestsWithTyps(t, testAllocator, []colexectestutils.Tuples{tc.input}, [][]*types.T{tc.typs}, tc.expected, verifier,
func(input []colexecop.Operator) (colexecop.Operator, error) {
return agg.new(ctx, &colexecagg.NewAggregatorArgs{
Allocator: testAllocator,
MemAccount: testMemAcc,
Input: input[0],
InputTypes: tc.typs,
Spec: tc.spec,
EvalCtx: &evalCtx,
Constructors: constructors,
ConstArguments: constArguments,
OutputTypes: outputTypes,
}), nil
})
}
}
}
func TestAggregatorRandom(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
evalCtx := eval.MakeTestingEvalContext(cluster.MakeTestingClusterSettings())
defer evalCtx.Stop(context.Background())
// This test aggregates random inputs, keeping track of the expected results
// to make sure the aggregations are correct.
rng, _ := randutil.NewTestRand()
for _, groupSize := range []int{1, 2, coldata.BatchSize() / 4, coldata.BatchSize() / 2} {
if groupSize == 0 {
// We might be varying coldata.BatchSize() so that when it is divided by
// 4, groupSize is 0. We want to skip such a configuration.
continue
}
for _, numInputBatches := range []int{1, 2, 64} {
for _, hasNulls := range []bool{true, false} {
for _, agg := range aggTypesWithPartial {
log.Infof(context.Background(), "%s/groupSize=%d/numInputBatches=%d/hasNulls=%t", agg.name, groupSize, numInputBatches, hasNulls)
nTuples := coldata.BatchSize() * numInputBatches
typs := []*types.T{types.Int, types.Float}
cols := []coldata.Vec{
testAllocator.NewMemColumn(typs[0], nTuples),
testAllocator.NewMemColumn(typs[1], nTuples),
}
if agg.order == partial {
typs = append(typs, types.Int)
cols = append(cols, testAllocator.NewMemColumn(typs[2], nTuples))
}
groups, aggCol, aggColNulls := cols[0].Int64(), cols[1].Float64(), cols[1].Nulls()
expectedTuples := colexectestutils.Tuples{}
var expRowCounts, expCounts []int64
var expSums, expMins, expMaxs []float64
// SUM, MIN, MAX, and AVG aggregators can output null.
var expNulls []bool
curGroup := -1
for i := range groups {
if i%groupSize == 0 {
if curGroup != -1 {
if expNulls[curGroup] {
expectedTuples = append(expectedTuples, colexectestutils.Tuple{
expRowCounts[curGroup], expCounts[curGroup], nil, nil, nil, nil,
})
} else {
expectedTuples = append(expectedTuples, colexectestutils.Tuple{
expRowCounts[curGroup], expCounts[curGroup], expSums[curGroup], expMins[curGroup], expMaxs[curGroup], expSums[curGroup] / float64(expCounts[curGroup]),
})
}
}
expRowCounts = append(expRowCounts, 0)
expCounts = append(expCounts, 0)
expSums = append(expSums, 0)
expMins = append(expMins, 2048)
expMaxs = append(expMaxs, -2048)
expNulls = append(expNulls, true)
curGroup++
}
// Keep the inputs small so they are a realistic size. Using a
// large range is not realistic and makes decimal operations
// slower.
aggCol[i] = 2048 * (rng.Float64() - 0.5)
// NULL values contribute to the row count, so we're updating
// the row counts outside of the if block.
expRowCounts[curGroup]++
if hasNulls && rng.Float64() < 0.1 {
aggColNulls.SetNull(i)
} else {
expNulls[curGroup] = false
expCounts[curGroup]++
expSums[curGroup] += aggCol[i]
expMins[curGroup] = min64(aggCol[i], expMins[curGroup])
expMaxs[curGroup] = max64(aggCol[i], expMaxs[curGroup])
}
groups[i] = int64(curGroup)
}
// Add result for last group.
if expNulls[curGroup] {
expectedTuples = append(expectedTuples, colexectestutils.Tuple{
expRowCounts[curGroup], expCounts[curGroup], nil, nil, nil, nil,
})
} else {
expectedTuples = append(expectedTuples, colexectestutils.Tuple{
expRowCounts[curGroup], expCounts[curGroup], expSums[curGroup], expMins[curGroup], expMaxs[curGroup], expSums[curGroup] / float64(expCounts[curGroup]),
})
}
source := colexectestutils.NewChunkingBatchSource(testAllocator, typs, cols, nTuples)
tc := aggregatorTestCase{
typs: typs,
groupCols: []uint32{0},
aggCols: [][]uint32{{}, {1}, {1}, {1}, {1}, {1}},
aggFns: []execinfrapb.AggregatorSpec_Func{
execinfrapb.CountRows,
execinfrapb.Count,
execinfrapb.Sum,
execinfrapb.Min,
execinfrapb.Max,
execinfrapb.Avg,
},
}
if agg.order == partial {
tc.groupCols = []uint32{0, 2}
tc.orderedCols = []uint32{0}
}
require.NoError(t, tc.init())
constructors, constArguments, outputTypes, err := colexecagg.ProcessAggregations(
context.Background(), &evalCtx, nil /* semaCtx */, tc.spec.Aggregations, tc.typs,
)
require.NoError(t, err)
a := agg.new(context.Background(), &colexecagg.NewAggregatorArgs{
Allocator: testAllocator,
MemAccount: testMemAcc,
Input: source,
InputTypes: tc.typs,
Spec: tc.spec,
EvalCtx: &evalCtx,
Constructors: constructors,
ConstArguments: constArguments,
OutputTypes: outputTypes,
})
a.Init(context.Background())
testOutput := colexectestutils.NewOpTestOutput(a, expectedTuples)
if agg.order == ordered {
err = testOutput.Verify()
} else if agg.order == partial {
err = testOutput.VerifyPartialOrder()
} else {
err = testOutput.VerifyAnyOrder()
}
if err != nil {
t.Fatal(err)
}
}
}
}
}
}
// benchmarkAggregateFunction runs aggregator microbenchmarks. numGroupCol is
// the number of grouping columns. groupSize is the number of tuples to target
// in each distinct aggregation group. chunkSize is the number of tuples to
// target in each distinct partially ordered group column, and is intended for
// use with partial order. Limit is the number of rows to retrieve from the
// aggregation function before ending the microbenchmark.
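//
// A typical invocation (argument values here are illustrative only) is:
//
// benchmarkAggregateFunction(b, aggTypes[0], execinfrapb.AnyNotNull, []*types.T{types.Int},
// 1 /* numGroupCol */, 32 /* groupSize */, 0 /* distinctProb */, 32*coldata.BatchSize(),
// 0 /* chunkSize */, 0 /* limit */)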
func benchmarkAggregateFunction(
b *testing.B,
agg aggType,
aggFn execinfrapb.AggregatorSpec_Func,
aggInputTypes []*types.T,
numGroupCol int,
groupSize int,
distinctProb float64,
numInputRows int,
chunkSize int,
limit int,
) {
defer log.Scope(b).Close(b)
if groupSize > numInputRows {
// In this case all tuples will be part of the same group, and we have
// likely already benchmarked such a scenario with this value of
// numInputRows, so we short-circuit.
return
}
if numGroupCol < 1 {
// We should always have at least one group column.
return
}
if agg.order == partial {
if chunkSize > numInputRows || groupSize > chunkSize {
return
}
}
rng, _ := randutil.NewTestRand()
ctx := context.Background()
evalCtx := eval.MakeTestingEvalContext(cluster.MakeTestingClusterSettings())
defer evalCtx.Stop(ctx)
aggMemAcc := evalCtx.TestingMon.MakeBoundAccount()
defer aggMemAcc.Close(ctx)
evalCtx.SingleDatumAggMemAccount = &aggMemAcc
const bytesFixedLength = 8
typs := []*types.T{types.Int}
groupCols := []uint32{0}
for g := 1; g < numGroupCol; g++ {
typs = append(typs, types.Int)
groupCols = append(groupCols, uint32(g))
}
typs = append(typs, aggInputTypes...)
cols := make([]coldata.Vec, len(typs))
for i := range typs {
cols[i] = testAllocator.NewMemColumn(typs[i], numInputRows)
}
groups := cols[0].Int64()
if agg.order == ordered {
curGroup := -1
for i := 0; i < numInputRows; i++ {
if i%groupSize == 0 {
curGroup++
}
groups[i] = int64(curGroup)
}
} else if agg.order == unordered {
numGroups := numInputRows / groupSize
for i := 0; i < numInputRows; i++ {
groups[i] = int64(rng.Intn(numGroups))
}
} else {
// partial order.
chunks := cols[0].Int64()
groups = cols[1].Int64()
curChunk := -1
numGroups := chunkSize / groupSize
for i := 0; i < numInputRows; i++ {
if i%chunkSize == 0 {
curChunk++
}
chunks[i] = int64(curChunk)
groups[i] = int64(rng.Intn(numGroups))
}
}
for _, col := range cols[numGroupCol:] {
coldatatestutils.RandomVec(coldatatestutils.RandomVecArgs{
Rand: rng,
Vec: col,
N: numInputRows,
NullProbability: 0,
BytesFixedLength: bytesFixedLength,
})
}
if aggFn == execinfrapb.SumInt {
// Integer summation of random Int64 values can lead
// to overflow, and we will panic. To work around it, we
// restrict the range of values.
vals := cols[numGroupCol].Int64()
for i := range vals {
vals[i] = vals[i] % 1024
}
}
source := colexectestutils.NewChunkingBatchSource(testAllocator, typs, cols, numInputRows)
aggCols := make([]uint32, len(aggInputTypes))
for i := range aggCols {
aggCols[i] = uint32(numGroupCol + i)
}
tc := aggregatorTestCase{
typs: typs,
groupCols: groupCols,
aggCols: [][]uint32{aggCols},
aggFns: []execinfrapb.AggregatorSpec_Func{aggFn},
unorderedInput: agg.order == unordered,
}
if distinctProb > 0 {
if !typs[0].Identical(types.Int) {
skip.IgnoreLint(b, "benchmarking distinct aggregation is supported only on an INT argument")
}
tc.aggDistinct = []bool{true}
distinctModulo := int64(1.0 / distinctProb)
vals := cols[1].Int64()
for i := range vals {
vals[i] = vals[i] % distinctModulo
}
}
if agg.order == partial {
tc.orderedCols = []uint32{0}
}
require.NoError(b, tc.init())
constructors, constArguments, outputTypes, err := colexecagg.ProcessAggregations(
ctx, &evalCtx, nil /* semaCtx */, tc.spec.Aggregations, tc.typs,
)
require.NoError(b, err)
fName := execinfrapb.AggregatorSpec_Func_name[int32(aggFn)]
// Only count the aggregation columns.
var argumentsSize int
if len(aggInputTypes) > 0 {
for _, typ := range aggInputTypes {
if typ.Identical(types.Bool) {
argumentsSize++
} else {
argumentsSize += 8
}
}
} else {
// For COUNT_ROWS we'll just use 8 bytes.
argumentsSize = 8
}
var inputTypesString string
switch len(aggInputTypes) {
case 1:
// Override the string so that the name of the benchmark is the same
// as in pre-20.2 releases (which allows us to compare against old
// numbers).
inputTypesString = aggInputTypes[0].String()
default:
inputTypesString = fmt.Sprintf("%s", aggInputTypes)
}
distinctProbString := ""
if distinctProb > 0 {
distinctProbString = fmt.Sprintf("/distinctProb=%.2f", distinctProb)
}
b.Run(fmt.Sprintf(
"%s/%s/%s/groupSize=%d%s/numInputRows=%d",
fName, agg.name, inputTypesString, groupSize, distinctProbString, numInputRows),
func(b *testing.B) {
b.SetBytes(int64(argumentsSize * numInputRows))
b.ResetTimer()
for i := 0; i < b.N; i++ {
a := agg.new(ctx, &colexecagg.NewAggregatorArgs{
Allocator: testAllocator,
MemAccount: testMemAcc,
Input: source,
InputTypes: tc.typs,
Spec: tc.spec,
EvalCtx: &evalCtx,
Constructors: constructors,
ConstArguments: constArguments,
OutputTypes: outputTypes,
})
a.Init(ctx)
// Exhaust the aggregator until all batches have been read or the limit, if
// non-zero, is reached.
tupleCount := 0
for b := a.Next(); b.Length() != 0; b = a.Next() {
tupleCount += b.Length()
if limit > 0 && tupleCount >= limit {
break
}
}
if err = a.(colexecop.Closer).Close(ctx); err != nil {
b.Fatal(err)
}
source.Reset(ctx)
}
},
)
}
// BenchmarkAggregator runs the benchmark for both aggregators with diverse data
// source parameters but using a single aggregate function. The goal of this
// benchmark is measuring the performance of the aggregators themselves
// depending on the parameters of the input.
func BenchmarkAggregator(b *testing.B) {
numRows := []int{1, 32, coldata.BatchSize(), 32 * coldata.BatchSize(), 1024 * coldata.BatchSize()}
groupSizes := []int{1, 2, 32, 128, coldata.BatchSize()}
if testing.Short() {
numRows = []int{32, 32 * coldata.BatchSize()}
groupSizes = []int{1, coldata.BatchSize()}
}
// We choose the any_not_null aggregate function because it is the simplest
// possible and, thus, its Compute function call will have the least impact
// when benchmarking the aggregator logic.
aggFn := execinfrapb.AnyNotNull
for _, agg := range aggTypes {
for _, numInputRows := range numRows {
for _, groupSize := range groupSizes {
benchmarkAggregateFunction(
b, agg, aggFn, []*types.T{types.Int}, 1, /* numGroupCol */
groupSize, 0 /* distinctProb */, numInputRows,
0 /* chunkSize */, 0 /* limit */)
}
}
}
}
// BenchmarkAllOptimizedAggregateFunctions runs the benchmark of all optimized
// aggregate functions in 4 configurations (hash vs ordered, and small groups
// vs big groups). Such configurations were chosen since they provide good
// enough signal on the speeds of aggregate functions. For more diverse
// configurations look at BenchmarkAggregator.
func BenchmarkAllOptimizedAggregateFunctions(b *testing.B) {
var numInputRows = 32 * coldata.BatchSize()
numFnsToRun := len(execinfrapb.AggregatorSpec_Func_name)
if testing.Short() {
numFnsToRun = 1
}
for aggFnNumber := 0; aggFnNumber < numFnsToRun; aggFnNumber++ {
aggFn := execinfrapb.AggregatorSpec_Func(aggFnNumber)
if !colexecagg.IsAggOptimized(aggFn) {
continue
}
for _, agg := range aggTypes {
var aggInputTypes []*types.T
switch aggFn {
case execinfrapb.BoolAnd, execinfrapb.BoolOr:
aggInputTypes = []*types.T{types.Bool}
case execinfrapb.ConcatAgg:
aggInputTypes = []*types.T{types.Bytes}
case execinfrapb.CountRows:
default:
aggInputTypes = []*types.T{types.Int}
}
for _, groupSize := range []int{1, coldata.BatchSize()} {
benchmarkAggregateFunction(b, agg, aggFn, aggInputTypes,
1 /* numGroupCol */, groupSize,
0 /* distinctProb */, numInputRows,
0 /* chunkSize */, 0 /* limit */)
}
}
}
}
func BenchmarkDistinctAggregation(b *testing.B) {
aggFn := execinfrapb.Count
for _, agg := range aggTypes {
for _, numInputRows := range []int{32, 32 * coldata.BatchSize()} {
for _, groupSize := range []int{1, 2, 32, 128, coldata.BatchSize()} {
for _, distinctProb := range []float64{0.01, 0.1, 1.0} {
distinctModulo := int(1.0 / distinctProb)
if (groupSize == 1 && distinctProb != 1.0) || float64(groupSize)/float64(distinctModulo) < 0.1 {
// We have such a combination of groupSize and distinctProb
// parameters that we will be very unlikely to satisfy them
// (for example, with groupSize=1 and distinctProb=0.01,
// every value will be distinct within the group), so we
// skip such a configuration.
continue
}
benchmarkAggregateFunction(b, agg, aggFn, []*types.T{types.Int},
1 /* numGroupCol */, groupSize,
0 /* distinctProb */, numInputRows,
0 /* chunkSize */, 0 /* limit */)
}
}
}
}
}
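// min64 and max64 are small float64 helpers used when accumulating the
// expected results in the tests above.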
func min64(a, b float64) float64 {
if a < b {
return a
}
return b
}
func max64(a, b float64) float64 {
if a > b {
return a
}
return b
}
| pkg/sql/colexec/aggregators_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/ada419810cf861594978bc5e997e1be096a7d35e | [
0.9968840479850769,
0.02337276190519333,
0.00016206667351070791,
0.00017290064715780318,
0.13438791036605835
] |
{
"id": 1,
"code_window": [
"\t\tCfg: &execinfra.ServerConfig{\n",
"\t\t\tSettings: st,\n",
"\t\t},\n",
"\t}\n",
"\trng, _ := randutil.NewTestRand()\n",
"\n",
"\tbatch := testAllocator.NewMemBatchWithMaxCapacity([]*types.T{types.Bool, types.Bool})\n",
"\tcol1 := batch.ColVec(0).Bool()\n",
"\tcol2 := batch.ColVec(0).Bool()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\trng := randutil.NewTestRandWithSeed(91)\n"
],
"file_path": "pkg/sql/colexec/and_or_projection_test.go",
"type": "replace",
"edit_start_line_idx": 236
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
import * as React from "react";
interface IconProps {
className?: string;
}
export const StackIcon = ({
className,
...props
}: IconProps): React.ReactElement => (
<svg viewBox="0 0 16 16" className={className} {...props}>
<path
fillRule="evenodd"
clipRule="evenodd"
d="M.503 12.592a.727.727 0 0 1 .93-.44L8 14.5l6.567-2.348a.727.727 0 1 1 .49 1.37l-6.812 2.435a.726.726 0 0 1-.49 0L.943 13.522a.727.727 0 0 1-.44-.93zM7.715.058a.727.727 0 0 1 .57 0l6.851 2.916c1.191.507 1.135 2.203-.081 2.635L8.243 8.024a.727.727 0 0 1-.486 0L.944 5.609C-.272 5.177-.329 3.48.862 2.974L7.715.058zM1.526 4.272L8 6.567l6.473-2.295L8 1.518 1.526 4.272zM.502 8.653a.727.727 0 0 1 .928-.443L8 10.532l6.57-2.322a.727.727 0 0 1 .484 1.371L8.243 11.99a.728.728 0 0 1-.485 0L.946 9.581a.727.727 0 0 1-.444-.928z"
/>
</svg>
);
| pkg/ui/workspaces/db-console/assets/stackIcon.tsx | 0 | https://github.com/cockroachdb/cockroach/commit/ada419810cf861594978bc5e997e1be096a7d35e | [
0.0001788063527783379,
0.00017397047486156225,
0.00016837108705658466,
0.00017473399930167943,
0.000004294252903491724
] |
{
"id": 1,
"code_window": [
"\t\tCfg: &execinfra.ServerConfig{\n",
"\t\t\tSettings: st,\n",
"\t\t},\n",
"\t}\n",
"\trng, _ := randutil.NewTestRand()\n",
"\n",
"\tbatch := testAllocator.NewMemBatchWithMaxCapacity([]*types.T{types.Bool, types.Bool})\n",
"\tcol1 := batch.ColVec(0).Bool()\n",
"\tcol2 := batch.ColVec(0).Bool()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\trng := randutil.NewTestRandWithSeed(91)\n"
],
"file_path": "pkg/sql/colexec/and_or_projection_test.go",
"type": "replace",
"edit_start_line_idx": 236
} | // Code generated by TestPretty. DO NOT EDIT.
// GENERATED FILE DO NOT EDIT
1:
-
SELECT phone0_.id AS id1_6_0_,
person1_.id AS id1_4_1_,
phone0_.phone_number
AS phone_nu2_6_0_,
phone0_.person_id
AS person_i4_6_0_,
phone0_.phone_type
AS phone_ty3_6_0_,
addresses2_.person_id
AS person_i1_5_0__,
addresses2_.addresses
AS addresse2_5_0__,
addresses2_.addresses_key
AS addresse3_0__,
person1_.address
AS address2_4_1_,
person1_.createdon
AS createdo3_4_1_,
person1_.name AS name4_4_1_,
person1_.nickname
AS nickname5_4_1_,
person1_.version
AS version6_4_1_,
addresses2_.person_id
AS person_i1_5_0__,
addresses2_.addresses
AS addresse2_5_0__,
addresses2_.addresses_key
AS addresse3_0__
FROM phone AS phone0_
INNER JOIN person AS person1_ ON
phone0_.person_id
= person1_.id
INNER JOIN person_addresses
AS addresses2_ ON
person1_.id
= addresses2_.person_id
WHERE EXISTS(
SELECT calls3_.id
FROM phone_call AS calls3_
WHERE phone0_.id
= calls3_.phone_id
)
| pkg/sql/sem/tree/testdata/pretty/4.align-deindent.golden.short | 0 | https://github.com/cockroachdb/cockroach/commit/ada419810cf861594978bc5e997e1be096a7d35e | [
0.0001770040107658133,
0.00017551735800225288,
0.00017381190264131874,
0.00017549037875141948,
0.0000010726045047704247
] |
{
"id": 1,
"code_window": [
"\t\tCfg: &execinfra.ServerConfig{\n",
"\t\t\tSettings: st,\n",
"\t\t},\n",
"\t}\n",
"\trng, _ := randutil.NewTestRand()\n",
"\n",
"\tbatch := testAllocator.NewMemBatchWithMaxCapacity([]*types.T{types.Bool, types.Bool})\n",
"\tcol1 := batch.ColVec(0).Bool()\n",
"\tcol2 := batch.ColVec(0).Bool()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\trng := randutil.NewTestRandWithSeed(91)\n"
],
"file_path": "pkg/sql/colexec/and_or_projection_test.go",
"type": "replace",
"edit_start_line_idx": 236
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
import { get } from "lodash";
import { track } from "./trackFilter";
describe("trackFilter", () => {
const filter = "Test";
const filterValue = "test-value";
it("should only call track once", () => {
const spy = jest.fn();
track(spy)(filter, filterValue);
expect(spy).toHaveBeenCalled();
});
it("should send a track call with the correct event", () => {
const spy = jest.fn();
const expected = "Test Filter";
track(spy)(filter, filterValue);
const sent = spy.mock.calls[0][0];
const event = get(sent, "event");
expect(event === expected).toBe(true);
});
it("send the correct payload", () => {
const spy = jest.fn();
track(spy)(filter, filterValue);
const sent = spy.mock.calls[0][0];
const selectedFilter = get(sent, "properties.selectedFilter");
expect(selectedFilter === filterValue).toBe(true);
});
});
| pkg/ui/workspaces/db-console/src/util/analytics/trackFilter.spec.ts | 0 | https://github.com/cockroachdb/cockroach/commit/ada419810cf861594978bc5e997e1be096a7d35e | [
0.00017959234537556767,
0.00017603542073629797,
0.00017325281805824488,
0.00017506821313872933,
0.000002351238435949199
] |
{
"id": 2,
"code_window": [
"\tisExternal bool,\n",
"\tshuffleInput bool,\n",
") {\n",
"\trng, _ := randutil.NewTestRand()\n",
"\tconst nCols = 2\n",
"\tconst bytesValueLength = 8\n",
"\tdistinctCols := []uint32{0, 1}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\trng := randutil.NewTestRandWithSeed(41)\n"
],
"file_path": "pkg/sql/colexec/distinct_test.go",
"type": "replace",
"edit_start_line_idx": 474
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colexec
import (
"context"
"fmt"
"testing"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecbase"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexectestutils"
"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
"github.com/stretchr/testify/require"
)
type andOrTestCase struct {
tuples []colexectestutils.Tuple
expected []colexectestutils.Tuple
skipAllNullsInjection bool
}
var (
andTestCases []andOrTestCase
orTestCases []andOrTestCase
)
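// init populates the static AND and OR test cases exercised by TestAndOrOps.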
func init() {
andTestCases = []andOrTestCase{
// All variations of pairs separately first.
{
tuples: colexectestutils.Tuples{{false, true}},
expected: colexectestutils.Tuples{{false}},
},
{
tuples: colexectestutils.Tuples{{false, nil}},
expected: colexectestutils.Tuples{{false}},
},
{
tuples: colexectestutils.Tuples{{false, false}},
expected: colexectestutils.Tuples{{false}},
},
{
tuples: colexectestutils.Tuples{{true, true}},
expected: colexectestutils.Tuples{{true}},
},
{
tuples: colexectestutils.Tuples{{true, false}},
expected: colexectestutils.Tuples{{false}},
},
{
tuples: colexectestutils.Tuples{{true, nil}},
expected: colexectestutils.Tuples{{nil}},
// The case of {nil, nil} is explicitly tested below.
skipAllNullsInjection: true,
},
{
tuples: colexectestutils.Tuples{{nil, true}},
expected: colexectestutils.Tuples{{nil}},
// The case of {nil, nil} is explicitly tested below.
skipAllNullsInjection: true,
},
{
tuples: colexectestutils.Tuples{{nil, false}},
expected: colexectestutils.Tuples{{false}},
},
{
tuples: colexectestutils.Tuples{{nil, nil}},
expected: colexectestutils.Tuples{{nil}},
},
// Now all variations of pairs are combined together to make sure that nothing
// funky is going on with multiple tuples.
{
tuples: colexectestutils.Tuples{
{false, true}, {false, nil}, {false, false},
{true, true}, {true, false}, {true, nil},
{nil, true}, {nil, false}, {nil, nil},
},
expected: colexectestutils.Tuples{
{false}, {false}, {false},
{true}, {false}, {nil},
{nil}, {false}, {nil},
},
},
}
orTestCases = []andOrTestCase{
// All variations of pairs separately first.
{
tuples: colexectestutils.Tuples{{false, true}},
expected: colexectestutils.Tuples{{true}},
},
{
tuples: colexectestutils.Tuples{{false, nil}},
expected: colexectestutils.Tuples{{nil}},
// The case of {nil, nil} is explicitly tested below.
skipAllNullsInjection: true,
},
{
tuples: colexectestutils.Tuples{{false, false}},
expected: colexectestutils.Tuples{{false}},
},
{
tuples: colexectestutils.Tuples{{true, true}},
expected: colexectestutils.Tuples{{true}},
},
{
tuples: colexectestutils.Tuples{{true, false}},
expected: colexectestutils.Tuples{{true}},
},
{
tuples: colexectestutils.Tuples{{true, nil}},
expected: colexectestutils.Tuples{{true}},
},
{
tuples: colexectestutils.Tuples{{nil, true}},
expected: colexectestutils.Tuples{{true}},
},
{
tuples: colexectestutils.Tuples{{nil, false}},
expected: colexectestutils.Tuples{{nil}},
// The case of {nil, nil} is explicitly tested below.
skipAllNullsInjection: true,
},
{
tuples: colexectestutils.Tuples{{nil, nil}},
expected: colexectestutils.Tuples{{nil}},
},
// Now all variations of pairs are combined together to make sure that nothing
// funky is going on with multiple tuples.
{
tuples: colexectestutils.Tuples{
{false, true}, {false, nil}, {false, false},
{true, true}, {true, false}, {true, nil},
{nil, true}, {nil, false}, {nil, nil},
},
expected: colexectestutils.Tuples{
{true}, {nil}, {false},
{true}, {true}, {true},
{true}, {nil}, {nil},
},
},
}
}
func TestAndOrOps(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
st := cluster.MakeTestingClusterSettings()
evalCtx := eval.MakeTestingEvalContext(st)
defer evalCtx.Stop(ctx)
flowCtx := &execinfra.FlowCtx{
EvalCtx: &evalCtx,
Mon: evalCtx.TestingMon,
Cfg: &execinfra.ServerConfig{
Settings: st,
},
}
for _, test := range []struct {
operation string
cases []andOrTestCase
}{
{
operation: "AND",
cases: andTestCases,
},
{
operation: "OR",
cases: orTestCases,
},
} {
t.Run(test.operation, func(t *testing.T) {
for _, tc := range test.cases {
var runner colexectestutils.TestRunner
if tc.skipAllNullsInjection {
// We're omitting the all-nulls injection test. See the comments on each such
// test case.
runner = colexectestutils.RunTestsWithoutAllNullsInjection
} else {
runner = colexectestutils.RunTestsWithTyps
}
runner(
t,
testAllocator,
[]colexectestutils.Tuples{tc.tuples},
[][]*types.T{{types.Bool, types.Bool}},
tc.expected,
colexectestutils.OrderedVerifier,
func(input []colexecop.Operator) (colexecop.Operator, error) {
projOp, err := colexectestutils.CreateTestProjectingOperator(
ctx, flowCtx, input[0], []*types.T{types.Bool, types.Bool},
fmt.Sprintf("@1 %s @2", test.operation), testMemAcc,
)
if err != nil {
return nil, err
}
// We will project out the first two columns in order
// to have test cases be less verbose.
return colexecbase.NewSimpleProjectOp(projOp, 3 /* numInputCols */, []uint32{2}), nil
})
}
})
}
}
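// benchmarkLogicalProjOp measures the throughput of a single AND / OR
// projection operator over a repeatable input batch, optionally using a
// selection vector and/or NULL values.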
func benchmarkLogicalProjOp(
b *testing.B, operation string, useSelectionVector bool, hasNulls bool,
) {
ctx := context.Background()
st := cluster.MakeTestingClusterSettings()
evalCtx := eval.MakeTestingEvalContext(st)
defer evalCtx.Stop(ctx)
flowCtx := &execinfra.FlowCtx{
EvalCtx: &evalCtx,
Mon: evalCtx.TestingMon,
Cfg: &execinfra.ServerConfig{
Settings: st,
},
}
rng, _ := randutil.NewTestRand()
batch := testAllocator.NewMemBatchWithMaxCapacity([]*types.T{types.Bool, types.Bool})
col1 := batch.ColVec(0).Bool()
col2 := batch.ColVec(0).Bool()
for i := 0; i < coldata.BatchSize(); i++ {
col1[i] = rng.Float64() < 0.5
col2[i] = rng.Float64() < 0.5
}
if hasNulls {
nulls1 := batch.ColVec(0).Nulls()
nulls2 := batch.ColVec(0).Nulls()
for i := 0; i < coldata.BatchSize(); i++ {
if rng.Float64() < 0.1 {
nulls1.SetNull(i)
}
if rng.Float64() < 0.1 {
nulls2.SetNull(i)
}
}
}
batch.SetLength(coldata.BatchSize())
if useSelectionVector {
batch.SetSelection(true)
sel := batch.Selection()
for i := 0; i < coldata.BatchSize(); i++ {
sel[i] = i
}
}
typs := []*types.T{types.Bool, types.Bool}
input := colexecop.NewRepeatableBatchSource(testAllocator, batch, typs)
logicalProjOp, err := colexectestutils.CreateTestProjectingOperator(
ctx, flowCtx, input, typs, fmt.Sprintf("@1 %s @2", operation), testMemAcc,
)
require.NoError(b, err)
logicalProjOp.Init(ctx)
b.SetBytes(int64(8 * coldata.BatchSize()))
for i := 0; i < b.N; i++ {
logicalProjOp.Next()
}
}
func BenchmarkLogicalProjOp(b *testing.B) {
for _, operation := range []string{"AND", "OR"} {
for _, useSel := range []bool{true, false} {
for _, hasNulls := range []bool{true, false} {
b.Run(fmt.Sprintf("%s,useSel=%t,hasNulls=%t", operation, useSel, hasNulls), func(b *testing.B) {
benchmarkLogicalProjOp(b, operation, useSel, hasNulls)
})
}
}
}
}
| pkg/sql/colexec/and_or_projection_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/ada419810cf861594978bc5e997e1be096a7d35e | [
0.9951506853103638,
0.08558760583400726,
0.00016749791393522173,
0.00017336857854388654,
0.2637512683868408
] |
{
"id": 2,
"code_window": [
"\tisExternal bool,\n",
"\tshuffleInput bool,\n",
") {\n",
"\trng, _ := randutil.NewTestRand()\n",
"\tconst nCols = 2\n",
"\tconst bytesValueLength = 8\n",
"\tdistinctCols := []uint32{0, 1}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\trng := randutil.NewTestRandWithSeed(41)\n"
],
"file_path": "pkg/sql/colexec/distinct_test.go",
"type": "replace",
"edit_start_line_idx": 474
} | # disabled for tenants as they can't set storage.mvcc.range_tombstones.enabled
new-cluster name=s1 nodes=1 disable-tenant
----
subtest restore-retry
exec-sql
CREATE DATABASE restore;
CREATE SCHEMA restore.myschema;
CREATE TABLE foobar (pk int primary key);
CREATE TABLE restore.myschema.table1 (pk int primary key);
INSERT INTO restore.myschema.table1 VALUES (1);
CREATE TYPE data.myenum AS ENUM ('hello');
----
exec-sql
BACKUP INTO 'nodelocal://1/cluster_backup';
----
new-cluster name=s2 nodes=1 share-io-dir=s1 disable-tenant
----
exec-sql
SET CLUSTER SETTING storage.mvcc.range_tombstones.enabled = true;
----
exec-sql
SELECT crdb_internal.set_vmodule('lease=3');
----
# Restore's OnFailOrCancel deletes descriptors which requires us to wait for no
# versions of that descriptor to be leased before proceeding. Since our test fails
# the job after the descriptors have been published, it's possible for them to be leased
# somewhere.
exec-sql
SET CLUSTER SETTING sql.catalog.descriptor_lease_duration = '1s';
----
exec-sql
SET CLUSTER SETTING jobs.debug.pausepoints = 'restore.after_publishing_descriptors';
----
restore expect-pausepoint tag=a
RESTORE FROM LATEST IN 'nodelocal://1/cluster_backup';
----
job paused at pausepoint
exec-sql
SET CLUSTER SETTING jobs.debug.pausepoints = '';
----
# Cancel the job so that the cleanup hook runs.
job cancel=a
----
# TODO(ssd): We sleep via the test runner and not via SQL using pg_sleep because if we try to
# execute a query too quickly we see failures. https://github.com/cockroachdb/cockroach/issues/88913
sleep ms=2000
----
restore
RESTORE FROM LATEST IN 'nodelocal://1/cluster_backup';
----
subtest end
| pkg/ccl/backupccl/testdata/backup-restore/restore-on-fail-or-cancel-retry | 0 | https://github.com/cockroachdb/cockroach/commit/ada419810cf861594978bc5e997e1be096a7d35e | [
0.00017383992963004857,
0.00016959148342721164,
0.00016619812231510878,
0.00017015592311508954,
0.000002515704181860201
] |
{
"id": 2,
"code_window": [
"\tisExternal bool,\n",
"\tshuffleInput bool,\n",
") {\n",
"\trng, _ := randutil.NewTestRand()\n",
"\tconst nCols = 2\n",
"\tconst bytesValueLength = 8\n",
"\tdistinctCols := []uint32{0, 1}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\trng := randutil.NewTestRandWithSeed(41)\n"
],
"file_path": "pkg/sql/colexec/distinct_test.go",
"type": "replace",
"edit_start_line_idx": 474
} | echo
----
+----------------------------------------+--------------------------------------------------------------+--------------+------------------------------------+------------------------------------------------------------------------------------+
| SPAN | KEY HEX | ENDKEY HEX | VERSION HEX | PRETTY |
+----------------------------------------+--------------------------------------------------------------+--------------+------------------------------------+------------------------------------------------------------------------------------+
| /Local/RangeID/2/{r""-s""} | 01698a726162632d120ce61c175eb445878c36dcf4062ada4c0001 | | | /Local/RangeID/2/r/AbortSpan/"0ce61c17-5eb4-4587-8c36-dcf4062ada4c" |
| /Local/RangeID/2/{r""-s""} | 01698a726162632d129855a1ef8eb94c06a106cab1dda78a2b0001 | | | /Local/RangeID/2/r/AbortSpan/"9855a1ef-8eb9-4c06-a106-cab1dda78a2b" |
| /Local/RangeID/2/{r""-s""} | 01698a726c67632d | | | /Local/RangeID/2/r/RangeGCThreshold |
| /Local/RangeID/2/{r""-s""} | 01698a727261736b | | | /Local/RangeID/2/r/RangeAppliedState |
| /Local/RangeID/2/{r""-s""} | 01698a72726c6c2d | | | /Local/RangeID/2/r/RangeLease |
| /Local/RangeID/2/{r""-s""} | 01698a723a61 | 01698a723a78 | 000000000000000109 | /Local/RangeID/2/r":{a"-x"}/0.000000001,0 |
| /Local/RangeID/2/{u""-v""} | 01698a7572667462 | | | /Local/RangeID/2/u/RangeTombstone |
| /Local/RangeID/2/{u""-v""} | 01698a7572667468 | | | /Local/RangeID/2/u/RaftHardState |
| /Local/RangeID/2/{u""-v""} | 01698a757266746c0000000000000001 | | | /Local/RangeID/2/u/RaftLog/logIndex:1 |
| /Local/RangeID/2/{u""-v""} | 01698a757266746c0000000000000002 | | | /Local/RangeID/2/u/RaftLog/logIndex:2 |
| /Local/RangeID/2/{u""-v""} | 01698a75726c7274 | | | /Local/RangeID/2/u/RangeLastReplicaGCTimestamp |
| /Local/RangeID/2/{u""-v""} | 01698a753a61 | 01698a753a78 | 000000000000000109 | /Local/RangeID/2/u":{a"-x"}/0.000000001,0 |
| /Local/Range"{b"-c"} | 016b1262000172647363 | | 0000000000000001 | /Local/Range"b"/RangeDescriptor/0.000000001,0 |
| /Local/Range"{b"-c"} | 016b1262000174786e2d0ce61c175eb445878c36dcf4062ada4c | | | /Local/Range"b"/Transaction/"0ce61c17-5eb4-4587-8c36-dcf4062ada4c" |
| /Local/Range"{b"-c"} | 016b126200ff000174786e2d9855a1ef8eb94c06a106cab1dda78a2b | | | /Local/Range"b\x00"/Transaction/"9855a1ef-8eb9-4c06-a106-cab1dda78a2b" |
| /Local/Range"{b"-c"} | 016b1262ffffffff000174786e2d295e727c8ca9437cbb5e8e2ebbad996f | | | /Local/Range"b\xff\xff\xff\xff"/Transaction/"295e727c-8ca9-437c-bb5e-8e2ebbad996f" |
| /Local/Lock/Intent/Local/Range"{b"-c"} | 017a6b12016b126200ff01726473630001 | | 030ce61c175eb445878c36dcf4062ada4c | /Local/Range"b"/RangeDescriptor |
| /Local/Lock/Intent"{b"-c"} | 017a6b12620001 | | 030ce61c175eb445878c36dcf4062ada4c | "b" |
| {b-c} | 62 | | 0000000000000001 | "b"/0.000000001,0 |
| {b-c} | 62ffffffff | | 0000000000000001 | "b\xff\xff\xff\xff"/0.000000001,0 |
| {b-c} | 62 | 63 | 000000000000000109 | {b-c}/0.000000001,0 |
+----------------------------------------+--------------------------------------------------------------+--------------+------------------------------------+------------------------------------------------------------------------------------+
| pkg/kv/kvserver/rditer/testdata/TestReplicaDataIterator/r2/all/output | 0 | https://github.com/cockroachdb/cockroach/commit/ada419810cf861594978bc5e997e1be096a7d35e | [
0.009126870892941952,
0.0036561910528689623,
0.0003336411900818348,
0.0015080608427524567,
0.0038979542441666126
] |
{
"id": 2,
"code_window": [
"\tisExternal bool,\n",
"\tshuffleInput bool,\n",
") {\n",
"\trng, _ := randutil.NewTestRand()\n",
"\tconst nCols = 2\n",
"\tconst bytesValueLength = 8\n",
"\tdistinctCols := []uint32{0, 1}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\trng := randutil.NewTestRandWithSeed(41)\n"
],
"file_path": "pkg/sql/colexec/distinct_test.go",
"type": "replace",
"edit_start_line_idx": 474
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package geogfn
import (
"math"
"github.com/cockroachdb/cockroach/pkg/geo"
"github.com/cockroachdb/cockroach/pkg/geo/geoprojbase"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/golang/geo/s1"
"github.com/golang/geo/s2"
"github.com/twpayne/go-geom"
)
// Area returns the area of a given Geography.
func Area(g geo.Geography, useSphereOrSpheroid UseSphereOrSpheroid) (float64, error) {
regions, err := g.AsS2(geo.EmptyBehaviorOmit)
if err != nil {
return 0, err
}
spheroid, err := spheroidFromGeography(g)
if err != nil {
return 0, err
}
var totalArea float64
for _, region := range regions {
switch region := region.(type) {
case s2.Point, *s2.Polyline:
case *s2.Polygon:
if useSphereOrSpheroid == UseSpheroid {
for _, loop := range region.Loops() {
points := loop.Vertices()
area, _ := spheroid.AreaAndPerimeter(points[:len(points)-1])
totalArea += float64(loop.Sign()) * area
}
} else {
totalArea += region.Area()
}
default:
return 0, pgerror.Newf(pgcode.InvalidParameterValue, "unknown type: %T", region)
}
}
if useSphereOrSpheroid == UseSphere {
totalArea *= spheroid.SphereRadius() * spheroid.SphereRadius()
}
return totalArea, nil
}
// Perimeter returns the perimeter of a given Geography.
func Perimeter(g geo.Geography, useSphereOrSpheroid UseSphereOrSpheroid) (float64, error) {
gt, err := g.AsGeomT()
if err != nil {
return 0, err
}
// This check mirrors PostGIS behavior, where GeometryCollections
// of LineStrings include the length for perimeters.
switch gt.(type) {
case *geom.Polygon, *geom.MultiPolygon, *geom.GeometryCollection:
default:
return 0, nil
}
regions, err := geo.S2RegionsFromGeomT(gt, geo.EmptyBehaviorOmit)
if err != nil {
return 0, err
}
spheroid, err := spheroidFromGeography(g)
if err != nil {
return 0, err
}
return length(regions, spheroid, useSphereOrSpheroid)
}
// Length returns length of a given Geography.
func Length(g geo.Geography, useSphereOrSpheroid UseSphereOrSpheroid) (float64, error) {
gt, err := g.AsGeomT()
if err != nil {
return 0, err
}
// This check mirrors PostGIS behavior, where GeometryCollections
// of Polygons include the perimeters for polygons.
switch gt.(type) {
case *geom.LineString, *geom.MultiLineString, *geom.GeometryCollection:
default:
return 0, nil
}
regions, err := geo.S2RegionsFromGeomT(gt, geo.EmptyBehaviorOmit)
if err != nil {
return 0, err
}
spheroid, err := spheroidFromGeography(g)
if err != nil {
return 0, err
}
return length(regions, spheroid, useSphereOrSpheroid)
}
// Project returns a projected point given a source point, a distance and an azimuth.
func Project(g geo.Geography, distance float64, azimuth s1.Angle) (geo.Geography, error) {
geomT, err := g.AsGeomT()
if err != nil {
return geo.Geography{}, err
}
point, ok := geomT.(*geom.Point)
if !ok {
return geo.Geography{}, pgerror.Newf(pgcode.InvalidParameterValue, "ST_Project(geography) is only valid for point inputs")
}
spheroid, err := spheroidFromGeography(g)
if err != nil {
return geo.Geography{}, err
}
// Normalize distance to be positive.
if distance < 0.0 {
distance = -distance
azimuth += math.Pi
}
// Normalize azimuth
azimuth = azimuth.Normalized()
// Check the distance validity.
if distance > (math.Pi * spheroid.Radius()) {
return geo.Geography{}, pgerror.Newf(pgcode.InvalidParameterValue, "distance must not be greater than %f", math.Pi*spheroid.Radius())
}
if point.Empty() {
return geo.Geography{}, pgerror.Newf(pgcode.InvalidParameterValue, "cannot project POINT EMPTY")
}
	// Convert to a geodetic point.
x := point.X()
y := point.Y()
projected := spheroid.Project(
s2.LatLngFromDegrees(x, y),
distance,
azimuth,
)
ret := geom.NewPointFlat(
geom.XY,
[]float64{
geo.NormalizeLongitudeDegrees(projected.Lng.Degrees()),
geo.NormalizeLatitudeDegrees(projected.Lat.Degrees()),
},
).SetSRID(point.SRID())
return geo.MakeGeographyFromGeomT(ret)
}
// length returns the sum of the lengths and perimeters in the shapes of the Geography.
// In OGC parlance, length returns both LineString lengths _and_ Polygon perimeters.
func length(
regions []s2.Region, spheroid geoprojbase.Spheroid, useSphereOrSpheroid UseSphereOrSpheroid,
) (float64, error) {
var totalLength float64
for _, region := range regions {
switch region := region.(type) {
case s2.Point:
case *s2.Polyline:
if useSphereOrSpheroid == UseSpheroid {
totalLength += spheroid.InverseBatch((*region))
} else {
for edgeIdx, regionNumEdges := 0, region.NumEdges(); edgeIdx < regionNumEdges; edgeIdx++ {
edge := region.Edge(edgeIdx)
totalLength += s2.ChordAngleBetweenPoints(edge.V0, edge.V1).Angle().Radians()
}
}
case *s2.Polygon:
for _, loop := range region.Loops() {
if useSphereOrSpheroid == UseSpheroid {
totalLength += spheroid.InverseBatch(loop.Vertices())
} else {
for edgeIdx, loopNumEdges := 0, loop.NumEdges(); edgeIdx < loopNumEdges; edgeIdx++ {
edge := loop.Edge(edgeIdx)
totalLength += s2.ChordAngleBetweenPoints(edge.V0, edge.V1).Angle().Radians()
}
}
}
default:
return 0, pgerror.Newf(pgcode.InvalidParameterValue, "unknown type: %T", region)
}
}
if useSphereOrSpheroid == UseSphere {
totalLength *= spheroid.SphereRadius()
}
return totalLength, nil
}
| pkg/geo/geogfn/unary_operators.go | 0 | https://github.com/cockroachdb/cockroach/commit/ada419810cf861594978bc5e997e1be096a7d35e | [
0.0015744060510769486,
0.0002490594342816621,
0.0001632390049053356,
0.00017467819270677865,
0.0002976663818117231
] |
{
"id": 0,
"code_window": [
"\t\tbackupManifest, memSize, reloadBackupErr = b.readManifestOnResume(ctx, &mem, p.ExecCfg(), defaultStore, details, p.User())\n",
"\t\tif reloadBackupErr != nil {\n",
"\t\t\treturn errors.Wrap(reloadBackupErr, \"could not reload backup manifest when retrying\")\n",
"\t\t}\n",
"\t}\n",
"\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\n",
"\t// We have exhausted retries, but we have not seen a \"PermanentBulkJobError\" so\n",
"\t// it is possible that this is a transient error that is taking longer than\n",
"\t// our configured retry to go away.\n",
"\t//\n",
"\t// Let's pause the job instead of failing it so that the user can decide\n",
"\t// whether to resume it or cancel it.\n"
],
"file_path": "pkg/ccl/backupccl/backup_job.go",
"type": "add",
"edit_start_line_idx": 584
} | // Copyright 2016 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package backupccl
import (
"context"
"fmt"
"net/url"
"path"
"strings"
"time"
"github.com/cockroachdb/cockroach/pkg/build"
"github.com/cockroachdb/cockroach/pkg/ccl/utilccl"
"github.com/cockroachdb/cockroach/pkg/cloud"
"github.com/cockroachdb/cockroach/pkg/gossip"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/joberror"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/scheduledjobs"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/sql/stats"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/ctxgroup"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/cockroach/pkg/util/retry"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
"github.com/gogo/protobuf/types"
)
// BackupCheckpointInterval is the interval at which backup progress is saved
// to durable storage.
var BackupCheckpointInterval = time.Minute
// TestingShortBackupCheckpointInterval sets the BackupCheckpointInterval
// to a shorter interval for testing purposes, so we can see multiple
// checkpoints written without having extremely large backups. It returns
// a function which resets the checkpoint interval to the old interval.
func TestingShortBackupCheckpointInterval(oldInterval time.Duration) func() {
BackupCheckpointInterval = time.Millisecond * 10
return func() {
BackupCheckpointInterval = oldInterval
}
}
var forceReadBackupManifest = util.ConstantWithMetamorphicTestBool("backup-read-manifest", false)
func countRows(raw roachpb.BulkOpSummary, pkIDs map[uint64]bool) roachpb.RowCount {
res := roachpb.RowCount{DataSize: raw.DataSize}
for id, count := range raw.EntryCounts {
if _, ok := pkIDs[id]; ok {
res.Rows += count
} else {
res.IndexEntries += count
}
}
return res
}
// filterSpans returns the spans that represent the set difference
// (includes - excludes).
func filterSpans(includes []roachpb.Span, excludes []roachpb.Span) []roachpb.Span {
var cov roachpb.SpanGroup
cov.Add(includes...)
cov.Sub(excludes...)
return cov.Slice()
}
// clusterNodeCount returns the approximate number of nodes in the cluster.
func clusterNodeCount(gw gossip.OptionalGossip) (int, error) {
g, err := gw.OptionalErr(47970)
if err != nil {
return 0, err
}
var nodes int
err = g.IterateInfos(
gossip.KeyNodeIDPrefix, func(_ string, _ gossip.Info) error {
nodes++
return nil
},
)
if err != nil {
return 0, err
}
// If we somehow got 0 and return it, a caller may panic if they divide by
// such a nonsensical nodecount.
if nodes == 0 {
return 1, errors.New("failed to count nodes")
}
return nodes, nil
}
// backup exports a snapshot of every kv entry into ranged sstables.
//
// The output is an sstable per range with files in the following locations:
// - <dir>/<unique_int>.sst
// - <dir> is given by the user and may be cloud storage
// - Each file contains data for a key range that doesn't overlap with any other
// file.
func backup(
ctx context.Context,
execCtx sql.JobExecContext,
defaultURI string,
urisByLocalityKV map[string]string,
db *kv.DB,
settings *cluster.Settings,
defaultStore cloud.ExternalStorage,
storageByLocalityKV map[string]*roachpb.ExternalStorage,
job *jobs.Job,
backupManifest *BackupManifest,
makeExternalStorage cloud.ExternalStorageFactory,
encryption *jobspb.BackupEncryptionOptions,
statsCache *stats.TableStatisticsCache,
) (roachpb.RowCount, error) {
// TODO(dan): Figure out how permissions should work. #6713 is tracking this
// for grpc.
resumerSpan := tracing.SpanFromContext(ctx)
var lastCheckpoint time.Time
var completedSpans, completedIntroducedSpans []roachpb.Span
// TODO(benesch): verify these files, rather than accepting them as truth
// blindly.
// No concurrency yet, so these assignments are safe.
for _, file := range backupManifest.Files {
if file.StartTime.IsEmpty() && !file.EndTime.IsEmpty() {
completedIntroducedSpans = append(completedIntroducedSpans, file.Span)
} else {
completedSpans = append(completedSpans, file.Span)
}
}
// Subtract out any completed spans.
spans := filterSpans(backupManifest.Spans, completedSpans)
introducedSpans := filterSpans(backupManifest.IntroducedSpans, completedIntroducedSpans)
pkIDs := make(map[uint64]bool)
for i := range backupManifest.Descriptors {
if t, _, _, _ := descpb.FromDescriptor(&backupManifest.Descriptors[i]); t != nil {
pkIDs[roachpb.BulkOpSummaryID(uint64(t.ID), uint64(t.PrimaryIndex.ID))] = true
}
}
evalCtx := execCtx.ExtendedEvalContext()
dsp := execCtx.DistSQLPlanner()
// We don't return the compatible nodes here since PartitionSpans will
// filter out incompatible nodes.
planCtx, _, err := dsp.SetupAllNodesPlanning(ctx, evalCtx, execCtx.ExecCfg())
if err != nil {
return roachpb.RowCount{}, errors.Wrap(err, "failed to determine nodes on which to run")
}
backupSpecs, err := distBackupPlanSpecs(
ctx,
planCtx,
execCtx,
dsp,
int64(job.ID()),
spans,
introducedSpans,
pkIDs,
defaultURI,
urisByLocalityKV,
encryption,
roachpb.MVCCFilter(backupManifest.MVCCFilter),
backupManifest.StartTime,
backupManifest.EndTime,
)
if err != nil {
return roachpb.RowCount{}, err
}
numTotalSpans := 0
for _, spec := range backupSpecs {
numTotalSpans += len(spec.IntroducedSpans) + len(spec.Spans)
}
progressLogger := jobs.NewChunkProgressLogger(job, numTotalSpans, job.FractionCompleted(), jobs.ProgressUpdateOnly)
requestFinishedCh := make(chan struct{}, numTotalSpans) // enough buffer to never block
var jobProgressLoop func(ctx context.Context) error
if numTotalSpans > 0 {
jobProgressLoop = func(ctx context.Context) error {
// Currently the granularity of backup progress is the % of spans
// exported. Would improve accuracy if we tracked the actual size of each
// file.
return progressLogger.Loop(ctx, requestFinishedCh)
}
}
progCh := make(chan *execinfrapb.RemoteProducerMetadata_BulkProcessorProgress)
checkpointLoop := func(ctx context.Context) error {
// When a processor is done exporting a span, it will send a progress update
// to progCh.
defer close(requestFinishedCh)
var numBackedUpFiles int64
for progress := range progCh {
var progDetails BackupManifest_Progress
if err := types.UnmarshalAny(&progress.ProgressDetails, &progDetails); err != nil {
log.Errorf(ctx, "unable to unmarshal backup progress details: %+v", err)
}
if backupManifest.RevisionStartTime.Less(progDetails.RevStartTime) {
backupManifest.RevisionStartTime = progDetails.RevStartTime
}
for _, file := range progDetails.Files {
backupManifest.Files = append(backupManifest.Files, file)
backupManifest.EntryCounts.Add(file.EntryCounts)
numBackedUpFiles++
}
// Signal that an ExportRequest finished to update job progress.
for i := int32(0); i < progDetails.CompletedSpans; i++ {
requestFinishedCh <- struct{}{}
}
if timeutil.Since(lastCheckpoint) > BackupCheckpointInterval {
resumerSpan.RecordStructured(&BackupProgressTraceEvent{
TotalNumFiles: numBackedUpFiles,
TotalEntryCounts: backupManifest.EntryCounts,
RevisionStartTime: backupManifest.RevisionStartTime,
})
lastCheckpoint = timeutil.Now()
err := writeBackupManifestCheckpoint(
ctx, defaultURI, encryption, backupManifest, execCtx.ExecCfg(), execCtx.User(),
)
if err != nil {
log.Errorf(ctx, "unable to checkpoint backup descriptor: %+v", err)
}
if execCtx.ExecCfg().TestingKnobs.AfterBackupCheckpoint != nil {
execCtx.ExecCfg().TestingKnobs.AfterBackupCheckpoint()
}
}
}
return nil
}
resumerSpan.RecordStructured(&types.StringValue{Value: "starting DistSQL backup execution"})
runBackup := func(ctx context.Context) error {
return distBackup(
ctx,
execCtx,
planCtx,
dsp,
progCh,
backupSpecs,
)
}
if err := ctxgroup.GoAndWait(ctx, jobProgressLoop, checkpointLoop, runBackup); err != nil {
return roachpb.RowCount{}, errors.Wrapf(err, "exporting %d ranges", errors.Safe(numTotalSpans))
}
backupID := uuid.MakeV4()
backupManifest.ID = backupID
// Write additional partial descriptors to each node for partitioned backups.
if len(storageByLocalityKV) > 0 {
resumerSpan.RecordStructured(&types.StringValue{Value: "writing partition descriptors for partitioned backup"})
filesByLocalityKV := make(map[string][]BackupManifest_File)
for _, file := range backupManifest.Files {
filesByLocalityKV[file.LocalityKV] = append(filesByLocalityKV[file.LocalityKV], file)
}
nextPartitionedDescFilenameID := 1
for kv, conf := range storageByLocalityKV {
backupManifest.LocalityKVs = append(backupManifest.LocalityKVs, kv)
// Set a unique filename for each partition backup descriptor. The ID
// ensures uniqueness, and the kv string appended to the end is for
// readability.
filename := fmt.Sprintf("%s_%d_%s",
backupPartitionDescriptorPrefix, nextPartitionedDescFilenameID, sanitizeLocalityKV(kv))
nextPartitionedDescFilenameID++
backupManifest.PartitionDescriptorFilenames = append(backupManifest.PartitionDescriptorFilenames, filename)
desc := BackupPartitionDescriptor{
LocalityKV: kv,
Files: filesByLocalityKV[kv],
BackupID: backupID,
}
if err := func() error {
store, err := makeExternalStorage(ctx, *conf)
if err != nil {
return err
}
defer store.Close()
return writeBackupPartitionDescriptor(ctx, store, filename, encryption, &desc)
}(); err != nil {
return roachpb.RowCount{}, err
}
}
}
resumerSpan.RecordStructured(&types.StringValue{Value: "writing backup manifest"})
if err := writeBackupManifest(ctx, settings, defaultStore, backupManifestName, encryption, backupManifest); err != nil {
return roachpb.RowCount{}, err
}
var tableStatistics []*stats.TableStatisticProto
for i := range backupManifest.Descriptors {
if tbl, _, _, _ := descpb.FromDescriptor(&backupManifest.Descriptors[i]); tbl != nil {
tableDesc := tabledesc.NewBuilder(tbl).BuildImmutableTable()
// Collect all the table stats for this table.
tableStatisticsAcc, err := statsCache.GetTableStats(ctx, tableDesc)
if err != nil {
// Successfully backed up data is more valuable than table stats that can
// be recomputed after restore, and so if we fail to collect the stats of a
// table we do not want to mark the job as failed.
// The lack of stats on restore could lead to suboptimal performance when
// reading/writing to this table until the stats have been recomputed.
log.Warningf(ctx, "failed to collect stats for table: %s, "+
"table ID: %d during a backup: %s", tableDesc.GetName(), tableDesc.GetID(),
err.Error())
continue
}
for _, stat := range tableStatisticsAcc {
tableStatistics = append(tableStatistics, &stat.TableStatisticProto)
}
}
}
statsTable := StatsTable{
Statistics: tableStatistics,
}
resumerSpan.RecordStructured(&types.StringValue{Value: "writing backup table statistics"})
if err := writeTableStatistics(ctx, defaultStore, backupStatisticsFileName, encryption, &statsTable); err != nil {
return roachpb.RowCount{}, err
}
if writeMetadataSST.Get(&settings.SV) {
if err := writeBackupMetadataSST(ctx, defaultStore, encryption, backupManifest, tableStatistics); err != nil {
err = errors.Wrap(err, "writing forward-compat metadata sst")
if !build.IsRelease() {
return roachpb.RowCount{}, err
}
log.Warningf(ctx, "%+v", err)
}
}
return backupManifest.EntryCounts, nil
}
func releaseProtectedTimestamp(
ctx context.Context, txn *kv.Txn, pts protectedts.Storage, ptsID *uuid.UUID,
) error {
// If the job doesn't have a protected timestamp then there's nothing to do.
if ptsID == nil {
return nil
}
err := pts.Release(ctx, txn, *ptsID)
if errors.Is(err, protectedts.ErrNotExists) {
// No reason to return an error which might cause problems if it doesn't
// seem to exist.
		log.Warningf(ctx, "failed to release protected timestamp which seems not to exist: %v", err)
err = nil
}
return err
}
type backupResumer struct {
job *jobs.Job
backupStats roachpb.RowCount
testingKnobs struct {
ignoreProtectedTimestamps bool
}
}
var _ jobs.TraceableJob = &backupResumer{}
// ForceRealSpan implements the TraceableJob interface.
func (b *backupResumer) ForceRealSpan() bool {
return true
}
// Resume is part of the jobs.Resumer interface.
func (b *backupResumer) Resume(ctx context.Context, execCtx interface{}) error {
// The span is finished by the registry executing the job.
resumerSpan := tracing.SpanFromContext(ctx)
details := b.job.Details().(jobspb.BackupDetails)
p := execCtx.(sql.JobExecContext)
var backupManifest *BackupManifest
// If planning didn't resolve the external destination, then we need to now.
if details.URI == "" {
initialDetails := details
backupDetails, m, err := getBackupDetailAndManifest(
ctx, p.ExecCfg(), p.Txn(), details, p.User(),
)
if err != nil {
return err
}
details = backupDetails
backupManifest = &m
if len(backupManifest.Spans) > 0 && p.ExecCfg().Codec.ForSystemTenant() {
protectedtsID := uuid.MakeV4()
details.ProtectedTimestampRecord = &protectedtsID
if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
return protectTimestampForBackup(
ctx, p.ExecCfg(), txn, b.job.ID(), m, details,
)
}); err != nil {
return err
}
}
if err := writeBackupManifestCheckpoint(
ctx, details.URI, details.EncryptionOptions, backupManifest, p.ExecCfg(), p.User(),
); err != nil {
return err
}
if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
return planSchedulePTSChaining(ctx, p.ExecCfg(), txn, &details, b.job.CreatedBy())
}); err != nil {
return err
}
// The description picked during original planning might still say "LATEST",
// if resolving that to the actual directory only just happened above here.
// Ideally we'd re-render the description now that we know the subdir, but
// we don't have backup AST node anymore to easily call the rendering func.
// Instead we can just do a bit of dirty string replacement iff there is one
		// "INTO 'LATEST' IN" (if there's >1, someone has weird table/db names and
// we should just leave the description as-is, since it is just for humans).
description := b.job.Payload().Description
const unresolvedText = "INTO 'LATEST' IN"
if initialDetails.Destination.Subdir == "LATEST" && strings.Count(description, unresolvedText) == 1 {
description = strings.ReplaceAll(description, unresolvedText, fmt.Sprintf("INTO '%s' IN", details.Destination.Subdir))
}
// Update the job payload (non-volatile job definition) once, with the now
// resolved destination, updated description, etc. If we resume again we'll
// skip this whole block so this isn't an excessive update of payload.
if err := b.job.Update(ctx, nil, func(txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error {
if err := md.CheckRunningOrReverting(); err != nil {
return err
}
md.Payload.Details = jobspb.WrapPayloadDetails(details)
md.Payload.Description = description
ju.UpdatePayload(md.Payload)
return nil
}); err != nil {
return err
}
// Collect telemetry, once per backup after resolving its destination.
lic := utilccl.CheckEnterpriseEnabled(
p.ExecCfg().Settings, p.ExecCfg().LogicalClusterID(), p.ExecCfg().Organization(), "",
) != nil
collectTelemetry(m, details, details, lic)
}
// For all backups, partitioned or not, the main BACKUP manifest is stored at
// details.URI.
defaultConf, err := cloud.ExternalStorageConfFromURI(details.URI, p.User())
if err != nil {
return errors.Wrapf(err, "export configuration")
}
defaultStore, err := p.ExecCfg().DistSQLSrv.ExternalStorage(ctx, defaultConf)
if err != nil {
return errors.Wrapf(err, "make storage")
}
defer defaultStore.Close()
// EncryptionInfo is non-nil only when new encryption information has been
// generated during BACKUP planning.
redactedURI := RedactURIForErrorMessage(details.URI)
if details.EncryptionInfo != nil {
if err := writeEncryptionInfoIfNotExists(ctx, details.EncryptionInfo,
defaultStore); err != nil {
return errors.Wrapf(err, "creating encryption info file to %s", redactedURI)
}
}
storageByLocalityKV := make(map[string]*roachpb.ExternalStorage)
for kv, uri := range details.URIsByLocalityKV {
conf, err := cloud.ExternalStorageConfFromURI(uri, p.User())
if err != nil {
return err
}
storageByLocalityKV[kv] = &conf
}
mem := p.ExecCfg().RootMemoryMonitor.MakeBoundAccount()
defer mem.Close(ctx)
var memSize int64
defer func() {
if memSize != 0 {
mem.Shrink(ctx, memSize)
}
}()
if backupManifest == nil || forceReadBackupManifest {
backupManifest, memSize, err = b.readManifestOnResume(ctx, &mem, p.ExecCfg(), defaultStore, details, p.User())
if err != nil {
return err
}
}
statsCache := p.ExecCfg().TableStatsCache
// We retry on pretty generic failures -- any rpc error. If a worker node were
// to restart, it would produce this kind of error, but there may be other
	// errors that are also rpc errors. Don't retry too aggressively.
retryOpts := retry.Options{
MaxBackoff: 1 * time.Second,
MaxRetries: 5,
}
if err := p.ExecCfg().JobRegistry.CheckPausepoint("backup.before_flow"); err != nil {
return err
}
// We want to retry a backup if there are transient failures (i.e. worker nodes
// dying), so if we receive a retryable error, re-plan and retry the backup.
var res roachpb.RowCount
var retryCount int32
for r := retry.StartWithCtx(ctx, retryOpts); r.Next(); {
retryCount++
resumerSpan.RecordStructured(&roachpb.RetryTracingEvent{
Operation: "backupResumer.Resume",
AttemptNumber: retryCount,
RetryError: tracing.RedactAndTruncateError(err),
})
res, err = backup(
ctx,
p,
details.URI,
details.URIsByLocalityKV,
p.ExecCfg().DB,
p.ExecCfg().Settings,
defaultStore,
storageByLocalityKV,
b.job,
backupManifest,
p.ExecCfg().DistSQLSrv.ExternalStorage,
details.EncryptionOptions,
statsCache,
)
if err == nil {
break
}
if joberror.IsPermanentBulkJobError(err) {
return errors.Wrap(err, "failed to run backup")
}
log.Warningf(ctx, `BACKUP job encountered retryable error: %+v`, err)
// Reload the backup manifest to pick up any spans we may have completed on
// previous attempts.
var reloadBackupErr error
mem.Shrink(ctx, memSize)
memSize = 0
backupManifest, memSize, reloadBackupErr = b.readManifestOnResume(ctx, &mem, p.ExecCfg(), defaultStore, details, p.User())
if reloadBackupErr != nil {
return errors.Wrap(reloadBackupErr, "could not reload backup manifest when retrying")
}
}
if err != nil {
return errors.Wrap(err, "exhausted retries")
}
var backupDetails jobspb.BackupDetails
var ok bool
if backupDetails, ok = b.job.Details().(jobspb.BackupDetails); !ok {
return errors.Newf("unexpected job details type %T", b.job.Details())
}
if err := maybeUpdateSchedulePTSRecord(ctx, p.ExecCfg(), backupDetails, b.job.ID()); err != nil {
return err
}
if details.ProtectedTimestampRecord != nil && !b.testingKnobs.ignoreProtectedTimestamps {
if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
details := b.job.Details().(jobspb.BackupDetails)
return releaseProtectedTimestamp(ctx, txn, p.ExecCfg().ProtectedTimestampProvider,
details.ProtectedTimestampRecord)
}); err != nil {
log.Errorf(ctx, "failed to release protected timestamp: %v", err)
}
}
// If this is a full backup that was automatically nested in a collection of
// backups, record the path under which we wrote it to the LATEST file in the
	// root of the collection. Note: this file is *not* encrypted, as it only
// contains the name of another file that is in the same folder -- if you can
// get to this file to read it, you could already find its contents from the
// listing of the directory it is in -- it exists only to save us a
// potentially expensive listing of a giant backup collection to find the most
// recent completed entry.
if backupManifest.StartTime.IsEmpty() && details.CollectionURI != "" {
backupURI, err := url.Parse(details.URI)
if err != nil {
return err
}
collectionURI, err := url.Parse(details.CollectionURI)
if err != nil {
return err
}
suffix := strings.TrimPrefix(path.Clean(backupURI.Path), path.Clean(collectionURI.Path))
c, err := p.ExecCfg().DistSQLSrv.ExternalStorageFromURI(ctx, details.CollectionURI, p.User())
if err != nil {
return err
}
defer c.Close()
if err := writeNewLatestFile(ctx, p.ExecCfg().Settings, c, suffix); err != nil {
return err
}
}
b.backupStats = res
// Collect telemetry.
{
numClusterNodes, err := clusterNodeCount(p.ExecCfg().Gossip)
if err != nil {
if !build.IsRelease() && p.ExecCfg().Codec.ForSystemTenant() {
return err
}
log.Warningf(ctx, "unable to determine cluster node count: %v", err)
numClusterNodes = 1
}
telemetry.Count("backup.total.succeeded")
const mb = 1 << 20
sizeMb := res.DataSize / mb
sec := int64(timeutil.Since(timeutil.FromUnixMicros(b.job.Payload().StartedMicros)).Seconds())
var mbps int64
if sec > 0 {
mbps = mb / sec
}
if details.StartTime.IsEmpty() {
telemetry.CountBucketed("backup.duration-sec.full-succeeded", sec)
telemetry.CountBucketed("backup.size-mb.full", sizeMb)
telemetry.CountBucketed("backup.speed-mbps.full.total", mbps)
telemetry.CountBucketed("backup.speed-mbps.full.per-node", mbps/int64(numClusterNodes))
} else {
telemetry.CountBucketed("backup.duration-sec.inc-succeeded", sec)
telemetry.CountBucketed("backup.size-mb.inc", sizeMb)
telemetry.CountBucketed("backup.speed-mbps.inc.total", mbps)
telemetry.CountBucketed("backup.speed-mbps.inc.per-node", mbps/int64(numClusterNodes))
}
}
return b.maybeNotifyScheduledJobCompletion(ctx, jobs.StatusSucceeded, p.ExecCfg())
}
// ReportResults implements JobResultsReporter interface.
func (b *backupResumer) ReportResults(ctx context.Context, resultsCh chan<- tree.Datums) error {
select {
case <-ctx.Done():
return ctx.Err()
case resultsCh <- tree.Datums{
tree.NewDInt(tree.DInt(b.job.ID())),
tree.NewDString(string(jobs.StatusSucceeded)),
tree.NewDFloat(tree.DFloat(1.0)),
tree.NewDInt(tree.DInt(b.backupStats.Rows)),
tree.NewDInt(tree.DInt(b.backupStats.IndexEntries)),
tree.NewDInt(tree.DInt(b.backupStats.DataSize)),
}:
return nil
}
}
func (b *backupResumer) readManifestOnResume(
ctx context.Context,
mem *mon.BoundAccount,
cfg *sql.ExecutorConfig,
defaultStore cloud.ExternalStorage,
details jobspb.BackupDetails,
user security.SQLUsername,
) (*BackupManifest, int64, error) {
// We don't read the table descriptors from the backup descriptor, but
// they could be using either the new or the old foreign key
// representations. We should just preserve whatever representation the
// table descriptors were using and leave them alone.
desc, memSize, err := readBackupCheckpointManifest(ctx, mem, defaultStore, backupManifestCheckpointName,
details.EncryptionOptions)
if err != nil {
if !errors.Is(err, cloud.ErrFileDoesNotExist) {
return nil, 0, errors.Wrapf(err, "reading backup checkpoint")
}
// Try reading temp checkpoint.
tmpCheckpoint := tempCheckpointFileNameForJob(b.job.ID())
desc, memSize, err = readBackupCheckpointManifest(ctx, mem, defaultStore, tmpCheckpoint, details.EncryptionOptions)
if err != nil {
return nil, 0, err
}
// "Rename" temp checkpoint.
if err := writeBackupManifestCheckpoint(
ctx, details.URI, details.EncryptionOptions, &desc, cfg, user,
); err != nil {
mem.Shrink(ctx, memSize)
return nil, 0, errors.Wrapf(err, "renaming temp checkpoint file")
}
// Best effort remove temp checkpoint.
if err := defaultStore.Delete(ctx, tmpCheckpoint); err != nil {
log.Errorf(ctx, "error removing temporary checkpoint %s", tmpCheckpoint)
}
if err := defaultStore.Delete(ctx, backupProgressDirectory+"/"+tmpCheckpoint); err != nil {
log.Errorf(ctx, "error removing temporary checkpoint %s", backupProgressDirectory+"/"+tmpCheckpoint)
}
}
if !desc.ClusterID.Equal(cfg.LogicalClusterID()) {
mem.Shrink(ctx, memSize)
return nil, 0, errors.Newf("cannot resume backup started on another cluster (%s != %s)",
desc.ClusterID, cfg.LogicalClusterID())
}
return &desc, memSize, nil
}
func (b *backupResumer) maybeNotifyScheduledJobCompletion(
ctx context.Context, jobStatus jobs.Status, exec *sql.ExecutorConfig,
) error {
env := scheduledjobs.ProdJobSchedulerEnv
if knobs, ok := exec.DistSQLSrv.TestingKnobs.JobsTestingKnobs.(*jobs.TestingKnobs); ok {
if knobs.JobSchedulerEnv != nil {
env = knobs.JobSchedulerEnv
}
}
err := exec.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
// We cannot rely on b.job containing created_by_id because on job
// resumption the registry does not populate the resumer's CreatedByInfo.
datums, err := exec.InternalExecutor.QueryRowEx(
ctx,
"lookup-schedule-info",
txn,
sessiondata.InternalExecutorOverride{User: security.NodeUserName()},
fmt.Sprintf(
"SELECT created_by_id FROM %s WHERE id=$1 AND created_by_type=$2",
env.SystemJobsTableName()),
b.job.ID(), jobs.CreatedByScheduledJobs)
if err != nil {
return errors.Wrap(err, "schedule info lookup")
}
if datums == nil {
// Not a scheduled backup.
return nil
}
scheduleID := int64(tree.MustBeDInt(datums[0]))
if err := jobs.NotifyJobTermination(
ctx, env, b.job.ID(), jobStatus, b.job.Details(), scheduleID, exec.InternalExecutor, txn); err != nil {
return errors.Wrapf(err,
"failed to notify schedule %d of completion of job %d", scheduleID, b.job.ID())
}
return nil
})
return err
}
// OnFailOrCancel is part of the jobs.Resumer interface.
func (b *backupResumer) OnFailOrCancel(ctx context.Context, execCtx interface{}) error {
telemetry.Count("backup.total.failed")
telemetry.CountBucketed("backup.duration-sec.failed",
int64(timeutil.Since(timeutil.FromUnixMicros(b.job.Payload().StartedMicros)).Seconds()))
p := execCtx.(sql.JobExecContext)
cfg := p.ExecCfg()
b.deleteCheckpoint(ctx, cfg, p.User())
if err := cfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
details := b.job.Details().(jobspb.BackupDetails)
return releaseProtectedTimestamp(ctx, txn, cfg.ProtectedTimestampProvider,
details.ProtectedTimestampRecord)
}); err != nil {
return err
}
// This should never return an error unless resolving the schedule that the
// job is being run under fails. This could happen if the schedule is dropped
// while the job is executing.
if err := b.maybeNotifyScheduledJobCompletion(ctx, jobs.StatusFailed,
execCtx.(sql.JobExecContext).ExecCfg()); err != nil {
log.Errorf(ctx, "failed to notify job %d on completion of OnFailOrCancel: %+v",
b.job.ID(), err)
}
return nil //nolint:returnerrcheck
}
func (b *backupResumer) deleteCheckpoint(
ctx context.Context, cfg *sql.ExecutorConfig, user security.SQLUsername,
) {
// Attempt to delete BACKUP-CHECKPOINT(s) in /progress directory.
if err := func() error {
details := b.job.Details().(jobspb.BackupDetails)
// For all backups, partitioned or not, the main BACKUP manifest is stored at
// details.URI.
exportStore, err := cfg.DistSQLSrv.ExternalStorageFromURI(ctx, details.URI, user)
if err != nil {
return err
}
defer exportStore.Close()
// We first attempt to delete from base directory to account for older
// backups, and then from the progress directory.
err = exportStore.Delete(ctx, backupManifestCheckpointName)
if err != nil {
log.Warningf(ctx, "unable to delete checkpointed backup descriptor file in base directory: %+v", err)
}
err = exportStore.Delete(ctx, backupManifestCheckpointName+backupManifestChecksumSuffix)
if err != nil {
log.Warningf(ctx, "unable to delete checkpoint checksum file in base directory: %+v", err)
}
// Delete will not delete a nonempty directory, so we have to go through
// all files and delete each file one by one.
return exportStore.List(ctx, backupProgressDirectory, "", func(p string) error {
return exportStore.Delete(ctx, backupProgressDirectory+p)
})
}(); err != nil {
log.Warningf(ctx, "unable to delete checkpointed backup descriptor file in progress directory: %+v", err)
}
}
var _ jobs.Resumer = &backupResumer{}
func init() {
jobs.RegisterConstructor(
jobspb.TypeBackup,
func(job *jobs.Job, _ *cluster.Settings) jobs.Resumer {
return &backupResumer{
job: job,
}
},
)
}
| pkg/ccl/backupccl/backup_job.go | 1 | https://github.com/cockroachdb/cockroach/commit/67e659223cd2a2ae4ea2e5fa6d9461fd1418c4de | [
0.9941987991333008,
0.0653901919722557,
0.0001619390823179856,
0.0011542930733412504,
0.22157181799411774
] |
{
"id": 0,
"code_window": [
"\t\tbackupManifest, memSize, reloadBackupErr = b.readManifestOnResume(ctx, &mem, p.ExecCfg(), defaultStore, details, p.User())\n",
"\t\tif reloadBackupErr != nil {\n",
"\t\t\treturn errors.Wrap(reloadBackupErr, \"could not reload backup manifest when retrying\")\n",
"\t\t}\n",
"\t}\n",
"\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\n",
"\t// We have exhausted retries, but we have not seen a \"PermanentBulkJobError\" so\n",
"\t// it is possible that this is a transient error that is taking longer than\n",
"\t// our configured retry to go away.\n",
"\t//\n",
"\t// Let's pause the job instead of failing it so that the user can decide\n",
"\t// whether to resume it or cancel it.\n"
],
"file_path": "pkg/ccl/backupccl/backup_job.go",
"type": "add",
"edit_start_line_idx": 584
} | # LogicTest: local
statement ok
CREATE TABLE xyz (
x INT PRIMARY KEY,
y INT,
z INT,
INDEX foo (z, y),
FAMILY "primary" (x, y, z)
)
query T
EXPLAIN (OPT, CATALOG) SELECT * from xyz
----
TABLE xyz
├── x int not null
├── y int
├── z int
├── crdb_internal_mvcc_timestamp decimal [hidden] [system]
├── tableoid oid [hidden] [system]
├── PRIMARY INDEX xyz_pkey
│ └── x int not null
└── INDEX foo
├── z int
├── y int
└── x int not null
scan xyz
# Verify that column qualifications in check constraints and computed columns
# are stripped.
statement ok
CREATE TABLE abcdef (
a INT NOT NULL,
b INT,
c INT DEFAULT (10),
d INT AS (abcdef.b + c + 1) STORED,
e INT AS (a) STORED,
f INT NOT NULL CHECK (test.abcdef.f > 2),
FAMILY "primary" (a, b, c, d, e, f, rowid)
)
query T
EXPLAIN (OPT, CATALOG) SELECT * from abcdef
----
TABLE abcdef
├── a int not null
├── b int
├── c int default (10:::INT8)
├── d int as ((b + c) + 1:::INT8) stored
├── e int as (a) stored
├── f int not null
├── rowid int not null default (unique_rowid()) [hidden]
├── crdb_internal_mvcc_timestamp decimal [hidden] [system]
├── tableoid oid [hidden] [system]
├── CHECK (f > 2:::INT8)
└── PRIMARY INDEX abcdef_pkey
└── rowid int not null default (unique_rowid()) [hidden]
scan abcdef
├── check constraint expressions
│ └── f > 2
└── computed column expressions
├── d
│ └── (b + c) + 1
└── e
└── a
statement ok
CREATE TABLE uvwxy (
u INT,
v INT,
w INT,
x INT,
y INT,
PRIMARY KEY (u,v),
FAMILY (u,v,w),
FAMILY (x),
FAMILY (y)
)
query T
EXPLAIN (OPT, CATALOG) SELECT * from uvwxy
----
TABLE uvwxy
├── u int not null
├── v int not null
├── w int
├── x int
├── y int
├── crdb_internal_mvcc_timestamp decimal [hidden] [system]
├── tableoid oid [hidden] [system]
├── FAMILY fam_0_u_v_w (u, v, w)
├── FAMILY fam_1_x (x)
├── FAMILY fam_2_y (y)
└── PRIMARY INDEX uvwxy_pkey
├── u int not null
└── v int not null
scan uvwxy
# Test foreign keys.
statement ok
CREATE TABLE parent (p INT, q INT, r INT, other INT, PRIMARY KEY (p, q, r), FAMILY "primary" (p, q, r, other))
# Simple FK.
statement ok
CREATE TABLE child (
c INT PRIMARY KEY,
p INT, q INT, r INT,
CONSTRAINT fk FOREIGN KEY (p,q,r) REFERENCES parent(p,q,r),
FAMILY "primary" (c, p, q, r)
)
query T
EXPLAIN (OPT, CATALOG) SELECT * from child
----
TABLE child
├── c int not null
├── p int
├── q int
├── r int
├── crdb_internal_mvcc_timestamp decimal [hidden] [system]
├── tableoid oid [hidden] [system]
├── PRIMARY INDEX child_pkey
│ └── c int not null
└── CONSTRAINT fk FOREIGN KEY child (p, q, r) REFERENCES parent (p, q, r)
scan child
query T
EXPLAIN (OPT, CATALOG) SELECT * from parent
----
TABLE parent
├── p int not null
├── q int not null
├── r int not null
├── other int
├── crdb_internal_mvcc_timestamp decimal [hidden] [system]
├── tableoid oid [hidden] [system]
├── PRIMARY INDEX parent_pkey
│ ├── p int not null
│ ├── q int not null
│ └── r int not null
└── REFERENCED BY CONSTRAINT fk FOREIGN KEY child (p, q, r) REFERENCES parent (p, q, r)
scan parent
# FK with match and actions.
statement ok
CREATE TABLE child2 (
c INT PRIMARY KEY,
p INT, q INT, r INT,
CONSTRAINT fk FOREIGN KEY (p,q,r) REFERENCES parent(p,q,r) MATCH FULL ON DELETE SET NULL ON UPDATE SET DEFAULT,
FAMILY "primary" (c, p, q, r)
)
# TODO(radu, justin): we are missing the ON UPDATE part.
query T
EXPLAIN (OPT, CATALOG) SELECT * from child2
----
TABLE child2
├── c int not null
├── p int
├── q int
├── r int
├── crdb_internal_mvcc_timestamp decimal [hidden] [system]
├── tableoid oid [hidden] [system]
├── PRIMARY INDEX child2_pkey
│ └── c int not null
└── CONSTRAINT fk FOREIGN KEY child2 (p, q, r) REFERENCES parent (p, q, r) MATCH FULL ON DELETE SET NULL
scan child2
query T
EXPLAIN (OPT, CATALOG) SELECT * from parent
----
TABLE parent
├── p int not null
├── q int not null
├── r int not null
├── other int
├── crdb_internal_mvcc_timestamp decimal [hidden] [system]
├── tableoid oid [hidden] [system]
├── PRIMARY INDEX parent_pkey
│ ├── p int not null
│ ├── q int not null
│ └── r int not null
├── REFERENCED BY CONSTRAINT fk FOREIGN KEY child (p, q, r) REFERENCES parent (p, q, r)
└── REFERENCED BY CONSTRAINT fk FOREIGN KEY child2 (p, q, r) REFERENCES parent (p, q, r) MATCH FULL ON DELETE SET NULL
scan parent
| pkg/sql/opt/exec/execbuilder/testdata/catalog | 0 | https://github.com/cockroachdb/cockroach/commit/67e659223cd2a2ae4ea2e5fa6d9461fd1418c4de | [
0.00024066443438641727,
0.000174583779880777,
0.00016031222185119987,
0.00017271247634198517,
0.000017188911442644894
] |
{
"id": 0,
"code_window": [
"\t\tbackupManifest, memSize, reloadBackupErr = b.readManifestOnResume(ctx, &mem, p.ExecCfg(), defaultStore, details, p.User())\n",
"\t\tif reloadBackupErr != nil {\n",
"\t\t\treturn errors.Wrap(reloadBackupErr, \"could not reload backup manifest when retrying\")\n",
"\t\t}\n",
"\t}\n",
"\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\n",
"\t// We have exhausted retries, but we have not seen a \"PermanentBulkJobError\" so\n",
"\t// it is possible that this is a transient error that is taking longer than\n",
"\t// our configured retry to go away.\n",
"\t//\n",
"\t// Let's pause the job instead of failing it so that the user can decide\n",
"\t// whether to resume it or cancel it.\n"
],
"file_path": "pkg/ccl/backupccl/backup_job.go",
"type": "add",
"edit_start_line_idx": 584
} | - Single-Use Common Table Expressions
- Status: completed
- Start Date: 2017-11-30
- Authors: Jordan Lewis
- RFC PR: #20374
- Cockroach Issue: #7029
# Summary
Implement a simple subset of common table expressions that permits naming
result sets and using each of them at most once in a statement. Full support
for common table expression (henceforth CTEs) requires temporary table
infrastructure, which is currently missing from CockroachDB. This proposal aims
to fast-track the implementation of the subset of CTEs that doesn't require
temporary tables, providing our users with partial compatibility and query
readability boons at little cost.
The supported subset will be known as single-use CTEs, and consists of:
- Full `WITH ... AS` syntax support, with unlimited clauses
- Each clause can be referenced as a data source at most once
Features that are not present in single-use CTEs:
- Using a CTE more than once in a data source will not be included because
implementing it requires temporary tables
- Correlated CTEs will not be supported because we don't yet support correlated
subqueries in general.
- `WITH RECURSIVE` will not be included because implementing it requires
temporary tables and a complex new execution strategy.
# Motivation
Common table expressions (CTEs), or `WITH` clauses, are a standard SQL 1999
feature that permit naming the result sets of intermediate statements for use
in a single larger `SELECT`, `INSERT/UPSERT`, `UPDATE` or `DELETE` statement.
In full CTEs, the named statements can be referred to unlimited times
throughout the rest of the statement. To preserve the referential integrity of
the names of the statements, the execution engine must ensure that each impure
clause (either one that modifies data or one that uses an impure builtin
function) is not executed more than once. CTEs increase the expressivity of SQL
by adding some syntactic sugar and new access patterns:
1. Statements with many subqueries can be made more readable by extracting the
subqueries into named, top-level CTE clauses
2. The results of `INSERT/UPSERT/UPDATE/DELETE` statements that use `RETURNING`
can be captured in a named result set, which is not possible with ordinary
subqueries
3. Recursive statements for tasks like graph traversal can be written with a CTE
clause that references itself, using the `WITH RECURSIVE` syntax
4. Statements can reference a named CTE clause more than once, enabling patterns
that join complex datasets to themselves
CTEs are a frequently requested feature for CockroachDB, both for compatibility
and developer quality-of-life reasons. Implementing CTEs in full is quite
involved, because of the requirement that each CTE clause is referentially
transparent. Impure statements can have arbitrarily large result sets, so
temporarily accumulating these result sets for use by other statements requires
infrastructure for temporary tables to ensure that CockroachDB doesn't run out
of memory while executing them.
However, many CTE use cases only need the syntactic sugar provided in points 1
and 2 above. None of the queries mentioned in CockroachDB's CTE issue #7029 use
features missing from the proposed subset, for example. Also, several ORMs
including ActiveRecord use single-use CTEs as part of their schema introspection
routines.
Therefore, this proposal aims to implement just the syntactic sugar in points 1
and 2 above, additionally imposing the restriction that each CTE clause may not be
referred to as a data source more than once. We believe that providing this
subset of CTEs will be so beneficial for our users that we shouldn't delay
implementing the feature until after temporary tables are available at some
undetermined time in the future.
An
[initial implementation](https://github.com/cockroachdb/cockroach/pull/20359)
of this subset was straightforward and weighed in at less than 300 lines of new
non-test code.
# Guide-level explanation
Implementing this proposal would enable the `WITH` statement syntax for
unlimited named result sets per statement, as long as each result set is not
accessed more than once.
The following syntax sketch aims to show the salient features of the CTE syntax:
```
WITH name1 AS (<dml_with_results>),
name2 AS (<dml_with_results> that can reference name1),
name3 AS (<dml_with_results> that can reference name1 and name2),
...
nameN AS (<dml_with_results> that can reference all names above)
<dml> that can reference all names above
```
where `<dml_with_results>` is any `SELECT` or `INSERT`, `UPSERT`, `UPDATE` or
`DELETE` that uses the `RETURNING` clause, and `<dml>` is any of the above
statements with or without `RETURNING`.
The following example demonstrates the query-factoring capability of CTEs:
```
--- Original query:
INSERT INTO v SELECT * FROM
(SELECT c FROM u JOIN
(SELECT a, b FROM t WHERE a < 5) AS x(a, b)
ON u.a = x.a WHERE b > 10)
AS y
--- CTE'd equivalent of the above:
WITH x(a, b) AS (SELECT a, b FROM t WHERE a < 5),
y AS (SELECT c from u JOIN x ON u.a = x.a WHERE b > 10)
INSERT INTO v SELECT * from y
```
The second version is more readable, since the subquery nesting is replaced
with a lateral set of result set declarations.
Here's an example with `RETURNING` clauses in the CTE clauses:
```
WITH x AS (INSERT INTO t(a) VALUES(1) RETURNING a),
y(c) AS (DELETE FROM u WHERE a IN (x) RETURNING b),
z AS (SELECT a FROM v WHERE a < 10)
SELECT * FROM z JOIN y ON z.a = y.c;
```
In this example, the outputs of an `INSERT` and `DELETE` statement are each
used as a named result set, something that's not possible with ordinary
subqueries.
Each CTE clause can itself have nested `WITH` clauses or subqueries, in which
case the names from the outer CTE are still made available to the inner
queries. For example:
```
WITH x AS (SELECT c FROM a),
y AS (SELECT d FROM b WHERE e IN (SELECT p from c WHERE q IN x)
SELECT * FROM y WHERE d > 5;
```
In this case, the subquery in clause `y` can still reference the clause `x`
from the outer CTE.
Each clause can only reference named result sets that were defined before in
the statement, and clauses can't reference themselves. Additionally, each
result set can only be used as a data source once by subsequent CTE clauses and
the main statement clause. For example, the following CTEs would not be
supported by the proposal:
```
--- Sum the integers from 1 to 10
--- Not supported: clauses can't reference themselves.
WITH RECURSIVE t(n) AS (
SELECT 1
UNION ALL
SELECT n+1 FROM t
)
SELECT SUM(n) FROM t LIMIT 10;
--- Not supported: can't reference a clause more than once.
WITH x(a) AS (SELECT a FROM t),
y(b) AS (SELECT a + 1 FROM x)
SELECT * FROM x JOIN y ON x.a = y.b;
```
As a more realistic example, implementing this proposal would permit
CockroachDB to execute the complete sample query suggested by the popular CTE
blog post
[The Best Postgres Feature You're Not Using – CTEs Aka WITH Clauses](http://www.craigkerstiens.com/2013/11/18/best-postgres-feature-youre-not-using/):
```sql
--- Initial query to grab project title and tasks per user
WITH users_tasks AS (
SELECT
users.id as user_id,
users.email,
array_agg(tasks.name) as task_list,
projects.title
FROM
users,
tasks,
project
WHERE
users.id = tasks.user_id
projects.title = tasks.project_id
GROUP BY
users.email,
projects.title
),
--- Calculates the total tasks per each project
total_tasks_per_project AS (
SELECT
project_id,
count(*) as task_count
FROM tasks
GROUP BY project_id
),
--- Calculates the projects per each user
tasks_per_project_per_user AS (
SELECT
user_id,
project_id,
count(*) as task_count
FROM tasks
GROUP BY user_id, project_id
),
--- Gets user ids that have over 50% of tasks assigned
overloaded_users AS (
SELECT tasks_per_project_per_user.user_id,
FROM tasks_per_project_per_user,
total_tasks_per_project
WHERE tasks_per_project_per_user.task_count > (total_tasks_per_project / 2)
)
SELECT
email,
task_list,
title
FROM
users_tasks,
overloaded_users
WHERE
users_tasks.user_id = overloaded_users.user_id
```
This query is executable by the single-use CTE implementation since every named
result set is used at most once. As you can see, trying to represent this
massive query as a single nested set of subselects would be much more difficult
to read. This improvement in readability is a major reason why CTEs are
popular, and something that we could easily provide today without waiting for
temporary tables.
# Reference-level explanation
CockroachDB already supports using arbitrary plans as plan data sources, so the
bulk of the implementation of the single-use CTEs is adding syntactic sugar and
a name resolution mechanism for CTE subclauses.
Specifically, the planner will be augmented to include a naming environment
that is composed of one frame per CTE statement. Each frame will consist of a
mapping from CTE clause name to a query plan for executing the statement
corresponding to the name, as well as any column aliases the user might have
provided. The environment will be treated as a stack. Planning a CTE pushes
a new frame onto the stack, and finishing planning that CTE pops the frame.
Data source resolution will be augmented to search through the naming
environment from the top of the stack to the bottom before searching for
tables. CTE names can shadow table names and other CTE names in an outer scope,
but an individual CTE statement can't contain more than one clause with any
given name.
To enforce this proposal's restrictions, the naming environment will also
include a flag on each named clause that is set when it is used as a data
source in another clause or the main statement. This flag will allow the
planner to detect when a query tries to reference a table more than once and
return a suitable error.
Because of this proposal's restrictions, temporary table infrastructure is not
necessary as each CTE clause will stream its output to the plan that references
it just like an ordinary CockroachDB plan tree.
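To make this concrete, here is a minimal Go sketch of the naming environment described above: a stack of frames, one entry per named clause with a single-use flag, and top-to-bottom resolution that lets inner names shadow outer ones and rejects a second reference. All identifiers here (`cteEnv`, `cteFrame`, `cteEntry`, `planNode`) and the error text are hypothetical illustrations for this sketch, not the planner's actual types.
```go
package main
import "fmt"
// planNode stands in for a planned statement; the real planner type is not
// specified here.
type planNode struct{ sql string }
// cteEntry holds the plan for one named WITH clause, its optional column
// aliases, and a flag recording whether it has already been used as a data
// source.
type cteEntry struct {
	plan    *planNode
	columns []string
	used    bool
}
// cteFrame maps clause names to entries for a single WITH statement.
type cteFrame map[string]*cteEntry
// cteEnv is the stack of frames: one frame is pushed per WITH statement being
// planned and popped when planning of that statement finishes.
type cteEnv struct{ frames []cteFrame }
func (e *cteEnv) push(f cteFrame) { e.frames = append(e.frames, f) }
func (e *cteEnv) pop()            { e.frames = e.frames[:len(e.frames)-1] }
// resolve searches frames from the top of the stack to the bottom, so inner
// clauses shadow outer ones, and enforces the single-use restriction.
func (e *cteEnv) resolve(name string) (*planNode, error) {
	for i := len(e.frames) - 1; i >= 0; i-- {
		if entry, ok := e.frames[i][name]; ok {
			if entry.used {
				return nil, fmt.Errorf("cte %q can only be referenced once", name)
			}
			entry.used = true
			return entry.plan, nil
		}
	}
	return nil, nil // not a CTE name; fall through to normal table resolution
}
func main() {
	env := &cteEnv{}
	env.push(cteFrame{"x": {plan: &planNode{sql: "SELECT a, b FROM t WHERE a < 5"}}})
	if _, err := env.resolve("x"); err == nil {
		fmt.Println("first reference to x resolved")
	}
	if _, err := env.resolve("x"); err != nil {
		fmt.Println(err) // second reference is rejected
	}
	env.pop()
}
```
With this shape, the restriction that each clause is referenced at most once is enforced at name-resolution time via the `used` flag, which is what lets this subset avoid temporary storage entirely.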
Performance of planning ordinary changes will not be meaningfully affected,
since the naming environment doesn't have to get checked if it's empty.
## Drawbacks
Despite the fact that completing this proposal would provide strictly more
functionality to our users, it might be risky to ship an incomplete version of
common table expressions from an optics perspective. We wouldn't want to give
users the impression that we don't care about fully implementing features that
we claim to support.
This risk can be mitigated by setting expectations carefully in the docs and
feature release notes. As long as we don't claim to have full CTE support,
people won't be unduly surprised when they can't use some of the more complex
functionality that CTEs offer.
## Rationale and Alternatives
This design is a simple incremental step toward providing common table
expressions. Whether or not we choose to ship this partial CTE implementation,
it's a good idea to start with this simpler set of functionality to establish a
baseline for testing.
As an alternative, we could punt on CTEs entirely until temporary tables are
available for internal use, and then deliver a full implementation of CTEs all
at once.
The impact of waiting to implement this functionality is that we might turn
away potential users that expect to be able to use CTEs.
## Unresolved questions
None.
## Future work
Implementing the rest of CTEs has 2 stages: temporary storage to enable
multi-use clauses, and execution engine changes to enable `WITH RECURSIVE`.
Temporary storage may be less involved than fully implementing temporary
tables. DistSQL processors can be configured to use temporary storage in a
processor if necessary, so it's possible that all this will take is plugging
the temporary storage infrastructure into the local execution engine in front
of clauses that need to be used more than once, or waiting until the two
engines are merged.
`WITH RECURSIVE` will take some additional thought, and possibly another RFC.
| docs/RFCS/20171206_single_use_common_table_expressions.md | 0 | https://github.com/cockroachdb/cockroach/commit/67e659223cd2a2ae4ea2e5fa6d9461fd1418c4de | [
0.00017741952615324408,
0.0001674065861152485,
0.00016050692647695541,
0.00016720002167858183,
0.00000450154993814067
] |
{
"id": 0,
"code_window": [
"\t\tbackupManifest, memSize, reloadBackupErr = b.readManifestOnResume(ctx, &mem, p.ExecCfg(), defaultStore, details, p.User())\n",
"\t\tif reloadBackupErr != nil {\n",
"\t\t\treturn errors.Wrap(reloadBackupErr, \"could not reload backup manifest when retrying\")\n",
"\t\t}\n",
"\t}\n",
"\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\n",
"\t// We have exhausted retries, but we have not seen a \"PermanentBulkJobError\" so\n",
"\t// it is possible that this is a transient error that is taking longer than\n",
"\t// our configured retry to go away.\n",
"\t//\n",
"\t// Let's pause the job instead of failing it so that the user can decide\n",
"\t// whether to resume it or cancel it.\n"
],
"file_path": "pkg/ccl/backupccl/backup_job.go",
"type": "add",
"edit_start_line_idx": 584
} | // Copyright 2022 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package server
import (
"compress/gzip"
"net/http"
"sync"
)
// TODO(benesch): Use https://github.com/NYTimes/gziphandler instead.
// gzipResponseWriter reinvents the wheel and is not as robust.
type gzipResponseWriter struct {
gz gzip.Writer
http.ResponseWriter
}
// Allocation pool for gzipResponseWriters.
var gzipResponseWriterPool sync.Pool
func newGzipResponseWriter(rw http.ResponseWriter) *gzipResponseWriter {
var w *gzipResponseWriter
if wI := gzipResponseWriterPool.Get(); wI == nil {
w = new(gzipResponseWriter)
} else {
w = wI.(*gzipResponseWriter)
}
w.Reset(rw)
return w
}
func (w *gzipResponseWriter) Reset(rw http.ResponseWriter) {
w.gz.Reset(rw)
w.ResponseWriter = rw
}
func (w *gzipResponseWriter) Write(b []byte) (int, error) {
// The underlying http.ResponseWriter can't sniff gzipped data properly, so we
// do our own sniffing on the uncompressed data.
if w.Header().Get("Content-Type") == "" {
w.Header().Set("Content-Type", http.DetectContentType(b))
}
return w.gz.Write(b)
}
// Flush implements http.Flusher as required by grpc-gateway for clients
// which access streaming endpoints (as exercised by the acceptance tests
// at time of writing).
func (w *gzipResponseWriter) Flush() {
// If Flush returns an error, we'll see it on the next call to Write or
// Close as well, so we can ignore it here.
if err := w.gz.Flush(); err == nil {
// Flush the wrapped ResponseWriter as well, if possible.
if f, ok := w.ResponseWriter.(http.Flusher); ok {
f.Flush()
}
}
}
// Close implements the io.Closer interface. It is not safe to use the
// writer after calling Close.
func (w *gzipResponseWriter) Close() error {
err := w.gz.Close()
w.Reset(nil) // release ResponseWriter reference.
gzipResponseWriterPool.Put(w)
return err
}
| pkg/server/gzip_response_writer.go | 0 | https://github.com/cockroachdb/cockroach/commit/67e659223cd2a2ae4ea2e5fa6d9461fd1418c4de | [
0.0003600468044169247,
0.00019971400615759194,
0.00016582044190727174,
0.00017274652782361954,
0.00006205787212820724
] |
{
"id": 2,
"code_window": [
"\t\tlog.Warningf(restoreCtx, `encountered retryable error: %+v`, err)\n",
"\t}\n",
"\n",
"\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\t// We have exhausted retries, but we have not seen a \"PermanentBulkJobError\" so\n",
"\t// it is possible that this is a transient error that is taking longer than\n",
"\t// our configured retry to go away.\n",
"\t//\n",
"\t// Let's pause the job instead of failing it so that the user can decide\n",
"\t// whether to resume it or cancel it.\n"
],
"file_path": "pkg/ccl/backupccl/restore_job.go",
"type": "add",
"edit_start_line_idx": 177
} | // Copyright 2016 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package backupccl
import (
"context"
"fmt"
"net/url"
"path"
"strings"
"time"
"github.com/cockroachdb/cockroach/pkg/build"
"github.com/cockroachdb/cockroach/pkg/ccl/utilccl"
"github.com/cockroachdb/cockroach/pkg/cloud"
"github.com/cockroachdb/cockroach/pkg/gossip"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/joberror"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/scheduledjobs"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/sql/stats"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/ctxgroup"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/cockroach/pkg/util/retry"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
"github.com/gogo/protobuf/types"
)
// BackupCheckpointInterval is the interval at which backup progress is saved
// to durable storage.
var BackupCheckpointInterval = time.Minute
// TestingShortBackupCheckpointInterval sets the BackupCheckpointInterval
// to a shorter interval for testing purposes, so we can see multiple
// checkpoints written without having extremely large backups. It returns
// a function which resets the checkpoint interval to the old interval.
func TestingShortBackupCheckpointInterval(oldInterval time.Duration) func() {
BackupCheckpointInterval = time.Millisecond * 10
return func() {
BackupCheckpointInterval = oldInterval
}
}
var forceReadBackupManifest = util.ConstantWithMetamorphicTestBool("backup-read-manifest", false)
func countRows(raw roachpb.BulkOpSummary, pkIDs map[uint64]bool) roachpb.RowCount {
res := roachpb.RowCount{DataSize: raw.DataSize}
for id, count := range raw.EntryCounts {
if _, ok := pkIDs[id]; ok {
res.Rows += count
} else {
res.IndexEntries += count
}
}
return res
}
// filterSpans returns the spans that represent the set difference
// (includes - excludes).
func filterSpans(includes []roachpb.Span, excludes []roachpb.Span) []roachpb.Span {
var cov roachpb.SpanGroup
cov.Add(includes...)
cov.Sub(excludes...)
return cov.Slice()
}
// clusterNodeCount returns the approximate number of nodes in the cluster.
func clusterNodeCount(gw gossip.OptionalGossip) (int, error) {
g, err := gw.OptionalErr(47970)
if err != nil {
return 0, err
}
var nodes int
err = g.IterateInfos(
gossip.KeyNodeIDPrefix, func(_ string, _ gossip.Info) error {
nodes++
return nil
},
)
if err != nil {
return 0, err
}
	// If we somehow got 0 and returned it, a caller may panic if they divide by
	// such a nonsensical node count.
if nodes == 0 {
return 1, errors.New("failed to count nodes")
}
return nodes, nil
}
// backup exports a snapshot of every kv entry into ranged sstables.
//
// The output is an sstable per range with files in the following locations:
// - <dir>/<unique_int>.sst
// - <dir> is given by the user and may be cloud storage
// - Each file contains data for a key range that doesn't overlap with any other
// file.
func backup(
ctx context.Context,
execCtx sql.JobExecContext,
defaultURI string,
urisByLocalityKV map[string]string,
db *kv.DB,
settings *cluster.Settings,
defaultStore cloud.ExternalStorage,
storageByLocalityKV map[string]*roachpb.ExternalStorage,
job *jobs.Job,
backupManifest *BackupManifest,
makeExternalStorage cloud.ExternalStorageFactory,
encryption *jobspb.BackupEncryptionOptions,
statsCache *stats.TableStatisticsCache,
) (roachpb.RowCount, error) {
// TODO(dan): Figure out how permissions should work. #6713 is tracking this
// for grpc.
resumerSpan := tracing.SpanFromContext(ctx)
var lastCheckpoint time.Time
var completedSpans, completedIntroducedSpans []roachpb.Span
// TODO(benesch): verify these files, rather than accepting them as truth
// blindly.
// No concurrency yet, so these assignments are safe.
for _, file := range backupManifest.Files {
if file.StartTime.IsEmpty() && !file.EndTime.IsEmpty() {
completedIntroducedSpans = append(completedIntroducedSpans, file.Span)
} else {
completedSpans = append(completedSpans, file.Span)
}
}
// Subtract out any completed spans.
spans := filterSpans(backupManifest.Spans, completedSpans)
introducedSpans := filterSpans(backupManifest.IntroducedSpans, completedIntroducedSpans)
pkIDs := make(map[uint64]bool)
for i := range backupManifest.Descriptors {
if t, _, _, _ := descpb.FromDescriptor(&backupManifest.Descriptors[i]); t != nil {
pkIDs[roachpb.BulkOpSummaryID(uint64(t.ID), uint64(t.PrimaryIndex.ID))] = true
}
}
evalCtx := execCtx.ExtendedEvalContext()
dsp := execCtx.DistSQLPlanner()
// We don't return the compatible nodes here since PartitionSpans will
// filter out incompatible nodes.
planCtx, _, err := dsp.SetupAllNodesPlanning(ctx, evalCtx, execCtx.ExecCfg())
if err != nil {
return roachpb.RowCount{}, errors.Wrap(err, "failed to determine nodes on which to run")
}
backupSpecs, err := distBackupPlanSpecs(
ctx,
planCtx,
execCtx,
dsp,
int64(job.ID()),
spans,
introducedSpans,
pkIDs,
defaultURI,
urisByLocalityKV,
encryption,
roachpb.MVCCFilter(backupManifest.MVCCFilter),
backupManifest.StartTime,
backupManifest.EndTime,
)
if err != nil {
return roachpb.RowCount{}, err
}
numTotalSpans := 0
for _, spec := range backupSpecs {
numTotalSpans += len(spec.IntroducedSpans) + len(spec.Spans)
}
progressLogger := jobs.NewChunkProgressLogger(job, numTotalSpans, job.FractionCompleted(), jobs.ProgressUpdateOnly)
requestFinishedCh := make(chan struct{}, numTotalSpans) // enough buffer to never block
var jobProgressLoop func(ctx context.Context) error
if numTotalSpans > 0 {
jobProgressLoop = func(ctx context.Context) error {
// Currently the granularity of backup progress is the % of spans
// exported. Would improve accuracy if we tracked the actual size of each
// file.
return progressLogger.Loop(ctx, requestFinishedCh)
}
}
progCh := make(chan *execinfrapb.RemoteProducerMetadata_BulkProcessorProgress)
checkpointLoop := func(ctx context.Context) error {
// When a processor is done exporting a span, it will send a progress update
// to progCh.
defer close(requestFinishedCh)
var numBackedUpFiles int64
for progress := range progCh {
var progDetails BackupManifest_Progress
if err := types.UnmarshalAny(&progress.ProgressDetails, &progDetails); err != nil {
log.Errorf(ctx, "unable to unmarshal backup progress details: %+v", err)
}
if backupManifest.RevisionStartTime.Less(progDetails.RevStartTime) {
backupManifest.RevisionStartTime = progDetails.RevStartTime
}
for _, file := range progDetails.Files {
backupManifest.Files = append(backupManifest.Files, file)
backupManifest.EntryCounts.Add(file.EntryCounts)
numBackedUpFiles++
}
// Signal that an ExportRequest finished to update job progress.
for i := int32(0); i < progDetails.CompletedSpans; i++ {
requestFinishedCh <- struct{}{}
}
if timeutil.Since(lastCheckpoint) > BackupCheckpointInterval {
resumerSpan.RecordStructured(&BackupProgressTraceEvent{
TotalNumFiles: numBackedUpFiles,
TotalEntryCounts: backupManifest.EntryCounts,
RevisionStartTime: backupManifest.RevisionStartTime,
})
lastCheckpoint = timeutil.Now()
err := writeBackupManifestCheckpoint(
ctx, defaultURI, encryption, backupManifest, execCtx.ExecCfg(), execCtx.User(),
)
if err != nil {
log.Errorf(ctx, "unable to checkpoint backup descriptor: %+v", err)
}
if execCtx.ExecCfg().TestingKnobs.AfterBackupCheckpoint != nil {
execCtx.ExecCfg().TestingKnobs.AfterBackupCheckpoint()
}
}
}
return nil
}
resumerSpan.RecordStructured(&types.StringValue{Value: "starting DistSQL backup execution"})
runBackup := func(ctx context.Context) error {
return distBackup(
ctx,
execCtx,
planCtx,
dsp,
progCh,
backupSpecs,
)
}
if err := ctxgroup.GoAndWait(ctx, jobProgressLoop, checkpointLoop, runBackup); err != nil {
return roachpb.RowCount{}, errors.Wrapf(err, "exporting %d ranges", errors.Safe(numTotalSpans))
}
backupID := uuid.MakeV4()
backupManifest.ID = backupID
// Write additional partial descriptors to each node for partitioned backups.
if len(storageByLocalityKV) > 0 {
resumerSpan.RecordStructured(&types.StringValue{Value: "writing partition descriptors for partitioned backup"})
filesByLocalityKV := make(map[string][]BackupManifest_File)
for _, file := range backupManifest.Files {
filesByLocalityKV[file.LocalityKV] = append(filesByLocalityKV[file.LocalityKV], file)
}
nextPartitionedDescFilenameID := 1
for kv, conf := range storageByLocalityKV {
backupManifest.LocalityKVs = append(backupManifest.LocalityKVs, kv)
// Set a unique filename for each partition backup descriptor. The ID
// ensures uniqueness, and the kv string appended to the end is for
// readability.
filename := fmt.Sprintf("%s_%d_%s",
backupPartitionDescriptorPrefix, nextPartitionedDescFilenameID, sanitizeLocalityKV(kv))
nextPartitionedDescFilenameID++
backupManifest.PartitionDescriptorFilenames = append(backupManifest.PartitionDescriptorFilenames, filename)
desc := BackupPartitionDescriptor{
LocalityKV: kv,
Files: filesByLocalityKV[kv],
BackupID: backupID,
}
if err := func() error {
store, err := makeExternalStorage(ctx, *conf)
if err != nil {
return err
}
defer store.Close()
return writeBackupPartitionDescriptor(ctx, store, filename, encryption, &desc)
}(); err != nil {
return roachpb.RowCount{}, err
}
}
}
resumerSpan.RecordStructured(&types.StringValue{Value: "writing backup manifest"})
if err := writeBackupManifest(ctx, settings, defaultStore, backupManifestName, encryption, backupManifest); err != nil {
return roachpb.RowCount{}, err
}
var tableStatistics []*stats.TableStatisticProto
for i := range backupManifest.Descriptors {
if tbl, _, _, _ := descpb.FromDescriptor(&backupManifest.Descriptors[i]); tbl != nil {
tableDesc := tabledesc.NewBuilder(tbl).BuildImmutableTable()
// Collect all the table stats for this table.
tableStatisticsAcc, err := statsCache.GetTableStats(ctx, tableDesc)
if err != nil {
// Successfully backed up data is more valuable than table stats that can
// be recomputed after restore, and so if we fail to collect the stats of a
// table we do not want to mark the job as failed.
// The lack of stats on restore could lead to suboptimal performance when
// reading/writing to this table until the stats have been recomputed.
log.Warningf(ctx, "failed to collect stats for table: %s, "+
"table ID: %d during a backup: %s", tableDesc.GetName(), tableDesc.GetID(),
err.Error())
continue
}
for _, stat := range tableStatisticsAcc {
tableStatistics = append(tableStatistics, &stat.TableStatisticProto)
}
}
}
statsTable := StatsTable{
Statistics: tableStatistics,
}
resumerSpan.RecordStructured(&types.StringValue{Value: "writing backup table statistics"})
if err := writeTableStatistics(ctx, defaultStore, backupStatisticsFileName, encryption, &statsTable); err != nil {
return roachpb.RowCount{}, err
}
if writeMetadataSST.Get(&settings.SV) {
if err := writeBackupMetadataSST(ctx, defaultStore, encryption, backupManifest, tableStatistics); err != nil {
err = errors.Wrap(err, "writing forward-compat metadata sst")
if !build.IsRelease() {
return roachpb.RowCount{}, err
}
log.Warningf(ctx, "%+v", err)
}
}
return backupManifest.EntryCounts, nil
}
func releaseProtectedTimestamp(
ctx context.Context, txn *kv.Txn, pts protectedts.Storage, ptsID *uuid.UUID,
) error {
// If the job doesn't have a protected timestamp then there's nothing to do.
if ptsID == nil {
return nil
}
err := pts.Release(ctx, txn, *ptsID)
if errors.Is(err, protectedts.ErrNotExists) {
// No reason to return an error which might cause problems if it doesn't
// seem to exist.
log.Warningf(ctx, "failed to release protected which seems not to exist: %v", err)
err = nil
}
return err
}
type backupResumer struct {
job *jobs.Job
backupStats roachpb.RowCount
testingKnobs struct {
ignoreProtectedTimestamps bool
}
}
var _ jobs.TraceableJob = &backupResumer{}
// ForceRealSpan implements the TraceableJob interface.
func (b *backupResumer) ForceRealSpan() bool {
return true
}
// Resume is part of the jobs.Resumer interface.
func (b *backupResumer) Resume(ctx context.Context, execCtx interface{}) error {
// The span is finished by the registry executing the job.
resumerSpan := tracing.SpanFromContext(ctx)
details := b.job.Details().(jobspb.BackupDetails)
p := execCtx.(sql.JobExecContext)
var backupManifest *BackupManifest
// If planning didn't resolve the external destination, then we need to now.
if details.URI == "" {
initialDetails := details
backupDetails, m, err := getBackupDetailAndManifest(
ctx, p.ExecCfg(), p.Txn(), details, p.User(),
)
if err != nil {
return err
}
details = backupDetails
backupManifest = &m
if len(backupManifest.Spans) > 0 && p.ExecCfg().Codec.ForSystemTenant() {
protectedtsID := uuid.MakeV4()
details.ProtectedTimestampRecord = &protectedtsID
if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
return protectTimestampForBackup(
ctx, p.ExecCfg(), txn, b.job.ID(), m, details,
)
}); err != nil {
return err
}
}
if err := writeBackupManifestCheckpoint(
ctx, details.URI, details.EncryptionOptions, backupManifest, p.ExecCfg(), p.User(),
); err != nil {
return err
}
if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
return planSchedulePTSChaining(ctx, p.ExecCfg(), txn, &details, b.job.CreatedBy())
}); err != nil {
return err
}
// The description picked during original planning might still say "LATEST",
// if resolving that to the actual directory only just happened above here.
// Ideally we'd re-render the description now that we know the subdir, but
// we don't have backup AST node anymore to easily call the rendering func.
// Instead we can just do a bit of dirty string replacement iff there is one
// "INTO 'LATEST' IN" (if there's >1, somenoe has a weird table/db names and
// we should just leave the description as-is, since it is just for humans).
description := b.job.Payload().Description
const unresolvedText = "INTO 'LATEST' IN"
if initialDetails.Destination.Subdir == "LATEST" && strings.Count(description, unresolvedText) == 1 {
description = strings.ReplaceAll(description, unresolvedText, fmt.Sprintf("INTO '%s' IN", details.Destination.Subdir))
}
// Update the job payload (non-volatile job definition) once, with the now
// resolved destination, updated description, etc. If we resume again we'll
// skip this whole block so this isn't an excessive update of payload.
if err := b.job.Update(ctx, nil, func(txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error {
if err := md.CheckRunningOrReverting(); err != nil {
return err
}
md.Payload.Details = jobspb.WrapPayloadDetails(details)
md.Payload.Description = description
ju.UpdatePayload(md.Payload)
return nil
}); err != nil {
return err
}
// Collect telemetry, once per backup after resolving its destination.
lic := utilccl.CheckEnterpriseEnabled(
p.ExecCfg().Settings, p.ExecCfg().LogicalClusterID(), p.ExecCfg().Organization(), "",
) != nil
collectTelemetry(m, details, details, lic)
}
// For all backups, partitioned or not, the main BACKUP manifest is stored at
// details.URI.
defaultConf, err := cloud.ExternalStorageConfFromURI(details.URI, p.User())
if err != nil {
return errors.Wrapf(err, "export configuration")
}
defaultStore, err := p.ExecCfg().DistSQLSrv.ExternalStorage(ctx, defaultConf)
if err != nil {
return errors.Wrapf(err, "make storage")
}
defer defaultStore.Close()
// EncryptionInfo is non-nil only when new encryption information has been
// generated during BACKUP planning.
redactedURI := RedactURIForErrorMessage(details.URI)
if details.EncryptionInfo != nil {
if err := writeEncryptionInfoIfNotExists(ctx, details.EncryptionInfo,
defaultStore); err != nil {
return errors.Wrapf(err, "creating encryption info file to %s", redactedURI)
}
}
storageByLocalityKV := make(map[string]*roachpb.ExternalStorage)
for kv, uri := range details.URIsByLocalityKV {
conf, err := cloud.ExternalStorageConfFromURI(uri, p.User())
if err != nil {
return err
}
storageByLocalityKV[kv] = &conf
}
mem := p.ExecCfg().RootMemoryMonitor.MakeBoundAccount()
defer mem.Close(ctx)
var memSize int64
defer func() {
if memSize != 0 {
mem.Shrink(ctx, memSize)
}
}()
if backupManifest == nil || forceReadBackupManifest {
backupManifest, memSize, err = b.readManifestOnResume(ctx, &mem, p.ExecCfg(), defaultStore, details, p.User())
if err != nil {
return err
}
}
statsCache := p.ExecCfg().TableStatsCache
// We retry on pretty generic failures -- any rpc error. If a worker node were
// to restart, it would produce this kind of error, but there may be other
	// errors that are also rpc errors. Don't retry too aggressively.
retryOpts := retry.Options{
MaxBackoff: 1 * time.Second,
MaxRetries: 5,
}
if err := p.ExecCfg().JobRegistry.CheckPausepoint("backup.before_flow"); err != nil {
return err
}
// We want to retry a backup if there are transient failures (i.e. worker nodes
// dying), so if we receive a retryable error, re-plan and retry the backup.
var res roachpb.RowCount
var retryCount int32
for r := retry.StartWithCtx(ctx, retryOpts); r.Next(); {
retryCount++
resumerSpan.RecordStructured(&roachpb.RetryTracingEvent{
Operation: "backupResumer.Resume",
AttemptNumber: retryCount,
RetryError: tracing.RedactAndTruncateError(err),
})
res, err = backup(
ctx,
p,
details.URI,
details.URIsByLocalityKV,
p.ExecCfg().DB,
p.ExecCfg().Settings,
defaultStore,
storageByLocalityKV,
b.job,
backupManifest,
p.ExecCfg().DistSQLSrv.ExternalStorage,
details.EncryptionOptions,
statsCache,
)
if err == nil {
break
}
if joberror.IsPermanentBulkJobError(err) {
return errors.Wrap(err, "failed to run backup")
}
log.Warningf(ctx, `BACKUP job encountered retryable error: %+v`, err)
// Reload the backup manifest to pick up any spans we may have completed on
// previous attempts.
var reloadBackupErr error
mem.Shrink(ctx, memSize)
memSize = 0
backupManifest, memSize, reloadBackupErr = b.readManifestOnResume(ctx, &mem, p.ExecCfg(), defaultStore, details, p.User())
if reloadBackupErr != nil {
return errors.Wrap(reloadBackupErr, "could not reload backup manifest when retrying")
}
}
if err != nil {
return errors.Wrap(err, "exhausted retries")
}
var backupDetails jobspb.BackupDetails
var ok bool
if backupDetails, ok = b.job.Details().(jobspb.BackupDetails); !ok {
return errors.Newf("unexpected job details type %T", b.job.Details())
}
if err := maybeUpdateSchedulePTSRecord(ctx, p.ExecCfg(), backupDetails, b.job.ID()); err != nil {
return err
}
if details.ProtectedTimestampRecord != nil && !b.testingKnobs.ignoreProtectedTimestamps {
if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
details := b.job.Details().(jobspb.BackupDetails)
return releaseProtectedTimestamp(ctx, txn, p.ExecCfg().ProtectedTimestampProvider,
details.ProtectedTimestampRecord)
}); err != nil {
log.Errorf(ctx, "failed to release protected timestamp: %v", err)
}
}
// If this is a full backup that was automatically nested in a collection of
// backups, record the path under which we wrote it to the LATEST file in the
	// root of the collection. Note: this file is *not* encrypted, as it only
// contains the name of another file that is in the same folder -- if you can
// get to this file to read it, you could already find its contents from the
// listing of the directory it is in -- it exists only to save us a
// potentially expensive listing of a giant backup collection to find the most
// recent completed entry.
if backupManifest.StartTime.IsEmpty() && details.CollectionURI != "" {
backupURI, err := url.Parse(details.URI)
if err != nil {
return err
}
collectionURI, err := url.Parse(details.CollectionURI)
if err != nil {
return err
}
suffix := strings.TrimPrefix(path.Clean(backupURI.Path), path.Clean(collectionURI.Path))
c, err := p.ExecCfg().DistSQLSrv.ExternalStorageFromURI(ctx, details.CollectionURI, p.User())
if err != nil {
return err
}
defer c.Close()
if err := writeNewLatestFile(ctx, p.ExecCfg().Settings, c, suffix); err != nil {
return err
}
}
b.backupStats = res
// Collect telemetry.
{
numClusterNodes, err := clusterNodeCount(p.ExecCfg().Gossip)
if err != nil {
if !build.IsRelease() && p.ExecCfg().Codec.ForSystemTenant() {
return err
}
log.Warningf(ctx, "unable to determine cluster node count: %v", err)
numClusterNodes = 1
}
telemetry.Count("backup.total.succeeded")
const mb = 1 << 20
sizeMb := res.DataSize / mb
sec := int64(timeutil.Since(timeutil.FromUnixMicros(b.job.Payload().StartedMicros)).Seconds())
var mbps int64
if sec > 0 {
mbps = mb / sec
}
if details.StartTime.IsEmpty() {
telemetry.CountBucketed("backup.duration-sec.full-succeeded", sec)
telemetry.CountBucketed("backup.size-mb.full", sizeMb)
telemetry.CountBucketed("backup.speed-mbps.full.total", mbps)
telemetry.CountBucketed("backup.speed-mbps.full.per-node", mbps/int64(numClusterNodes))
} else {
telemetry.CountBucketed("backup.duration-sec.inc-succeeded", sec)
telemetry.CountBucketed("backup.size-mb.inc", sizeMb)
telemetry.CountBucketed("backup.speed-mbps.inc.total", mbps)
telemetry.CountBucketed("backup.speed-mbps.inc.per-node", mbps/int64(numClusterNodes))
}
}
return b.maybeNotifyScheduledJobCompletion(ctx, jobs.StatusSucceeded, p.ExecCfg())
}
// ReportResults implements JobResultsReporter interface.
func (b *backupResumer) ReportResults(ctx context.Context, resultsCh chan<- tree.Datums) error {
select {
case <-ctx.Done():
return ctx.Err()
case resultsCh <- tree.Datums{
tree.NewDInt(tree.DInt(b.job.ID())),
tree.NewDString(string(jobs.StatusSucceeded)),
tree.NewDFloat(tree.DFloat(1.0)),
tree.NewDInt(tree.DInt(b.backupStats.Rows)),
tree.NewDInt(tree.DInt(b.backupStats.IndexEntries)),
tree.NewDInt(tree.DInt(b.backupStats.DataSize)),
}:
return nil
}
}
func (b *backupResumer) readManifestOnResume(
ctx context.Context,
mem *mon.BoundAccount,
cfg *sql.ExecutorConfig,
defaultStore cloud.ExternalStorage,
details jobspb.BackupDetails,
user security.SQLUsername,
) (*BackupManifest, int64, error) {
// We don't read the table descriptors from the backup descriptor, but
// they could be using either the new or the old foreign key
// representations. We should just preserve whatever representation the
// table descriptors were using and leave them alone.
desc, memSize, err := readBackupCheckpointManifest(ctx, mem, defaultStore, backupManifestCheckpointName,
details.EncryptionOptions)
if err != nil {
if !errors.Is(err, cloud.ErrFileDoesNotExist) {
return nil, 0, errors.Wrapf(err, "reading backup checkpoint")
}
// Try reading temp checkpoint.
tmpCheckpoint := tempCheckpointFileNameForJob(b.job.ID())
desc, memSize, err = readBackupCheckpointManifest(ctx, mem, defaultStore, tmpCheckpoint, details.EncryptionOptions)
if err != nil {
return nil, 0, err
}
// "Rename" temp checkpoint.
if err := writeBackupManifestCheckpoint(
ctx, details.URI, details.EncryptionOptions, &desc, cfg, user,
); err != nil {
mem.Shrink(ctx, memSize)
return nil, 0, errors.Wrapf(err, "renaming temp checkpoint file")
}
// Best effort remove temp checkpoint.
if err := defaultStore.Delete(ctx, tmpCheckpoint); err != nil {
log.Errorf(ctx, "error removing temporary checkpoint %s", tmpCheckpoint)
}
if err := defaultStore.Delete(ctx, backupProgressDirectory+"/"+tmpCheckpoint); err != nil {
log.Errorf(ctx, "error removing temporary checkpoint %s", backupProgressDirectory+"/"+tmpCheckpoint)
}
}
if !desc.ClusterID.Equal(cfg.LogicalClusterID()) {
mem.Shrink(ctx, memSize)
return nil, 0, errors.Newf("cannot resume backup started on another cluster (%s != %s)",
desc.ClusterID, cfg.LogicalClusterID())
}
return &desc, memSize, nil
}
func (b *backupResumer) maybeNotifyScheduledJobCompletion(
ctx context.Context, jobStatus jobs.Status, exec *sql.ExecutorConfig,
) error {
env := scheduledjobs.ProdJobSchedulerEnv
if knobs, ok := exec.DistSQLSrv.TestingKnobs.JobsTestingKnobs.(*jobs.TestingKnobs); ok {
if knobs.JobSchedulerEnv != nil {
env = knobs.JobSchedulerEnv
}
}
err := exec.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
// We cannot rely on b.job containing created_by_id because on job
// resumption the registry does not populate the resumer's CreatedByInfo.
datums, err := exec.InternalExecutor.QueryRowEx(
ctx,
"lookup-schedule-info",
txn,
sessiondata.InternalExecutorOverride{User: security.NodeUserName()},
fmt.Sprintf(
"SELECT created_by_id FROM %s WHERE id=$1 AND created_by_type=$2",
env.SystemJobsTableName()),
b.job.ID(), jobs.CreatedByScheduledJobs)
if err != nil {
return errors.Wrap(err, "schedule info lookup")
}
if datums == nil {
// Not a scheduled backup.
return nil
}
scheduleID := int64(tree.MustBeDInt(datums[0]))
if err := jobs.NotifyJobTermination(
ctx, env, b.job.ID(), jobStatus, b.job.Details(), scheduleID, exec.InternalExecutor, txn); err != nil {
return errors.Wrapf(err,
"failed to notify schedule %d of completion of job %d", scheduleID, b.job.ID())
}
return nil
})
return err
}
// OnFailOrCancel is part of the jobs.Resumer interface.
func (b *backupResumer) OnFailOrCancel(ctx context.Context, execCtx interface{}) error {
telemetry.Count("backup.total.failed")
telemetry.CountBucketed("backup.duration-sec.failed",
int64(timeutil.Since(timeutil.FromUnixMicros(b.job.Payload().StartedMicros)).Seconds()))
p := execCtx.(sql.JobExecContext)
cfg := p.ExecCfg()
b.deleteCheckpoint(ctx, cfg, p.User())
if err := cfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
details := b.job.Details().(jobspb.BackupDetails)
return releaseProtectedTimestamp(ctx, txn, cfg.ProtectedTimestampProvider,
details.ProtectedTimestampRecord)
}); err != nil {
return err
}
// This should never return an error unless resolving the schedule that the
// job is being run under fails. This could happen if the schedule is dropped
// while the job is executing.
if err := b.maybeNotifyScheduledJobCompletion(ctx, jobs.StatusFailed,
execCtx.(sql.JobExecContext).ExecCfg()); err != nil {
log.Errorf(ctx, "failed to notify job %d on completion of OnFailOrCancel: %+v",
b.job.ID(), err)
}
return nil //nolint:returnerrcheck
}
func (b *backupResumer) deleteCheckpoint(
ctx context.Context, cfg *sql.ExecutorConfig, user security.SQLUsername,
) {
// Attempt to delete BACKUP-CHECKPOINT(s) in /progress directory.
if err := func() error {
details := b.job.Details().(jobspb.BackupDetails)
// For all backups, partitioned or not, the main BACKUP manifest is stored at
// details.URI.
exportStore, err := cfg.DistSQLSrv.ExternalStorageFromURI(ctx, details.URI, user)
if err != nil {
return err
}
defer exportStore.Close()
// We first attempt to delete from base directory to account for older
// backups, and then from the progress directory.
err = exportStore.Delete(ctx, backupManifestCheckpointName)
if err != nil {
log.Warningf(ctx, "unable to delete checkpointed backup descriptor file in base directory: %+v", err)
}
err = exportStore.Delete(ctx, backupManifestCheckpointName+backupManifestChecksumSuffix)
if err != nil {
log.Warningf(ctx, "unable to delete checkpoint checksum file in base directory: %+v", err)
}
// Delete will not delete a nonempty directory, so we have to go through
// all files and delete each file one by one.
return exportStore.List(ctx, backupProgressDirectory, "", func(p string) error {
return exportStore.Delete(ctx, backupProgressDirectory+p)
})
}(); err != nil {
log.Warningf(ctx, "unable to delete checkpointed backup descriptor file in progress directory: %+v", err)
}
}
var _ jobs.Resumer = &backupResumer{}
func init() {
jobs.RegisterConstructor(
jobspb.TypeBackup,
func(job *jobs.Job, _ *cluster.Settings) jobs.Resumer {
return &backupResumer{
job: job,
}
},
)
}
| pkg/ccl/backupccl/backup_job.go | 1 | https://github.com/cockroachdb/cockroach/commit/67e659223cd2a2ae4ea2e5fa6d9461fd1418c4de | [
0.21975450217723846,
0.00416130805388093,
0.0001640632253838703,
0.0002160458534490317,
0.0241993460804224
] |
{
"id": 2,
"code_window": [
"\t\tlog.Warningf(restoreCtx, `encountered retryable error: %+v`, err)\n",
"\t}\n",
"\n",
"\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\t// We have exhausted retries, but we have not seen a \"PermanentBulkJobError\" so\n",
"\t// it is possible that this is a transient error that is taking longer than\n",
"\t// our configured retry to go away.\n",
"\t//\n",
"\t// Let's pause the job instead of failing it so that the user can decide\n",
"\t// whether to resume it or cancel it.\n"
],
"file_path": "pkg/ccl/backupccl/restore_job.go",
"type": "add",
"edit_start_line_idx": 177
} | statement ok
CREATE DATABASE d;
GRANT CREATE ON DATABASE d TO testuser
# By default, testuser should have ALL privileges on a sequences it creates.
user testuser
statement ok
USE d;
statement ok
CREATE SEQUENCE testuser_s;
query TTTTTB colnames
SHOW GRANTS ON testuser_s;
----
database_name schema_name table_name grantee privilege_type is_grantable
d public testuser_s admin ALL true
d public testuser_s root ALL true
d public testuser_s testuser ALL false
statement ok
ALTER DEFAULT PRIVILEGES REVOKE ALL ON SEQUENCES FROM testuser;
ALTER DEFAULT PRIVILEGES IN SCHEMA public REVOKE ALL ON SEQUENCES FROM testuser;
statement ok
CREATE SEQUENCE testuser_s2;
# Note that CREATE is still present for testuser due to our current inheritance
# behavior.
# TODO(richardjcai): Remove this when we remove our current inheritance logic.
query TTTTTB colnames
SHOW GRANTS ON testuser_s2
----
database_name schema_name table_name grantee privilege_type is_grantable
d public testuser_s2 admin ALL true
d public testuser_s2 root ALL true
user root
statement ok
USE test;
statement ok
CREATE USER testuser2
statement ok
ALTER DEFAULT PRIVILEGES GRANT ALL ON SEQUENCES TO testuser, testuser2
statement ok
CREATE SEQUENCE s
query TTTTTB colnames
SHOW GRANTS ON s
----
database_name schema_name table_name grantee privilege_type is_grantable
test public s admin ALL true
test public s root ALL true
test public s testuser ALL true
test public s testuser2 ALL true
# Sequence DEFAULT PRIVILEGES should be separate from tables.
statement ok
CREATE TABLE t()
query TTTTTB colnames
SHOW GRANTS ON t
----
database_name schema_name table_name grantee privilege_type is_grantable
test public t admin ALL true
test public t root ALL true
statement ok
ALTER DEFAULT PRIVILEGES REVOKE SELECT ON SEQUENCES FROM testuser, testuser2
statement ok
CREATE SEQUENCE s2
query TTTTTB colnames
SHOW GRANTS ON s2
----
database_name schema_name table_name grantee privilege_type is_grantable
test public s2 admin ALL true
test public s2 root ALL true
test public s2 testuser CREATE true
test public s2 testuser DELETE true
test public s2 testuser DROP true
test public s2 testuser GRANT true
test public s2 testuser INSERT true
test public s2 testuser UPDATE true
test public s2 testuser ZONECONFIG true
test public s2 testuser2 CREATE true
test public s2 testuser2 DELETE true
test public s2 testuser2 DROP true
test public s2 testuser2 GRANT true
test public s2 testuser2 INSERT true
test public s2 testuser2 UPDATE true
test public s2 testuser2 ZONECONFIG true
statement ok
ALTER DEFAULT PRIVILEGES REVOKE ALL ON SEQUENCES FROM testuser, testuser2
statement ok
CREATE SEQUENCE s3
query TTTTTB colnames
SHOW GRANTS ON s3
----
database_name schema_name table_name grantee privilege_type is_grantable
test public s3 admin ALL true
test public s3 root ALL true
statement ok
GRANT CREATE ON DATABASE d TO testuser
user testuser
statement ok
USE d
statement ok
ALTER DEFAULT PRIVILEGES FOR ROLE testuser REVOKE ALL ON SEQUENCES FROM testuser, testuser2
statement ok
CREATE SEQUENCE s4
# testuser still has CREATE due to "inheriting" it from the parent database.
query TTTTTB colnames
SHOW GRANTS ON s4
----
database_name schema_name table_name grantee privilege_type is_grantable
d public s4 admin ALL true
d public s4 root ALL true
user root
statement ok
USE d
statement ok
GRANT testuser TO root
statement ok
ALTER DEFAULT PRIVILEGES FOR ROLE testuser REVOKE ALL ON SEQUENCES FROM testuser, testuser2
user testuser
statement ok
USE d
statement ok
CREATE SEQUENCE s5
# testuser still has CREATE due to "inheriting" it from the parent database.
query TTTTTB colnames
SHOW GRANTS ON s5
----
database_name schema_name table_name grantee privilege_type is_grantable
d public s5 admin ALL true
d public s5 root ALL true
| pkg/sql/logictest/testdata/logic_test/alter_default_privileges_for_sequence | 0 | https://github.com/cockroachdb/cockroach/commit/67e659223cd2a2ae4ea2e5fa6d9461fd1418c4de | [
0.0017602049047127366,
0.0002694500726647675,
0.0001659955596551299,
0.0001692633086349815,
0.00038493459578603506
] |
{
"id": 2,
"code_window": [
"\t\tlog.Warningf(restoreCtx, `encountered retryable error: %+v`, err)\n",
"\t}\n",
"\n",
"\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\t// We have exhausted retries, but we have not seen a \"PermanentBulkJobError\" so\n",
"\t// it is possible that this is a transient error that is taking longer than\n",
"\t// our configured retry to go away.\n",
"\t//\n",
"\t// Let's pause the job instead of failing it so that the user can decide\n",
"\t// whether to resume it or cancel it.\n"
],
"file_path": "pkg/ccl/backupccl/restore_job.go",
"type": "add",
"edit_start_line_idx": 177
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "schematestutils",
srcs = ["schema_test_utils.go"],
importpath = "github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/schemafeed/schematestutils",
visibility = ["//visibility:public"],
deps = [
"//pkg/sql/catalog",
"//pkg/sql/catalog/catpb",
"//pkg/sql/catalog/descpb",
"//pkg/sql/catalog/tabledesc",
"//pkg/sql/types",
"//pkg/util/hlc",
"@com_github_gogo_protobuf//proto",
],
)
| pkg/ccl/changefeedccl/schemafeed/schematestutils/BUILD.bazel | 0 | https://github.com/cockroachdb/cockroach/commit/67e659223cd2a2ae4ea2e5fa6d9461fd1418c4de | [
0.00017109564214479178,
0.00017102863057516515,
0.0001709616044536233,
0.00017102863057516515,
6.701884558424354e-8
] |
{
"id": 2,
"code_window": [
"\t\tlog.Warningf(restoreCtx, `encountered retryable error: %+v`, err)\n",
"\t}\n",
"\n",
"\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\t// We have exhausted retries, but we have not seen a \"PermanentBulkJobError\" so\n",
"\t// it is possible that this is a transient error that is taking longer than\n",
"\t// our configured retry to go away.\n",
"\t//\n",
"\t// Let's pause the job instead of failing it so that the user can decide\n",
"\t// whether to resume it or cancel it.\n"
],
"file_path": "pkg/ccl/backupccl/restore_job.go",
"type": "add",
"edit_start_line_idx": 177
} | ## A simple txn writing at an older timestamp.
## This is expected to fail in error "Write too old".
# Prepare a newer write.
run ok
with t=A
txn_begin ts=44
put k=a v=abc resolve
txn_remove
----
>> at end:
data: "a"/44.000000000,0 -> /BYTES/abc
# Try an old write, expect an error.
run error
with t=A
txn_begin ts=33
del k=a
----
>> at end:
txn: "A" meta={id=00000000 key=/Min pri=0.00000000 epo=0 ts=33.000000000,0 min=0,0 seq=0} lock=true stat=PENDING rts=33.000000000,0 wto=false gul=0,0
meta: "a"/0,0 -> txn={id=00000000 key=/Min pri=0.00000000 epo=0 ts=44.000000000,1 min=0,0 seq=0} ts=44.000000000,1 del=true klen=12 vlen=0 mergeTs=<nil> txnDidNotUpdateMeta=true
data: "a"/44.000000000,1 -> /<empty>
data: "a"/44.000000000,0 -> /BYTES/abc
error: (*roachpb.WriteTooOldError:) WriteTooOldError: write for key "a" at timestamp 33.000000000,0 too old; wrote at 44.000000000,1
run ok
resolve_intent t=A k=a status=ABORTED
----
>> at end:
data: "a"/44.000000000,0 -> /BYTES/abc
## A more complex txn writing at an older timestamp but with an uncertainty
## interval that extends past the value.
## This is expected to fail with a "write too old" error and NOT a "read within
## uncertainty interval" error.
run error
with t=B
txn_begin ts=33 globalUncertaintyLimit=55
cput k=a v=def
----
>> at end:
txn: "B" meta={id=00000000 key=/Min pri=0.00000000 epo=0 ts=33.000000000,0 min=0,0 seq=0} lock=true stat=PENDING rts=33.000000000,0 wto=false gul=55.000000000,0
meta: "a"/0,0 -> txn={id=00000000 key=/Min pri=0.00000000 epo=0 ts=44.000000000,1 min=0,0 seq=0} ts=44.000000000,1 del=false klen=12 vlen=8 mergeTs=<nil> txnDidNotUpdateMeta=true
data: "a"/44.000000000,1 -> /BYTES/def
data: "a"/44.000000000,0 -> /BYTES/abc
error: (*roachpb.WriteTooOldError:) WriteTooOldError: write for key "a" at timestamp 33.000000000,0 too old; wrote at 44.000000000,1
run ok
resolve_intent t=B k=a status=ABORTED
----
>> at end:
data: "a"/44.000000000,0 -> /BYTES/abc
| pkg/storage/testdata/mvcc_histories/write_too_old | 0 | https://github.com/cockroachdb/cockroach/commit/67e659223cd2a2ae4ea2e5fa6d9461fd1418c4de | [
0.0001713558885967359,
0.00016723229782655835,
0.00016433550626970828,
0.00016666081501170993,
0.0000022641247596766334
] |
{
"id": 3,
"code_window": [
"\tif err != nil {\n",
"\t\treturn roachpb.RowCount{}, errors.Wrap(err, \"exhausted retries\")\n",
"\t}\n",
"\treturn res, nil\n",
"}\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn res, jobs.MarkPauseRequestError(errors.Wrap(err, \"exhausted retries\"))\n"
],
"file_path": "pkg/ccl/backupccl/restore_job.go",
"type": "replace",
"edit_start_line_idx": 178
} | // Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package importer
import (
"bytes"
"context"
"fmt"
"math"
"time"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/joberror"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descidgen"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/ingesting"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/rewrite"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/gcjob"
"github.com/cockroachdb/cockroach/pkg/sql/opt/memo"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
"github.com/cockroachdb/cockroach/pkg/sql/stats"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/ioctx"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/log/eventpb"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/retry"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/errors"
)
type importResumer struct {
job *jobs.Job
settings *cluster.Settings
res roachpb.RowCount
testingKnobs struct {
afterImport func(summary roachpb.RowCount) error
alwaysFlushJobProgress bool
}
}
func (r *importResumer) TestingSetAfterImportKnob(fn func(summary roachpb.RowCount) error) {
r.testingKnobs.afterImport = fn
}
var _ jobs.TraceableJob = &importResumer{}
func (r *importResumer) ForceRealSpan() bool {
return true
}
var _ jobs.Resumer = &importResumer{}
var processorsPerNode = settings.RegisterIntSetting(
settings.TenantWritable,
"bulkio.import.processors_per_node",
"number of input processors to run on each sql instance", 1,
settings.PositiveInt,
)
type preparedSchemaMetadata struct {
schemaPreparedDetails jobspb.ImportDetails
schemaRewrites jobspb.DescRewriteMap
newSchemaIDToName map[descpb.ID]string
oldSchemaIDToName map[descpb.ID]string
queuedSchemaJobs []jobspb.JobID
}
// Resume is part of the jobs.Resumer interface.
func (r *importResumer) Resume(ctx context.Context, execCtx interface{}) error {
p := execCtx.(sql.JobExecContext)
if err := r.parseBundleSchemaIfNeeded(ctx, p); err != nil {
return err
}
details := r.job.Details().(jobspb.ImportDetails)
files := details.URIs
format := details.Format
tables := make(map[string]*execinfrapb.ReadImportDataSpec_ImportTable, len(details.Tables))
if details.Tables != nil {
// Skip prepare stage on job resumption, if it has already been completed.
if !details.PrepareComplete {
var schemaMetadata *preparedSchemaMetadata
if err := sql.DescsTxn(ctx, p.ExecCfg(), func(
ctx context.Context, txn *kv.Txn, descsCol *descs.Collection,
) error {
var preparedDetails jobspb.ImportDetails
schemaMetadata = &preparedSchemaMetadata{
newSchemaIDToName: make(map[descpb.ID]string),
oldSchemaIDToName: make(map[descpb.ID]string),
}
var err error
curDetails := details
if len(details.Schemas) != 0 {
schemaMetadata, err = r.prepareSchemasForIngestion(ctx, p, curDetails, txn, descsCol)
if err != nil {
return err
}
curDetails = schemaMetadata.schemaPreparedDetails
}
if r.settings.Version.IsActive(ctx, clusterversion.PublicSchemasWithDescriptors) {
// In 22.1, the Public schema should always be present in the database.
// Make sure it is part of schemaMetadata, it is not guaranteed to
// be added in prepareSchemasForIngestion if we're not importing any
// schemas.
// The Public schema will not change in the database so both the
// oldSchemaIDToName and newSchemaIDToName entries will be the
// same for the Public schema.
_, dbDesc, err := descsCol.GetImmutableDatabaseByID(ctx, txn, details.ParentID, tree.DatabaseLookupFlags{Required: true})
if err != nil {
return err
}
schemaMetadata.oldSchemaIDToName[dbDesc.GetSchemaID(tree.PublicSchema)] = tree.PublicSchema
schemaMetadata.newSchemaIDToName[dbDesc.GetSchemaID(tree.PublicSchema)] = tree.PublicSchema
}
preparedDetails, err = r.prepareTablesForIngestion(ctx, p, curDetails, txn, descsCol,
schemaMetadata)
if err != nil {
return err
}
// Telemetry for multi-region.
for _, table := range preparedDetails.Tables {
_, dbDesc, err := descsCol.GetImmutableDatabaseByID(
ctx, txn, table.Desc.GetParentID(), tree.DatabaseLookupFlags{Required: true})
if err != nil {
return err
}
if dbDesc.IsMultiRegion() {
telemetry.Inc(sqltelemetry.ImportIntoMultiRegionDatabaseCounter)
}
}
// Update the job details now that the schemas and table descs have
// been "prepared".
return r.job.Update(ctx, txn, func(
txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater,
) error {
pl := md.Payload
*pl.GetImport() = preparedDetails
// Update the set of descriptors for later observability.
// TODO(ajwerner): Do we need this idempotence test?
prev := md.Payload.DescriptorIDs
if prev == nil {
var descriptorIDs []descpb.ID
for _, schema := range preparedDetails.Schemas {
descriptorIDs = append(descriptorIDs, schema.Desc.GetID())
}
for _, table := range preparedDetails.Tables {
descriptorIDs = append(descriptorIDs, table.Desc.GetID())
}
pl.DescriptorIDs = descriptorIDs
}
ju.UpdatePayload(pl)
return nil
})
}); err != nil {
return err
}
// Run the queued job which updates the database descriptor to contain the
// newly created schemas.
// NB: Seems like the registry eventually adopts the job anyways but this
// is in keeping with the semantics we use when creating a schema during
// sql execution. Namely, queue job in the txn which creates the schema
// desc and run once the txn has committed.
if err := p.ExecCfg().JobRegistry.Run(ctx, p.ExecCfg().InternalExecutor,
schemaMetadata.queuedSchemaJobs); err != nil {
return err
}
// Re-initialize details after prepare step.
details = r.job.Details().(jobspb.ImportDetails)
emitImportJobEvent(ctx, p, jobs.StatusRunning, r.job)
}
// Create a mapping from schemaID to schemaName.
schemaIDToName := make(map[descpb.ID]string)
for _, i := range details.Schemas {
schemaIDToName[i.Desc.GetID()] = i.Desc.GetName()
}
for _, i := range details.Tables {
var tableName string
if i.Name != "" {
tableName = i.Name
} else if i.Desc != nil {
tableName = i.Desc.Name
} else {
return errors.New("invalid table specification")
}
// If we are importing from PGDUMP, qualify the table name with the schema
// name since we support non-public schemas.
if details.Format.Format == roachpb.IOFileFormat_PgDump {
schemaName := tree.PublicSchema
if schema, ok := schemaIDToName[i.Desc.GetUnexposedParentSchemaID()]; ok {
schemaName = schema
}
tableName = fmt.Sprintf("%s.%s", schemaName, tableName)
}
tables[tableName] = &execinfrapb.ReadImportDataSpec_ImportTable{
Desc: i.Desc,
TargetCols: i.TargetCols,
}
}
}
typeDescs := make([]*descpb.TypeDescriptor, len(details.Types))
for i, t := range details.Types {
typeDescs[i] = t.Desc
}
// If details.Walltime is still 0, then it was not set during
// `prepareTablesForIngestion`. This indicates that we are in an IMPORT INTO,
// and that the walltime was not set in a previous run of IMPORT.
//
// In the case of importing into existing tables we must wait for all nodes
// to see the same version of the updated table descriptor, after which we
	// shall choose a ts to import from.
if details.Walltime == 0 {
// Now that we know all the tables are offline, pick a walltime at which we
// will write.
details.Walltime = p.ExecCfg().Clock.Now().WallTime
// Check if the tables being imported into are starting empty, in which
// case we can cheaply clear-range instead of revert-range to cleanup.
for i := range details.Tables {
if !details.Tables[i].IsNew {
tblDesc := tabledesc.NewBuilder(details.Tables[i].Desc).BuildImmutableTable()
tblSpan := tblDesc.TableSpan(p.ExecCfg().Codec)
res, err := p.ExecCfg().DB.Scan(ctx, tblSpan.Key, tblSpan.EndKey, 1 /* maxRows */)
if err != nil {
return errors.Wrap(err, "checking if existing table is empty")
}
details.Tables[i].WasEmpty = len(res) == 0
}
}
if err := r.job.SetDetails(ctx, nil /* txn */, details); err != nil {
return err
}
}
procsPerNode := int(processorsPerNode.Get(&p.ExecCfg().Settings.SV))
res, err := ingestWithRetry(ctx, p, r.job, tables, typeDescs, files, format, details.Walltime,
r.testingKnobs.alwaysFlushJobProgress, procsPerNode)
if err != nil {
return err
}
pkIDs := make(map[uint64]struct{}, len(details.Tables))
for _, t := range details.Tables {
pkIDs[roachpb.BulkOpSummaryID(uint64(t.Desc.ID), uint64(t.Desc.PrimaryIndex.ID))] = struct{}{}
}
r.res.DataSize = res.DataSize
for id, count := range res.EntryCounts {
if _, ok := pkIDs[id]; ok {
r.res.Rows += count
} else {
r.res.IndexEntries += count
}
}
if r.testingKnobs.afterImport != nil {
if err := r.testingKnobs.afterImport(r.res); err != nil {
return err
}
}
if err := p.ExecCfg().JobRegistry.CheckPausepoint("import.after_ingest"); err != nil {
return err
}
if err := r.checkVirtualConstraints(ctx, p.ExecCfg(), r.job); err != nil {
return err
}
// If the table being imported into referenced UDTs, ensure that a concurrent
// schema change on any of the typeDescs has not modified the type descriptor. If
// it has, it is unsafe to import the data and we fail the import job.
if err := r.checkForUDTModification(ctx, p.ExecCfg()); err != nil {
return err
}
if err := r.publishSchemas(ctx, p.ExecCfg()); err != nil {
return err
}
if err := r.publishTables(ctx, p.ExecCfg(), res); err != nil {
return err
}
// As of 21.2 we do not write a protected timestamp record during IMPORT INTO.
// In case of a mixed version cluster with 21.1 and 21.2 nodes, it is possible
// that the job was planned on an older node and then resumed on a 21.2 node.
// Thus, we still need to clear the timestamp record that was written when the
// IMPORT INTO was planned on the older node.
//
// TODO(adityamaru): Remove in 22.1.
if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
return r.releaseProtectedTimestamp(ctx, txn, p.ExecCfg().ProtectedTimestampProvider)
}); err != nil {
log.Errorf(ctx, "failed to release protected timestamp: %v", err)
}
emitImportJobEvent(ctx, p, jobs.StatusSucceeded, r.job)
addToFileFormatTelemetry(details.Format.Format.String(), "succeeded")
telemetry.CountBucketed("import.rows", r.res.Rows)
const mb = 1 << 20
sizeMb := r.res.DataSize / mb
telemetry.CountBucketed("import.size-mb", sizeMb)
sec := int64(timeutil.Since(timeutil.FromUnixMicros(r.job.Payload().StartedMicros)).Seconds())
var mbps int64
if sec > 0 {
mbps = mb / sec
}
telemetry.CountBucketed("import.duration-sec.succeeded", sec)
telemetry.CountBucketed("import.speed-mbps", mbps)
// Tiny imports may skew throughput numbers due to overhead.
if sizeMb > 10 {
telemetry.CountBucketed("import.speed-mbps.over10mb", mbps)
}
return nil
}
// prepareTablesForIngestion prepares table descriptors for the ingestion
// step of import. The descriptors are in an IMPORTING state (offline) on
// successful completion of this method.
func (r *importResumer) prepareTablesForIngestion(
ctx context.Context,
p sql.JobExecContext,
details jobspb.ImportDetails,
txn *kv.Txn,
descsCol *descs.Collection,
schemaMetadata *preparedSchemaMetadata,
) (jobspb.ImportDetails, error) {
importDetails := details
importDetails.Tables = make([]jobspb.ImportDetails_Table, len(details.Tables))
newSchemaAndTableNameToIdx := make(map[string]int, len(importDetails.Tables))
var hasExistingTables bool
var err error
var newTableDescs []jobspb.ImportDetails_Table
var desc *descpb.TableDescriptor
for i, table := range details.Tables {
if !table.IsNew {
desc, err = prepareExistingTablesForIngestion(ctx, txn, descsCol, table.Desc)
if err != nil {
return importDetails, err
}
importDetails.Tables[i] = jobspb.ImportDetails_Table{
Desc: desc, Name: table.Name,
SeqVal: table.SeqVal,
IsNew: table.IsNew,
TargetCols: table.TargetCols,
}
hasExistingTables = true
} else {
// PGDUMP imports support non-public schemas.
// For the purpose of disambiguation we must take the schema into
			// account when constructing the newSchemaAndTableNameToIdx map.
			// At this point the table descriptor's parent schema ID has not been
// remapped to the newly generated schema ID.
key, err := constructSchemaAndTableKey(ctx, table.Desc, schemaMetadata.oldSchemaIDToName, p.ExecCfg().Settings.Version)
if err != nil {
return importDetails, err
}
newSchemaAndTableNameToIdx[key.String()] = i
// Make a deep copy of the table descriptor so that rewrites do not
// partially clobber the descriptor stored in details.
newTableDescs = append(newTableDescs,
*protoutil.Clone(&table).(*jobspb.ImportDetails_Table))
}
}
// Prepare the table descriptors for newly created tables being imported
// into.
//
// TODO(adityamaru): This is still unnecessarily complicated. If we can get
// the new table desc preparation to work on a per desc basis, rather than
// requiring all the newly created descriptors, then this can look like the
// call to prepareExistingTablesForIngestion. Currently, FK references
// misbehave when I tried to write the desc one at a time.
if len(newTableDescs) != 0 {
res, err := prepareNewTablesForIngestion(
ctx, txn, descsCol, p, newTableDescs, importDetails.ParentID, schemaMetadata.schemaRewrites)
if err != nil {
return importDetails, err
}
for _, desc := range res {
key, err := constructSchemaAndTableKey(ctx, desc, schemaMetadata.newSchemaIDToName, p.ExecCfg().Settings.Version)
if err != nil {
return importDetails, err
}
i := newSchemaAndTableNameToIdx[key.String()]
table := details.Tables[i]
importDetails.Tables[i] = jobspb.ImportDetails_Table{
Desc: desc,
Name: table.Name,
SeqVal: table.SeqVal,
IsNew: table.IsNew,
TargetCols: table.TargetCols,
}
}
}
importDetails.PrepareComplete = true
// If we do not have pending schema changes on existing descriptors we can
// choose our Walltime (to IMPORT from) immediately. Otherwise, we have to
// wait for all nodes to see the same descriptor version before doing so.
if !hasExistingTables {
importDetails.Walltime = p.ExecCfg().Clock.Now().WallTime
} else {
importDetails.Walltime = 0
}
return importDetails, nil
}
// prepareExistingTablesForIngestion prepares descriptors for existing tables
// being imported into.
func prepareExistingTablesForIngestion(
ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, desc *descpb.TableDescriptor,
) (*descpb.TableDescriptor, error) {
if len(desc.Mutations) > 0 {
return nil, errors.Errorf("cannot IMPORT INTO a table with schema changes in progress -- try again later (pending mutation %s)", desc.Mutations[0].String())
}
// Note that desc is just used to verify that the version matches.
importing, err := descsCol.GetMutableTableVersionByID(ctx, desc.ID, txn)
if err != nil {
return nil, err
}
// Ensure that the version of the table has not been modified since this
// job was created.
if got, exp := importing.Version, desc.Version; got != exp {
return nil, errors.Errorf("another operation is currently operating on the table")
}
// Take the table offline for import.
// TODO(dt): audit everywhere we get table descs (leases or otherwise) to
// ensure that filtering by state handles IMPORTING correctly.
importing.SetOffline("importing")
// TODO(dt): de-validate all the FKs.
if err := descsCol.WriteDesc(
ctx, false /* kvTrace */, importing, txn,
); err != nil {
return nil, err
}
return importing.TableDesc(), nil
}
// prepareNewTablesForIngestion prepares descriptors for newly created
// tables being imported into.
func prepareNewTablesForIngestion(
ctx context.Context,
txn *kv.Txn,
descsCol *descs.Collection,
p sql.JobExecContext,
importTables []jobspb.ImportDetails_Table,
parentID descpb.ID,
schemaRewrites jobspb.DescRewriteMap,
) ([]*descpb.TableDescriptor, error) {
newMutableTableDescriptors := make([]*tabledesc.Mutable, len(importTables))
for i := range importTables {
newMutableTableDescriptors[i] = tabledesc.NewBuilder(importTables[i].Desc).BuildCreatedMutableTable()
}
	// Verification steps have passed, so generate a new table ID for each table
	// being imported. We do this last because we want to avoid calling
	// GenerateUniqueDescID if there's any kind of error above.
	// Reserving a table ID now means we can avoid the rekey work during ingestion.
//
// schemaRewrites may contain information which is used in rewrite.TableDescs
// to rewrite the parent schema ID in the table desc to point to the correct
// schema ID.
tableRewrites := schemaRewrites
if tableRewrites == nil {
tableRewrites = make(jobspb.DescRewriteMap)
}
seqVals := make(map[descpb.ID]int64, len(importTables))
for _, tableDesc := range importTables {
id, err := descidgen.GenerateUniqueDescID(ctx, p.ExecCfg().DB, p.ExecCfg().Codec)
if err != nil {
return nil, err
}
oldParentSchemaID := tableDesc.Desc.GetUnexposedParentSchemaID()
parentSchemaID := oldParentSchemaID
if rw, ok := schemaRewrites[oldParentSchemaID]; ok {
parentSchemaID = rw.ID
}
tableRewrites[tableDesc.Desc.ID] = &jobspb.DescriptorRewrite{
ID: id,
ParentSchemaID: parentSchemaID,
ParentID: parentID,
}
seqVals[id] = tableDesc.SeqVal
}
if err := rewrite.TableDescs(
newMutableTableDescriptors, tableRewrites, "",
); err != nil {
return nil, err
}
	// After all of the IDs have been remapped, ensure that there aren't any name
// collisions with any importing tables.
for i := range newMutableTableDescriptors {
tbl := newMutableTableDescriptors[i]
err := descsCol.Direct().CheckObjectCollision(
ctx,
txn,
tbl.GetParentID(),
tbl.GetParentSchemaID(),
tree.NewUnqualifiedTableName(tree.Name(tbl.GetName())),
)
if err != nil {
return nil, err
}
}
	// tableDescs contains the same descriptors as newMutableTableDescriptors,
	// but typed as catalog.TableDescriptor.
tableDescs := make([]catalog.TableDescriptor, len(newMutableTableDescriptors))
for i := range tableDescs {
newMutableTableDescriptors[i].SetOffline("importing")
tableDescs[i] = newMutableTableDescriptors[i]
}
var seqValKVs []roachpb.KeyValue
for _, desc := range newMutableTableDescriptors {
if v, ok := seqVals[desc.GetID()]; ok && v != 0 {
key, val, err := sql.MakeSequenceKeyVal(p.ExecCfg().Codec, desc, v, false)
if err != nil {
return nil, err
}
kv := roachpb.KeyValue{Key: key}
kv.Value.SetInt(val)
seqValKVs = append(seqValKVs, kv)
}
}
// Write the new TableDescriptors and flip the namespace entries over to
// them. After this call, any queries on a table will be served by the newly
// imported data.
if err := ingesting.WriteDescriptors(ctx, p.ExecCfg().Codec, txn, p.User(), descsCol,
nil /* databases */, nil, /* schemas */
tableDescs, nil, tree.RequestedDescriptors, seqValKVs, "" /* inheritParentName */); err != nil {
return nil, errors.Wrapf(err, "creating importTables")
}
newPreparedTableDescs := make([]*descpb.TableDescriptor, len(newMutableTableDescriptors))
for i := range newMutableTableDescriptors {
newPreparedTableDescs[i] = newMutableTableDescriptors[i].TableDesc()
}
return newPreparedTableDescs, nil
}
// prepareSchemasForIngestion is responsible for assigning the created schema
// descriptors actual IDs, updating the parent DB with references to the new
// schemas and writing the schema descriptors to disk.
func (r *importResumer) prepareSchemasForIngestion(
ctx context.Context,
p sql.JobExecContext,
details jobspb.ImportDetails,
txn *kv.Txn,
descsCol *descs.Collection,
) (*preparedSchemaMetadata, error) {
schemaMetadata := &preparedSchemaMetadata{
schemaPreparedDetails: details,
newSchemaIDToName: make(map[descpb.ID]string),
oldSchemaIDToName: make(map[descpb.ID]string),
}
schemaMetadata.schemaPreparedDetails.Schemas = make([]jobspb.ImportDetails_Schema,
len(details.Schemas))
desc, err := descsCol.GetMutableDescriptorByID(ctx, txn, details.ParentID)
if err != nil {
return nil, err
}
dbDesc, ok := desc.(*dbdesc.Mutable)
if !ok {
return nil, errors.Newf("expected ID %d to refer to the database being imported into",
details.ParentID)
}
schemaMetadata.schemaRewrites = make(jobspb.DescRewriteMap)
mutableSchemaDescs := make([]*schemadesc.Mutable, 0)
for _, desc := range details.Schemas {
schemaMetadata.oldSchemaIDToName[desc.Desc.GetID()] = desc.Desc.GetName()
newMutableSchemaDescriptor := schemadesc.NewBuilder(desc.Desc).BuildCreatedMutable().(*schemadesc.Mutable)
// Verification steps have passed, generate a new schema ID. We do this
// last because we want to avoid calling GenerateUniqueDescID if there's
// any kind of error in the prior stages of import.
id, err := descidgen.GenerateUniqueDescID(ctx, p.ExecCfg().DB, p.ExecCfg().Codec)
if err != nil {
return nil, err
}
newMutableSchemaDescriptor.Version = 1
newMutableSchemaDescriptor.ID = id
mutableSchemaDescs = append(mutableSchemaDescs, newMutableSchemaDescriptor)
schemaMetadata.newSchemaIDToName[id] = newMutableSchemaDescriptor.GetName()
// Update the parent database with this schema information.
dbDesc.AddSchemaToDatabase(newMutableSchemaDescriptor.Name,
descpb.DatabaseDescriptor_SchemaInfo{ID: newMutableSchemaDescriptor.ID})
schemaMetadata.schemaRewrites[desc.Desc.ID] = &jobspb.DescriptorRewrite{
ID: id,
}
}
// Queue a job to write the updated database descriptor.
schemaMetadata.queuedSchemaJobs, err = writeNonDropDatabaseChange(ctx, dbDesc, txn, descsCol, p,
fmt.Sprintf("updating parent database %s when importing new schemas", dbDesc.GetName()))
if err != nil {
return nil, err
}
// Finally create the schemas on disk.
for i, mutDesc := range mutableSchemaDescs {
nameKey := catalogkeys.MakeSchemaNameKey(p.ExecCfg().Codec, dbDesc.ID, mutDesc.GetName())
err = createSchemaDescriptorWithID(ctx, nameKey, mutDesc.ID, mutDesc, p, descsCol, txn)
if err != nil {
return nil, err
}
schemaMetadata.schemaPreparedDetails.Schemas[i] = jobspb.ImportDetails_Schema{
Desc: mutDesc.SchemaDesc(),
}
}
return schemaMetadata, err
}
// createSchemaDescriptorWithID writes a schema descriptor with `id` to disk.
func createSchemaDescriptorWithID(
ctx context.Context,
idKey roachpb.Key,
id descpb.ID,
descriptor catalog.Descriptor,
p sql.JobExecContext,
descsCol *descs.Collection,
txn *kv.Txn,
) error {
if descriptor.GetID() == descpb.InvalidID {
return errors.AssertionFailedf("cannot create descriptor with an empty ID: %v", descriptor)
}
if descriptor.GetID() != id {
return errors.AssertionFailedf("cannot create descriptor with an ID %v; expected ID %v; descriptor %v",
id, descriptor.GetID(), descriptor)
}
b := &kv.Batch{}
descID := descriptor.GetID()
if p.ExtendedEvalContext().Tracing.KVTracingEnabled() {
log.VEventf(ctx, 2, "CPut %s -> %d", idKey, descID)
}
b.CPut(idKey, descID, nil)
if err := descsCol.Direct().WriteNewDescToBatch(
ctx,
p.ExtendedEvalContext().Tracing.KVTracingEnabled(),
b,
descriptor,
); err != nil {
return err
}
mutDesc, ok := descriptor.(catalog.MutableDescriptor)
if !ok {
return errors.Newf("unexpected type %T when creating descriptor", descriptor)
}
switch mutDesc.(type) {
case *schemadesc.Mutable:
if err := descsCol.AddUncommittedDescriptor(mutDesc); err != nil {
return err
}
default:
return errors.Newf("unexpected type %T when creating descriptor", mutDesc)
}
return txn.Run(ctx, b)
}
// parseBundleSchemaIfNeeded parses dump files (PGDUMP, MYSQLDUMP) for DDL
// statements and creates the relevant database, schema, table and type
// descriptors. Data from the dump files is ingested into these descriptors in
// the next phase of the import.
func (r *importResumer) parseBundleSchemaIfNeeded(ctx context.Context, phs interface{}) error {
p := phs.(sql.JobExecContext)
seqVals := make(map[descpb.ID]int64)
details := r.job.Details().(jobspb.ImportDetails)
skipFKs := details.SkipFKs
parentID := details.ParentID
files := details.URIs
format := details.Format
owner := r.job.Payload().UsernameProto.Decode()
p.SessionDataMutatorIterator().SetSessionDefaultIntSize(details.DefaultIntSize)
if details.ParseBundleSchema {
var span *tracing.Span
ctx, span = tracing.ChildSpan(ctx, "import-parsing-bundle-schema")
defer span.Finish()
if err := r.job.RunningStatus(ctx, nil /* txn */, func(_ context.Context, _ jobspb.Details) (jobs.RunningStatus, error) {
return runningStatusImportBundleParseSchema, nil
}); err != nil {
return errors.Wrapf(err, "failed to update running status of job %d", errors.Safe(r.job.ID()))
}
var dbDesc catalog.DatabaseDescriptor
{
if err := sql.DescsTxn(ctx, p.ExecCfg(), func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection,
) (err error) {
_, dbDesc, err = descriptors.GetImmutableDatabaseByID(ctx, txn, parentID, tree.DatabaseLookupFlags{
Required: true,
AvoidLeased: true,
})
if err != nil {
return err
}
return err
}); err != nil {
return err
}
}
var schemaDescs []*schemadesc.Mutable
var tableDescs []*tabledesc.Mutable
var err error
walltime := p.ExecCfg().Clock.Now().WallTime
if tableDescs, schemaDescs, err = parseAndCreateBundleTableDescs(
ctx, p, details, seqVals, skipFKs, dbDesc, files, format, walltime, owner,
r.job.ID()); err != nil {
return err
}
schemaDetails := make([]jobspb.ImportDetails_Schema, len(schemaDescs))
for i, schemaDesc := range schemaDescs {
schemaDetails[i] = jobspb.ImportDetails_Schema{Desc: schemaDesc.SchemaDesc()}
}
details.Schemas = schemaDetails
tableDetails := make([]jobspb.ImportDetails_Table, len(tableDescs))
for i, tableDesc := range tableDescs {
tableDetails[i] = jobspb.ImportDetails_Table{
Name: tableDesc.GetName(),
Desc: tableDesc.TableDesc(),
SeqVal: seqVals[tableDescs[i].ID],
IsNew: true,
}
}
details.Tables = tableDetails
for _, tbl := range tableDescs {
// For reasons relating to #37691, we disallow user defined types in
// the standard IMPORT case.
for _, col := range tbl.Columns {
if col.Type.UserDefined() {
return errors.Newf("IMPORT cannot be used with user defined types; use IMPORT INTO instead")
}
}
}
// Prevent job from redoing schema parsing and table desc creation
// on subsequent resumptions.
details.ParseBundleSchema = false
if err := r.job.SetDetails(ctx, nil /* txn */, details); err != nil {
return err
}
}
return nil
}
func getPublicSchemaDescForDatabase(
ctx context.Context, execCfg *sql.ExecutorConfig, db catalog.DatabaseDescriptor,
) (scDesc catalog.SchemaDescriptor, err error) {
if !execCfg.Settings.Version.IsActive(ctx, clusterversion.PublicSchemasWithDescriptors) {
return schemadesc.GetPublicSchema(), err
}
if err := sql.DescsTxn(ctx, execCfg, func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection,
) error {
publicSchemaID := db.GetSchemaID(tree.PublicSchema)
scDesc, err = descriptors.GetImmutableSchemaByID(ctx, txn, publicSchemaID, tree.SchemaLookupFlags{Required: true})
return err
}); err != nil {
return nil, err
}
return scDesc, nil
}
// parseAndCreateBundleTableDescs parses and creates the table
// descriptors for bundle formats.
func parseAndCreateBundleTableDescs(
ctx context.Context,
p sql.JobExecContext,
details jobspb.ImportDetails,
seqVals map[descpb.ID]int64,
skipFKs bool,
parentDB catalog.DatabaseDescriptor,
files []string,
format roachpb.IOFileFormat,
walltime int64,
owner security.SQLUsername,
jobID jobspb.JobID,
) ([]*tabledesc.Mutable, []*schemadesc.Mutable, error) {
var schemaDescs []*schemadesc.Mutable
var tableDescs []*tabledesc.Mutable
var tableName string
// A single table entry in the import job details when importing a bundle format
// indicates that we are performing a single table import.
// This info is populated during the planning phase.
if len(details.Tables) > 0 {
tableName = details.Tables[0].Name
}
store, err := p.ExecCfg().DistSQLSrv.ExternalStorageFromURI(ctx, files[0], p.User())
if err != nil {
return tableDescs, schemaDescs, err
}
defer store.Close()
raw, err := store.ReadFile(ctx, "")
if err != nil {
return tableDescs, schemaDescs, err
}
defer raw.Close(ctx)
reader, err := decompressingReader(ioctx.ReaderCtxAdapter(ctx, raw), files[0], format.Compression)
if err != nil {
return tableDescs, schemaDescs, err
}
defer reader.Close()
fks := fkHandler{skip: skipFKs, allowed: true, resolver: fkResolver{
tableNameToDesc: make(map[string]*tabledesc.Mutable),
}}
switch format.Format {
case roachpb.IOFileFormat_Mysqldump:
id, err := descidgen.PeekNextUniqueDescID(ctx, p.ExecCfg().DB, p.ExecCfg().Codec)
if err != nil {
return tableDescs, schemaDescs, err
}
fks.resolver.format.Format = roachpb.IOFileFormat_Mysqldump
evalCtx := &p.ExtendedEvalContext().EvalContext
tableDescs, err = readMysqlCreateTable(
ctx, reader, evalCtx, p, id, parentDB, tableName, fks,
seqVals, owner, walltime,
)
if err != nil {
return tableDescs, schemaDescs, err
}
case roachpb.IOFileFormat_PgDump:
fks.resolver.format.Format = roachpb.IOFileFormat_PgDump
evalCtx := &p.ExtendedEvalContext().EvalContext
// Setup a logger to handle unsupported DDL statements in the PGDUMP file.
unsupportedStmtLogger := makeUnsupportedStmtLogger(ctx, p.User(), int64(jobID),
format.PgDump.IgnoreUnsupported, format.PgDump.IgnoreUnsupportedLog, schemaParsing,
p.ExecCfg().DistSQLSrv.ExternalStorage)
tableDescs, schemaDescs, err = readPostgresCreateTable(ctx, reader, evalCtx, p, tableName,
parentDB, walltime, fks, int(format.PgDump.MaxRowSize), owner, unsupportedStmtLogger)
logErr := unsupportedStmtLogger.flush()
if logErr != nil {
return nil, nil, logErr
}
default:
return tableDescs, schemaDescs, errors.Errorf(
"non-bundle format %q does not support reading schemas", format.Format.String())
}
if err != nil {
return tableDescs, schemaDescs, err
}
if tableDescs == nil && len(details.Tables) > 0 {
return tableDescs, schemaDescs, errors.Errorf("table definition not found for %q", tableName)
}
return tableDescs, schemaDescs, err
}
// publishTables updates the status of imported tables from OFFLINE to PUBLIC.
func (r *importResumer) publishTables(
ctx context.Context, execCfg *sql.ExecutorConfig, res roachpb.BulkOpSummary,
) error {
details := r.job.Details().(jobspb.ImportDetails)
// Tables should only be published once.
if details.TablesPublished {
return nil
}
// Write stub statistics for new tables created during the import. This should
// be sufficient until the CREATE STATISTICS run finishes.
r.writeStubStatisticsForImportedTables(ctx, execCfg, res)
log.Event(ctx, "making tables live")
err := sql.DescsTxn(ctx, execCfg, func(
ctx context.Context, txn *kv.Txn, descsCol *descs.Collection,
) error {
b := txn.NewBatch()
for _, tbl := range details.Tables {
newTableDesc, err := descsCol.GetMutableTableVersionByID(ctx, tbl.Desc.ID, txn)
if err != nil {
return err
}
newTableDesc.SetPublic()
if !tbl.IsNew {
// NB: This is not using AllNonDropIndexes or directly mutating the
// constraints returned by the other usual helpers because we need to
// replace the `OutboundFKs` and `Checks` slices of newTableDesc with copies
// that we can mutate. We need to do that because newTableDesc is a shallow
// copy of tbl.Desc that we'll be asserting is the current version when we
// CPut below.
//
// Set FK constraints to unvalidated before publishing the table imported
// into.
newTableDesc.OutboundFKs = make([]descpb.ForeignKeyConstraint, len(newTableDesc.OutboundFKs))
copy(newTableDesc.OutboundFKs, tbl.Desc.OutboundFKs)
for i := range newTableDesc.OutboundFKs {
newTableDesc.OutboundFKs[i].Validity = descpb.ConstraintValidity_Unvalidated
}
// Set CHECK constraints to unvalidated before publishing the table imported into.
for _, c := range newTableDesc.AllActiveAndInactiveChecks() {
c.Validity = descpb.ConstraintValidity_Unvalidated
}
}
// TODO(dt): re-validate any FKs?
if err := descsCol.WriteDescToBatch(
ctx, false /* kvTrace */, newTableDesc, b,
); err != nil {
return errors.Wrapf(err, "publishing table %d", newTableDesc.ID)
}
}
if err := txn.Run(ctx, b); err != nil {
return errors.Wrap(err, "publishing tables")
}
// Update job record to mark tables published state as complete.
details.TablesPublished = true
err := r.job.SetDetails(ctx, txn, details)
if err != nil {
return errors.Wrap(err, "updating job details after publishing tables")
}
return nil
})
if err != nil {
return err
}
// Initiate a run of CREATE STATISTICS. We don't know the actual number of
// rows affected per table, so we use a large number because we want to make
// sure that stats always get created/refreshed here.
for i := range details.Tables {
desc := tabledesc.NewBuilder(details.Tables[i].Desc).BuildImmutableTable()
execCfg.StatsRefresher.NotifyMutation(desc, math.MaxInt32 /* rowsAffected */)
}
return nil
}
// writeStubStatisticsForImportedTables writes "stub" statistics for new tables
// created during an import.
func (r *importResumer) writeStubStatisticsForImportedTables(
ctx context.Context, execCfg *sql.ExecutorConfig, res roachpb.BulkOpSummary,
) {
details := r.job.Details().(jobspb.ImportDetails)
for _, tbl := range details.Tables {
if tbl.IsNew {
desc := tabledesc.NewBuilder(tbl.Desc).BuildImmutableTable()
id := roachpb.BulkOpSummaryID(uint64(desc.GetID()), uint64(desc.GetPrimaryIndexID()))
rowCount := uint64(res.EntryCounts[id])
// TODO(michae2): collect distinct and null counts during import.
distinctCount := uint64(float64(rowCount) * memo.UnknownDistinctCountRatio)
nullCount := uint64(float64(rowCount) * memo.UnknownNullCountRatio)
avgRowSize := uint64(memo.UnknownAvgRowSize)
// Because we don't yet have real distinct and null counts, only produce
// single-column stats to avoid the appearance of perfectly correlated
// columns.
multiColEnabled := false
statistics, err := sql.StubTableStats(desc, jobspb.ImportStatsName, multiColEnabled)
if err == nil {
for _, statistic := range statistics {
statistic.RowCount = rowCount
statistic.DistinctCount = distinctCount
statistic.NullCount = nullCount
statistic.AvgSize = avgRowSize
}
// TODO(michae2): parallelize insertion of statistics.
err = stats.InsertNewStats(ctx, execCfg.Settings, execCfg.InternalExecutor, nil /* txn */, statistics)
}
if err != nil {
// Failure to create statistics should not fail the entire import.
log.Warningf(
ctx, "error while creating statistics during import of %q: %v",
desc.GetName(), err,
)
}
}
}
}
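// Illustrative note: the stub statistics written above are derived purely from
// the ingested row count. As a hypothetical worked example, assuming for
// illustration that memo.UnknownDistinctCountRatio is 0.1 and
// memo.UnknownNullCountRatio is 0.01 (the exact constants live in pkg/sql/opt/memo):
//
//	rowCount      := uint64(10000)                    // from res.EntryCounts
//	distinctCount := uint64(float64(rowCount) * 0.1)  // -> 1000
//	nullCount     := uint64(float64(rowCount) * 0.01) // -> 100
//
// Every single-column stub statistic then carries these same counts until a
// real CREATE STATISTICS run replaces them.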
// publishSchemas updates the status of imported schemas from OFFLINE to PUBLIC.
func (r *importResumer) publishSchemas(ctx context.Context, execCfg *sql.ExecutorConfig) error {
details := r.job.Details().(jobspb.ImportDetails)
// Schemas should only be published once.
if details.SchemasPublished {
return nil
}
log.Event(ctx, "making schemas live")
return sql.DescsTxn(ctx, execCfg, func(
ctx context.Context, txn *kv.Txn, descsCol *descs.Collection,
) error {
b := txn.NewBatch()
for _, schema := range details.Schemas {
newDesc, err := descsCol.GetMutableDescriptorByID(ctx, txn, schema.Desc.GetID())
if err != nil {
return err
}
newSchemaDesc, ok := newDesc.(*schemadesc.Mutable)
if !ok {
return errors.Newf("expected schema descriptor with ID %v, got %v",
schema.Desc.GetID(), newDesc)
}
newSchemaDesc.SetPublic()
if err := descsCol.WriteDescToBatch(
ctx, false /* kvTrace */, newSchemaDesc, b,
); err != nil {
return errors.Wrapf(err, "publishing schema %d", newSchemaDesc.ID)
}
}
if err := txn.Run(ctx, b); err != nil {
return errors.Wrap(err, "publishing schemas")
}
// Update job record to mark tables published state as complete.
details.SchemasPublished = true
err := r.job.SetDetails(ctx, txn, details)
if err != nil {
return errors.Wrap(err, "updating job details after publishing schemas")
}
return nil
})
}
// checkVirtualConstraints checks constraints that are enforced via runtime
// checks, such as uniqueness checks that are not directly backed by an index.
func (*importResumer) checkVirtualConstraints(
ctx context.Context, execCfg *sql.ExecutorConfig, job *jobs.Job,
) error {
for _, tbl := range job.Details().(jobspb.ImportDetails).Tables {
desc := tabledesc.NewBuilder(tbl.Desc).BuildExistingMutableTable()
desc.SetPublic()
if sql.HasVirtualUniqueConstraints(desc) {
if err := job.RunningStatus(ctx, nil /* txn */, func(_ context.Context, _ jobspb.Details) (jobs.RunningStatus, error) {
return jobs.RunningStatus(fmt.Sprintf("re-validating %s", desc.GetName())), nil
}); err != nil {
return errors.Wrapf(err, "failed to update running status of job %d", errors.Safe(job.ID()))
}
}
if err := execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
ie := execCfg.InternalExecutorFactory(ctx, sql.NewFakeSessionData(execCfg.SV()))
return ie.WithSyntheticDescriptors([]catalog.Descriptor{desc}, func() error {
return sql.RevalidateUniqueConstraintsInTable(ctx, txn, ie, desc)
})
}); err != nil {
return err
}
}
return nil
}
// checkForUDTModification checks whether any of the types referenced by the
// table being imported into have been modified incompatibly since they were
// read during import planning. If they have, it may be unsafe to continue
// with the import since we could be ingesting data that is no longer valid
// for the type.
//
// E.g.: renaming an enum value mid-import could result in the import ingesting a
// value that is no longer valid.
//
// TODO(SQL Schema): This method might be unnecessarily aggressive in failing
// the import. The semantics of what concurrent type changes are/are not safe
// during an IMPORT still need to be ironed out. Once they are, we can make this
// method more conservative in what it uses to deem a type change dangerous. At
// the time of writing, changes to privileges and back-references are supported.
// Additions of new values could be supported but are not. Renaming of logical
// enum values or removal of enum values will need to forever remain
// incompatible.
func (r *importResumer) checkForUDTModification(
ctx context.Context, execCfg *sql.ExecutorConfig,
) error {
details := r.job.Details().(jobspb.ImportDetails)
if details.Types == nil {
return nil
}
// typeDescsAreEquivalent returns true if a and b are the same types save
// for the version, modification time, privileges, or the set of referencing
// descriptors.
typeDescsAreEquivalent := func(a, b *descpb.TypeDescriptor) (bool, error) {
clearIgnoredFields := func(d *descpb.TypeDescriptor) *descpb.TypeDescriptor {
d = protoutil.Clone(d).(*descpb.TypeDescriptor)
d.ModificationTime = hlc.Timestamp{}
d.Privileges = nil
d.Version = 0
d.ReferencingDescriptorIDs = nil
return d
}
aData, err := protoutil.Marshal(clearIgnoredFields(a))
if err != nil {
return false, err
}
bData, err := protoutil.Marshal(clearIgnoredFields(b))
if err != nil {
return false, err
}
return bytes.Equal(aData, bData), nil
}
// checkTypeIsEquivalent checks that the current version of the type as
// retrieved from the collection is equivalent to the previously saved
// type descriptor used by the import.
checkTypeIsEquivalent := func(
ctx context.Context, txn *kv.Txn, col *descs.Collection,
savedTypeDesc *descpb.TypeDescriptor,
) error {
typeDesc, err := col.Direct().MustGetTypeDescByID(ctx, txn, savedTypeDesc.GetID())
if err != nil {
return errors.Wrap(err, "resolving type descriptor when checking version mismatch")
}
if typeDesc.GetModificationTime() == savedTypeDesc.GetModificationTime() {
return nil
}
equivalent, err := typeDescsAreEquivalent(typeDesc.TypeDesc(), savedTypeDesc)
if err != nil {
return errors.NewAssertionErrorWithWrappedErrf(
err, "failed to check for type descriptor equivalence for type %q (%d)",
typeDesc.GetName(), typeDesc.GetID())
}
if equivalent {
return nil
}
return errors.WithHint(
errors.Newf(
"type descriptor %q (%d) has been modified, potentially incompatibly,"+
" since import planning; aborting to avoid possible corruption",
typeDesc.GetName(), typeDesc.GetID(),
),
"retrying the IMPORT operation may succeed if the operation concurrently"+
" modifying the descriptor does not reoccur during the retry attempt",
)
}
checkTypesAreEquivalent := func(
ctx context.Context, txn *kv.Txn, col *descs.Collection,
) error {
for _, savedTypeDesc := range details.Types {
if err := checkTypeIsEquivalent(
ctx, txn, col, savedTypeDesc.Desc,
); err != nil {
return err
}
}
return nil
}
return sql.DescsTxn(ctx, execCfg, checkTypesAreEquivalent)
}
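// The equivalence check above follows a general pattern: clone the proto, zero
// out the fields that are allowed to differ, then compare the marshaled bytes.
// A minimal, hypothetical sketch of the same pattern (Thing is an invented
// placeholder message; the real code operates on descpb.TypeDescriptor):
//
//	func thingsAreEquivalent(a, b *Thing) (bool, error) {
//		clear := func(t *Thing) *Thing {
//			t = protoutil.Clone(t).(*Thing)
//			t.Version = 0 // field allowed to differ
//			return t
//		}
//		aData, err := protoutil.Marshal(clear(a))
//		if err != nil {
//			return false, err
//		}
//		bData, err := protoutil.Marshal(clear(b))
//		if err != nil {
//			return false, err
//		}
//		return bytes.Equal(aData, bData), nil
//	}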
func ingestWithRetry(
ctx context.Context,
execCtx sql.JobExecContext,
job *jobs.Job,
tables map[string]*execinfrapb.ReadImportDataSpec_ImportTable,
typeDescs []*descpb.TypeDescriptor,
from []string,
format roachpb.IOFileFormat,
walltime int64,
alwaysFlushProgress bool,
procsPerNode int,
) (roachpb.BulkOpSummary, error) {
resumerSpan := tracing.SpanFromContext(ctx)
// We retry on pretty generic failures -- any rpc error. If a worker node were
// to restart, it would produce this kind of error, but there may be other
	// errors that are also rpc errors. Don't retry too aggressively.
retryOpts := retry.Options{
MaxBackoff: 1 * time.Second,
MaxRetries: 5,
}
// We want to retry an import if there are transient failures (i.e. worker
// nodes dying), so if we receive a retryable error, re-plan and retry the
// import.
var res roachpb.BulkOpSummary
var err error
var retryCount int32
for r := retry.StartWithCtx(ctx, retryOpts); r.Next(); {
for {
retryCount++
resumerSpan.RecordStructured(&roachpb.RetryTracingEvent{
Operation: "importResumer.ingestWithRetry",
AttemptNumber: retryCount,
RetryError: tracing.RedactAndTruncateError(err),
})
res, err = distImport(ctx, execCtx, job, tables, typeDescs, from, format, walltime,
alwaysFlushProgress, procsPerNode)
// Replanning errors should not count towards retry limits.
if err == nil || !errors.Is(err, sql.ErrPlanChanged) {
break
}
}
if err == nil {
break
}
if errors.HasType(err, &roachpb.InsufficientSpaceError{}) {
return res, jobs.MarkPauseRequestError(errors.UnwrapAll(err))
}
if joberror.IsPermanentBulkJobError(err) {
return res, err
}
// Re-load the job in order to update our progress object, which may have
// been updated by the changeFrontier processor since the flow started.
reloadedJob, reloadErr := execCtx.ExecCfg().JobRegistry.LoadClaimedJob(ctx, job.ID())
if reloadErr != nil {
if ctx.Err() != nil {
return res, ctx.Err()
}
log.Warningf(ctx, `IMPORT job %d could not reload job progress when retrying: %+v`,
int64(job.ID()), reloadErr)
} else {
job = reloadedJob
}
log.Warningf(ctx, `encountered retryable error: %+v`, err)
}
if err != nil {
return roachpb.BulkOpSummary{}, errors.Wrap(err, "exhausted retries")
}
return res, nil
}
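// A minimal sketch of the retry shape used above, with invented placeholder
// helpers (doWork, isReplanError, isPermanent): the inner loop retries
// replanning errors without consuming retry.Options attempts, while the outer
// loop applies backoff to every other retryable error.
//
//	opts := retry.Options{MaxBackoff: 1 * time.Second, MaxRetries: 5}
//	var err error
//	for r := retry.StartWithCtx(ctx, opts); r.Next(); {
//		for {
//			err = doWork(ctx)
//			if err == nil || !isReplanError(err) {
//				break // only replanning errors stay in the inner loop
//			}
//		}
//		if err == nil || isPermanent(err) {
//			break
//		}
//	}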
// emitImportJobEvent emits an import job event to the event log.
func emitImportJobEvent(
ctx context.Context, p sql.JobExecContext, status jobs.Status, job *jobs.Job,
) {
var importEvent eventpb.Import
if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
return sql.LogEventForJobs(ctx, p.ExecCfg(), txn, &importEvent, int64(job.ID()),
job.Payload(), p.User(), status)
}); err != nil {
log.Warningf(ctx, "failed to log event: %v", err)
}
}
func constructSchemaAndTableKey(
ctx context.Context,
tableDesc *descpb.TableDescriptor,
schemaIDToName map[descpb.ID]string,
version clusterversion.Handle,
) (schemaAndTableName, error) {
if !version.IsActive(ctx, clusterversion.PublicSchemasWithDescriptors) {
if tableDesc.UnexposedParentSchemaID == keys.PublicSchemaIDForBackup {
return schemaAndTableName{schema: "", table: tableDesc.GetName()}, nil
}
}
schemaName, ok := schemaIDToName[tableDesc.GetUnexposedParentSchemaID()]
if !ok && schemaName != tree.PublicSchema {
return schemaAndTableName{}, errors.Newf("invalid parent schema %s with ID %d for table %s",
schemaName, tableDesc.UnexposedParentSchemaID, tableDesc.GetName())
}
return schemaAndTableName{schema: schemaName, table: tableDesc.GetName()}, nil
}
func writeNonDropDatabaseChange(
ctx context.Context,
desc *dbdesc.Mutable,
txn *kv.Txn,
descsCol *descs.Collection,
p sql.JobExecContext,
jobDesc string,
) ([]jobspb.JobID, error) {
var job *jobs.Job
var err error
if job, err = createNonDropDatabaseChangeJob(p.User(), desc.ID, jobDesc, p, txn); err != nil {
return nil, err
}
queuedJob := []jobspb.JobID{job.ID()}
b := txn.NewBatch()
err = descsCol.WriteDescToBatch(
ctx,
p.ExtendedEvalContext().Tracing.KVTracingEnabled(),
desc,
b,
)
if err != nil {
return nil, err
}
return queuedJob, txn.Run(ctx, b)
}
func createNonDropDatabaseChangeJob(
user security.SQLUsername,
databaseID descpb.ID,
jobDesc string,
p sql.JobExecContext,
txn *kv.Txn,
) (*jobs.Job, error) {
jobRecord := jobs.Record{
Description: jobDesc,
Username: user,
Details: jobspb.SchemaChangeDetails{
DescID: databaseID,
FormatVersion: jobspb.DatabaseJobFormatVersion,
},
Progress: jobspb.SchemaChangeProgress{},
NonCancelable: true,
}
jobID := p.ExecCfg().JobRegistry.MakeJobID()
return p.ExecCfg().JobRegistry.CreateJobWithTxn(
p.ExtendedEvalContext().Context,
jobRecord,
jobID,
txn,
)
}
// OnFailOrCancel is part of the jobs.Resumer interface. Removes data that has
// been committed from an import that has failed or been canceled. It does this
// by putting the table descriptors into the DROP state, which causes the schema
// change GC machinery to delete the keys in the background.
func (r *importResumer) OnFailOrCancel(ctx context.Context, execCtx interface{}) error {
p := execCtx.(sql.JobExecContext)
// Emit to the event log that the job has started reverting.
emitImportJobEvent(ctx, p, jobs.StatusReverting, r.job)
details := r.job.Details().(jobspb.ImportDetails)
addToFileFormatTelemetry(details.Format.Format.String(), "failed")
cfg := execCtx.(sql.JobExecContext).ExecCfg()
var jobsToRunAfterTxnCommit []jobspb.JobID
if err := sql.DescsTxn(ctx, cfg, func(
ctx context.Context, txn *kv.Txn, descsCol *descs.Collection,
) error {
if err := r.dropTables(ctx, txn, descsCol, cfg); err != nil {
return err
}
// Drop all the schemas which may have been created during a bundle import.
// These schemas should now be empty as all the tables in them would be new
// tables created during the import, and therefore dropped by the above
// dropTables method. This allows us to avoid "collecting" objects in the
// schema before dropping the descriptor.
var err error
jobsToRunAfterTxnCommit, err = r.dropSchemas(ctx, txn, descsCol, cfg, p)
if err != nil {
return err
}
// TODO(adityamaru): Remove in 22.1 since we do not write PTS records during
// IMPORT INTO from 21.2+.
return r.releaseProtectedTimestamp(ctx, txn, cfg.ProtectedTimestampProvider)
}); err != nil {
return err
}
// Run any jobs which might have been queued when dropping the schemas.
// This would be a job to drop all the schemas, and a job to update the parent
// database descriptor.
if len(jobsToRunAfterTxnCommit) != 0 {
if err := p.ExecCfg().JobRegistry.Run(ctx, p.ExecCfg().InternalExecutor,
jobsToRunAfterTxnCommit); err != nil {
return errors.Wrap(err, "failed to run jobs that drop the imported schemas")
}
}
// Emit to the event log that the job has completed reverting.
emitImportJobEvent(ctx, p, jobs.StatusFailed, r.job)
return nil
}
// dropTables implements the OnFailOrCancel logic.
func (r *importResumer) dropTables(
ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, execCfg *sql.ExecutorConfig,
) error {
details := r.job.Details().(jobspb.ImportDetails)
dropTime := int64(1)
// If the prepare step of the import job was not completed then the
// descriptors do not need to be rolled back as the txn updating them never
// completed.
if !details.PrepareComplete {
return nil
}
var revert []catalog.TableDescriptor
var empty []catalog.TableDescriptor
for _, tbl := range details.Tables {
if !tbl.IsNew {
desc, err := descsCol.GetMutableTableVersionByID(ctx, tbl.Desc.ID, txn)
if err != nil {
return err
}
imm := desc.ImmutableCopy().(catalog.TableDescriptor)
if tbl.WasEmpty {
empty = append(empty, imm)
} else {
revert = append(revert, imm)
}
}
}
// The walltime can be 0 if there is a failure between publishing the tables
	// as OFFLINE and then choosing an ingestion timestamp. This might happen
	// while waiting for the descriptor version to propagate across the cluster,
	// for example.
	//
	// In this case, we don't want to roll back the data since data ingestion has
	// not yet begun (we have not yet chosen a timestamp at which to ingest).
if details.Walltime != 0 && len(revert) > 0 {
// NB: if a revert fails it will abort the rest of this failure txn, which is
// also what brings tables back online. We _could_ change the error handling
// or just move the revert into Resume()'s error return path, however it isn't
// clear that just bringing a table back online with partially imported data
// that may or may not be partially reverted is actually a good idea. It seems
		// better to do the revert here so that the table comes back if and only if
		// it was rolled back to its pre-IMPORT state, and instead provide a manual
// admin knob (e.g. ALTER TABLE REVERT TO SYSTEM TIME) if anything goes wrong.
ts := hlc.Timestamp{WallTime: details.Walltime}.Prev()
		// Because writes used disallowShadowingBelow=writeTS, no existing keys could
		// have been covered by an imported key, and the table was offline to other
		// writes; so even if GC has run, it would not have GC'ed any keys to which
		// we need to revert, and we can safely ignore the target-time GC check.
const ignoreGC = true
if err := sql.RevertTables(ctx, txn.DB(), execCfg, revert, ts, ignoreGC, sql.RevertTableDefaultBatchSize); err != nil {
return errors.Wrap(err, "rolling back partially completed IMPORT")
}
}
for i := range empty {
// Set a DropTime on the table descriptor to differentiate it from an
// older-format (v1.1) descriptor. This enables ClearTableData to use a
// RangeClear for faster data removal, rather than removing by chunks.
empty[i].TableDesc().DropTime = dropTime
if err := gcjob.ClearTableData(
ctx, execCfg.DB, execCfg.DistSender, execCfg.Codec, &execCfg.Settings.SV, empty[i],
); err != nil {
return errors.Wrapf(err, "clearing data for table %d", empty[i].GetID())
}
}
b := txn.NewBatch()
tablesToGC := make([]descpb.ID, 0, len(details.Tables))
toWrite := make([]*tabledesc.Mutable, 0, len(details.Tables))
for _, tbl := range details.Tables {
newTableDesc, err := descsCol.GetMutableTableVersionByID(ctx, tbl.Desc.ID, txn)
if err != nil {
return err
}
if tbl.IsNew {
newTableDesc.SetDropped()
			// If the DropTime is set, a table uses RangeClear for fast data removal. This
// operation starts at DropTime + the GC TTL. If we used now() here, it would
// not clean up data until the TTL from the time of the error. Instead, use 1
// (that is, 1ns past the epoch) to allow this to be cleaned up as soon as
// possible. This is safe since the table data was never visible to users,
// and so we don't need to preserve MVCC semantics.
newTableDesc.DropTime = dropTime
b.Del(catalogkeys.EncodeNameKey(execCfg.Codec, newTableDesc))
tablesToGC = append(tablesToGC, newTableDesc.ID)
descsCol.AddDeletedDescriptor(newTableDesc.GetID())
} else {
// IMPORT did not create this table, so we should not drop it.
newTableDesc.SetPublic()
}
// Accumulate the changes before adding them to the batch to avoid
// making any table invalid before having read it.
toWrite = append(toWrite, newTableDesc)
}
for _, d := range toWrite {
const kvTrace = false
if err := descsCol.WriteDescToBatch(ctx, kvTrace, d, b); err != nil {
return err
}
}
// Queue a GC job.
gcDetails := jobspb.SchemaChangeGCDetails{}
for _, tableID := range tablesToGC {
gcDetails.Tables = append(gcDetails.Tables, jobspb.SchemaChangeGCDetails_DroppedID{
ID: tableID,
DropTime: dropTime,
})
}
gcJobRecord := jobs.Record{
Description: fmt.Sprintf("GC for %s", r.job.Payload().Description),
Username: r.job.Payload().UsernameProto.Decode(),
DescriptorIDs: tablesToGC,
Details: gcDetails,
Progress: jobspb.SchemaChangeGCProgress{},
NonCancelable: true,
}
if _, err := execCfg.JobRegistry.CreateJobWithTxn(
ctx, gcJobRecord, execCfg.JobRegistry.MakeJobID(), txn); err != nil {
return err
}
return errors.Wrap(txn.Run(ctx, b), "rolling back tables")
}
func (r *importResumer) dropSchemas(
ctx context.Context,
txn *kv.Txn,
descsCol *descs.Collection,
execCfg *sql.ExecutorConfig,
p sql.JobExecContext,
) ([]jobspb.JobID, error) {
details := r.job.Details().(jobspb.ImportDetails)
// If the prepare step of the import job was not completed then the
// descriptors do not need to be rolled back as the txn updating them never
// completed.
if !details.PrepareComplete || len(details.Schemas) == 0 {
return nil, nil
}
// Resolve the database descriptor.
desc, err := descsCol.GetMutableDescriptorByID(ctx, txn, details.ParentID)
if err != nil {
return nil, err
}
dbDesc, ok := desc.(*dbdesc.Mutable)
if !ok {
return nil, errors.Newf("expected ID %d to refer to the database being imported into",
details.ParentID)
}
droppedSchemaIDs := make([]descpb.ID, 0)
for _, schema := range details.Schemas {
desc, err := descsCol.GetMutableDescriptorByID(ctx, txn, schema.Desc.ID)
if err != nil {
return nil, err
}
var schemaDesc *schemadesc.Mutable
var ok bool
if schemaDesc, ok = desc.(*schemadesc.Mutable); !ok {
return nil, errors.Newf("unable to resolve schema desc with ID %d", schema.Desc.ID)
}
// Mark the descriptor as dropped and write it to the batch.
// Delete namespace entry or update draining names depending on version.
schemaDesc.SetDropped()
droppedSchemaIDs = append(droppedSchemaIDs, schemaDesc.GetID())
b := txn.NewBatch()
// TODO(postamar): remove version gate and else-block in 22.2
if execCfg.Settings.Version.IsActive(ctx, clusterversion.AvoidDrainingNames) {
if dbDesc.Schemas != nil {
delete(dbDesc.Schemas, schemaDesc.GetName())
}
b.Del(catalogkeys.EncodeNameKey(p.ExecCfg().Codec, schemaDesc))
} else {
//lint:ignore SA1019 removal of deprecated method call scheduled for 22.2
schemaDesc.AddDrainingName(descpb.NameInfo{
ParentID: details.ParentID,
ParentSchemaID: keys.RootNamespaceID,
Name: schemaDesc.Name,
})
// Update the parent database with information about the dropped schema.
dbDesc.AddSchemaToDatabase(schema.Desc.Name, descpb.DatabaseDescriptor_SchemaInfo{ID: dbDesc.ID, Dropped: true})
}
if err := descsCol.WriteDescToBatch(ctx, p.ExtendedEvalContext().Tracing.KVTracingEnabled(),
schemaDesc, b); err != nil {
return nil, err
}
err = txn.Run(ctx, b)
if err != nil {
return nil, err
}
}
// Write out the change to the database. This only creates a job record to be
// run after the txn commits.
queuedJob, err := writeNonDropDatabaseChange(ctx, dbDesc, txn, descsCol, p, "")
if err != nil {
return nil, err
}
// Create the job to drop the schema.
dropSchemaJobRecord := jobs.Record{
Description: "dropping schemas as part of an import job rollback",
Username: p.User(),
DescriptorIDs: droppedSchemaIDs,
Details: jobspb.SchemaChangeDetails{
DroppedSchemas: droppedSchemaIDs,
DroppedDatabaseID: descpb.InvalidID,
FormatVersion: jobspb.DatabaseJobFormatVersion,
},
Progress: jobspb.SchemaChangeProgress{},
NonCancelable: true,
}
jobID := p.ExecCfg().JobRegistry.MakeJobID()
job, err := execCfg.JobRegistry.CreateJobWithTxn(ctx, dropSchemaJobRecord, jobID, txn)
if err != nil {
return nil, err
}
queuedJob = append(queuedJob, job.ID())
return queuedJob, nil
}
func (r *importResumer) releaseProtectedTimestamp(
ctx context.Context, txn *kv.Txn, pts protectedts.Storage,
) error {
details := r.job.Details().(jobspb.ImportDetails)
ptsID := details.ProtectedTimestampRecord
// If the job doesn't have a protected timestamp then there's nothing to do.
if ptsID == nil {
return nil
}
err := pts.Release(ctx, txn, *ptsID)
if errors.Is(err, protectedts.ErrNotExists) {
		// If the record no longer exists, there is no reason to return an error
		// that might cause problems.
		log.Warningf(ctx, "failed to release protected timestamp which seems not to exist: %v", err)
err = nil
}
return err
}
// ReportResults implements JobResultsReporter interface.
func (r *importResumer) ReportResults(ctx context.Context, resultsCh chan<- tree.Datums) error {
select {
case resultsCh <- tree.Datums{
tree.NewDInt(tree.DInt(r.job.ID())),
tree.NewDString(string(jobs.StatusSucceeded)),
tree.NewDFloat(tree.DFloat(1.0)),
tree.NewDInt(tree.DInt(r.res.Rows)),
tree.NewDInt(tree.DInt(r.res.IndexEntries)),
tree.NewDInt(tree.DInt(r.res.DataSize)),
}:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
func init() {
jobs.RegisterConstructor(
jobspb.TypeImport,
func(job *jobs.Job, settings *cluster.Settings) jobs.Resumer {
return &importResumer{
job: job,
settings: settings,
}
},
)
}
| pkg/sql/importer/import_job.go | 1 | https://github.com/cockroachdb/cockroach/commit/67e659223cd2a2ae4ea2e5fa6d9461fd1418c4de | [
0.06727389991283417,
0.0011224157642573118,
0.00015929419896565378,
0.00017186987679451704,
0.0055233449675142765
] |
{
"id": 3,
"code_window": [
"\tif err != nil {\n",
"\t\treturn roachpb.RowCount{}, errors.Wrap(err, \"exhausted retries\")\n",
"\t}\n",
"\treturn res, nil\n",
"}\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn res, jobs.MarkPauseRequestError(errors.Wrap(err, \"exhausted retries\"))\n"
],
"file_path": "pkg/ccl/backupccl/restore_job.go",
"type": "replace",
"edit_start_line_idx": 178
} | // Code generated by execgen; DO NOT EDIT.
// Copyright 2021 The Cockroach Authors.
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// {{/*
// This file is the execgen template for sorttopk.eg.go. It's formatted in a
// special way, so it's both valid Go and a valid text/template input. This
// permits editing this file with editor support.
// */}}
package colexec
import (
"container/heap"
"github.com/cockroachdb/cockroach/pkg/sql/colexecerror"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/errors"
)
// execgen:inline
const _ = "template_nextBatch"
// processGroupsInBatch associates a row in the top K heap with its distinct
// partially ordered column group. It returns the most recently found groupId.
// execgen:inline
const _ = "template_processGroupsInBatch"
// processBatch checks whether each tuple in a batch should be added to the topK
// heap. If partialOrder is true, processing stops when the current distinct
// ordered group is complete. If useSel is true, we use the selection vector.
// execgen:inline
const _ = "template_processBatch"
// spool reads in the entire input, always storing the top K rows it has seen so
// far in o.topK. This is done by maintaining a max heap of indices into o.topK.
// Whenever we encounter a row which is smaller than the max row in the heap,
// we replace the max with that row.
// After all the input has been read, we pop everything off the heap to
// determine the final output ordering. This is used in emit() to output the rows
// in sorted order.
// If partialOrder is true, then we chunk the input into distinct groups based
// on the partially ordered input, and stop adding to the max heap after K rows
// and the group of the Kth row have been processed. If it's false, we assume
// that the input is unordered, and process all rows.
const _ = "template_spool"
const _ = "template_compareRow"
// spool reads in the entire input, always storing the top K rows it has seen so
// far in o.topK. This is done by maintaining a max heap of indices into o.topK.
// Whenever we encounter a row which is smaller than the max row in the heap,
// we replace the max with that row.
// After all the input has been read, we pop everything off the heap to
// determine the final output ordering. This is used in emit() to output the rows
// in sorted order.
func (t *topKSorter) spool() {
if t.hasPartialOrder {
spool_true(t)
} else {
spool_false(t)
}
}
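// Worked example (illustrative only): with K = 2 and an ascending sort over the
// input stream [5, 1, 4, 2], spooling proceeds as follows:
//
//	fill phase:  topK = {5, 1}           max-heap root = 5
//	read 4:      4 sorts before 5   ->   topK = {4, 1}, root = 4
//	read 2:      2 sorts before 4   ->   topK = {2, 1}, root = 2
//
// Popping the max heap yields 2 then 1, which is written into the selection
// vector in reverse to produce the final order 1, 2.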
// topKHeaper implements part of the heap.Interface for non-ordered input.
type topKHeaper struct {
*topKSorter
}
var _ heap.Interface = &topKHeaper{}
// Less is part of heap.Interface and is only meant to be used internally.
func (t *topKHeaper) Less(i, j int) bool {
return compareRow_false(t.topKSorter, topKVecIdx, topKVecIdx, t.heap[i], t.heap[j], 0, 0) > 0
}
// topKHeaper implements part of the heap.Interface for partially ordered input.
type topKPartialOrderHeaper struct {
*topKSorter
}
var _ heap.Interface = &topKPartialOrderHeaper{}
// Less is part of heap.Interface and is only meant to be used internally.
func (t *topKPartialOrderHeaper) Less(i, j int) bool {
return compareRow_true(t.topKSorter, topKVecIdx, topKVecIdx, t.heap[i], t.heap[j], t.orderState.group[t.heap[i]], t.orderState.group[t.heap[j]]) > 0
}
// spool reads in the entire input, always storing the top K rows it has seen so
// far in o.topK. This is done by maintaining a max heap of indices into o.topK.
// Whenever we encounter a row which is smaller than the max row in the heap,
// we replace the max with that row.
// After all the input has been read, we pop everything off the heap to
// determine the final output ordering. This is used in emit() to output the rows
// in sorted order.
// If partialOrder is true, then we chunk the input into distinct groups based
// on the partially ordered input, and stop adding to the max heap after K rows
// and the group of the Kth row have been processed. If it's false, we assume
// that the input is unordered, and process all rows.
func spool_true(t *topKSorter) {
// Fill up t.topK by spooling up to K rows from the input.
// We don't need to check for distinct groups until after we have filled
// t.topK.
// TODO(harding): We could emit the first N < K rows if the N rows are in one
// or more distinct and complete groups, and then use a K-N size heap to find
// the remaining top K-N rows.
{
t.inputBatch = t.Input.Next()
t.orderState.distincterInput.SetBatch(t.inputBatch)
t.orderState.distincter.Next()
t.firstUnprocessedTupleIdx = 0
}
remainingRows := t.k
groupId := 0
for remainingRows > 0 && t.inputBatch.Length() > 0 {
fromLength := t.inputBatch.Length()
if remainingRows < uint64(t.inputBatch.Length()) {
// t.topK will be full after this batch.
fromLength = int(remainingRows)
}
// Find the group id for each tuple just added to topK.
sel := t.inputBatch.Selection()
if sel != nil {
{
var __retval_groupId int
{
var groupIdStart int = groupId
groupId = groupIdStart
for i, k := 0, t.topK.Length(); i < fromLength; i, k = i+1, k+1 {
idx := sel[i]
if t.orderState.distinctOutput[idx] {
groupId++
}
t.orderState.group[k] = groupId
}
{
__retval_groupId = groupId
}
}
groupId = __retval_groupId
}
} else {
{
var __retval_groupId int
{
var groupIdStart int = groupId
groupId = groupIdStart
for i, k := 0, t.topK.Length(); i < fromLength; i, k = i+1, k+1 {
idx := i
if t.orderState.distinctOutput[idx] {
groupId++
}
t.orderState.group[k] = groupId
}
{
__retval_groupId = groupId
}
}
groupId = __retval_groupId
}
}
t.firstUnprocessedTupleIdx = fromLength
t.topK.AppendTuples(t.inputBatch, 0 /* startIdx */, fromLength)
remainingRows -= uint64(fromLength)
if fromLength == t.inputBatch.Length() {
{
t.inputBatch = t.Input.Next()
t.orderState.distincterInput.SetBatch(t.inputBatch)
t.orderState.distincter.Next()
t.firstUnprocessedTupleIdx = 0
}
}
}
t.updateComparators(topKVecIdx, t.topK)
// Initialize the heap.
if cap(t.heap) < t.topK.Length() {
t.heap = make([]int, t.topK.Length())
} else {
t.heap = t.heap[:t.topK.Length()]
}
for i := range t.heap {
t.heap[i] = i
}
heap.Init(t.heaper)
// Read the remainder of the input. Whenever a row is less than the heap max,
// swap it in. When we find the end of the group, we can finish reading the
// input.
_ = true
groupDone := false
for t.inputBatch.Length() > 0 {
t.updateComparators(inputVecIdx, t.inputBatch)
sel := t.inputBatch.Selection()
t.allocator.PerformOperation(
t.topK.ColVecs(),
func() {
if sel != nil {
{
var __retval_groupDone bool
{
for i := t.firstUnprocessedTupleIdx; i < t.inputBatch.Length(); i++ {
idx := sel[i]
// If this is a distinct group, we have already found the top K input,
// so we can stop comparing the rest of this and subsequent batches.
if t.orderState.distinctOutput[idx] {
{
__retval_groupDone = true
}
goto processBatch_true_true_return_4
}
maxIdx := t.heap[0]
groupMaxIdx := 0
groupMaxIdx = t.orderState.group[maxIdx]
if compareRow_true(t, inputVecIdx, topKVecIdx, idx, maxIdx, groupId, groupMaxIdx) < 0 {
for j := range t.inputTypes {
t.comparators[j].set(inputVecIdx, topKVecIdx, idx, maxIdx)
}
t.orderState.group[maxIdx] = groupId
heap.Fix(t.heaper, 0)
}
}
t.firstUnprocessedTupleIdx = t.inputBatch.Length()
{
__retval_groupDone = false
}
processBatch_true_true_return_4:
}
groupDone = __retval_groupDone
}
} else {
{
var __retval_groupDone bool
{
for i := t.firstUnprocessedTupleIdx; i < t.inputBatch.Length(); i++ {
idx := i
// If this is a distinct group, we have already found the top K input,
// so we can stop comparing the rest of this and subsequent batches.
if t.orderState.distinctOutput[idx] {
{
__retval_groupDone = true
}
goto processBatch_true_false_return_5
}
maxIdx := t.heap[0]
groupMaxIdx := 0
groupMaxIdx = t.orderState.group[maxIdx]
if compareRow_true(t, inputVecIdx, topKVecIdx, idx, maxIdx, groupId, groupMaxIdx) < 0 {
for j := range t.inputTypes {
t.comparators[j].set(inputVecIdx, topKVecIdx, idx, maxIdx)
}
t.orderState.group[maxIdx] = groupId
heap.Fix(t.heaper, 0)
}
}
t.firstUnprocessedTupleIdx = t.inputBatch.Length()
{
__retval_groupDone = false
}
processBatch_true_false_return_5:
}
groupDone = __retval_groupDone
}
}
},
)
if groupDone {
break
}
{
t.inputBatch = t.Input.Next()
t.orderState.distincterInput.SetBatch(t.inputBatch)
t.orderState.distincter.Next()
t.firstUnprocessedTupleIdx = 0
}
}
// t.topK now contains the top K rows unsorted. Create a selection vector
// which specifies the rows in sorted order by popping everything off the
// heap. Note that it's a max heap so we need to fill the selection vector in
// reverse.
t.sel = make([]int, t.topK.Length())
for i := 0; i < t.topK.Length(); i++ {
t.sel[len(t.sel)-i-1] = heap.Pop(t.heaper).(int)
}
}
// spool reads in the entire input, always storing the top K rows it has seen so
// far in o.topK. This is done by maintaining a max heap of indices into o.topK.
// Whenever we encounter a row which is smaller than the max row in the heap,
// we replace the max with that row.
// After all the input has been read, we pop everything off the heap to
// determine the final output ordering. This is used in emit() to output the rows
// in sorted order.
// If partialOrder is true, then we chunk the input into distinct groups based
// on the partially ordered input, and stop adding to the max heap after K rows
// and the group of the Kth row have been processed. If it's false, we assume
// that the input is unordered, and process all rows.
func spool_false(t *topKSorter) {
// Fill up t.topK by spooling up to K rows from the input.
// We don't need to check for distinct groups until after we have filled
// t.topK.
// TODO(harding): We could emit the first N < K rows if the N rows are in one
// or more distinct and complete groups, and then use a K-N size heap to find
// the remaining top K-N rows.
{
t.inputBatch = t.Input.Next()
t.firstUnprocessedTupleIdx = 0
}
remainingRows := t.k
groupId := 0
for remainingRows > 0 && t.inputBatch.Length() > 0 {
fromLength := t.inputBatch.Length()
if remainingRows < uint64(t.inputBatch.Length()) {
// t.topK will be full after this batch.
fromLength = int(remainingRows)
}
t.firstUnprocessedTupleIdx = fromLength
t.topK.AppendTuples(t.inputBatch, 0 /* startIdx */, fromLength)
remainingRows -= uint64(fromLength)
if fromLength == t.inputBatch.Length() {
{
t.inputBatch = t.Input.Next()
t.firstUnprocessedTupleIdx = 0
}
}
}
t.updateComparators(topKVecIdx, t.topK)
// Initialize the heap.
if cap(t.heap) < t.topK.Length() {
t.heap = make([]int, t.topK.Length())
} else {
t.heap = t.heap[:t.topK.Length()]
}
for i := range t.heap {
t.heap[i] = i
}
heap.Init(t.heaper)
// Read the remainder of the input. Whenever a row is less than the heap max,
// swap it in. When we find the end of the group, we can finish reading the
// input.
_ = true
for t.inputBatch.Length() > 0 {
t.updateComparators(inputVecIdx, t.inputBatch)
sel := t.inputBatch.Selection()
t.allocator.PerformOperation(
t.topK.ColVecs(),
func() {
if sel != nil {
{
for i := t.firstUnprocessedTupleIdx; i < t.inputBatch.Length(); i++ {
idx := sel[i]
maxIdx := t.heap[0]
groupMaxIdx := 0
if compareRow_false(t, inputVecIdx, topKVecIdx, idx, maxIdx, groupId, groupMaxIdx) < 0 {
for j := range t.inputTypes {
t.comparators[j].set(inputVecIdx, topKVecIdx, idx, maxIdx)
}
heap.Fix(t.heaper, 0)
}
}
t.firstUnprocessedTupleIdx = t.inputBatch.Length()
}
} else {
{
for i := t.firstUnprocessedTupleIdx; i < t.inputBatch.Length(); i++ {
idx := i
maxIdx := t.heap[0]
groupMaxIdx := 0
if compareRow_false(t, inputVecIdx, topKVecIdx, idx, maxIdx, groupId, groupMaxIdx) < 0 {
for j := range t.inputTypes {
t.comparators[j].set(inputVecIdx, topKVecIdx, idx, maxIdx)
}
heap.Fix(t.heaper, 0)
}
}
t.firstUnprocessedTupleIdx = t.inputBatch.Length()
}
}
},
)
{
t.inputBatch = t.Input.Next()
t.firstUnprocessedTupleIdx = 0
}
}
// t.topK now contains the top K rows unsorted. Create a selection vector
// which specifies the rows in sorted order by popping everything off the
// heap. Note that it's a max heap so we need to fill the selection vector in
// reverse.
t.sel = make([]int, t.topK.Length())
for i := 0; i < t.topK.Length(); i++ {
t.sel[len(t.sel)-i-1] = heap.Pop(t.heaper).(int)
}
}
func compareRow_false(
t *topKSorter,
vecIdx1 int,
vecIdx2 int,
rowIdx1 int,
rowIdx2 int,
groupIdx1 int,
groupIdx2 int,
) int {
for i := range t.orderingCols {
info := t.orderingCols[i]
res := t.comparators[info.ColIdx].compare(vecIdx1, vecIdx2, rowIdx1, rowIdx2)
if res != 0 {
switch d := info.Direction; d {
case execinfrapb.Ordering_Column_ASC:
return res
case execinfrapb.Ordering_Column_DESC:
return -res
default:
colexecerror.InternalError(errors.AssertionFailedf("unexpected direction value %d", d))
}
}
}
return 0
}
func compareRow_true(
t *topKSorter,
vecIdx1 int,
vecIdx2 int,
rowIdx1 int,
rowIdx2 int,
groupIdx1 int,
groupIdx2 int,
) int {
for i := range t.orderingCols {
// TODO(harding): If groupIdx1 != groupIdx2, we may be able to do some
// optimization if the ordered columns are in the same direction.
if i < t.matchLen && groupIdx1 == groupIdx2 {
// If the tuples being compared are in the same group, we only need to
// compare the columns that are not already ordered.
continue
}
info := t.orderingCols[i]
res := t.comparators[info.ColIdx].compare(vecIdx1, vecIdx2, rowIdx1, rowIdx2)
if res != 0 {
switch d := info.Direction; d {
case execinfrapb.Ordering_Column_ASC:
return res
case execinfrapb.Ordering_Column_DESC:
return -res
default:
colexecerror.InternalError(errors.AssertionFailedf("unexpected direction value %d", d))
}
}
}
return 0
}
// execgen:inline
const _ = "inlined_nextBatch_true"
// processGroupsInBatch associates a row in the top K heap with its distinct
// partially ordered column group. It returns the most recently found groupId.
// execgen:inline
const _ = "inlined_processGroupsInBatch_true"
// processGroupsInBatch associates a row in the top K heap with its distinct
// partially ordered column group. It returns the most recently found groupId.
// execgen:inline
const _ = "inlined_processGroupsInBatch_false"
// processBatch checks whether each tuple in a batch should be added to the topK
// heap. If partialOrder is true, processing stops when the current distinct
// ordered group is complete. If useSel is true, we use the selection vector.
// execgen:inline
const _ = "inlined_processBatch_true_true"
// processBatch checks whether each tuple in a batch should be added to the topK
// heap. If partialOrder is true, processing stops when the current distinct
// ordered group is complete. If useSel is true, we use the selection vector.
// execgen:inline
const _ = "inlined_processBatch_true_false"
// execgen:inline
const _ = "inlined_nextBatch_false"
// processBatch checks whether each tuple in a batch should be added to the topK
// heap. If partialOrder is true, processing stops when the current distinct
// ordered group is complete. If useSel is true, we use the selection vector.
// execgen:inline
const _ = "inlined_processBatch_false_true"
// processBatch checks whether each tuple in a batch should be added to the topK
// heap. If partialOrder is true, processing stops when the current distinct
// ordered group is complete. If useSel is true, we use the selection vector.
// execgen:inline
const _ = "inlined_processBatch_false_false"
| pkg/sql/colexec/sorttopk.eg.go | 0 | https://github.com/cockroachdb/cockroach/commit/67e659223cd2a2ae4ea2e5fa6d9461fd1418c4de | [
0.015022472478449345,
0.000543743371963501,
0.00016303289157804102,
0.00017234499682672322,
0.0021248904522508383
] |
{
"id": 3,
"code_window": [
"\tif err != nil {\n",
"\t\treturn roachpb.RowCount{}, errors.Wrap(err, \"exhausted retries\")\n",
"\t}\n",
"\treturn res, nil\n",
"}\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn res, jobs.MarkPauseRequestError(errors.Wrap(err, \"exhausted retries\"))\n"
],
"file_path": "pkg/ccl/backupccl/restore_job.go",
"type": "replace",
"edit_start_line_idx": 178
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
export * from "./identity";
| pkg/ui/workspaces/cluster-ui/src/network/index.ts | 0 | https://github.com/cockroachdb/cockroach/commit/67e659223cd2a2ae4ea2e5fa6d9461fd1418c4de | [
0.00017752494022715837,
0.0001774874544935301,
0.00017744996875990182,
0.0001774874544935301,
3.748573362827301e-8
] |
{
"id": 3,
"code_window": [
"\tif err != nil {\n",
"\t\treturn roachpb.RowCount{}, errors.Wrap(err, \"exhausted retries\")\n",
"\t}\n",
"\treturn res, nil\n",
"}\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn res, jobs.MarkPauseRequestError(errors.Wrap(err, \"exhausted retries\"))\n"
],
"file_path": "pkg/ccl/backupccl/restore_job.go",
"type": "replace",
"edit_start_line_idx": 178
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package cyclegraphtest
import (
"fmt"
"reflect"
"testing"
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel"
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel/reltest"
"gopkg.in/yaml.v3"
)
// testAttr is a rel.Attr used for testing.
type testAttr int8
var _ rel.Attr = testAttr(0)
//go:generate stringer --type testAttr --tags test
const (
s testAttr = iota
s1
s2
c
name
)
// struct1 is a struct which holds a bunch of potentially circular references.
type struct1 struct {
// Name identifies the struct.
Name string
	// S1 points to some other struct1, maybe itself.
S1 *struct1
	// S2 points to some other struct2, maybe itself.
S2 *struct2
// C points to some container which may contain itself.
C *container
}
// struct2 is like struct1 but has a different type.
type struct2 struct1
// container is a oneOf of struct1 or struct2.
type container struct {
S1 *struct1
S2 *struct2
}
// message is an interface to capture struct1 and struct2.
type message interface{ message() }
func (s *struct1) message() {}
func (s *struct2) message() {}
// This schema exercises cyclic references.
var schema = rel.MustSchema(
"testschema",
rel.AttrType(
s, reflect.TypeOf((*message)(nil)).Elem(),
),
rel.EntityMapping(
reflect.TypeOf((*struct1)(nil)),
rel.EntityAttr(c, "C"),
rel.EntityAttr(s1, "S1"),
rel.EntityAttr(s2, "S2"),
rel.EntityAttr(name, "Name"),
),
rel.EntityMapping(
reflect.TypeOf((*struct2)(nil)),
rel.EntityAttr(c, "C"),
rel.EntityAttr(s1, "S1"),
rel.EntityAttr(s2, "S2"),
rel.EntityAttr(name, "Name"),
),
rel.EntityMapping(
reflect.TypeOf((*container)(nil)),
rel.EntityAttr(s, "S1", "S2"),
),
)
// String helps ensure that serialization does not infinitely recurse.
func (s *struct1) String() string { return fmt.Sprintf("struct1(%s)", s.Name) }
// String helps ensure that serialization does not infinitely recurse.
func (s *struct2) String() string { return fmt.Sprintf("struct2(%s)", s.Name) }
// String helps ensure that serialization does not infinitely recurse.
func (c *container) String() string {
var name string
if c.S1 != nil {
name = c.S1.Name
} else {
name = c.S2.Name
}
return fmt.Sprintf("container(%s)", name)
}
func (s *struct1) EncodeToYAML(t *testing.T, r *reltest.Registry) interface{} {
yn := &yaml.Node{
Kind: yaml.MappingNode,
Style: yaml.FlowStyle,
Content: []*yaml.Node{
{Kind: yaml.ScalarNode, Value: "name"},
{Kind: yaml.ScalarNode, Value: s.Name},
},
}
if s.S1 != nil {
yn.Content = append(yn.Content,
&yaml.Node{Kind: yaml.ScalarNode, Value: "s1"},
&yaml.Node{Kind: yaml.ScalarNode, Value: r.MustGetName(t, s.S1)},
)
}
if s.S2 != nil {
yn.Content = append(yn.Content,
&yaml.Node{Kind: yaml.ScalarNode, Value: "s2"},
&yaml.Node{Kind: yaml.ScalarNode, Value: r.MustGetName(t, s.S2)},
)
}
if s.C != nil {
yn.Content = append(yn.Content,
&yaml.Node{Kind: yaml.ScalarNode, Value: "c"},
&yaml.Node{Kind: yaml.ScalarNode, Value: r.MustGetName(t, s.C)},
)
}
return yn
}
func (s *struct2) EncodeToYAML(t *testing.T, r *reltest.Registry) interface{} {
return (*struct1)(s).EncodeToYAML(t, r)
}
func (c *container) EncodeToYAML(t *testing.T, r *reltest.Registry) interface{} {
yn := &yaml.Node{
Kind: yaml.MappingNode,
Style: yaml.FlowStyle,
}
if c.S1 != nil {
yn.Content = append(yn.Content,
&yaml.Node{Kind: yaml.ScalarNode, Value: "s1"},
&yaml.Node{Kind: yaml.ScalarNode, Value: r.MustGetName(t, c.S1)},
)
}
if c.S2 != nil {
yn.Content = append(yn.Content,
&yaml.Node{Kind: yaml.ScalarNode, Value: "s2"},
&yaml.Node{Kind: yaml.ScalarNode, Value: r.MustGetName(t, c.S2)},
)
}
return yn
}
| pkg/sql/schemachanger/rel/internal/cyclegraphtest/schema.go | 0 | https://github.com/cockroachdb/cockroach/commit/67e659223cd2a2ae4ea2e5fa6d9461fd1418c4de | [
0.00017752494022715837,
0.00017112854402512312,
0.00016596296336501837,
0.00017082123667933047,
0.00000342952898790827
] |
{
"id": 4,
"code_window": [
"\t\t\tjob = reloadedJob\n",
"\t\t}\n",
"\t\tlog.Warningf(ctx, `encountered retryable error: %+v`, err)\n",
"\t}\n",
"\n",
"\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\t// We have exhausted retries, but we have not seen a \"PermanentBulkJobError\" so\n",
"\t// it is possible that this is a transient error that is taking longer than\n",
"\t// our configured retry to go away.\n",
"\t//\n",
"\t// Let's pause the job instead of failing it so that the user can decide\n",
"\t// whether to resume it or cancel it.\n"
],
"file_path": "pkg/sql/importer/import_job.go",
"type": "add",
"edit_start_line_idx": 1000
} | // Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package importer
import (
"bytes"
"context"
"fmt"
"math"
"time"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/joberror"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descidgen"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/ingesting"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/rewrite"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/gcjob"
"github.com/cockroachdb/cockroach/pkg/sql/opt/memo"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
"github.com/cockroachdb/cockroach/pkg/sql/stats"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/ioctx"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/log/eventpb"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/retry"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/errors"
)
type importResumer struct {
job *jobs.Job
settings *cluster.Settings
res roachpb.RowCount
testingKnobs struct {
afterImport func(summary roachpb.RowCount) error
alwaysFlushJobProgress bool
}
}
func (r *importResumer) TestingSetAfterImportKnob(fn func(summary roachpb.RowCount) error) {
r.testingKnobs.afterImport = fn
}
var _ jobs.TraceableJob = &importResumer{}
func (r *importResumer) ForceRealSpan() bool {
return true
}
var _ jobs.Resumer = &importResumer{}
var processorsPerNode = settings.RegisterIntSetting(
settings.TenantWritable,
"bulkio.import.processors_per_node",
"number of input processors to run on each sql instance", 1,
settings.PositiveInt,
)
type preparedSchemaMetadata struct {
schemaPreparedDetails jobspb.ImportDetails
schemaRewrites jobspb.DescRewriteMap
newSchemaIDToName map[descpb.ID]string
oldSchemaIDToName map[descpb.ID]string
queuedSchemaJobs []jobspb.JobID
}
// Resume is part of the jobs.Resumer interface.
func (r *importResumer) Resume(ctx context.Context, execCtx interface{}) error {
p := execCtx.(sql.JobExecContext)
if err := r.parseBundleSchemaIfNeeded(ctx, p); err != nil {
return err
}
details := r.job.Details().(jobspb.ImportDetails)
files := details.URIs
format := details.Format
tables := make(map[string]*execinfrapb.ReadImportDataSpec_ImportTable, len(details.Tables))
if details.Tables != nil {
// Skip prepare stage on job resumption, if it has already been completed.
if !details.PrepareComplete {
var schemaMetadata *preparedSchemaMetadata
if err := sql.DescsTxn(ctx, p.ExecCfg(), func(
ctx context.Context, txn *kv.Txn, descsCol *descs.Collection,
) error {
var preparedDetails jobspb.ImportDetails
schemaMetadata = &preparedSchemaMetadata{
newSchemaIDToName: make(map[descpb.ID]string),
oldSchemaIDToName: make(map[descpb.ID]string),
}
var err error
curDetails := details
if len(details.Schemas) != 0 {
schemaMetadata, err = r.prepareSchemasForIngestion(ctx, p, curDetails, txn, descsCol)
if err != nil {
return err
}
curDetails = schemaMetadata.schemaPreparedDetails
}
if r.settings.Version.IsActive(ctx, clusterversion.PublicSchemasWithDescriptors) {
// In 22.1, the Public schema should always be present in the database.
// Make sure it is part of schemaMetadata, it is not guaranteed to
// be added in prepareSchemasForIngestion if we're not importing any
// schemas.
// The Public schema will not change in the database so both the
// oldSchemaIDToName and newSchemaIDToName entries will be the
// same for the Public schema.
_, dbDesc, err := descsCol.GetImmutableDatabaseByID(ctx, txn, details.ParentID, tree.DatabaseLookupFlags{Required: true})
if err != nil {
return err
}
schemaMetadata.oldSchemaIDToName[dbDesc.GetSchemaID(tree.PublicSchema)] = tree.PublicSchema
schemaMetadata.newSchemaIDToName[dbDesc.GetSchemaID(tree.PublicSchema)] = tree.PublicSchema
}
preparedDetails, err = r.prepareTablesForIngestion(ctx, p, curDetails, txn, descsCol,
schemaMetadata)
if err != nil {
return err
}
// Telemetry for multi-region.
for _, table := range preparedDetails.Tables {
_, dbDesc, err := descsCol.GetImmutableDatabaseByID(
ctx, txn, table.Desc.GetParentID(), tree.DatabaseLookupFlags{Required: true})
if err != nil {
return err
}
if dbDesc.IsMultiRegion() {
telemetry.Inc(sqltelemetry.ImportIntoMultiRegionDatabaseCounter)
}
}
// Update the job details now that the schemas and table descs have
// been "prepared".
return r.job.Update(ctx, txn, func(
txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater,
) error {
pl := md.Payload
*pl.GetImport() = preparedDetails
// Update the set of descriptors for later observability.
// TODO(ajwerner): Do we need this idempotence test?
prev := md.Payload.DescriptorIDs
if prev == nil {
var descriptorIDs []descpb.ID
for _, schema := range preparedDetails.Schemas {
descriptorIDs = append(descriptorIDs, schema.Desc.GetID())
}
for _, table := range preparedDetails.Tables {
descriptorIDs = append(descriptorIDs, table.Desc.GetID())
}
pl.DescriptorIDs = descriptorIDs
}
ju.UpdatePayload(pl)
return nil
})
}); err != nil {
return err
}
// Run the queued job which updates the database descriptor to contain the
// newly created schemas.
// NB: Seems like the registry eventually adopts the job anyways but this
// is in keeping with the semantics we use when creating a schema during
// sql execution. Namely, queue job in the txn which creates the schema
// desc and run once the txn has committed.
if err := p.ExecCfg().JobRegistry.Run(ctx, p.ExecCfg().InternalExecutor,
schemaMetadata.queuedSchemaJobs); err != nil {
return err
}
// Re-initialize details after prepare step.
details = r.job.Details().(jobspb.ImportDetails)
emitImportJobEvent(ctx, p, jobs.StatusRunning, r.job)
}
// Create a mapping from schemaID to schemaName.
schemaIDToName := make(map[descpb.ID]string)
for _, i := range details.Schemas {
schemaIDToName[i.Desc.GetID()] = i.Desc.GetName()
}
for _, i := range details.Tables {
var tableName string
if i.Name != "" {
tableName = i.Name
} else if i.Desc != nil {
tableName = i.Desc.Name
} else {
return errors.New("invalid table specification")
}
// If we are importing from PGDUMP, qualify the table name with the schema
// name since we support non-public schemas.
if details.Format.Format == roachpb.IOFileFormat_PgDump {
schemaName := tree.PublicSchema
if schema, ok := schemaIDToName[i.Desc.GetUnexposedParentSchemaID()]; ok {
schemaName = schema
}
tableName = fmt.Sprintf("%s.%s", schemaName, tableName)
}
tables[tableName] = &execinfrapb.ReadImportDataSpec_ImportTable{
Desc: i.Desc,
TargetCols: i.TargetCols,
}
}
}
typeDescs := make([]*descpb.TypeDescriptor, len(details.Types))
for i, t := range details.Types {
typeDescs[i] = t.Desc
}
// If details.Walltime is still 0, then it was not set during
// `prepareTablesForIngestion`. This indicates that we are in an IMPORT INTO,
// and that the walltime was not set in a previous run of IMPORT.
//
// In the case of importing into existing tables we must wait for all nodes
// to see the same version of the updated table descriptor, after which we
	// shall choose a ts to import from.
if details.Walltime == 0 {
// Now that we know all the tables are offline, pick a walltime at which we
// will write.
details.Walltime = p.ExecCfg().Clock.Now().WallTime
// Check if the tables being imported into are starting empty, in which
		// case we can cheaply clear-range instead of revert-range to clean up.
for i := range details.Tables {
if !details.Tables[i].IsNew {
tblDesc := tabledesc.NewBuilder(details.Tables[i].Desc).BuildImmutableTable()
tblSpan := tblDesc.TableSpan(p.ExecCfg().Codec)
res, err := p.ExecCfg().DB.Scan(ctx, tblSpan.Key, tblSpan.EndKey, 1 /* maxRows */)
if err != nil {
return errors.Wrap(err, "checking if existing table is empty")
}
details.Tables[i].WasEmpty = len(res) == 0
}
}
if err := r.job.SetDetails(ctx, nil /* txn */, details); err != nil {
return err
}
}
procsPerNode := int(processorsPerNode.Get(&p.ExecCfg().Settings.SV))
res, err := ingestWithRetry(ctx, p, r.job, tables, typeDescs, files, format, details.Walltime,
r.testingKnobs.alwaysFlushJobProgress, procsPerNode)
if err != nil {
return err
}
pkIDs := make(map[uint64]struct{}, len(details.Tables))
for _, t := range details.Tables {
pkIDs[roachpb.BulkOpSummaryID(uint64(t.Desc.ID), uint64(t.Desc.PrimaryIndex.ID))] = struct{}{}
}
r.res.DataSize = res.DataSize
for id, count := range res.EntryCounts {
if _, ok := pkIDs[id]; ok {
r.res.Rows += count
} else {
r.res.IndexEntries += count
}
}
if r.testingKnobs.afterImport != nil {
if err := r.testingKnobs.afterImport(r.res); err != nil {
return err
}
}
if err := p.ExecCfg().JobRegistry.CheckPausepoint("import.after_ingest"); err != nil {
return err
}
if err := r.checkVirtualConstraints(ctx, p.ExecCfg(), r.job); err != nil {
return err
}
	// If the table being imported into references UDTs, ensure that a concurrent
// schema change on any of the typeDescs has not modified the type descriptor. If
// it has, it is unsafe to import the data and we fail the import job.
if err := r.checkForUDTModification(ctx, p.ExecCfg()); err != nil {
return err
}
if err := r.publishSchemas(ctx, p.ExecCfg()); err != nil {
return err
}
if err := r.publishTables(ctx, p.ExecCfg(), res); err != nil {
return err
}
// As of 21.2 we do not write a protected timestamp record during IMPORT INTO.
// In case of a mixed version cluster with 21.1 and 21.2 nodes, it is possible
// that the job was planned on an older node and then resumed on a 21.2 node.
// Thus, we still need to clear the timestamp record that was written when the
// IMPORT INTO was planned on the older node.
//
// TODO(adityamaru): Remove in 22.1.
if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
return r.releaseProtectedTimestamp(ctx, txn, p.ExecCfg().ProtectedTimestampProvider)
}); err != nil {
log.Errorf(ctx, "failed to release protected timestamp: %v", err)
}
emitImportJobEvent(ctx, p, jobs.StatusSucceeded, r.job)
addToFileFormatTelemetry(details.Format.Format.String(), "succeeded")
telemetry.CountBucketed("import.rows", r.res.Rows)
const mb = 1 << 20
sizeMb := r.res.DataSize / mb
telemetry.CountBucketed("import.size-mb", sizeMb)
sec := int64(timeutil.Since(timeutil.FromUnixMicros(r.job.Payload().StartedMicros)).Seconds())
var mbps int64
if sec > 0 {
mbps = mb / sec
}
telemetry.CountBucketed("import.duration-sec.succeeded", sec)
telemetry.CountBucketed("import.speed-mbps", mbps)
// Tiny imports may skew throughput numbers due to overhead.
if sizeMb > 10 {
telemetry.CountBucketed("import.speed-mbps.over10mb", mbps)
}
return nil
}
// prepareTablesForIngestion prepares table descriptors for the ingestion
// step of import. The descriptors are in an IMPORTING state (offline) on
// successful completion of this method.
func (r *importResumer) prepareTablesForIngestion(
ctx context.Context,
p sql.JobExecContext,
details jobspb.ImportDetails,
txn *kv.Txn,
descsCol *descs.Collection,
schemaMetadata *preparedSchemaMetadata,
) (jobspb.ImportDetails, error) {
importDetails := details
importDetails.Tables = make([]jobspb.ImportDetails_Table, len(details.Tables))
newSchemaAndTableNameToIdx := make(map[string]int, len(importDetails.Tables))
var hasExistingTables bool
var err error
var newTableDescs []jobspb.ImportDetails_Table
var desc *descpb.TableDescriptor
for i, table := range details.Tables {
if !table.IsNew {
desc, err = prepareExistingTablesForIngestion(ctx, txn, descsCol, table.Desc)
if err != nil {
return importDetails, err
}
importDetails.Tables[i] = jobspb.ImportDetails_Table{
Desc: desc, Name: table.Name,
SeqVal: table.SeqVal,
IsNew: table.IsNew,
TargetCols: table.TargetCols,
}
hasExistingTables = true
} else {
// PGDUMP imports support non-public schemas.
// For the purpose of disambiguation we must take the schema into
			// account when constructing the newSchemaAndTableNameToIdx map.
			// At this point the table descriptor's parent schema ID has not been
// remapped to the newly generated schema ID.
key, err := constructSchemaAndTableKey(ctx, table.Desc, schemaMetadata.oldSchemaIDToName, p.ExecCfg().Settings.Version)
if err != nil {
return importDetails, err
}
newSchemaAndTableNameToIdx[key.String()] = i
// Make a deep copy of the table descriptor so that rewrites do not
// partially clobber the descriptor stored in details.
newTableDescs = append(newTableDescs,
*protoutil.Clone(&table).(*jobspb.ImportDetails_Table))
}
}
// Prepare the table descriptors for newly created tables being imported
// into.
//
// TODO(adityamaru): This is still unnecessarily complicated. If we can get
// the new table desc preparation to work on a per desc basis, rather than
// requiring all the newly created descriptors, then this can look like the
// call to prepareExistingTablesForIngestion. Currently, FK references
// misbehave when I tried to write the desc one at a time.
if len(newTableDescs) != 0 {
res, err := prepareNewTablesForIngestion(
ctx, txn, descsCol, p, newTableDescs, importDetails.ParentID, schemaMetadata.schemaRewrites)
if err != nil {
return importDetails, err
}
for _, desc := range res {
key, err := constructSchemaAndTableKey(ctx, desc, schemaMetadata.newSchemaIDToName, p.ExecCfg().Settings.Version)
if err != nil {
return importDetails, err
}
i := newSchemaAndTableNameToIdx[key.String()]
table := details.Tables[i]
importDetails.Tables[i] = jobspb.ImportDetails_Table{
Desc: desc,
Name: table.Name,
SeqVal: table.SeqVal,
IsNew: table.IsNew,
TargetCols: table.TargetCols,
}
}
}
importDetails.PrepareComplete = true
// If we do not have pending schema changes on existing descriptors we can
// choose our Walltime (to IMPORT from) immediately. Otherwise, we have to
// wait for all nodes to see the same descriptor version before doing so.
if !hasExistingTables {
importDetails.Walltime = p.ExecCfg().Clock.Now().WallTime
} else {
importDetails.Walltime = 0
}
return importDetails, nil
}
// prepareExistingTablesForIngestion prepares descriptors for existing tables
// being imported into.
func prepareExistingTablesForIngestion(
ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, desc *descpb.TableDescriptor,
) (*descpb.TableDescriptor, error) {
if len(desc.Mutations) > 0 {
return nil, errors.Errorf("cannot IMPORT INTO a table with schema changes in progress -- try again later (pending mutation %s)", desc.Mutations[0].String())
}
// Note that desc is just used to verify that the version matches.
importing, err := descsCol.GetMutableTableVersionByID(ctx, desc.ID, txn)
if err != nil {
return nil, err
}
// Ensure that the version of the table has not been modified since this
// job was created.
if got, exp := importing.Version, desc.Version; got != exp {
return nil, errors.Errorf("another operation is currently operating on the table")
}
// Take the table offline for import.
// TODO(dt): audit everywhere we get table descs (leases or otherwise) to
// ensure that filtering by state handles IMPORTING correctly.
importing.SetOffline("importing")
// TODO(dt): de-validate all the FKs.
if err := descsCol.WriteDesc(
ctx, false /* kvTrace */, importing, txn,
); err != nil {
return nil, err
}
return importing.TableDesc(), nil
}
// prepareNewTablesForIngestion prepares descriptors for newly created
// tables being imported into.
func prepareNewTablesForIngestion(
ctx context.Context,
txn *kv.Txn,
descsCol *descs.Collection,
p sql.JobExecContext,
importTables []jobspb.ImportDetails_Table,
parentID descpb.ID,
schemaRewrites jobspb.DescRewriteMap,
) ([]*descpb.TableDescriptor, error) {
newMutableTableDescriptors := make([]*tabledesc.Mutable, len(importTables))
for i := range importTables {
newMutableTableDescriptors[i] = tabledesc.NewBuilder(importTables[i].Desc).BuildCreatedMutableTable()
}
	// Verification steps have passed, so generate a new table ID for each new
	// table being imported into. We do this last because we want to avoid calling
	// GenerateUniqueDescID if there's any kind of error above.
	// Reserving a table ID now means we can avoid the rekey work during ingestion.
//
// schemaRewrites may contain information which is used in rewrite.TableDescs
// to rewrite the parent schema ID in the table desc to point to the correct
// schema ID.
tableRewrites := schemaRewrites
if tableRewrites == nil {
tableRewrites = make(jobspb.DescRewriteMap)
}
seqVals := make(map[descpb.ID]int64, len(importTables))
for _, tableDesc := range importTables {
id, err := descidgen.GenerateUniqueDescID(ctx, p.ExecCfg().DB, p.ExecCfg().Codec)
if err != nil {
return nil, err
}
oldParentSchemaID := tableDesc.Desc.GetUnexposedParentSchemaID()
parentSchemaID := oldParentSchemaID
if rw, ok := schemaRewrites[oldParentSchemaID]; ok {
parentSchemaID = rw.ID
}
tableRewrites[tableDesc.Desc.ID] = &jobspb.DescriptorRewrite{
ID: id,
ParentSchemaID: parentSchemaID,
ParentID: parentID,
}
seqVals[id] = tableDesc.SeqVal
}
if err := rewrite.TableDescs(
newMutableTableDescriptors, tableRewrites, "",
); err != nil {
return nil, err
}
// After all of the ID's have been remapped, ensure that there aren't any name
// collisions with any importing tables.
for i := range newMutableTableDescriptors {
tbl := newMutableTableDescriptors[i]
err := descsCol.Direct().CheckObjectCollision(
ctx,
txn,
tbl.GetParentID(),
tbl.GetParentSchemaID(),
tree.NewUnqualifiedTableName(tree.Name(tbl.GetName())),
)
if err != nil {
return nil, err
}
}
// tableDescs contains the same slice as newMutableTableDescriptors but
// as tabledesc.TableDescriptor.
tableDescs := make([]catalog.TableDescriptor, len(newMutableTableDescriptors))
for i := range tableDescs {
newMutableTableDescriptors[i].SetOffline("importing")
tableDescs[i] = newMutableTableDescriptors[i]
}
var seqValKVs []roachpb.KeyValue
for _, desc := range newMutableTableDescriptors {
if v, ok := seqVals[desc.GetID()]; ok && v != 0 {
key, val, err := sql.MakeSequenceKeyVal(p.ExecCfg().Codec, desc, v, false)
if err != nil {
return nil, err
}
kv := roachpb.KeyValue{Key: key}
kv.Value.SetInt(val)
seqValKVs = append(seqValKVs, kv)
}
}
// Write the new TableDescriptors and flip the namespace entries over to
// them. After this call, any queries on a table will be served by the newly
// imported data.
if err := ingesting.WriteDescriptors(ctx, p.ExecCfg().Codec, txn, p.User(), descsCol,
nil /* databases */, nil, /* schemas */
tableDescs, nil, tree.RequestedDescriptors, seqValKVs, "" /* inheritParentName */); err != nil {
return nil, errors.Wrapf(err, "creating importTables")
}
newPreparedTableDescs := make([]*descpb.TableDescriptor, len(newMutableTableDescriptors))
for i := range newMutableTableDescriptors {
newPreparedTableDescs[i] = newMutableTableDescriptors[i].TableDesc()
}
return newPreparedTableDescs, nil
}
// prepareSchemasForIngestion is responsible for assigning the created schema
// descriptors actual IDs, updating the parent DB with references to the new
// schemas and writing the schema descriptors to disk.
func (r *importResumer) prepareSchemasForIngestion(
ctx context.Context,
p sql.JobExecContext,
details jobspb.ImportDetails,
txn *kv.Txn,
descsCol *descs.Collection,
) (*preparedSchemaMetadata, error) {
schemaMetadata := &preparedSchemaMetadata{
schemaPreparedDetails: details,
newSchemaIDToName: make(map[descpb.ID]string),
oldSchemaIDToName: make(map[descpb.ID]string),
}
schemaMetadata.schemaPreparedDetails.Schemas = make([]jobspb.ImportDetails_Schema,
len(details.Schemas))
desc, err := descsCol.GetMutableDescriptorByID(ctx, txn, details.ParentID)
if err != nil {
return nil, err
}
dbDesc, ok := desc.(*dbdesc.Mutable)
if !ok {
return nil, errors.Newf("expected ID %d to refer to the database being imported into",
details.ParentID)
}
schemaMetadata.schemaRewrites = make(jobspb.DescRewriteMap)
mutableSchemaDescs := make([]*schemadesc.Mutable, 0)
for _, desc := range details.Schemas {
schemaMetadata.oldSchemaIDToName[desc.Desc.GetID()] = desc.Desc.GetName()
newMutableSchemaDescriptor := schemadesc.NewBuilder(desc.Desc).BuildCreatedMutable().(*schemadesc.Mutable)
// Verification steps have passed, generate a new schema ID. We do this
// last because we want to avoid calling GenerateUniqueDescID if there's
// any kind of error in the prior stages of import.
id, err := descidgen.GenerateUniqueDescID(ctx, p.ExecCfg().DB, p.ExecCfg().Codec)
if err != nil {
return nil, err
}
newMutableSchemaDescriptor.Version = 1
newMutableSchemaDescriptor.ID = id
mutableSchemaDescs = append(mutableSchemaDescs, newMutableSchemaDescriptor)
schemaMetadata.newSchemaIDToName[id] = newMutableSchemaDescriptor.GetName()
// Update the parent database with this schema information.
dbDesc.AddSchemaToDatabase(newMutableSchemaDescriptor.Name,
descpb.DatabaseDescriptor_SchemaInfo{ID: newMutableSchemaDescriptor.ID})
schemaMetadata.schemaRewrites[desc.Desc.ID] = &jobspb.DescriptorRewrite{
ID: id,
}
}
// Queue a job to write the updated database descriptor.
schemaMetadata.queuedSchemaJobs, err = writeNonDropDatabaseChange(ctx, dbDesc, txn, descsCol, p,
fmt.Sprintf("updating parent database %s when importing new schemas", dbDesc.GetName()))
if err != nil {
return nil, err
}
// Finally create the schemas on disk.
for i, mutDesc := range mutableSchemaDescs {
nameKey := catalogkeys.MakeSchemaNameKey(p.ExecCfg().Codec, dbDesc.ID, mutDesc.GetName())
err = createSchemaDescriptorWithID(ctx, nameKey, mutDesc.ID, mutDesc, p, descsCol, txn)
if err != nil {
return nil, err
}
schemaMetadata.schemaPreparedDetails.Schemas[i] = jobspb.ImportDetails_Schema{
Desc: mutDesc.SchemaDesc(),
}
}
return schemaMetadata, err
}
// createSchemaDescriptorWithID writes a schema descriptor with `id` to disk.
func createSchemaDescriptorWithID(
ctx context.Context,
idKey roachpb.Key,
id descpb.ID,
descriptor catalog.Descriptor,
p sql.JobExecContext,
descsCol *descs.Collection,
txn *kv.Txn,
) error {
if descriptor.GetID() == descpb.InvalidID {
return errors.AssertionFailedf("cannot create descriptor with an empty ID: %v", descriptor)
}
if descriptor.GetID() != id {
return errors.AssertionFailedf("cannot create descriptor with an ID %v; expected ID %v; descriptor %v",
id, descriptor.GetID(), descriptor)
}
b := &kv.Batch{}
descID := descriptor.GetID()
if p.ExtendedEvalContext().Tracing.KVTracingEnabled() {
log.VEventf(ctx, 2, "CPut %s -> %d", idKey, descID)
}
b.CPut(idKey, descID, nil)
if err := descsCol.Direct().WriteNewDescToBatch(
ctx,
p.ExtendedEvalContext().Tracing.KVTracingEnabled(),
b,
descriptor,
); err != nil {
return err
}
mutDesc, ok := descriptor.(catalog.MutableDescriptor)
if !ok {
return errors.Newf("unexpected type %T when creating descriptor", descriptor)
}
switch mutDesc.(type) {
case *schemadesc.Mutable:
if err := descsCol.AddUncommittedDescriptor(mutDesc); err != nil {
return err
}
default:
return errors.Newf("unexpected type %T when creating descriptor", mutDesc)
}
return txn.Run(ctx, b)
}
// parseBundleSchemaIfNeeded parses dump files (PGDUMP, MYSQLDUMP) for DDL
// statements and creates the relevant database, schema, table and type
// descriptors. Data from the dump files is ingested into these descriptors in
// the next phase of the import.
func (r *importResumer) parseBundleSchemaIfNeeded(ctx context.Context, phs interface{}) error {
p := phs.(sql.JobExecContext)
seqVals := make(map[descpb.ID]int64)
details := r.job.Details().(jobspb.ImportDetails)
skipFKs := details.SkipFKs
parentID := details.ParentID
files := details.URIs
format := details.Format
owner := r.job.Payload().UsernameProto.Decode()
p.SessionDataMutatorIterator().SetSessionDefaultIntSize(details.DefaultIntSize)
if details.ParseBundleSchema {
var span *tracing.Span
ctx, span = tracing.ChildSpan(ctx, "import-parsing-bundle-schema")
defer span.Finish()
if err := r.job.RunningStatus(ctx, nil /* txn */, func(_ context.Context, _ jobspb.Details) (jobs.RunningStatus, error) {
return runningStatusImportBundleParseSchema, nil
}); err != nil {
return errors.Wrapf(err, "failed to update running status of job %d", errors.Safe(r.job.ID()))
}
var dbDesc catalog.DatabaseDescriptor
{
if err := sql.DescsTxn(ctx, p.ExecCfg(), func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection,
) (err error) {
_, dbDesc, err = descriptors.GetImmutableDatabaseByID(ctx, txn, parentID, tree.DatabaseLookupFlags{
Required: true,
AvoidLeased: true,
})
if err != nil {
return err
}
return err
}); err != nil {
return err
}
}
var schemaDescs []*schemadesc.Mutable
var tableDescs []*tabledesc.Mutable
var err error
walltime := p.ExecCfg().Clock.Now().WallTime
if tableDescs, schemaDescs, err = parseAndCreateBundleTableDescs(
ctx, p, details, seqVals, skipFKs, dbDesc, files, format, walltime, owner,
r.job.ID()); err != nil {
return err
}
schemaDetails := make([]jobspb.ImportDetails_Schema, len(schemaDescs))
for i, schemaDesc := range schemaDescs {
schemaDetails[i] = jobspb.ImportDetails_Schema{Desc: schemaDesc.SchemaDesc()}
}
details.Schemas = schemaDetails
tableDetails := make([]jobspb.ImportDetails_Table, len(tableDescs))
for i, tableDesc := range tableDescs {
tableDetails[i] = jobspb.ImportDetails_Table{
Name: tableDesc.GetName(),
Desc: tableDesc.TableDesc(),
SeqVal: seqVals[tableDescs[i].ID],
IsNew: true,
}
}
details.Tables = tableDetails
for _, tbl := range tableDescs {
// For reasons relating to #37691, we disallow user defined types in
// the standard IMPORT case.
for _, col := range tbl.Columns {
if col.Type.UserDefined() {
return errors.Newf("IMPORT cannot be used with user defined types; use IMPORT INTO instead")
}
}
}
// Prevent job from redoing schema parsing and table desc creation
// on subsequent resumptions.
details.ParseBundleSchema = false
if err := r.job.SetDetails(ctx, nil /* txn */, details); err != nil {
return err
}
}
return nil
}
func getPublicSchemaDescForDatabase(
ctx context.Context, execCfg *sql.ExecutorConfig, db catalog.DatabaseDescriptor,
) (scDesc catalog.SchemaDescriptor, err error) {
if !execCfg.Settings.Version.IsActive(ctx, clusterversion.PublicSchemasWithDescriptors) {
return schemadesc.GetPublicSchema(), err
}
if err := sql.DescsTxn(ctx, execCfg, func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection,
) error {
publicSchemaID := db.GetSchemaID(tree.PublicSchema)
scDesc, err = descriptors.GetImmutableSchemaByID(ctx, txn, publicSchemaID, tree.SchemaLookupFlags{Required: true})
return err
}); err != nil {
return nil, err
}
return scDesc, nil
}
// parseAndCreateBundleTableDescs parses and creates the table
// descriptors for bundle formats.
func parseAndCreateBundleTableDescs(
ctx context.Context,
p sql.JobExecContext,
details jobspb.ImportDetails,
seqVals map[descpb.ID]int64,
skipFKs bool,
parentDB catalog.DatabaseDescriptor,
files []string,
format roachpb.IOFileFormat,
walltime int64,
owner security.SQLUsername,
jobID jobspb.JobID,
) ([]*tabledesc.Mutable, []*schemadesc.Mutable, error) {
var schemaDescs []*schemadesc.Mutable
var tableDescs []*tabledesc.Mutable
var tableName string
// A single table entry in the import job details when importing a bundle format
// indicates that we are performing a single table import.
// This info is populated during the planning phase.
if len(details.Tables) > 0 {
tableName = details.Tables[0].Name
}
store, err := p.ExecCfg().DistSQLSrv.ExternalStorageFromURI(ctx, files[0], p.User())
if err != nil {
return tableDescs, schemaDescs, err
}
defer store.Close()
raw, err := store.ReadFile(ctx, "")
if err != nil {
return tableDescs, schemaDescs, err
}
defer raw.Close(ctx)
reader, err := decompressingReader(ioctx.ReaderCtxAdapter(ctx, raw), files[0], format.Compression)
if err != nil {
return tableDescs, schemaDescs, err
}
defer reader.Close()
fks := fkHandler{skip: skipFKs, allowed: true, resolver: fkResolver{
tableNameToDesc: make(map[string]*tabledesc.Mutable),
}}
switch format.Format {
case roachpb.IOFileFormat_Mysqldump:
id, err := descidgen.PeekNextUniqueDescID(ctx, p.ExecCfg().DB, p.ExecCfg().Codec)
if err != nil {
return tableDescs, schemaDescs, err
}
fks.resolver.format.Format = roachpb.IOFileFormat_Mysqldump
evalCtx := &p.ExtendedEvalContext().EvalContext
tableDescs, err = readMysqlCreateTable(
ctx, reader, evalCtx, p, id, parentDB, tableName, fks,
seqVals, owner, walltime,
)
if err != nil {
return tableDescs, schemaDescs, err
}
case roachpb.IOFileFormat_PgDump:
fks.resolver.format.Format = roachpb.IOFileFormat_PgDump
evalCtx := &p.ExtendedEvalContext().EvalContext
		// Set up a logger to handle unsupported DDL statements in the PGDUMP file.
unsupportedStmtLogger := makeUnsupportedStmtLogger(ctx, p.User(), int64(jobID),
format.PgDump.IgnoreUnsupported, format.PgDump.IgnoreUnsupportedLog, schemaParsing,
p.ExecCfg().DistSQLSrv.ExternalStorage)
tableDescs, schemaDescs, err = readPostgresCreateTable(ctx, reader, evalCtx, p, tableName,
parentDB, walltime, fks, int(format.PgDump.MaxRowSize), owner, unsupportedStmtLogger)
logErr := unsupportedStmtLogger.flush()
if logErr != nil {
return nil, nil, logErr
}
default:
return tableDescs, schemaDescs, errors.Errorf(
"non-bundle format %q does not support reading schemas", format.Format.String())
}
if err != nil {
return tableDescs, schemaDescs, err
}
if tableDescs == nil && len(details.Tables) > 0 {
return tableDescs, schemaDescs, errors.Errorf("table definition not found for %q", tableName)
}
return tableDescs, schemaDescs, err
}
// publishTables updates the status of imported tables from OFFLINE to PUBLIC.
func (r *importResumer) publishTables(
ctx context.Context, execCfg *sql.ExecutorConfig, res roachpb.BulkOpSummary,
) error {
details := r.job.Details().(jobspb.ImportDetails)
// Tables should only be published once.
if details.TablesPublished {
return nil
}
// Write stub statistics for new tables created during the import. This should
// be sufficient until the CREATE STATISTICS run finishes.
r.writeStubStatisticsForImportedTables(ctx, execCfg, res)
log.Event(ctx, "making tables live")
err := sql.DescsTxn(ctx, execCfg, func(
ctx context.Context, txn *kv.Txn, descsCol *descs.Collection,
) error {
b := txn.NewBatch()
for _, tbl := range details.Tables {
newTableDesc, err := descsCol.GetMutableTableVersionByID(ctx, tbl.Desc.ID, txn)
if err != nil {
return err
}
newTableDesc.SetPublic()
if !tbl.IsNew {
// NB: This is not using AllNonDropIndexes or directly mutating the
// constraints returned by the other usual helpers because we need to
// replace the `OutboundFKs` and `Checks` slices of newTableDesc with copies
// that we can mutate. We need to do that because newTableDesc is a shallow
// copy of tbl.Desc that we'll be asserting is the current version when we
// CPut below.
//
// Set FK constraints to unvalidated before publishing the table imported
// into.
newTableDesc.OutboundFKs = make([]descpb.ForeignKeyConstraint, len(newTableDesc.OutboundFKs))
copy(newTableDesc.OutboundFKs, tbl.Desc.OutboundFKs)
for i := range newTableDesc.OutboundFKs {
newTableDesc.OutboundFKs[i].Validity = descpb.ConstraintValidity_Unvalidated
}
// Set CHECK constraints to unvalidated before publishing the table imported into.
for _, c := range newTableDesc.AllActiveAndInactiveChecks() {
c.Validity = descpb.ConstraintValidity_Unvalidated
}
}
// TODO(dt): re-validate any FKs?
if err := descsCol.WriteDescToBatch(
ctx, false /* kvTrace */, newTableDesc, b,
); err != nil {
return errors.Wrapf(err, "publishing table %d", newTableDesc.ID)
}
}
if err := txn.Run(ctx, b); err != nil {
return errors.Wrap(err, "publishing tables")
}
// Update job record to mark tables published state as complete.
details.TablesPublished = true
err := r.job.SetDetails(ctx, txn, details)
if err != nil {
return errors.Wrap(err, "updating job details after publishing tables")
}
return nil
})
if err != nil {
return err
}
// Initiate a run of CREATE STATISTICS. We don't know the actual number of
// rows affected per table, so we use a large number because we want to make
// sure that stats always get created/refreshed here.
for i := range details.Tables {
desc := tabledesc.NewBuilder(details.Tables[i].Desc).BuildImmutableTable()
execCfg.StatsRefresher.NotifyMutation(desc, math.MaxInt32 /* rowsAffected */)
}
return nil
}
// writeStubStatisticsForImportedTables writes "stub" statistics for new tables
// created during an import.
func (r *importResumer) writeStubStatisticsForImportedTables(
ctx context.Context, execCfg *sql.ExecutorConfig, res roachpb.BulkOpSummary,
) {
details := r.job.Details().(jobspb.ImportDetails)
for _, tbl := range details.Tables {
if tbl.IsNew {
desc := tabledesc.NewBuilder(tbl.Desc).BuildImmutableTable()
id := roachpb.BulkOpSummaryID(uint64(desc.GetID()), uint64(desc.GetPrimaryIndexID()))
rowCount := uint64(res.EntryCounts[id])
// TODO(michae2): collect distinct and null counts during import.
distinctCount := uint64(float64(rowCount) * memo.UnknownDistinctCountRatio)
nullCount := uint64(float64(rowCount) * memo.UnknownNullCountRatio)
avgRowSize := uint64(memo.UnknownAvgRowSize)
// Because we don't yet have real distinct and null counts, only produce
// single-column stats to avoid the appearance of perfectly correlated
// columns.
multiColEnabled := false
statistics, err := sql.StubTableStats(desc, jobspb.ImportStatsName, multiColEnabled)
if err == nil {
for _, statistic := range statistics {
statistic.RowCount = rowCount
statistic.DistinctCount = distinctCount
statistic.NullCount = nullCount
statistic.AvgSize = avgRowSize
}
// TODO(michae2): parallelize insertion of statistics.
err = stats.InsertNewStats(ctx, execCfg.Settings, execCfg.InternalExecutor, nil /* txn */, statistics)
}
if err != nil {
// Failure to create statistics should not fail the entire import.
log.Warningf(
ctx, "error while creating statistics during import of %q: %v",
desc.GetName(), err,
)
}
}
}
}
// publishSchemas updates the status of imported schemas from OFFLINE to PUBLIC.
func (r *importResumer) publishSchemas(ctx context.Context, execCfg *sql.ExecutorConfig) error {
details := r.job.Details().(jobspb.ImportDetails)
// Schemas should only be published once.
if details.SchemasPublished {
return nil
}
log.Event(ctx, "making schemas live")
return sql.DescsTxn(ctx, execCfg, func(
ctx context.Context, txn *kv.Txn, descsCol *descs.Collection,
) error {
b := txn.NewBatch()
for _, schema := range details.Schemas {
newDesc, err := descsCol.GetMutableDescriptorByID(ctx, txn, schema.Desc.GetID())
if err != nil {
return err
}
newSchemaDesc, ok := newDesc.(*schemadesc.Mutable)
if !ok {
return errors.Newf("expected schema descriptor with ID %v, got %v",
schema.Desc.GetID(), newDesc)
}
newSchemaDesc.SetPublic()
if err := descsCol.WriteDescToBatch(
ctx, false /* kvTrace */, newSchemaDesc, b,
); err != nil {
return errors.Wrapf(err, "publishing schema %d", newSchemaDesc.ID)
}
}
if err := txn.Run(ctx, b); err != nil {
return errors.Wrap(err, "publishing schemas")
}
// Update job record to mark tables published state as complete.
details.SchemasPublished = true
err := r.job.SetDetails(ctx, txn, details)
if err != nil {
return errors.Wrap(err, "updating job details after publishing schemas")
}
return nil
})
}
// checkVirtualConstraints checks constraints that are enforced via runtime
// checks, such as uniqueness checks that are not directly backed by an index.
func (*importResumer) checkVirtualConstraints(
ctx context.Context, execCfg *sql.ExecutorConfig, job *jobs.Job,
) error {
for _, tbl := range job.Details().(jobspb.ImportDetails).Tables {
desc := tabledesc.NewBuilder(tbl.Desc).BuildExistingMutableTable()
desc.SetPublic()
if sql.HasVirtualUniqueConstraints(desc) {
if err := job.RunningStatus(ctx, nil /* txn */, func(_ context.Context, _ jobspb.Details) (jobs.RunningStatus, error) {
return jobs.RunningStatus(fmt.Sprintf("re-validating %s", desc.GetName())), nil
}); err != nil {
return errors.Wrapf(err, "failed to update running status of job %d", errors.Safe(job.ID()))
}
}
if err := execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
ie := execCfg.InternalExecutorFactory(ctx, sql.NewFakeSessionData(execCfg.SV()))
return ie.WithSyntheticDescriptors([]catalog.Descriptor{desc}, func() error {
return sql.RevalidateUniqueConstraintsInTable(ctx, txn, ie, desc)
})
}); err != nil {
return err
}
}
return nil
}
// checkForUDTModification checks whether any of the types referenced by the
// table being imported into have been modified incompatibly since they were
// read during import planning. If they have, it may be unsafe to continue
// with the import since we could be ingesting data that is no longer valid
// for the type.
//
// E.g., renaming an enum value mid-import could result in the import ingesting a
// value that is no longer valid.
//
// TODO(SQL Schema): This method might be unnecessarily aggressive in failing
// the import. The semantics of what concurrent type changes are/are not safe
// during an IMPORT still need to be ironed out. Once they are, we can make this
// method more conservative in what it uses to deem a type change dangerous. At
// the time of writing, changes to privileges and back-references are supported.
// Additions of new values could be supported but are not. Renaming of logical
// enum values or removal of enum values will need to forever remain
// incompatible.
func (r *importResumer) checkForUDTModification(
ctx context.Context, execCfg *sql.ExecutorConfig,
) error {
details := r.job.Details().(jobspb.ImportDetails)
if details.Types == nil {
return nil
}
// typeDescsAreEquivalent returns true if a and b are the same types save
// for the version, modification time, privileges, or the set of referencing
// descriptors.
typeDescsAreEquivalent := func(a, b *descpb.TypeDescriptor) (bool, error) {
clearIgnoredFields := func(d *descpb.TypeDescriptor) *descpb.TypeDescriptor {
d = protoutil.Clone(d).(*descpb.TypeDescriptor)
d.ModificationTime = hlc.Timestamp{}
d.Privileges = nil
d.Version = 0
d.ReferencingDescriptorIDs = nil
return d
}
aData, err := protoutil.Marshal(clearIgnoredFields(a))
if err != nil {
return false, err
}
bData, err := protoutil.Marshal(clearIgnoredFields(b))
if err != nil {
return false, err
}
return bytes.Equal(aData, bData), nil
}
// checkTypeIsEquivalent checks that the current version of the type as
// retrieved from the collection is equivalent to the previously saved
// type descriptor used by the import.
checkTypeIsEquivalent := func(
ctx context.Context, txn *kv.Txn, col *descs.Collection,
savedTypeDesc *descpb.TypeDescriptor,
) error {
typeDesc, err := col.Direct().MustGetTypeDescByID(ctx, txn, savedTypeDesc.GetID())
if err != nil {
return errors.Wrap(err, "resolving type descriptor when checking version mismatch")
}
if typeDesc.GetModificationTime() == savedTypeDesc.GetModificationTime() {
return nil
}
equivalent, err := typeDescsAreEquivalent(typeDesc.TypeDesc(), savedTypeDesc)
if err != nil {
return errors.NewAssertionErrorWithWrappedErrf(
err, "failed to check for type descriptor equivalence for type %q (%d)",
typeDesc.GetName(), typeDesc.GetID())
}
if equivalent {
return nil
}
return errors.WithHint(
errors.Newf(
"type descriptor %q (%d) has been modified, potentially incompatibly,"+
" since import planning; aborting to avoid possible corruption",
typeDesc.GetName(), typeDesc.GetID(),
),
"retrying the IMPORT operation may succeed if the operation concurrently"+
" modifying the descriptor does not reoccur during the retry attempt",
)
}
checkTypesAreEquivalent := func(
ctx context.Context, txn *kv.Txn, col *descs.Collection,
) error {
for _, savedTypeDesc := range details.Types {
if err := checkTypeIsEquivalent(
ctx, txn, col, savedTypeDesc.Desc,
); err != nil {
return err
}
}
return nil
}
return sql.DescsTxn(ctx, execCfg, checkTypesAreEquivalent)
}
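// The equivalence check above boils down to: clone both type descriptors,
// zero out the fields that are allowed to drift (version, modification time,
// privileges, referencing descriptor IDs), marshal the clones, and compare
// the resulting bytes. A minimal sketch of that shape, assuming a
// hypothetical clearVolatile helper that does the zeroing on a clone:
//
//	aData, _ := protoutil.Marshal(clearVolatile(a))
//	bData, _ := protoutil.Marshal(clearVolatile(b))
//	equivalent := bytes.Equal(aData, bData)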
func ingestWithRetry(
ctx context.Context,
execCtx sql.JobExecContext,
job *jobs.Job,
tables map[string]*execinfrapb.ReadImportDataSpec_ImportTable,
typeDescs []*descpb.TypeDescriptor,
from []string,
format roachpb.IOFileFormat,
walltime int64,
alwaysFlushProgress bool,
procsPerNode int,
) (roachpb.BulkOpSummary, error) {
resumerSpan := tracing.SpanFromContext(ctx)
// We retry on pretty generic failures -- any rpc error. If a worker node were
// to restart, it would produce this kind of error, but there may be other
	// errors that are also rpc errors. Don't retry too aggressively.
retryOpts := retry.Options{
MaxBackoff: 1 * time.Second,
MaxRetries: 5,
}
// We want to retry an import if there are transient failures (i.e. worker
// nodes dying), so if we receive a retryable error, re-plan and retry the
// import.
var res roachpb.BulkOpSummary
var err error
var retryCount int32
for r := retry.StartWithCtx(ctx, retryOpts); r.Next(); {
for {
retryCount++
resumerSpan.RecordStructured(&roachpb.RetryTracingEvent{
Operation: "importResumer.ingestWithRetry",
AttemptNumber: retryCount,
RetryError: tracing.RedactAndTruncateError(err),
})
res, err = distImport(ctx, execCtx, job, tables, typeDescs, from, format, walltime,
alwaysFlushProgress, procsPerNode)
// Replanning errors should not count towards retry limits.
if err == nil || !errors.Is(err, sql.ErrPlanChanged) {
break
}
}
if err == nil {
break
}
if errors.HasType(err, &roachpb.InsufficientSpaceError{}) {
return res, jobs.MarkPauseRequestError(errors.UnwrapAll(err))
}
if joberror.IsPermanentBulkJobError(err) {
return res, err
}
// Re-load the job in order to update our progress object, which may have
// been updated by the changeFrontier processor since the flow started.
reloadedJob, reloadErr := execCtx.ExecCfg().JobRegistry.LoadClaimedJob(ctx, job.ID())
if reloadErr != nil {
if ctx.Err() != nil {
return res, ctx.Err()
}
log.Warningf(ctx, `IMPORT job %d could not reload job progress when retrying: %+v`,
int64(job.ID()), reloadErr)
} else {
job = reloadedJob
}
log.Warningf(ctx, `encountered retryable error: %+v`, err)
}
if err != nil {
return roachpb.BulkOpSummary{}, errors.Wrap(err, "exhausted retries")
}
return res, nil
}
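// The loop above follows the usual bounded-retry shape: retry.Options with a
// capped backoff wraps the distributed import flow, replanning errors are
// retried without consuming an attempt, insufficient-space and permanent
// bulk-job errors return immediately, and anything else is retried after
// reloading job progress. A minimal sketch of that shape, assuming
// hypothetical doWork and isPermanent helpers:
//
//	opts := retry.Options{MaxBackoff: time.Second, MaxRetries: 5}
//	var err error
//	for r := retry.StartWithCtx(ctx, opts); r.Next(); {
//		if err = doWork(); err == nil || isPermanent(err) {
//			break
//		}
//	}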
// emitImportJobEvent emits an import job event to the event log.
func emitImportJobEvent(
ctx context.Context, p sql.JobExecContext, status jobs.Status, job *jobs.Job,
) {
var importEvent eventpb.Import
if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
return sql.LogEventForJobs(ctx, p.ExecCfg(), txn, &importEvent, int64(job.ID()),
job.Payload(), p.User(), status)
}); err != nil {
log.Warningf(ctx, "failed to log event: %v", err)
}
}
func constructSchemaAndTableKey(
ctx context.Context,
tableDesc *descpb.TableDescriptor,
schemaIDToName map[descpb.ID]string,
version clusterversion.Handle,
) (schemaAndTableName, error) {
if !version.IsActive(ctx, clusterversion.PublicSchemasWithDescriptors) {
if tableDesc.UnexposedParentSchemaID == keys.PublicSchemaIDForBackup {
return schemaAndTableName{schema: "", table: tableDesc.GetName()}, nil
}
}
schemaName, ok := schemaIDToName[tableDesc.GetUnexposedParentSchemaID()]
if !ok && schemaName != tree.PublicSchema {
return schemaAndTableName{}, errors.Newf("invalid parent schema %s with ID %d for table %s",
schemaName, tableDesc.UnexposedParentSchemaID, tableDesc.GetName())
}
return schemaAndTableName{schema: schemaName, table: tableDesc.GetName()}, nil
}
func writeNonDropDatabaseChange(
ctx context.Context,
desc *dbdesc.Mutable,
txn *kv.Txn,
descsCol *descs.Collection,
p sql.JobExecContext,
jobDesc string,
) ([]jobspb.JobID, error) {
var job *jobs.Job
var err error
if job, err = createNonDropDatabaseChangeJob(p.User(), desc.ID, jobDesc, p, txn); err != nil {
return nil, err
}
queuedJob := []jobspb.JobID{job.ID()}
b := txn.NewBatch()
err = descsCol.WriteDescToBatch(
ctx,
p.ExtendedEvalContext().Tracing.KVTracingEnabled(),
desc,
b,
)
if err != nil {
return nil, err
}
return queuedJob, txn.Run(ctx, b)
}
func createNonDropDatabaseChangeJob(
user security.SQLUsername,
databaseID descpb.ID,
jobDesc string,
p sql.JobExecContext,
txn *kv.Txn,
) (*jobs.Job, error) {
jobRecord := jobs.Record{
Description: jobDesc,
Username: user,
Details: jobspb.SchemaChangeDetails{
DescID: databaseID,
FormatVersion: jobspb.DatabaseJobFormatVersion,
},
Progress: jobspb.SchemaChangeProgress{},
NonCancelable: true,
}
jobID := p.ExecCfg().JobRegistry.MakeJobID()
return p.ExecCfg().JobRegistry.CreateJobWithTxn(
p.ExtendedEvalContext().Context,
jobRecord,
jobID,
txn,
)
}
// OnFailOrCancel is part of the jobs.Resumer interface. Removes data that has
// been committed from an import that has failed or been canceled. It does this
// by adding the table descriptors in DROP state, which causes the schema change
// stuff to delete the keys in the background.
func (r *importResumer) OnFailOrCancel(ctx context.Context, execCtx interface{}) error {
p := execCtx.(sql.JobExecContext)
// Emit to the event log that the job has started reverting.
emitImportJobEvent(ctx, p, jobs.StatusReverting, r.job)
details := r.job.Details().(jobspb.ImportDetails)
addToFileFormatTelemetry(details.Format.Format.String(), "failed")
cfg := execCtx.(sql.JobExecContext).ExecCfg()
var jobsToRunAfterTxnCommit []jobspb.JobID
if err := sql.DescsTxn(ctx, cfg, func(
ctx context.Context, txn *kv.Txn, descsCol *descs.Collection,
) error {
if err := r.dropTables(ctx, txn, descsCol, cfg); err != nil {
return err
}
// Drop all the schemas which may have been created during a bundle import.
// These schemas should now be empty as all the tables in them would be new
// tables created during the import, and therefore dropped by the above
// dropTables method. This allows us to avoid "collecting" objects in the
// schema before dropping the descriptor.
var err error
jobsToRunAfterTxnCommit, err = r.dropSchemas(ctx, txn, descsCol, cfg, p)
if err != nil {
return err
}
// TODO(adityamaru): Remove in 22.1 since we do not write PTS records during
// IMPORT INTO from 21.2+.
return r.releaseProtectedTimestamp(ctx, txn, cfg.ProtectedTimestampProvider)
}); err != nil {
return err
}
// Run any jobs which might have been queued when dropping the schemas.
// This would be a job to drop all the schemas, and a job to update the parent
// database descriptor.
if len(jobsToRunAfterTxnCommit) != 0 {
if err := p.ExecCfg().JobRegistry.Run(ctx, p.ExecCfg().InternalExecutor,
jobsToRunAfterTxnCommit); err != nil {
return errors.Wrap(err, "failed to run jobs that drop the imported schemas")
}
}
// Emit to the event log that the job has completed reverting.
emitImportJobEvent(ctx, p, jobs.StatusFailed, r.job)
return nil
}
// dropTables implements the OnFailOrCancel logic.
func (r *importResumer) dropTables(
ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, execCfg *sql.ExecutorConfig,
) error {
details := r.job.Details().(jobspb.ImportDetails)
dropTime := int64(1)
// If the prepare step of the import job was not completed then the
// descriptors do not need to be rolled back as the txn updating them never
// completed.
if !details.PrepareComplete {
return nil
}
var revert []catalog.TableDescriptor
var empty []catalog.TableDescriptor
for _, tbl := range details.Tables {
if !tbl.IsNew {
desc, err := descsCol.GetMutableTableVersionByID(ctx, tbl.Desc.ID, txn)
if err != nil {
return err
}
imm := desc.ImmutableCopy().(catalog.TableDescriptor)
if tbl.WasEmpty {
empty = append(empty, imm)
} else {
revert = append(revert, imm)
}
}
}
// The walltime can be 0 if there is a failure between publishing the tables
// as OFFLINE and then choosing a ingestion timestamp. This might happen
// while waiting for the descriptor version to propagate across the cluster
// for example.
//
// In this case, we don't want to rollback the data since data ingestion has
// not yet begun (since we have not chosen a timestamp at which to ingest.)
if details.Walltime != 0 && len(revert) > 0 {
// NB: if a revert fails it will abort the rest of this failure txn, which is
// also what brings tables back online. We _could_ change the error handling
// or just move the revert into Resume()'s error return path, however it isn't
// clear that just bringing a table back online with partially imported data
// that may or may not be partially reverted is actually a good idea. It seems
// better to do the revert here so that the table comes back if and only if,
// it was rolled back to its pre-IMPORT state, and instead provide a manual
// admin knob (e.g. ALTER TABLE REVERT TO SYSTEM TIME) if anything goes wrong.
ts := hlc.Timestamp{WallTime: details.Walltime}.Prev()
// disallowShadowingBelow=writeTS used to write means no existing keys could
// have been covered by a key imported and the table was offline to other
// writes, so even if GC has run it would not have GC'ed any keys to which
// we need to revert, so we can safely ignore the target-time GC check.
const ignoreGC = true
if err := sql.RevertTables(ctx, txn.DB(), execCfg, revert, ts, ignoreGC, sql.RevertTableDefaultBatchSize); err != nil {
return errors.Wrap(err, "rolling back partially completed IMPORT")
}
}
for i := range empty {
// Set a DropTime on the table descriptor to differentiate it from an
// older-format (v1.1) descriptor. This enables ClearTableData to use a
// RangeClear for faster data removal, rather than removing by chunks.
empty[i].TableDesc().DropTime = dropTime
if err := gcjob.ClearTableData(
ctx, execCfg.DB, execCfg.DistSender, execCfg.Codec, &execCfg.Settings.SV, empty[i],
); err != nil {
return errors.Wrapf(err, "clearing data for table %d", empty[i].GetID())
}
}
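// Accumulate the descriptor updates and namespace-key deletions in a single
// batch so they are written atomically as part of this txn (see txn.Run below).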
b := txn.NewBatch()
tablesToGC := make([]descpb.ID, 0, len(details.Tables))
toWrite := make([]*tabledesc.Mutable, 0, len(details.Tables))
for _, tbl := range details.Tables {
newTableDesc, err := descsCol.GetMutableTableVersionByID(ctx, tbl.Desc.ID, txn)
if err != nil {
return err
}
if tbl.IsNew {
newTableDesc.SetDropped()
// If the DropTime is set, a table uses RangeClear for fast data removal. This
// operation starts at DropTime + the GC TTL. If we used now() here, it would
// not clean up data until the TTL from the time of the error. Instead, use 1
// (that is, 1ns past the epoch) to allow this to be cleaned up as soon as
// possible. This is safe since the table data was never visible to users,
// and so we don't need to preserve MVCC semantics.
newTableDesc.DropTime = dropTime
b.Del(catalogkeys.EncodeNameKey(execCfg.Codec, newTableDesc))
tablesToGC = append(tablesToGC, newTableDesc.ID)
descsCol.AddDeletedDescriptor(newTableDesc.GetID())
} else {
// IMPORT did not create this table, so we should not drop it.
newTableDesc.SetPublic()
}
// Accumulate the changes before adding them to the batch to avoid
// making any table invalid before having read it.
toWrite = append(toWrite, newTableDesc)
}
for _, d := range toWrite {
const kvTrace = false
if err := descsCol.WriteDescToBatch(ctx, kvTrace, d, b); err != nil {
return err
}
}
// Queue a GC job.
gcDetails := jobspb.SchemaChangeGCDetails{}
for _, tableID := range tablesToGC {
gcDetails.Tables = append(gcDetails.Tables, jobspb.SchemaChangeGCDetails_DroppedID{
ID: tableID,
DropTime: dropTime,
})
}
gcJobRecord := jobs.Record{
Description: fmt.Sprintf("GC for %s", r.job.Payload().Description),
Username: r.job.Payload().UsernameProto.Decode(),
DescriptorIDs: tablesToGC,
Details: gcDetails,
Progress: jobspb.SchemaChangeGCProgress{},
NonCancelable: true,
}
if _, err := execCfg.JobRegistry.CreateJobWithTxn(
ctx, gcJobRecord, execCfg.JobRegistry.MakeJobID(), txn); err != nil {
return err
}
return errors.Wrap(txn.Run(ctx, b), "rolling back tables")
}
func (r *importResumer) dropSchemas(
ctx context.Context,
txn *kv.Txn,
descsCol *descs.Collection,
execCfg *sql.ExecutorConfig,
p sql.JobExecContext,
) ([]jobspb.JobID, error) {
details := r.job.Details().(jobspb.ImportDetails)
// If the prepare step of the import job was not completed then the
// descriptors do not need to be rolled back as the txn updating them never
// completed.
if !details.PrepareComplete || len(details.Schemas) == 0 {
return nil, nil
}
// Resolve the database descriptor.
desc, err := descsCol.GetMutableDescriptorByID(ctx, txn, details.ParentID)
if err != nil {
return nil, err
}
dbDesc, ok := desc.(*dbdesc.Mutable)
if !ok {
return nil, errors.Newf("expected ID %d to refer to the database being imported into",
details.ParentID)
}
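// Mark each schema created by this import as dropped and record the drop in
// the parent database descriptor.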
droppedSchemaIDs := make([]descpb.ID, 0)
for _, schema := range details.Schemas {
desc, err := descsCol.GetMutableDescriptorByID(ctx, txn, schema.Desc.ID)
if err != nil {
return nil, err
}
var schemaDesc *schemadesc.Mutable
var ok bool
if schemaDesc, ok = desc.(*schemadesc.Mutable); !ok {
return nil, errors.Newf("unable to resolve schema desc with ID %d", schema.Desc.ID)
}
// Mark the descriptor as dropped and write it to the batch.
// Delete namespace entry or update draining names depending on version.
schemaDesc.SetDropped()
droppedSchemaIDs = append(droppedSchemaIDs, schemaDesc.GetID())
b := txn.NewBatch()
// TODO(postamar): remove version gate and else-block in 22.2
if execCfg.Settings.Version.IsActive(ctx, clusterversion.AvoidDrainingNames) {
if dbDesc.Schemas != nil {
delete(dbDesc.Schemas, schemaDesc.GetName())
}
b.Del(catalogkeys.EncodeNameKey(p.ExecCfg().Codec, schemaDesc))
} else {
//lint:ignore SA1019 removal of deprecated method call scheduled for 22.2
schemaDesc.AddDrainingName(descpb.NameInfo{
ParentID: details.ParentID,
ParentSchemaID: keys.RootNamespaceID,
Name: schemaDesc.Name,
})
// Update the parent database with information about the dropped schema.
dbDesc.AddSchemaToDatabase(schema.Desc.Name, descpb.DatabaseDescriptor_SchemaInfo{ID: dbDesc.ID, Dropped: true})
}
if err := descsCol.WriteDescToBatch(ctx, p.ExtendedEvalContext().Tracing.KVTracingEnabled(),
schemaDesc, b); err != nil {
return nil, err
}
err = txn.Run(ctx, b)
if err != nil {
return nil, err
}
}
// Write out the change to the database. This only creates a job record to be
// run after the txn commits.
queuedJob, err := writeNonDropDatabaseChange(ctx, dbDesc, txn, descsCol, p, "")
if err != nil {
return nil, err
}
// Create the job to drop the schema.
dropSchemaJobRecord := jobs.Record{
Description: "dropping schemas as part of an import job rollback",
Username: p.User(),
DescriptorIDs: droppedSchemaIDs,
Details: jobspb.SchemaChangeDetails{
DroppedSchemas: droppedSchemaIDs,
DroppedDatabaseID: descpb.InvalidID,
FormatVersion: jobspb.DatabaseJobFormatVersion,
},
Progress: jobspb.SchemaChangeProgress{},
NonCancelable: true,
}
jobID := p.ExecCfg().JobRegistry.MakeJobID()
job, err := execCfg.JobRegistry.CreateJobWithTxn(ctx, dropSchemaJobRecord, jobID, txn)
if err != nil {
return nil, err
}
queuedJob = append(queuedJob, job.ID())
return queuedJob, nil
}
func (r *importResumer) releaseProtectedTimestamp(
ctx context.Context, txn *kv.Txn, pts protectedts.Storage,
) error {
details := r.job.Details().(jobspb.ImportDetails)
ptsID := details.ProtectedTimestampRecord
// If the job doesn't have a protected timestamp then there's nothing to do.
if ptsID == nil {
return nil
}
err := pts.Release(ctx, txn, *ptsID)
if errors.Is(err, protectedts.ErrNotExists) {
// The record does not seem to exist, so there is no reason to return an
// error that might cause problems.
log.Warningf(ctx, "failed to release protected timestamp which seems not to exist: %v", err)
err = nil
}
return err
}
// ReportResults implements JobResultsReporter interface.
func (r *importResumer) ReportResults(ctx context.Context, resultsCh chan<- tree.Datums) error {
select {
case resultsCh <- tree.Datums{
tree.NewDInt(tree.DInt(r.job.ID())),
tree.NewDString(string(jobs.StatusSucceeded)),
tree.NewDFloat(tree.DFloat(1.0)),
tree.NewDInt(tree.DInt(r.res.Rows)),
tree.NewDInt(tree.DInt(r.res.IndexEntries)),
tree.NewDInt(tree.DInt(r.res.DataSize)),
}:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
func init() {
jobs.RegisterConstructor(
jobspb.TypeImport,
func(job *jobs.Job, settings *cluster.Settings) jobs.Resumer {
return &importResumer{
job: job,
settings: settings,
}
},
)
}
| pkg/sql/importer/import_job.go | 1 | https://github.com/cockroachdb/cockroach/commit/67e659223cd2a2ae4ea2e5fa6d9461fd1418c4de | [
0.9976934790611267,
0.010343645699322224,
0.00016327371122315526,
0.00029594582156278193,
0.08999053388834
] |
{
"id": 4,
"code_window": [
"\t\t\tjob = reloadedJob\n",
"\t\t}\n",
"\t\tlog.Warningf(ctx, `encountered retryable error: %+v`, err)\n",
"\t}\n",
"\n",
"\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\t// We have exhausted retries, but we have not seen a \"PermanentBulkJobError\" so\n",
"\t// it is possible that this is a transient error that is taking longer than\n",
"\t// our configured retry to go away.\n",
"\t//\n",
"\t// Let's pause the job instead of failing it so that the user can decide\n",
"\t// whether to resume it or cancel it.\n"
],
"file_path": "pkg/sql/importer/import_job.go",
"type": "add",
"edit_start_line_idx": 1000
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
import React from "react";
import { storiesOf } from "@storybook/react";
import { InlineAlert } from "./inlineAlert";
import { styledWrapper } from "src/util/decorators";
import { Anchor } from "src/components";
storiesOf("InlineAlert", module)
.addDecorator(styledWrapper({ padding: "24px" }))
.add("with text title", () => (
<InlineAlert title="Hello world!" message="blah-blah-blah" />
))
.add("with Error intent", () => (
<InlineAlert title="Hello world!" message="blah-blah-blah" intent="error" />
))
.add("with link in title", () => (
<InlineAlert
title={
<span>
You do not have permission to view this information.{" "}
<Anchor href="#">Learn more.</Anchor>
</span>
}
/>
))
.add("with multiline message", () => (
<InlineAlert
title="Hello world!"
message={
<div>
<div>Message 1</div>
<div>Message 2</div>
<div>Message 3</div>
</div>
}
/>
));
| pkg/ui/workspaces/db-console/src/components/inlineAlert/inlineAlert.stories.tsx | 0 | https://github.com/cockroachdb/cockroach/commit/67e659223cd2a2ae4ea2e5fa6d9461fd1418c4de | [
0.00017638198914937675,
0.0001719377760309726,
0.00016738071280997247,
0.00017242153990082443,
0.0000029178179374866886
] |
{
"id": 4,
"code_window": [
"\t\t\tjob = reloadedJob\n",
"\t\t}\n",
"\t\tlog.Warningf(ctx, `encountered retryable error: %+v`, err)\n",
"\t}\n",
"\n",
"\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\t// We have exhausted retries, but we have not seen a \"PermanentBulkJobError\" so\n",
"\t// it is possible that this is a transient error that is taking longer than\n",
"\t// our configured retry to go away.\n",
"\t//\n",
"\t// Let's pause the job instead of failing it so that the user can decide\n",
"\t// whether to resume it or cancel it.\n"
],
"file_path": "pkg/sql/importer/import_job.go",
"type": "add",
"edit_start_line_idx": 1000
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package typedesc_test
import (
"context"
"fmt"
"math"
"testing"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/bootstrap"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/internal/validate"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/nstree"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc"
"github.com/cockroachdb/cockroach/pkg/sql/oidext"
"github.com/cockroachdb/cockroach/pkg/sql/privilege"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/lib/pq/oid"
"github.com/stretchr/testify/require"
)
func TestTypeDescIsCompatibleWith(t *testing.T) {
defer leaktest.AfterTest(t)()
tests := []struct {
a descpb.TypeDescriptor
b descpb.TypeDescriptor
// If err == "", then no error is expected. Otherwise, an error that
// matches is expected.
err string
}{
// Different type kinds shouldn't be equal.
{
a: descpb.TypeDescriptor{
Name: "a",
Kind: descpb.TypeDescriptor_ENUM,
},
b: descpb.TypeDescriptor{
Name: "b",
Kind: descpb.TypeDescriptor_ALIAS,
},
err: `"b" of type "ALIAS" is not compatible with type "ENUM"`,
},
{
a: descpb.TypeDescriptor{
Name: "a",
Kind: descpb.TypeDescriptor_ENUM,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "us-east-1",
PhysicalRepresentation: []byte{128},
},
},
},
b: descpb.TypeDescriptor{
Name: "b",
Kind: descpb.TypeDescriptor_MULTIREGION_ENUM,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "us-east-1",
PhysicalRepresentation: []byte{128},
},
},
},
err: `"b" of type "MULTIREGION_ENUM" is not compatible with type "ENUM"`,
},
// We aren't considering compatibility between different alias kinds.
{
a: descpb.TypeDescriptor{
Kind: descpb.TypeDescriptor_ALIAS,
},
b: descpb.TypeDescriptor{
Kind: descpb.TypeDescriptor_ALIAS,
},
err: `compatibility comparison unsupported`,
},
// The empty enum should be compatible with any other enums.
{
a: descpb.TypeDescriptor{
Kind: descpb.TypeDescriptor_ENUM,
},
b: descpb.TypeDescriptor{
Kind: descpb.TypeDescriptor_ENUM,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "hello",
PhysicalRepresentation: []byte{128},
},
},
},
err: ``,
},
// The same enum should be compatible with itself.
{
a: descpb.TypeDescriptor{
Kind: descpb.TypeDescriptor_ENUM,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "hello",
PhysicalRepresentation: []byte{128},
},
{
LogicalRepresentation: "hi",
PhysicalRepresentation: []byte{200},
},
},
},
b: descpb.TypeDescriptor{
Kind: descpb.TypeDescriptor_ENUM,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "hello",
PhysicalRepresentation: []byte{128},
},
{
LogicalRepresentation: "hi",
PhysicalRepresentation: []byte{200},
},
},
},
err: ``,
},
{
a: descpb.TypeDescriptor{
Kind: descpb.TypeDescriptor_MULTIREGION_ENUM,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "us-east-1",
PhysicalRepresentation: []byte{128},
},
{
LogicalRepresentation: "us-east-2",
PhysicalRepresentation: []byte{200},
},
},
},
b: descpb.TypeDescriptor{
Kind: descpb.TypeDescriptor_MULTIREGION_ENUM,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "us-east-1",
PhysicalRepresentation: []byte{128},
},
{
LogicalRepresentation: "us-east-2",
PhysicalRepresentation: []byte{200},
},
},
},
err: ``,
},
// An enum with only some members of another enum should be compatible.
{
a: descpb.TypeDescriptor{
Kind: descpb.TypeDescriptor_ENUM,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "hi",
PhysicalRepresentation: []byte{200},
},
},
},
b: descpb.TypeDescriptor{
Kind: descpb.TypeDescriptor_ENUM,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "hello",
PhysicalRepresentation: []byte{128},
},
{
LogicalRepresentation: "hi",
PhysicalRepresentation: []byte{200},
},
},
},
err: ``,
},
{
a: descpb.TypeDescriptor{
Kind: descpb.TypeDescriptor_MULTIREGION_ENUM,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "us-east-2",
PhysicalRepresentation: []byte{200},
},
},
},
b: descpb.TypeDescriptor{
Kind: descpb.TypeDescriptor_MULTIREGION_ENUM,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "us-east-1",
PhysicalRepresentation: []byte{128},
},
{
LogicalRepresentation: "us-east-2",
PhysicalRepresentation: []byte{200},
},
},
},
err: ``,
},
// An enum with missing members shouldn't be compatible.
{
a: descpb.TypeDescriptor{
Kind: descpb.TypeDescriptor_ENUM,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "howdy",
PhysicalRepresentation: []byte{128},
},
{
LogicalRepresentation: "hi",
PhysicalRepresentation: []byte{200},
},
},
},
b: descpb.TypeDescriptor{
Kind: descpb.TypeDescriptor_ENUM,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "hello",
PhysicalRepresentation: []byte{128},
},
{
LogicalRepresentation: "hi",
PhysicalRepresentation: []byte{200},
},
},
},
err: `could not find enum value "howdy"`,
},
{
a: descpb.TypeDescriptor{
Kind: descpb.TypeDescriptor_MULTIREGION_ENUM,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "us-east-1",
PhysicalRepresentation: []byte{128},
},
{
LogicalRepresentation: "us-east-2",
PhysicalRepresentation: []byte{200},
},
},
},
b: descpb.TypeDescriptor{
Kind: descpb.TypeDescriptor_MULTIREGION_ENUM,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "us-east-3",
PhysicalRepresentation: []byte{128},
},
{
LogicalRepresentation: "us-east-2",
PhysicalRepresentation: []byte{200},
},
},
},
err: `could not find enum value "us-east-1"`,
},
// An enum with a different physical representation shouldn't be compatible.
{
a: descpb.TypeDescriptor{
Kind: descpb.TypeDescriptor_ENUM,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "hello",
PhysicalRepresentation: []byte{128},
},
{
LogicalRepresentation: "hi",
PhysicalRepresentation: []byte{201},
},
},
},
b: descpb.TypeDescriptor{
Kind: descpb.TypeDescriptor_ENUM,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "hello",
PhysicalRepresentation: []byte{128},
},
{
LogicalRepresentation: "hi",
PhysicalRepresentation: []byte{200},
},
},
},
err: `has differing physical representation for value "hi"`,
},
{
a: descpb.TypeDescriptor{
Kind: descpb.TypeDescriptor_ENUM,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "us-east-1",
PhysicalRepresentation: []byte{128},
},
{
LogicalRepresentation: "us-east-2",
PhysicalRepresentation: []byte{201},
},
},
},
b: descpb.TypeDescriptor{
Kind: descpb.TypeDescriptor_ENUM,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "us-east-1",
PhysicalRepresentation: []byte{128},
},
{
LogicalRepresentation: "us-east-2",
PhysicalRepresentation: []byte{200},
},
},
},
err: `has differing physical representation for value "us-east-2"`,
},
}
for i, test := range tests {
a := typedesc.NewBuilder(&test.a).BuildImmutableType()
b := typedesc.NewBuilder(&test.b).BuildImmutableType()
err := a.IsCompatibleWith(b)
if test.err == "" {
require.NoError(t, err)
} else {
if !testutils.IsError(err, test.err) {
t.Errorf("#%d expected error %s, but found %s", i, test.err, err)
}
}
}
}
func TestValidateTypeDesc(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
const (
dbID = 1000
schemaID = dbID + 1
typeID = dbID + 2
multiRegionDBID = 2000
)
var cb nstree.MutableCatalog
cb.UpsertDescriptorEntry(dbdesc.NewBuilder(&descpb.DatabaseDescriptor{
Name: "db",
ID: dbID,
}).BuildImmutable())
cb.UpsertDescriptorEntry(schemadesc.NewBuilder(&descpb.SchemaDescriptor{
ID: schemaID,
ParentID: dbID,
Name: "schema",
}).BuildImmutable())
cb.UpsertDescriptorEntry(typedesc.NewBuilder(&descpb.TypeDescriptor{
ID: typeID,
Name: "type",
}).BuildImmutable())
cb.UpsertDescriptorEntry(dbdesc.NewBuilder(&descpb.DatabaseDescriptor{
Name: "multi-region-db",
ID: multiRegionDBID,
RegionConfig: &descpb.DatabaseDescriptor_RegionConfig{
PrimaryRegion: "us-east-1",
},
}).BuildImmutable())
defaultPrivileges := catpb.NewBasePrivilegeDescriptor(security.RootUserName())
invalidPrivileges := catpb.NewBasePrivilegeDescriptor(security.RootUserName())
// Make the PrivilegeDescriptor invalid by granting SELECT to a type.
invalidPrivileges.Grant(security.TestUserName(), privilege.List{privilege.SELECT}, false)
typeDescID := descpb.ID(bootstrap.TestingUserDescID(0))
testData := []struct {
err string
desc descpb.TypeDescriptor
}{
{
`empty type name`,
descpb.TypeDescriptor{
Privileges: defaultPrivileges,
},
},
{
`invalid ID 0`,
descpb.TypeDescriptor{
Name: "t",
Privileges: defaultPrivileges,
},
},
{
`invalid parentID 0`,
descpb.TypeDescriptor{
Name: "t", ID: typeDescID,
Privileges: defaultPrivileges,
},
},
{
`invalid parent schema ID 0`,
descpb.TypeDescriptor{
Name: "t",
ID: typeDescID,
ParentID: dbID,
Privileges: defaultPrivileges,
},
},
{
`enum members are not sorted [{[2] a ALL NONE} {[1] b ALL NONE}]`,
descpb.TypeDescriptor{
Name: "t",
ID: typeDescID,
ParentID: dbID,
ParentSchemaID: keys.PublicSchemaID,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "a",
PhysicalRepresentation: []byte{2},
},
{
LogicalRepresentation: "b",
PhysicalRepresentation: []byte{1},
},
},
Privileges: defaultPrivileges,
},
},
{
`duplicate enum physical rep [1]`,
descpb.TypeDescriptor{
Name: "t",
ID: typeDescID,
ParentID: dbID,
ParentSchemaID: keys.PublicSchemaID,
Kind: descpb.TypeDescriptor_ENUM,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "a",
PhysicalRepresentation: []byte{1},
},
{
LogicalRepresentation: "b",
PhysicalRepresentation: []byte{1},
},
},
Privileges: defaultPrivileges,
},
},
{
`duplicate enum physical rep [1]`,
descpb.TypeDescriptor{
Name: "t",
ID: typeDescID,
ParentID: multiRegionDBID,
ParentSchemaID: keys.PublicSchemaID,
Kind: descpb.TypeDescriptor_MULTIREGION_ENUM,
RegionConfig: &descpb.TypeDescriptor_RegionConfig{
PrimaryRegion: "us-east-1",
},
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "us-east-1",
PhysicalRepresentation: []byte{1},
},
{
LogicalRepresentation: "us-east-2",
PhysicalRepresentation: []byte{1},
},
},
Privileges: defaultPrivileges,
},
},
{
`duplicate enum member "a"`,
descpb.TypeDescriptor{
Name: "t",
ID: typeDescID,
ParentID: dbID,
ParentSchemaID: keys.PublicSchemaID,
Kind: descpb.TypeDescriptor_ENUM,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "a",
PhysicalRepresentation: []byte{1},
},
{
LogicalRepresentation: "a",
PhysicalRepresentation: []byte{2},
},
},
Privileges: defaultPrivileges,
},
},
{
`duplicate enum member "us-east-1"`,
descpb.TypeDescriptor{
Name: "t",
ID: typeDescID,
ParentID: multiRegionDBID,
ParentSchemaID: keys.PublicSchemaID,
Kind: descpb.TypeDescriptor_ENUM,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "us-east-1",
PhysicalRepresentation: []byte{1},
},
{
LogicalRepresentation: "us-east-1",
PhysicalRepresentation: []byte{2},
},
},
Privileges: defaultPrivileges,
},
},
{
`read only capability member must have transition direction set`,
descpb.TypeDescriptor{
Name: "t",
ID: typeDescID,
ParentID: dbID,
ParentSchemaID: keys.PublicSchemaID,
Kind: descpb.TypeDescriptor_ENUM,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "a",
PhysicalRepresentation: []byte{1},
Capability: descpb.TypeDescriptor_EnumMember_READ_ONLY,
Direction: descpb.TypeDescriptor_EnumMember_NONE,
},
},
Privileges: defaultPrivileges,
},
},
{
`public enum member can not have transition direction set`,
descpb.TypeDescriptor{
Name: "t",
ID: typeDescID,
ParentID: dbID,
ParentSchemaID: keys.PublicSchemaID,
Kind: descpb.TypeDescriptor_ENUM,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "a",
PhysicalRepresentation: []byte{1},
Capability: descpb.TypeDescriptor_EnumMember_ALL,
Direction: descpb.TypeDescriptor_EnumMember_ADD,
},
},
Privileges: defaultPrivileges,
},
},
{
`public enum member can not have transition direction set`,
descpb.TypeDescriptor{
Name: "t",
ID: typeDescID,
ParentID: dbID,
ParentSchemaID: keys.PublicSchemaID,
Kind: descpb.TypeDescriptor_MULTIREGION_ENUM,
RegionConfig: &descpb.TypeDescriptor_RegionConfig{
PrimaryRegion: "us-east1",
},
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "us-east1",
PhysicalRepresentation: []byte{1},
Capability: descpb.TypeDescriptor_EnumMember_ALL,
Direction: descpb.TypeDescriptor_EnumMember_REMOVE,
},
},
Privileges: defaultPrivileges,
},
},
{
`ALIAS type desc has nil alias type`,
descpb.TypeDescriptor{
Name: "t",
ID: typeDescID,
ParentID: dbID,
ParentSchemaID: keys.PublicSchemaID,
Kind: descpb.TypeDescriptor_ALIAS,
Privileges: defaultPrivileges,
},
},
{
`referenced database ID 500: referenced descriptor not found`,
descpb.TypeDescriptor{
Name: "t",
ID: typeDescID,
ParentID: 500,
ParentSchemaID: keys.PublicSchemaID,
Kind: descpb.TypeDescriptor_ALIAS,
Alias: types.Int,
Privileges: defaultPrivileges,
},
},
{
`referenced schema ID 500: referenced descriptor not found`,
descpb.TypeDescriptor{
Name: "t",
ID: typeDescID,
ParentID: dbID,
ParentSchemaID: 500,
Kind: descpb.TypeDescriptor_ALIAS,
Alias: types.Int,
Privileges: defaultPrivileges,
},
},
{
`arrayTypeID 500 does not exist for "ENUM": referenced type ID 500: referenced descriptor not found`,
descpb.TypeDescriptor{
Name: "t",
ID: typeDescID,
ParentID: dbID,
ParentSchemaID: schemaID,
Kind: descpb.TypeDescriptor_ENUM,
ArrayTypeID: 500,
Privileges: defaultPrivileges,
},
},
{
`arrayTypeID 500 does not exist for "MULTIREGION_ENUM": referenced type ID 500: referenced descriptor not found`,
descpb.TypeDescriptor{
Name: "t",
ID: typeDescID,
ParentID: multiRegionDBID,
ParentSchemaID: keys.PublicSchemaID,
Kind: descpb.TypeDescriptor_MULTIREGION_ENUM,
RegionConfig: &descpb.TypeDescriptor_RegionConfig{
PrimaryRegion: "us-east-1",
},
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "us-east-1",
PhysicalRepresentation: []byte{1},
},
},
ArrayTypeID: 500,
Privileges: defaultPrivileges,
},
},
{
"referenced table ID 500: referenced descriptor not found",
descpb.TypeDescriptor{
Name: "t",
ID: typeDescID,
ParentID: dbID,
ParentSchemaID: keys.PublicSchemaID,
Kind: descpb.TypeDescriptor_ENUM,
ArrayTypeID: typeID,
ReferencingDescriptorIDs: []descpb.ID{500},
Privileges: defaultPrivileges,
},
},
{
"referenced table ID 500: referenced descriptor not found",
descpb.TypeDescriptor{
Name: "t",
ID: typeDescID,
ParentID: multiRegionDBID,
ParentSchemaID: keys.PublicSchemaID,
Kind: descpb.TypeDescriptor_MULTIREGION_ENUM,
RegionConfig: &descpb.TypeDescriptor_RegionConfig{
PrimaryRegion: "us-east-1",
},
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "us-east-1",
PhysicalRepresentation: []byte{1},
},
},
ArrayTypeID: typeID,
ReferencingDescriptorIDs: []descpb.ID{500},
Privileges: defaultPrivileges,
},
},
{
`user testuser must not have SELECT privileges on type "t"`,
descpb.TypeDescriptor{
Name: "t",
ID: typeDescID,
ParentID: dbID,
ParentSchemaID: schemaID,
Kind: descpb.TypeDescriptor_ENUM,
ArrayTypeID: typeID,
Privileges: invalidPrivileges,
},
},
{
`found region config on ENUM type desc`,
descpb.TypeDescriptor{
Name: "t",
ID: typeDescID,
ParentID: dbID,
ParentSchemaID: schemaID,
Kind: descpb.TypeDescriptor_ENUM,
RegionConfig: &descpb.TypeDescriptor_RegionConfig{
PrimaryRegion: "us-east-1",
},
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "foo",
PhysicalRepresentation: []byte{2},
},
},
ArrayTypeID: typeID,
Privileges: defaultPrivileges,
},
},
{
`no region config on MULTIREGION_ENUM type desc`,
descpb.TypeDescriptor{
Name: "t",
ID: typeDescID,
ParentID: multiRegionDBID,
ParentSchemaID: keys.PublicSchemaID,
Kind: descpb.TypeDescriptor_MULTIREGION_ENUM,
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "us-east-1",
PhysicalRepresentation: []byte{2},
},
},
ArrayTypeID: typeID,
Privileges: defaultPrivileges,
},
},
{
`unexpected primary region on db desc: "us-east-1" expected "us-east-2"`,
descpb.TypeDescriptor{
Name: "t",
ID: typeDescID,
ParentID: multiRegionDBID,
ParentSchemaID: keys.PublicSchemaID,
Kind: descpb.TypeDescriptor_MULTIREGION_ENUM,
RegionConfig: &descpb.TypeDescriptor_RegionConfig{
PrimaryRegion: "us-east-2",
},
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "us-east-2",
PhysicalRepresentation: []byte{2},
},
},
ArrayTypeID: typeID,
Privileges: defaultPrivileges,
},
},
{
`primary region "us-east-2" not found in list of enum members`,
descpb.TypeDescriptor{
Name: "t",
ID: typeDescID,
ParentID: multiRegionDBID,
ParentSchemaID: keys.PublicSchemaID,
Kind: descpb.TypeDescriptor_MULTIREGION_ENUM,
RegionConfig: &descpb.TypeDescriptor_RegionConfig{
PrimaryRegion: "us-east-2",
},
EnumMembers: []descpb.TypeDescriptor_EnumMember{
{
LogicalRepresentation: "us-east-1",
PhysicalRepresentation: []byte{2},
},
},
ArrayTypeID: typeID,
Privileges: defaultPrivileges,
},
},
}
for i, test := range testData {
desc := typedesc.NewBuilder(&test.desc).BuildImmutable()
expectedErr := fmt.Sprintf("%s %q (%d): %s", desc.DescriptorType(), desc.GetName(), desc.GetID(), test.err)
ve := cb.Validate(ctx, clusterversion.TestingClusterVersion, catalog.NoValidationTelemetry, catalog.ValidationLevelCrossReferences, desc)
if err := ve.CombinedError(); err == nil {
t.Errorf("#%d expected err: %s but found nil: %v", i, expectedErr, test.desc)
} else if expectedErr != err.Error() {
t.Errorf("#%d expected err: %s but found: %s", i, expectedErr, err)
}
}
}
func TestOIDToIDConversion(t *testing.T) {
tests := []struct {
oid oid.Oid
ok bool
name string
}{
{oid.Oid(0), false, "default OID"},
{oid.Oid(1), false, "Standard OID"},
{oid.Oid(oidext.CockroachPredefinedOIDMax), false, "max standard OID"},
{oid.Oid(oidext.CockroachPredefinedOIDMax + 1), true, "user-defined OID"},
{oid.Oid(math.MaxUint32), true, "max user-defined OID"},
}
for _, test := range tests {
t.Run(fmt.Sprint(test.oid), func(t *testing.T) {
_, err := typedesc.UserDefinedTypeOIDToID(test.oid)
if test.ok {
require.NoError(t, err)
} else {
require.Error(t, err)
}
})
}
}
func TestTableImplicitTypeDescCannotBeSerializedOrValidated(t *testing.T) {
td := &descpb.TypeDescriptor{
Name: "foo",
ID: 10,
ParentID: 1,
ParentSchemaID: 1,
Kind: descpb.TypeDescriptor_TABLE_IMPLICIT_RECORD_TYPE,
Privileges: catpb.NewBasePrivilegeDescriptor(security.AdminRoleName()),
}
desc := typedesc.NewBuilder(td).BuildImmutable()
err := validate.Self(clusterversion.TestingClusterVersion, desc)
require.Contains(t, err.Error(), "kind TABLE_IMPLICIT_RECORD_TYPE should never be serialized")
}
| pkg/sql/catalog/typedesc/type_desc_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/67e659223cd2a2ae4ea2e5fa6d9461fd1418c4de | [
0.00174439896363765,
0.00020601059077307582,
0.00016458106983918697,
0.00017044447304215282,
0.000203600138775073
] |
{
"id": 4,
"code_window": [
"\t\t\tjob = reloadedJob\n",
"\t\t}\n",
"\t\tlog.Warningf(ctx, `encountered retryable error: %+v`, err)\n",
"\t}\n",
"\n",
"\tif err != nil {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\t// We have exhausted retries, but we have not seen a \"PermanentBulkJobError\" so\n",
"\t// it is possible that this is a transient error that is taking longer than\n",
"\t// our configured retry to go away.\n",
"\t//\n",
"\t// Let's pause the job instead of failing it so that the user can decide\n",
"\t// whether to resume it or cancel it.\n"
],
"file_path": "pkg/sql/importer/import_job.go",
"type": "add",
"edit_start_line_idx": 1000
} | // Code generated by TestPretty. DO NOT EDIT.
// GENERATED FILE DO NOT EDIT
1:
-
SELECT
(
(
(
(
(
(1, '2', 3)
AS a, b, c
),
((4, '5') AS a, b),
((6,) AS a)
)
AS a, b, c
),
((7, 8) AS a, b),
(('9',) AS a)
)
AS a, b, c
)
AS r
| pkg/sql/sem/tree/testdata/pretty/9.ref.golden.short | 0 | https://github.com/cockroachdb/cockroach/commit/67e659223cd2a2ae4ea2e5fa6d9461fd1418c4de | [
0.0002031767216976732,
0.00018363732669968158,
0.0001716194674372673,
0.00017611579096410424,
0.000013937842595623806
] |
{
"id": 5,
"code_window": [
"\tif err != nil {\n",
"\t\treturn roachpb.BulkOpSummary{}, errors.Wrap(err, \"exhausted retries\")\n",
"\t}\n",
"\treturn res, nil\n",
"}\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn res, jobs.MarkPauseRequestError(errors.Wrap(err, \"exhausted retries\"))\n"
],
"file_path": "pkg/sql/importer/import_job.go",
"type": "replace",
"edit_start_line_idx": 1001
} | // Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package importer
import (
"bytes"
"context"
"fmt"
"math"
"time"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/joberror"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descidgen"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/ingesting"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/rewrite"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/gcjob"
"github.com/cockroachdb/cockroach/pkg/sql/opt/memo"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
"github.com/cockroachdb/cockroach/pkg/sql/stats"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/ioctx"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/log/eventpb"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/retry"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/errors"
)
type importResumer struct {
job *jobs.Job
settings *cluster.Settings
res roachpb.RowCount
testingKnobs struct {
afterImport func(summary roachpb.RowCount) error
alwaysFlushJobProgress bool
}
}
func (r *importResumer) TestingSetAfterImportKnob(fn func(summary roachpb.RowCount) error) {
r.testingKnobs.afterImport = fn
}
var _ jobs.TraceableJob = &importResumer{}
func (r *importResumer) ForceRealSpan() bool {
return true
}
var _ jobs.Resumer = &importResumer{}
var processorsPerNode = settings.RegisterIntSetting(
settings.TenantWritable,
"bulkio.import.processors_per_node",
"number of input processors to run on each sql instance", 1,
settings.PositiveInt,
)
type preparedSchemaMetadata struct {
schemaPreparedDetails jobspb.ImportDetails
schemaRewrites jobspb.DescRewriteMap
newSchemaIDToName map[descpb.ID]string
oldSchemaIDToName map[descpb.ID]string
queuedSchemaJobs []jobspb.JobID
}
// Resume is part of the jobs.Resumer interface.
func (r *importResumer) Resume(ctx context.Context, execCtx interface{}) error {
p := execCtx.(sql.JobExecContext)
if err := r.parseBundleSchemaIfNeeded(ctx, p); err != nil {
return err
}
details := r.job.Details().(jobspb.ImportDetails)
files := details.URIs
format := details.Format
tables := make(map[string]*execinfrapb.ReadImportDataSpec_ImportTable, len(details.Tables))
if details.Tables != nil {
// Skip prepare stage on job resumption, if it has already been completed.
if !details.PrepareComplete {
var schemaMetadata *preparedSchemaMetadata
if err := sql.DescsTxn(ctx, p.ExecCfg(), func(
ctx context.Context, txn *kv.Txn, descsCol *descs.Collection,
) error {
var preparedDetails jobspb.ImportDetails
schemaMetadata = &preparedSchemaMetadata{
newSchemaIDToName: make(map[descpb.ID]string),
oldSchemaIDToName: make(map[descpb.ID]string),
}
var err error
curDetails := details
if len(details.Schemas) != 0 {
schemaMetadata, err = r.prepareSchemasForIngestion(ctx, p, curDetails, txn, descsCol)
if err != nil {
return err
}
curDetails = schemaMetadata.schemaPreparedDetails
}
if r.settings.Version.IsActive(ctx, clusterversion.PublicSchemasWithDescriptors) {
// In 22.1, the Public schema should always be present in the database.
// Make sure it is part of schemaMetadata, it is not guaranteed to
// be added in prepareSchemasForIngestion if we're not importing any
// schemas.
// The Public schema will not change in the database so both the
// oldSchemaIDToName and newSchemaIDToName entries will be the
// same for the Public schema.
_, dbDesc, err := descsCol.GetImmutableDatabaseByID(ctx, txn, details.ParentID, tree.DatabaseLookupFlags{Required: true})
if err != nil {
return err
}
schemaMetadata.oldSchemaIDToName[dbDesc.GetSchemaID(tree.PublicSchema)] = tree.PublicSchema
schemaMetadata.newSchemaIDToName[dbDesc.GetSchemaID(tree.PublicSchema)] = tree.PublicSchema
}
preparedDetails, err = r.prepareTablesForIngestion(ctx, p, curDetails, txn, descsCol,
schemaMetadata)
if err != nil {
return err
}
// Telemetry for multi-region.
for _, table := range preparedDetails.Tables {
_, dbDesc, err := descsCol.GetImmutableDatabaseByID(
ctx, txn, table.Desc.GetParentID(), tree.DatabaseLookupFlags{Required: true})
if err != nil {
return err
}
if dbDesc.IsMultiRegion() {
telemetry.Inc(sqltelemetry.ImportIntoMultiRegionDatabaseCounter)
}
}
// Update the job details now that the schemas and table descs have
// been "prepared".
return r.job.Update(ctx, txn, func(
txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater,
) error {
pl := md.Payload
*pl.GetImport() = preparedDetails
// Update the set of descriptors for later observability.
// TODO(ajwerner): Do we need this idempotence test?
prev := md.Payload.DescriptorIDs
if prev == nil {
var descriptorIDs []descpb.ID
for _, schema := range preparedDetails.Schemas {
descriptorIDs = append(descriptorIDs, schema.Desc.GetID())
}
for _, table := range preparedDetails.Tables {
descriptorIDs = append(descriptorIDs, table.Desc.GetID())
}
pl.DescriptorIDs = descriptorIDs
}
ju.UpdatePayload(pl)
return nil
})
}); err != nil {
return err
}
// Run the queued job which updates the database descriptor to contain the
// newly created schemas.
// NB: Seems like the registry eventually adopts the job anyway, but this
// is in keeping with the semantics we use when creating a schema during
// sql execution. Namely, queue job in the txn which creates the schema
// desc and run once the txn has committed.
if err := p.ExecCfg().JobRegistry.Run(ctx, p.ExecCfg().InternalExecutor,
schemaMetadata.queuedSchemaJobs); err != nil {
return err
}
// Re-initialize details after prepare step.
details = r.job.Details().(jobspb.ImportDetails)
emitImportJobEvent(ctx, p, jobs.StatusRunning, r.job)
}
// Create a mapping from schemaID to schemaName.
schemaIDToName := make(map[descpb.ID]string)
for _, i := range details.Schemas {
schemaIDToName[i.Desc.GetID()] = i.Desc.GetName()
}
for _, i := range details.Tables {
var tableName string
if i.Name != "" {
tableName = i.Name
} else if i.Desc != nil {
tableName = i.Desc.Name
} else {
return errors.New("invalid table specification")
}
// If we are importing from PGDUMP, qualify the table name with the schema
// name since we support non-public schemas.
if details.Format.Format == roachpb.IOFileFormat_PgDump {
schemaName := tree.PublicSchema
if schema, ok := schemaIDToName[i.Desc.GetUnexposedParentSchemaID()]; ok {
schemaName = schema
}
tableName = fmt.Sprintf("%s.%s", schemaName, tableName)
}
tables[tableName] = &execinfrapb.ReadImportDataSpec_ImportTable{
Desc: i.Desc,
TargetCols: i.TargetCols,
}
}
}
typeDescs := make([]*descpb.TypeDescriptor, len(details.Types))
for i, t := range details.Types {
typeDescs[i] = t.Desc
}
// If details.Walltime is still 0, then it was not set during
// `prepareTablesForIngestion`. This indicates that we are in an IMPORT INTO,
// and that the walltime was not set in a previous run of IMPORT.
//
// In the case of importing into existing tables we must wait for all nodes
// to see the same version of the updated table descriptor, after which we
// shall choose a ts to import from.
if details.Walltime == 0 {
// Now that we know all the tables are offline, pick a walltime at which we
// will write.
details.Walltime = p.ExecCfg().Clock.Now().WallTime
// Check if the tables being imported into are starting empty, in which
// case we can cheaply clear-range instead of revert-range to clean up.
for i := range details.Tables {
if !details.Tables[i].IsNew {
tblDesc := tabledesc.NewBuilder(details.Tables[i].Desc).BuildImmutableTable()
tblSpan := tblDesc.TableSpan(p.ExecCfg().Codec)
res, err := p.ExecCfg().DB.Scan(ctx, tblSpan.Key, tblSpan.EndKey, 1 /* maxRows */)
if err != nil {
return errors.Wrap(err, "checking if existing table is empty")
}
details.Tables[i].WasEmpty = len(res) == 0
}
}
if err := r.job.SetDetails(ctx, nil /* txn */, details); err != nil {
return err
}
}
procsPerNode := int(processorsPerNode.Get(&p.ExecCfg().Settings.SV))
res, err := ingestWithRetry(ctx, p, r.job, tables, typeDescs, files, format, details.Walltime,
r.testingKnobs.alwaysFlushJobProgress, procsPerNode)
if err != nil {
return err
}
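// Split the ingested entry counts into rows (primary-index entries) and
// secondary-index entries using the BulkOpSummary ID encoding.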
pkIDs := make(map[uint64]struct{}, len(details.Tables))
for _, t := range details.Tables {
pkIDs[roachpb.BulkOpSummaryID(uint64(t.Desc.ID), uint64(t.Desc.PrimaryIndex.ID))] = struct{}{}
}
r.res.DataSize = res.DataSize
for id, count := range res.EntryCounts {
if _, ok := pkIDs[id]; ok {
r.res.Rows += count
} else {
r.res.IndexEntries += count
}
}
if r.testingKnobs.afterImport != nil {
if err := r.testingKnobs.afterImport(r.res); err != nil {
return err
}
}
if err := p.ExecCfg().JobRegistry.CheckPausepoint("import.after_ingest"); err != nil {
return err
}
if err := r.checkVirtualConstraints(ctx, p.ExecCfg(), r.job); err != nil {
return err
}
// If the table being imported into references UDTs, ensure that a concurrent
// schema change on any of the typeDescs has not modified the type descriptor. If
// it has, it is unsafe to import the data and we fail the import job.
if err := r.checkForUDTModification(ctx, p.ExecCfg()); err != nil {
return err
}
if err := r.publishSchemas(ctx, p.ExecCfg()); err != nil {
return err
}
if err := r.publishTables(ctx, p.ExecCfg(), res); err != nil {
return err
}
// As of 21.2 we do not write a protected timestamp record during IMPORT INTO.
// In case of a mixed version cluster with 21.1 and 21.2 nodes, it is possible
// that the job was planned on an older node and then resumed on a 21.2 node.
// Thus, we still need to clear the timestamp record that was written when the
// IMPORT INTO was planned on the older node.
//
// TODO(adityamaru): Remove in 22.1.
if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
return r.releaseProtectedTimestamp(ctx, txn, p.ExecCfg().ProtectedTimestampProvider)
}); err != nil {
log.Errorf(ctx, "failed to release protected timestamp: %v", err)
}
emitImportJobEvent(ctx, p, jobs.StatusSucceeded, r.job)
addToFileFormatTelemetry(details.Format.Format.String(), "succeeded")
telemetry.CountBucketed("import.rows", r.res.Rows)
const mb = 1 << 20
sizeMb := r.res.DataSize / mb
telemetry.CountBucketed("import.size-mb", sizeMb)
sec := int64(timeutil.Since(timeutil.FromUnixMicros(r.job.Payload().StartedMicros)).Seconds())
var mbps int64
if sec > 0 {
mbps = mb / sec
}
telemetry.CountBucketed("import.duration-sec.succeeded", sec)
telemetry.CountBucketed("import.speed-mbps", mbps)
// Tiny imports may skew throughput numbers due to overhead.
if sizeMb > 10 {
telemetry.CountBucketed("import.speed-mbps.over10mb", mbps)
}
return nil
}
// prepareTablesForIngestion prepares table descriptors for the ingestion
// step of import. The descriptors are in an IMPORTING state (offline) on
// successful completion of this method.
func (r *importResumer) prepareTablesForIngestion(
ctx context.Context,
p sql.JobExecContext,
details jobspb.ImportDetails,
txn *kv.Txn,
descsCol *descs.Collection,
schemaMetadata *preparedSchemaMetadata,
) (jobspb.ImportDetails, error) {
importDetails := details
importDetails.Tables = make([]jobspb.ImportDetails_Table, len(details.Tables))
newSchemaAndTableNameToIdx := make(map[string]int, len(importDetails.Tables))
var hasExistingTables bool
var err error
var newTableDescs []jobspb.ImportDetails_Table
var desc *descpb.TableDescriptor
for i, table := range details.Tables {
if !table.IsNew {
desc, err = prepareExistingTablesForIngestion(ctx, txn, descsCol, table.Desc)
if err != nil {
return importDetails, err
}
importDetails.Tables[i] = jobspb.ImportDetails_Table{
Desc: desc, Name: table.Name,
SeqVal: table.SeqVal,
IsNew: table.IsNew,
TargetCols: table.TargetCols,
}
hasExistingTables = true
} else {
// PGDUMP imports support non-public schemas.
// For the purpose of disambiguation we must take the schema into
// account when constructing the newSchemaAndTableNameToIdx map.
// At this point the table descriptor's parent schema ID has not yet been
// remapped to the newly generated schema ID.
key, err := constructSchemaAndTableKey(ctx, table.Desc, schemaMetadata.oldSchemaIDToName, p.ExecCfg().Settings.Version)
if err != nil {
return importDetails, err
}
newSchemaAndTableNameToIdx[key.String()] = i
// Make a deep copy of the table descriptor so that rewrites do not
// partially clobber the descriptor stored in details.
newTableDescs = append(newTableDescs,
*protoutil.Clone(&table).(*jobspb.ImportDetails_Table))
}
}
// Prepare the table descriptors for newly created tables being imported
// into.
//
// TODO(adityamaru): This is still unnecessarily complicated. If we can get
// the new table desc preparation to work on a per desc basis, rather than
// requiring all the newly created descriptors, then this can look like the
// call to prepareExistingTablesForIngestion. Currently, FK references
// misbehave when I tried to write the desc one at a time.
if len(newTableDescs) != 0 {
res, err := prepareNewTablesForIngestion(
ctx, txn, descsCol, p, newTableDescs, importDetails.ParentID, schemaMetadata.schemaRewrites)
if err != nil {
return importDetails, err
}
for _, desc := range res {
key, err := constructSchemaAndTableKey(ctx, desc, schemaMetadata.newSchemaIDToName, p.ExecCfg().Settings.Version)
if err != nil {
return importDetails, err
}
i := newSchemaAndTableNameToIdx[key.String()]
table := details.Tables[i]
importDetails.Tables[i] = jobspb.ImportDetails_Table{
Desc: desc,
Name: table.Name,
SeqVal: table.SeqVal,
IsNew: table.IsNew,
TargetCols: table.TargetCols,
}
}
}
importDetails.PrepareComplete = true
// If we do not have pending schema changes on existing descriptors we can
// choose our Walltime (to IMPORT from) immediately. Otherwise, we have to
// wait for all nodes to see the same descriptor version before doing so.
if !hasExistingTables {
importDetails.Walltime = p.ExecCfg().Clock.Now().WallTime
} else {
importDetails.Walltime = 0
}
return importDetails, nil
}
// prepareExistingTablesForIngestion prepares descriptors for existing tables
// being imported into.
func prepareExistingTablesForIngestion(
ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, desc *descpb.TableDescriptor,
) (*descpb.TableDescriptor, error) {
if len(desc.Mutations) > 0 {
return nil, errors.Errorf("cannot IMPORT INTO a table with schema changes in progress -- try again later (pending mutation %s)", desc.Mutations[0].String())
}
// Note that desc is just used to verify that the version matches.
importing, err := descsCol.GetMutableTableVersionByID(ctx, desc.ID, txn)
if err != nil {
return nil, err
}
// Ensure that the version of the table has not been modified since this
// job was created.
if got, exp := importing.Version, desc.Version; got != exp {
return nil, errors.Errorf("another operation is currently operating on the table")
}
// Take the table offline for import.
// TODO(dt): audit everywhere we get table descs (leases or otherwise) to
// ensure that filtering by state handles IMPORTING correctly.
importing.SetOffline("importing")
// TODO(dt): de-validate all the FKs.
if err := descsCol.WriteDesc(
ctx, false /* kvTrace */, importing, txn,
); err != nil {
return nil, err
}
return importing.TableDesc(), nil
}
// prepareNewTablesForIngestion prepares descriptors for newly created
// tables being imported into.
func prepareNewTablesForIngestion(
ctx context.Context,
txn *kv.Txn,
descsCol *descs.Collection,
p sql.JobExecContext,
importTables []jobspb.ImportDetails_Table,
parentID descpb.ID,
schemaRewrites jobspb.DescRewriteMap,
) ([]*descpb.TableDescriptor, error) {
newMutableTableDescriptors := make([]*tabledesc.Mutable, len(importTables))
for i := range importTables {
newMutableTableDescriptors[i] = tabledesc.NewBuilder(importTables[i].Desc).BuildCreatedMutableTable()
}
// Verification steps have passed, generate a new table ID if we're
// restoring. We do this last because we want to avoid calling
// GenerateUniqueDescID if there's any kind of error above.
// Reserving a table ID now means we can avoid the rekey work during restore.
//
// schemaRewrites may contain information which is used in rewrite.TableDescs
// to rewrite the parent schema ID in the table desc to point to the correct
// schema ID.
tableRewrites := schemaRewrites
if tableRewrites == nil {
tableRewrites = make(jobspb.DescRewriteMap)
}
seqVals := make(map[descpb.ID]int64, len(importTables))
for _, tableDesc := range importTables {
id, err := descidgen.GenerateUniqueDescID(ctx, p.ExecCfg().DB, p.ExecCfg().Codec)
if err != nil {
return nil, err
}
oldParentSchemaID := tableDesc.Desc.GetUnexposedParentSchemaID()
parentSchemaID := oldParentSchemaID
if rw, ok := schemaRewrites[oldParentSchemaID]; ok {
parentSchemaID = rw.ID
}
tableRewrites[tableDesc.Desc.ID] = &jobspb.DescriptorRewrite{
ID: id,
ParentSchemaID: parentSchemaID,
ParentID: parentID,
}
seqVals[id] = tableDesc.SeqVal
}
if err := rewrite.TableDescs(
newMutableTableDescriptors, tableRewrites, "",
); err != nil {
return nil, err
}
// After all of the IDs have been remapped, ensure that there aren't any name
// collisions with any importing tables.
for i := range newMutableTableDescriptors {
tbl := newMutableTableDescriptors[i]
err := descsCol.Direct().CheckObjectCollision(
ctx,
txn,
tbl.GetParentID(),
tbl.GetParentSchemaID(),
tree.NewUnqualifiedTableName(tree.Name(tbl.GetName())),
)
if err != nil {
return nil, err
}
}
// tableDescs contains the same slice as newMutableTableDescriptors but
// as tabledesc.TableDescriptor.
tableDescs := make([]catalog.TableDescriptor, len(newMutableTableDescriptors))
for i := range tableDescs {
newMutableTableDescriptors[i].SetOffline("importing")
tableDescs[i] = newMutableTableDescriptors[i]
}
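// Collect the initial sequence values for the imported tables as KVs so they
// can be written along with the new descriptors below.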
var seqValKVs []roachpb.KeyValue
for _, desc := range newMutableTableDescriptors {
if v, ok := seqVals[desc.GetID()]; ok && v != 0 {
key, val, err := sql.MakeSequenceKeyVal(p.ExecCfg().Codec, desc, v, false)
if err != nil {
return nil, err
}
kv := roachpb.KeyValue{Key: key}
kv.Value.SetInt(val)
seqValKVs = append(seqValKVs, kv)
}
}
// Write the new TableDescriptors and flip the namespace entries over to
// them. After this call, any queries on a table will be served by the newly
// imported data.
if err := ingesting.WriteDescriptors(ctx, p.ExecCfg().Codec, txn, p.User(), descsCol,
nil /* databases */, nil, /* schemas */
tableDescs, nil, tree.RequestedDescriptors, seqValKVs, "" /* inheritParentName */); err != nil {
return nil, errors.Wrapf(err, "creating importTables")
}
newPreparedTableDescs := make([]*descpb.TableDescriptor, len(newMutableTableDescriptors))
for i := range newMutableTableDescriptors {
newPreparedTableDescs[i] = newMutableTableDescriptors[i].TableDesc()
}
return newPreparedTableDescs, nil
}
// prepareSchemasForIngestion is responsible for assigning the created schema
// descriptors actual IDs, updating the parent DB with references to the new
// schemas and writing the schema descriptors to disk.
func (r *importResumer) prepareSchemasForIngestion(
ctx context.Context,
p sql.JobExecContext,
details jobspb.ImportDetails,
txn *kv.Txn,
descsCol *descs.Collection,
) (*preparedSchemaMetadata, error) {
schemaMetadata := &preparedSchemaMetadata{
schemaPreparedDetails: details,
newSchemaIDToName: make(map[descpb.ID]string),
oldSchemaIDToName: make(map[descpb.ID]string),
}
schemaMetadata.schemaPreparedDetails.Schemas = make([]jobspb.ImportDetails_Schema,
len(details.Schemas))
desc, err := descsCol.GetMutableDescriptorByID(ctx, txn, details.ParentID)
if err != nil {
return nil, err
}
dbDesc, ok := desc.(*dbdesc.Mutable)
if !ok {
return nil, errors.Newf("expected ID %d to refer to the database being imported into",
details.ParentID)
}
schemaMetadata.schemaRewrites = make(jobspb.DescRewriteMap)
mutableSchemaDescs := make([]*schemadesc.Mutable, 0)
for _, desc := range details.Schemas {
schemaMetadata.oldSchemaIDToName[desc.Desc.GetID()] = desc.Desc.GetName()
newMutableSchemaDescriptor := schemadesc.NewBuilder(desc.Desc).BuildCreatedMutable().(*schemadesc.Mutable)
// Verification steps have passed, generate a new schema ID. We do this
// last because we want to avoid calling GenerateUniqueDescID if there's
// any kind of error in the prior stages of import.
id, err := descidgen.GenerateUniqueDescID(ctx, p.ExecCfg().DB, p.ExecCfg().Codec)
if err != nil {
return nil, err
}
newMutableSchemaDescriptor.Version = 1
newMutableSchemaDescriptor.ID = id
mutableSchemaDescs = append(mutableSchemaDescs, newMutableSchemaDescriptor)
schemaMetadata.newSchemaIDToName[id] = newMutableSchemaDescriptor.GetName()
// Update the parent database with this schema information.
dbDesc.AddSchemaToDatabase(newMutableSchemaDescriptor.Name,
descpb.DatabaseDescriptor_SchemaInfo{ID: newMutableSchemaDescriptor.ID})
schemaMetadata.schemaRewrites[desc.Desc.ID] = &jobspb.DescriptorRewrite{
ID: id,
}
}
// Queue a job to write the updated database descriptor.
schemaMetadata.queuedSchemaJobs, err = writeNonDropDatabaseChange(ctx, dbDesc, txn, descsCol, p,
fmt.Sprintf("updating parent database %s when importing new schemas", dbDesc.GetName()))
if err != nil {
return nil, err
}
// Finally create the schemas on disk.
for i, mutDesc := range mutableSchemaDescs {
nameKey := catalogkeys.MakeSchemaNameKey(p.ExecCfg().Codec, dbDesc.ID, mutDesc.GetName())
err = createSchemaDescriptorWithID(ctx, nameKey, mutDesc.ID, mutDesc, p, descsCol, txn)
if err != nil {
return nil, err
}
schemaMetadata.schemaPreparedDetails.Schemas[i] = jobspb.ImportDetails_Schema{
Desc: mutDesc.SchemaDesc(),
}
}
return schemaMetadata, err
}
// createSchemaDescriptorWithID writes a schema descriptor with `id` to disk.
func createSchemaDescriptorWithID(
ctx context.Context,
idKey roachpb.Key,
id descpb.ID,
descriptor catalog.Descriptor,
p sql.JobExecContext,
descsCol *descs.Collection,
txn *kv.Txn,
) error {
if descriptor.GetID() == descpb.InvalidID {
return errors.AssertionFailedf("cannot create descriptor with an empty ID: %v", descriptor)
}
if descriptor.GetID() != id {
return errors.AssertionFailedf("cannot create descriptor with an ID %v; expected ID %v; descriptor %v",
id, descriptor.GetID(), descriptor)
}
b := &kv.Batch{}
descID := descriptor.GetID()
if p.ExtendedEvalContext().Tracing.KVTracingEnabled() {
log.VEventf(ctx, 2, "CPut %s -> %d", idKey, descID)
}
b.CPut(idKey, descID, nil)
if err := descsCol.Direct().WriteNewDescToBatch(
ctx,
p.ExtendedEvalContext().Tracing.KVTracingEnabled(),
b,
descriptor,
); err != nil {
return err
}
mutDesc, ok := descriptor.(catalog.MutableDescriptor)
if !ok {
return errors.Newf("unexpected type %T when creating descriptor", descriptor)
}
switch mutDesc.(type) {
case *schemadesc.Mutable:
if err := descsCol.AddUncommittedDescriptor(mutDesc); err != nil {
return err
}
default:
return errors.Newf("unexpected type %T when creating descriptor", mutDesc)
}
return txn.Run(ctx, b)
}
// parseBundleSchemaIfNeeded parses dump files (PGDUMP, MYSQLDUMP) for DDL
// statements and creates the relevant database, schema, table and type
// descriptors. Data from the dump files is ingested into these descriptors in
// the next phase of the import.
func (r *importResumer) parseBundleSchemaIfNeeded(ctx context.Context, phs interface{}) error {
p := phs.(sql.JobExecContext)
seqVals := make(map[descpb.ID]int64)
details := r.job.Details().(jobspb.ImportDetails)
skipFKs := details.SkipFKs
parentID := details.ParentID
files := details.URIs
format := details.Format
owner := r.job.Payload().UsernameProto.Decode()
p.SessionDataMutatorIterator().SetSessionDefaultIntSize(details.DefaultIntSize)
if details.ParseBundleSchema {
var span *tracing.Span
ctx, span = tracing.ChildSpan(ctx, "import-parsing-bundle-schema")
defer span.Finish()
if err := r.job.RunningStatus(ctx, nil /* txn */, func(_ context.Context, _ jobspb.Details) (jobs.RunningStatus, error) {
return runningStatusImportBundleParseSchema, nil
}); err != nil {
return errors.Wrapf(err, "failed to update running status of job %d", errors.Safe(r.job.ID()))
}
var dbDesc catalog.DatabaseDescriptor
{
if err := sql.DescsTxn(ctx, p.ExecCfg(), func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection,
) (err error) {
_, dbDesc, err = descriptors.GetImmutableDatabaseByID(ctx, txn, parentID, tree.DatabaseLookupFlags{
Required: true,
AvoidLeased: true,
})
if err != nil {
return err
}
return err
}); err != nil {
return err
}
}
var schemaDescs []*schemadesc.Mutable
var tableDescs []*tabledesc.Mutable
var err error
walltime := p.ExecCfg().Clock.Now().WallTime
if tableDescs, schemaDescs, err = parseAndCreateBundleTableDescs(
ctx, p, details, seqVals, skipFKs, dbDesc, files, format, walltime, owner,
r.job.ID()); err != nil {
return err
}
schemaDetails := make([]jobspb.ImportDetails_Schema, len(schemaDescs))
for i, schemaDesc := range schemaDescs {
schemaDetails[i] = jobspb.ImportDetails_Schema{Desc: schemaDesc.SchemaDesc()}
}
details.Schemas = schemaDetails
tableDetails := make([]jobspb.ImportDetails_Table, len(tableDescs))
for i, tableDesc := range tableDescs {
tableDetails[i] = jobspb.ImportDetails_Table{
Name: tableDesc.GetName(),
Desc: tableDesc.TableDesc(),
SeqVal: seqVals[tableDescs[i].ID],
IsNew: true,
}
}
details.Tables = tableDetails
for _, tbl := range tableDescs {
// For reasons relating to #37691, we disallow user defined types in
// the standard IMPORT case.
for _, col := range tbl.Columns {
if col.Type.UserDefined() {
return errors.Newf("IMPORT cannot be used with user defined types; use IMPORT INTO instead")
}
}
}
// Prevent job from redoing schema parsing and table desc creation
// on subsequent resumptions.
details.ParseBundleSchema = false
if err := r.job.SetDetails(ctx, nil /* txn */, details); err != nil {
return err
}
}
return nil
}
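// getPublicSchemaDescForDatabase returns the public schema descriptor for the
// given database. On clusters that predate public schema descriptors it
// returns the synthetic public schema instead of performing a lookup.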
func getPublicSchemaDescForDatabase(
ctx context.Context, execCfg *sql.ExecutorConfig, db catalog.DatabaseDescriptor,
) (scDesc catalog.SchemaDescriptor, err error) {
if !execCfg.Settings.Version.IsActive(ctx, clusterversion.PublicSchemasWithDescriptors) {
return schemadesc.GetPublicSchema(), err
}
if err := sql.DescsTxn(ctx, execCfg, func(
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection,
) error {
publicSchemaID := db.GetSchemaID(tree.PublicSchema)
scDesc, err = descriptors.GetImmutableSchemaByID(ctx, txn, publicSchemaID, tree.SchemaLookupFlags{Required: true})
return err
}); err != nil {
return nil, err
}
return scDesc, nil
}
// parseAndCreateBundleTableDescs parses and creates the table
// descriptors for bundle formats.
func parseAndCreateBundleTableDescs(
ctx context.Context,
p sql.JobExecContext,
details jobspb.ImportDetails,
seqVals map[descpb.ID]int64,
skipFKs bool,
parentDB catalog.DatabaseDescriptor,
files []string,
format roachpb.IOFileFormat,
walltime int64,
owner security.SQLUsername,
jobID jobspb.JobID,
) ([]*tabledesc.Mutable, []*schemadesc.Mutable, error) {
var schemaDescs []*schemadesc.Mutable
var tableDescs []*tabledesc.Mutable
var tableName string
// A single table entry in the import job details when importing a bundle format
// indicates that we are performing a single table import.
// This info is populated during the planning phase.
if len(details.Tables) > 0 {
tableName = details.Tables[0].Name
}
store, err := p.ExecCfg().DistSQLSrv.ExternalStorageFromURI(ctx, files[0], p.User())
if err != nil {
return tableDescs, schemaDescs, err
}
defer store.Close()
raw, err := store.ReadFile(ctx, "")
if err != nil {
return tableDescs, schemaDescs, err
}
defer raw.Close(ctx)
reader, err := decompressingReader(ioctx.ReaderCtxAdapter(ctx, raw), files[0], format.Compression)
if err != nil {
return tableDescs, schemaDescs, err
}
defer reader.Close()
fks := fkHandler{skip: skipFKs, allowed: true, resolver: fkResolver{
tableNameToDesc: make(map[string]*tabledesc.Mutable),
}}
switch format.Format {
case roachpb.IOFileFormat_Mysqldump:
id, err := descidgen.PeekNextUniqueDescID(ctx, p.ExecCfg().DB, p.ExecCfg().Codec)
if err != nil {
return tableDescs, schemaDescs, err
}
fks.resolver.format.Format = roachpb.IOFileFormat_Mysqldump
evalCtx := &p.ExtendedEvalContext().EvalContext
tableDescs, err = readMysqlCreateTable(
ctx, reader, evalCtx, p, id, parentDB, tableName, fks,
seqVals, owner, walltime,
)
if err != nil {
return tableDescs, schemaDescs, err
}
case roachpb.IOFileFormat_PgDump:
fks.resolver.format.Format = roachpb.IOFileFormat_PgDump
evalCtx := &p.ExtendedEvalContext().EvalContext
// Setup a logger to handle unsupported DDL statements in the PGDUMP file.
unsupportedStmtLogger := makeUnsupportedStmtLogger(ctx, p.User(), int64(jobID),
format.PgDump.IgnoreUnsupported, format.PgDump.IgnoreUnsupportedLog, schemaParsing,
p.ExecCfg().DistSQLSrv.ExternalStorage)
tableDescs, schemaDescs, err = readPostgresCreateTable(ctx, reader, evalCtx, p, tableName,
parentDB, walltime, fks, int(format.PgDump.MaxRowSize), owner, unsupportedStmtLogger)
logErr := unsupportedStmtLogger.flush()
if logErr != nil {
return nil, nil, logErr
}
default:
return tableDescs, schemaDescs, errors.Errorf(
"non-bundle format %q does not support reading schemas", format.Format.String())
}
if err != nil {
return tableDescs, schemaDescs, err
}
if tableDescs == nil && len(details.Tables) > 0 {
return tableDescs, schemaDescs, errors.Errorf("table definition not found for %q", tableName)
}
return tableDescs, schemaDescs, err
}
// publishTables updates the status of imported tables from OFFLINE to PUBLIC.
func (r *importResumer) publishTables(
ctx context.Context, execCfg *sql.ExecutorConfig, res roachpb.BulkOpSummary,
) error {
details := r.job.Details().(jobspb.ImportDetails)
// Tables should only be published once.
if details.TablesPublished {
return nil
}
// Write stub statistics for new tables created during the import. This should
// be sufficient until the CREATE STATISTICS run finishes.
r.writeStubStatisticsForImportedTables(ctx, execCfg, res)
log.Event(ctx, "making tables live")
err := sql.DescsTxn(ctx, execCfg, func(
ctx context.Context, txn *kv.Txn, descsCol *descs.Collection,
) error {
b := txn.NewBatch()
for _, tbl := range details.Tables {
newTableDesc, err := descsCol.GetMutableTableVersionByID(ctx, tbl.Desc.ID, txn)
if err != nil {
return err
}
newTableDesc.SetPublic()
if !tbl.IsNew {
// NB: This is not using AllNonDropIndexes or directly mutating the
// constraints returned by the other usual helpers because we need to
// replace the `OutboundFKs` and `Checks` slices of newTableDesc with copies
// that we can mutate. We need to do that because newTableDesc is a shallow
// copy of tbl.Desc that we'll be asserting is the current version when we
// CPut below.
//
// Set FK constraints to unvalidated before publishing the table imported
// into.
newTableDesc.OutboundFKs = make([]descpb.ForeignKeyConstraint, len(newTableDesc.OutboundFKs))
copy(newTableDesc.OutboundFKs, tbl.Desc.OutboundFKs)
for i := range newTableDesc.OutboundFKs {
newTableDesc.OutboundFKs[i].Validity = descpb.ConstraintValidity_Unvalidated
}
// Set CHECK constraints to unvalidated before publishing the table imported into.
for _, c := range newTableDesc.AllActiveAndInactiveChecks() {
c.Validity = descpb.ConstraintValidity_Unvalidated
}
}
// TODO(dt): re-validate any FKs?
if err := descsCol.WriteDescToBatch(
ctx, false /* kvTrace */, newTableDesc, b,
); err != nil {
return errors.Wrapf(err, "publishing table %d", newTableDesc.ID)
}
}
if err := txn.Run(ctx, b); err != nil {
return errors.Wrap(err, "publishing tables")
}
// Update job record to mark tables published state as complete.
details.TablesPublished = true
err := r.job.SetDetails(ctx, txn, details)
if err != nil {
return errors.Wrap(err, "updating job details after publishing tables")
}
return nil
})
if err != nil {
return err
}
// Initiate a run of CREATE STATISTICS. We don't know the actual number of
// rows affected per table, so we use a large number because we want to make
// sure that stats always get created/refreshed here.
for i := range details.Tables {
desc := tabledesc.NewBuilder(details.Tables[i].Desc).BuildImmutableTable()
execCfg.StatsRefresher.NotifyMutation(desc, math.MaxInt32 /* rowsAffected */)
}
return nil
}
// writeStubStatisticsForImportedTables writes "stub" statistics for new tables
// created during an import.
func (r *importResumer) writeStubStatisticsForImportedTables(
ctx context.Context, execCfg *sql.ExecutorConfig, res roachpb.BulkOpSummary,
) {
details := r.job.Details().(jobspb.ImportDetails)
for _, tbl := range details.Tables {
if tbl.IsNew {
desc := tabledesc.NewBuilder(tbl.Desc).BuildImmutableTable()
id := roachpb.BulkOpSummaryID(uint64(desc.GetID()), uint64(desc.GetPrimaryIndexID()))
rowCount := uint64(res.EntryCounts[id])
// TODO(michae2): collect distinct and null counts during import.
distinctCount := uint64(float64(rowCount) * memo.UnknownDistinctCountRatio)
nullCount := uint64(float64(rowCount) * memo.UnknownNullCountRatio)
avgRowSize := uint64(memo.UnknownAvgRowSize)
// Because we don't yet have real distinct and null counts, only produce
// single-column stats to avoid the appearance of perfectly correlated
// columns.
multiColEnabled := false
statistics, err := sql.StubTableStats(desc, jobspb.ImportStatsName, multiColEnabled)
if err == nil {
for _, statistic := range statistics {
statistic.RowCount = rowCount
statistic.DistinctCount = distinctCount
statistic.NullCount = nullCount
statistic.AvgSize = avgRowSize
}
// TODO(michae2): parallelize insertion of statistics.
err = stats.InsertNewStats(ctx, execCfg.Settings, execCfg.InternalExecutor, nil /* txn */, statistics)
}
if err != nil {
// Failure to create statistics should not fail the entire import.
log.Warningf(
ctx, "error while creating statistics during import of %q: %v",
desc.GetName(), err,
)
}
}
}
}
// publishSchemas updates the status of imported schemas from OFFLINE to PUBLIC.
func (r *importResumer) publishSchemas(ctx context.Context, execCfg *sql.ExecutorConfig) error {
details := r.job.Details().(jobspb.ImportDetails)
// Schemas should only be published once.
if details.SchemasPublished {
return nil
}
log.Event(ctx, "making schemas live")
return sql.DescsTxn(ctx, execCfg, func(
ctx context.Context, txn *kv.Txn, descsCol *descs.Collection,
) error {
b := txn.NewBatch()
for _, schema := range details.Schemas {
newDesc, err := descsCol.GetMutableDescriptorByID(ctx, txn, schema.Desc.GetID())
if err != nil {
return err
}
newSchemaDesc, ok := newDesc.(*schemadesc.Mutable)
if !ok {
return errors.Newf("expected schema descriptor with ID %v, got %v",
schema.Desc.GetID(), newDesc)
}
newSchemaDesc.SetPublic()
if err := descsCol.WriteDescToBatch(
ctx, false /* kvTrace */, newSchemaDesc, b,
); err != nil {
return errors.Wrapf(err, "publishing schema %d", newSchemaDesc.ID)
}
}
if err := txn.Run(ctx, b); err != nil {
return errors.Wrap(err, "publishing schemas")
}
		// Update the job record to mark the schemas-published state as complete.
details.SchemasPublished = true
err := r.job.SetDetails(ctx, txn, details)
if err != nil {
return errors.Wrap(err, "updating job details after publishing schemas")
}
return nil
})
}
// checkVirtualConstraints checks constraints that are enforced via runtime
// checks, such as uniqueness checks that are not directly backed by an index.
func (*importResumer) checkVirtualConstraints(
ctx context.Context, execCfg *sql.ExecutorConfig, job *jobs.Job,
) error {
for _, tbl := range job.Details().(jobspb.ImportDetails).Tables {
desc := tabledesc.NewBuilder(tbl.Desc).BuildExistingMutableTable()
desc.SetPublic()
if sql.HasVirtualUniqueConstraints(desc) {
if err := job.RunningStatus(ctx, nil /* txn */, func(_ context.Context, _ jobspb.Details) (jobs.RunningStatus, error) {
return jobs.RunningStatus(fmt.Sprintf("re-validating %s", desc.GetName())), nil
}); err != nil {
return errors.Wrapf(err, "failed to update running status of job %d", errors.Safe(job.ID()))
}
}
if err := execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
ie := execCfg.InternalExecutorFactory(ctx, sql.NewFakeSessionData(execCfg.SV()))
return ie.WithSyntheticDescriptors([]catalog.Descriptor{desc}, func() error {
return sql.RevalidateUniqueConstraintsInTable(ctx, txn, ie, desc)
})
}); err != nil {
return err
}
}
return nil
}
// checkForUDTModification checks whether any of the types referenced by the
// table being imported into have been modified incompatibly since they were
// read during import planning. If they have, it may be unsafe to continue
// with the import since we could be ingesting data that is no longer valid
// for the type.
//
// E.g.: renaming an enum value mid-import could result in the import ingesting a
// value that is no longer valid.
//
// TODO(SQL Schema): This method might be unnecessarily aggressive in failing
// the import. The semantics of what concurrent type changes are/are not safe
// during an IMPORT still need to be ironed out. Once they are, we can make this
// method more conservative in what it uses to deem a type change dangerous. At
// the time of writing, changes to privileges and back-references are supported.
// Additions of new values could be supported but are not. Renaming of logical
// enum values or removal of enum values will need to forever remain
// incompatible.
func (r *importResumer) checkForUDTModification(
ctx context.Context, execCfg *sql.ExecutorConfig,
) error {
details := r.job.Details().(jobspb.ImportDetails)
if details.Types == nil {
return nil
}
// typeDescsAreEquivalent returns true if a and b are the same types save
// for the version, modification time, privileges, or the set of referencing
// descriptors.
typeDescsAreEquivalent := func(a, b *descpb.TypeDescriptor) (bool, error) {
clearIgnoredFields := func(d *descpb.TypeDescriptor) *descpb.TypeDescriptor {
d = protoutil.Clone(d).(*descpb.TypeDescriptor)
d.ModificationTime = hlc.Timestamp{}
d.Privileges = nil
d.Version = 0
d.ReferencingDescriptorIDs = nil
return d
}
aData, err := protoutil.Marshal(clearIgnoredFields(a))
if err != nil {
return false, err
}
bData, err := protoutil.Marshal(clearIgnoredFields(b))
if err != nil {
return false, err
}
return bytes.Equal(aData, bData), nil
}
// checkTypeIsEquivalent checks that the current version of the type as
// retrieved from the collection is equivalent to the previously saved
// type descriptor used by the import.
checkTypeIsEquivalent := func(
ctx context.Context, txn *kv.Txn, col *descs.Collection,
savedTypeDesc *descpb.TypeDescriptor,
) error {
typeDesc, err := col.Direct().MustGetTypeDescByID(ctx, txn, savedTypeDesc.GetID())
if err != nil {
return errors.Wrap(err, "resolving type descriptor when checking version mismatch")
}
if typeDesc.GetModificationTime() == savedTypeDesc.GetModificationTime() {
return nil
}
equivalent, err := typeDescsAreEquivalent(typeDesc.TypeDesc(), savedTypeDesc)
if err != nil {
return errors.NewAssertionErrorWithWrappedErrf(
err, "failed to check for type descriptor equivalence for type %q (%d)",
typeDesc.GetName(), typeDesc.GetID())
}
if equivalent {
return nil
}
return errors.WithHint(
errors.Newf(
"type descriptor %q (%d) has been modified, potentially incompatibly,"+
" since import planning; aborting to avoid possible corruption",
typeDesc.GetName(), typeDesc.GetID(),
),
"retrying the IMPORT operation may succeed if the operation concurrently"+
" modifying the descriptor does not reoccur during the retry attempt",
)
}
checkTypesAreEquivalent := func(
ctx context.Context, txn *kv.Txn, col *descs.Collection,
) error {
for _, savedTypeDesc := range details.Types {
if err := checkTypeIsEquivalent(
ctx, txn, col, savedTypeDesc.Desc,
); err != nil {
return err
}
}
return nil
}
return sql.DescsTxn(ctx, execCfg, checkTypesAreEquivalent)
}
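// ingestWithRetry runs the distributed import flow, retrying on transient
// (e.g. RPC) errors and re-planning when the DistSQL plan changes. Permanent
// errors are returned immediately, and out-of-space errors are surfaced as a
// pause request.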
func ingestWithRetry(
ctx context.Context,
execCtx sql.JobExecContext,
job *jobs.Job,
tables map[string]*execinfrapb.ReadImportDataSpec_ImportTable,
typeDescs []*descpb.TypeDescriptor,
from []string,
format roachpb.IOFileFormat,
walltime int64,
alwaysFlushProgress bool,
procsPerNode int,
) (roachpb.BulkOpSummary, error) {
resumerSpan := tracing.SpanFromContext(ctx)
// We retry on pretty generic failures -- any rpc error. If a worker node were
// to restart, it would produce this kind of error, but there may be other
	// errors that are also rpc errors. Don't retry too aggressively.
retryOpts := retry.Options{
MaxBackoff: 1 * time.Second,
MaxRetries: 5,
}
	// We want to retry an import if there are transient failures (e.g. worker
// nodes dying), so if we receive a retryable error, re-plan and retry the
// import.
var res roachpb.BulkOpSummary
var err error
var retryCount int32
for r := retry.StartWithCtx(ctx, retryOpts); r.Next(); {
for {
retryCount++
resumerSpan.RecordStructured(&roachpb.RetryTracingEvent{
Operation: "importResumer.ingestWithRetry",
AttemptNumber: retryCount,
RetryError: tracing.RedactAndTruncateError(err),
})
res, err = distImport(ctx, execCtx, job, tables, typeDescs, from, format, walltime,
alwaysFlushProgress, procsPerNode)
// Replanning errors should not count towards retry limits.
if err == nil || !errors.Is(err, sql.ErrPlanChanged) {
break
}
}
if err == nil {
break
}
if errors.HasType(err, &roachpb.InsufficientSpaceError{}) {
return res, jobs.MarkPauseRequestError(errors.UnwrapAll(err))
}
if joberror.IsPermanentBulkJobError(err) {
return res, err
}
// Re-load the job in order to update our progress object, which may have
// been updated by the changeFrontier processor since the flow started.
reloadedJob, reloadErr := execCtx.ExecCfg().JobRegistry.LoadClaimedJob(ctx, job.ID())
if reloadErr != nil {
if ctx.Err() != nil {
return res, ctx.Err()
}
log.Warningf(ctx, `IMPORT job %d could not reload job progress when retrying: %+v`,
int64(job.ID()), reloadErr)
} else {
job = reloadedJob
}
log.Warningf(ctx, `encountered retryable error: %+v`, err)
}
if err != nil {
return roachpb.BulkOpSummary{}, errors.Wrap(err, "exhausted retries")
}
return res, nil
}
// emitImportJobEvent emits an import job event to the event log.
func emitImportJobEvent(
ctx context.Context, p sql.JobExecContext, status jobs.Status, job *jobs.Job,
) {
var importEvent eventpb.Import
if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
return sql.LogEventForJobs(ctx, p.ExecCfg(), txn, &importEvent, int64(job.ID()),
job.Payload(), p.User(), status)
}); err != nil {
log.Warningf(ctx, "failed to log event: %v", err)
}
}
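// constructSchemaAndTableKey returns the schema-qualified name for the given
// table descriptor by resolving its parent schema ID through schemaIDToName.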
func constructSchemaAndTableKey(
ctx context.Context,
tableDesc *descpb.TableDescriptor,
schemaIDToName map[descpb.ID]string,
version clusterversion.Handle,
) (schemaAndTableName, error) {
if !version.IsActive(ctx, clusterversion.PublicSchemasWithDescriptors) {
if tableDesc.UnexposedParentSchemaID == keys.PublicSchemaIDForBackup {
return schemaAndTableName{schema: "", table: tableDesc.GetName()}, nil
}
}
schemaName, ok := schemaIDToName[tableDesc.GetUnexposedParentSchemaID()]
if !ok && schemaName != tree.PublicSchema {
return schemaAndTableName{}, errors.Newf("invalid parent schema %s with ID %d for table %s",
schemaName, tableDesc.UnexposedParentSchemaID, tableDesc.GetName())
}
return schemaAndTableName{schema: schemaName, table: tableDesc.GetName()}, nil
}
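// writeNonDropDatabaseChange queues a schema-change job for the given database
// descriptor, writes the updated descriptor to a batch, and returns the IDs of
// the queued jobs.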
func writeNonDropDatabaseChange(
ctx context.Context,
desc *dbdesc.Mutable,
txn *kv.Txn,
descsCol *descs.Collection,
p sql.JobExecContext,
jobDesc string,
) ([]jobspb.JobID, error) {
var job *jobs.Job
var err error
if job, err = createNonDropDatabaseChangeJob(p.User(), desc.ID, jobDesc, p, txn); err != nil {
return nil, err
}
queuedJob := []jobspb.JobID{job.ID()}
b := txn.NewBatch()
err = descsCol.WriteDescToBatch(
ctx,
p.ExtendedEvalContext().Tracing.KVTracingEnabled(),
desc,
b,
)
if err != nil {
return nil, err
}
return queuedJob, txn.Run(ctx, b)
}
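// createNonDropDatabaseChangeJob creates a non-cancelable schema-change job
// record for the given database within the supplied transaction.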
func createNonDropDatabaseChangeJob(
user security.SQLUsername,
databaseID descpb.ID,
jobDesc string,
p sql.JobExecContext,
txn *kv.Txn,
) (*jobs.Job, error) {
jobRecord := jobs.Record{
Description: jobDesc,
Username: user,
Details: jobspb.SchemaChangeDetails{
DescID: databaseID,
FormatVersion: jobspb.DatabaseJobFormatVersion,
},
Progress: jobspb.SchemaChangeProgress{},
NonCancelable: true,
}
jobID := p.ExecCfg().JobRegistry.MakeJobID()
return p.ExecCfg().JobRegistry.CreateJobWithTxn(
p.ExtendedEvalContext().Context,
jobRecord,
jobID,
txn,
)
}
// OnFailOrCancel is part of the jobs.Resumer interface. Removes data that has
// been committed from an import that has failed or been canceled. It does this
// by adding the table descriptors in DROP state, which causes the schema change
// stuff to delete the keys in the background.
func (r *importResumer) OnFailOrCancel(ctx context.Context, execCtx interface{}) error {
p := execCtx.(sql.JobExecContext)
// Emit to the event log that the job has started reverting.
emitImportJobEvent(ctx, p, jobs.StatusReverting, r.job)
details := r.job.Details().(jobspb.ImportDetails)
addToFileFormatTelemetry(details.Format.Format.String(), "failed")
cfg := execCtx.(sql.JobExecContext).ExecCfg()
var jobsToRunAfterTxnCommit []jobspb.JobID
if err := sql.DescsTxn(ctx, cfg, func(
ctx context.Context, txn *kv.Txn, descsCol *descs.Collection,
) error {
if err := r.dropTables(ctx, txn, descsCol, cfg); err != nil {
return err
}
// Drop all the schemas which may have been created during a bundle import.
// These schemas should now be empty as all the tables in them would be new
// tables created during the import, and therefore dropped by the above
// dropTables method. This allows us to avoid "collecting" objects in the
// schema before dropping the descriptor.
var err error
jobsToRunAfterTxnCommit, err = r.dropSchemas(ctx, txn, descsCol, cfg, p)
if err != nil {
return err
}
// TODO(adityamaru): Remove in 22.1 since we do not write PTS records during
// IMPORT INTO from 21.2+.
return r.releaseProtectedTimestamp(ctx, txn, cfg.ProtectedTimestampProvider)
}); err != nil {
return err
}
// Run any jobs which might have been queued when dropping the schemas.
// This would be a job to drop all the schemas, and a job to update the parent
// database descriptor.
if len(jobsToRunAfterTxnCommit) != 0 {
if err := p.ExecCfg().JobRegistry.Run(ctx, p.ExecCfg().InternalExecutor,
jobsToRunAfterTxnCommit); err != nil {
return errors.Wrap(err, "failed to run jobs that drop the imported schemas")
}
}
// Emit to the event log that the job has completed reverting.
emitImportJobEvent(ctx, p, jobs.StatusFailed, r.job)
return nil
}
// dropTables implements the OnFailOrCancel logic.
func (r *importResumer) dropTables(
ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, execCfg *sql.ExecutorConfig,
) error {
details := r.job.Details().(jobspb.ImportDetails)
dropTime := int64(1)
// If the prepare step of the import job was not completed then the
// descriptors do not need to be rolled back as the txn updating them never
// completed.
if !details.PrepareComplete {
return nil
}
var revert []catalog.TableDescriptor
var empty []catalog.TableDescriptor
for _, tbl := range details.Tables {
if !tbl.IsNew {
desc, err := descsCol.GetMutableTableVersionByID(ctx, tbl.Desc.ID, txn)
if err != nil {
return err
}
imm := desc.ImmutableCopy().(catalog.TableDescriptor)
if tbl.WasEmpty {
empty = append(empty, imm)
} else {
revert = append(revert, imm)
}
}
}
// The walltime can be 0 if there is a failure between publishing the tables
	// as OFFLINE and then choosing an ingestion timestamp. This might happen
// while waiting for the descriptor version to propagate across the cluster
// for example.
//
// In this case, we don't want to rollback the data since data ingestion has
// not yet begun (since we have not chosen a timestamp at which to ingest.)
if details.Walltime != 0 && len(revert) > 0 {
// NB: if a revert fails it will abort the rest of this failure txn, which is
// also what brings tables back online. We _could_ change the error handling
// or just move the revert into Resume()'s error return path, however it isn't
// clear that just bringing a table back online with partially imported data
// that may or may not be partially reverted is actually a good idea. It seems
		// better to do the revert here so that the table comes back if, and only if,
// it was rolled back to its pre-IMPORT state, and instead provide a manual
// admin knob (e.g. ALTER TABLE REVERT TO SYSTEM TIME) if anything goes wrong.
ts := hlc.Timestamp{WallTime: details.Walltime}.Prev()
		// disallowShadowingBelow=writeTS was used when writing, which means no
		// existing keys could have been covered by an imported key, and the table
		// was offline to other writes. So even if GC has run, it would not have
		// GC'ed any keys to which we need to revert, and we can safely ignore the
		// target-time GC check.
const ignoreGC = true
if err := sql.RevertTables(ctx, txn.DB(), execCfg, revert, ts, ignoreGC, sql.RevertTableDefaultBatchSize); err != nil {
return errors.Wrap(err, "rolling back partially completed IMPORT")
}
}
for i := range empty {
// Set a DropTime on the table descriptor to differentiate it from an
// older-format (v1.1) descriptor. This enables ClearTableData to use a
// RangeClear for faster data removal, rather than removing by chunks.
empty[i].TableDesc().DropTime = dropTime
if err := gcjob.ClearTableData(
ctx, execCfg.DB, execCfg.DistSender, execCfg.Codec, &execCfg.Settings.SV, empty[i],
); err != nil {
return errors.Wrapf(err, "clearing data for table %d", empty[i].GetID())
}
}
b := txn.NewBatch()
tablesToGC := make([]descpb.ID, 0, len(details.Tables))
toWrite := make([]*tabledesc.Mutable, 0, len(details.Tables))
for _, tbl := range details.Tables {
newTableDesc, err := descsCol.GetMutableTableVersionByID(ctx, tbl.Desc.ID, txn)
if err != nil {
return err
}
if tbl.IsNew {
newTableDesc.SetDropped()
			// If the DropTime is set, a table uses RangeClear for fast data removal. This
// operation starts at DropTime + the GC TTL. If we used now() here, it would
// not clean up data until the TTL from the time of the error. Instead, use 1
// (that is, 1ns past the epoch) to allow this to be cleaned up as soon as
// possible. This is safe since the table data was never visible to users,
// and so we don't need to preserve MVCC semantics.
newTableDesc.DropTime = dropTime
b.Del(catalogkeys.EncodeNameKey(execCfg.Codec, newTableDesc))
tablesToGC = append(tablesToGC, newTableDesc.ID)
descsCol.AddDeletedDescriptor(newTableDesc.GetID())
} else {
// IMPORT did not create this table, so we should not drop it.
newTableDesc.SetPublic()
}
// Accumulate the changes before adding them to the batch to avoid
// making any table invalid before having read it.
toWrite = append(toWrite, newTableDesc)
}
for _, d := range toWrite {
const kvTrace = false
if err := descsCol.WriteDescToBatch(ctx, kvTrace, d, b); err != nil {
return err
}
}
// Queue a GC job.
gcDetails := jobspb.SchemaChangeGCDetails{}
for _, tableID := range tablesToGC {
gcDetails.Tables = append(gcDetails.Tables, jobspb.SchemaChangeGCDetails_DroppedID{
ID: tableID,
DropTime: dropTime,
})
}
gcJobRecord := jobs.Record{
Description: fmt.Sprintf("GC for %s", r.job.Payload().Description),
Username: r.job.Payload().UsernameProto.Decode(),
DescriptorIDs: tablesToGC,
Details: gcDetails,
Progress: jobspb.SchemaChangeGCProgress{},
NonCancelable: true,
}
if _, err := execCfg.JobRegistry.CreateJobWithTxn(
ctx, gcJobRecord, execCfg.JobRegistry.MakeJobID(), txn); err != nil {
return err
}
return errors.Wrap(txn.Run(ctx, b), "rolling back tables")
}
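// dropSchemas marks any schemas created by the failed or canceled import as
// dropped and queues the jobs required to drop them and to update the parent
// database descriptor. The returned job IDs are run after the txn commits.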
func (r *importResumer) dropSchemas(
ctx context.Context,
txn *kv.Txn,
descsCol *descs.Collection,
execCfg *sql.ExecutorConfig,
p sql.JobExecContext,
) ([]jobspb.JobID, error) {
details := r.job.Details().(jobspb.ImportDetails)
// If the prepare step of the import job was not completed then the
// descriptors do not need to be rolled back as the txn updating them never
// completed.
if !details.PrepareComplete || len(details.Schemas) == 0 {
return nil, nil
}
// Resolve the database descriptor.
desc, err := descsCol.GetMutableDescriptorByID(ctx, txn, details.ParentID)
if err != nil {
return nil, err
}
dbDesc, ok := desc.(*dbdesc.Mutable)
if !ok {
return nil, errors.Newf("expected ID %d to refer to the database being imported into",
details.ParentID)
}
droppedSchemaIDs := make([]descpb.ID, 0)
for _, schema := range details.Schemas {
desc, err := descsCol.GetMutableDescriptorByID(ctx, txn, schema.Desc.ID)
if err != nil {
return nil, err
}
var schemaDesc *schemadesc.Mutable
var ok bool
if schemaDesc, ok = desc.(*schemadesc.Mutable); !ok {
return nil, errors.Newf("unable to resolve schema desc with ID %d", schema.Desc.ID)
}
// Mark the descriptor as dropped and write it to the batch.
// Delete namespace entry or update draining names depending on version.
schemaDesc.SetDropped()
droppedSchemaIDs = append(droppedSchemaIDs, schemaDesc.GetID())
b := txn.NewBatch()
// TODO(postamar): remove version gate and else-block in 22.2
if execCfg.Settings.Version.IsActive(ctx, clusterversion.AvoidDrainingNames) {
if dbDesc.Schemas != nil {
delete(dbDesc.Schemas, schemaDesc.GetName())
}
b.Del(catalogkeys.EncodeNameKey(p.ExecCfg().Codec, schemaDesc))
} else {
//lint:ignore SA1019 removal of deprecated method call scheduled for 22.2
schemaDesc.AddDrainingName(descpb.NameInfo{
ParentID: details.ParentID,
ParentSchemaID: keys.RootNamespaceID,
Name: schemaDesc.Name,
})
// Update the parent database with information about the dropped schema.
dbDesc.AddSchemaToDatabase(schema.Desc.Name, descpb.DatabaseDescriptor_SchemaInfo{ID: dbDesc.ID, Dropped: true})
}
if err := descsCol.WriteDescToBatch(ctx, p.ExtendedEvalContext().Tracing.KVTracingEnabled(),
schemaDesc, b); err != nil {
return nil, err
}
err = txn.Run(ctx, b)
if err != nil {
return nil, err
}
}
// Write out the change to the database. This only creates a job record to be
// run after the txn commits.
queuedJob, err := writeNonDropDatabaseChange(ctx, dbDesc, txn, descsCol, p, "")
if err != nil {
return nil, err
}
// Create the job to drop the schema.
dropSchemaJobRecord := jobs.Record{
Description: "dropping schemas as part of an import job rollback",
Username: p.User(),
DescriptorIDs: droppedSchemaIDs,
Details: jobspb.SchemaChangeDetails{
DroppedSchemas: droppedSchemaIDs,
DroppedDatabaseID: descpb.InvalidID,
FormatVersion: jobspb.DatabaseJobFormatVersion,
},
Progress: jobspb.SchemaChangeProgress{},
NonCancelable: true,
}
jobID := p.ExecCfg().JobRegistry.MakeJobID()
job, err := execCfg.JobRegistry.CreateJobWithTxn(ctx, dropSchemaJobRecord, jobID, txn)
if err != nil {
return nil, err
}
queuedJob = append(queuedJob, job.ID())
return queuedJob, nil
}
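// releaseProtectedTimestamp releases the protected timestamp record associated
// with the import job, if one exists.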
func (r *importResumer) releaseProtectedTimestamp(
ctx context.Context, txn *kv.Txn, pts protectedts.Storage,
) error {
details := r.job.Details().(jobspb.ImportDetails)
ptsID := details.ProtectedTimestampRecord
// If the job doesn't have a protected timestamp then there's nothing to do.
if ptsID == nil {
return nil
}
err := pts.Release(ctx, txn, *ptsID)
if errors.Is(err, protectedts.ErrNotExists) {
// No reason to return an error which might cause problems if it doesn't
// seem to exist.
log.Warningf(ctx, "failed to release protected which seems not to exist: %v", err)
err = nil
}
return err
}
// ReportResults implements JobResultsReporter interface.
func (r *importResumer) ReportResults(ctx context.Context, resultsCh chan<- tree.Datums) error {
select {
case resultsCh <- tree.Datums{
tree.NewDInt(tree.DInt(r.job.ID())),
tree.NewDString(string(jobs.StatusSucceeded)),
tree.NewDFloat(tree.DFloat(1.0)),
tree.NewDInt(tree.DInt(r.res.Rows)),
tree.NewDInt(tree.DInt(r.res.IndexEntries)),
tree.NewDInt(tree.DInt(r.res.DataSize)),
}:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
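// init registers the IMPORT job resumer constructor with the jobs registry.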
func init() {
jobs.RegisterConstructor(
jobspb.TypeImport,
func(job *jobs.Job, settings *cluster.Settings) jobs.Resumer {
return &importResumer{
job: job,
settings: settings,
}
},
)
}
| pkg/sql/importer/import_job.go | 1 | https://github.com/cockroachdb/cockroach/commit/67e659223cd2a2ae4ea2e5fa6d9461fd1418c4de | [
0.9914637207984924,
0.0062691508792340755,
0.00015998086018953472,
0.0001710524084046483,
0.0751367062330246
] |
{
"id": 5,
"code_window": [
"\tif err != nil {\n",
"\t\treturn roachpb.BulkOpSummary{}, errors.Wrap(err, \"exhausted retries\")\n",
"\t}\n",
"\treturn res, nil\n",
"}\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn res, jobs.MarkPauseRequestError(errors.Wrap(err, \"exhausted retries\"))\n"
],
"file_path": "pkg/sql/importer/import_job.go",
"type": "replace",
"edit_start_line_idx": 1001
} | alter_rename_index_stmt ::=
'ALTER' 'INDEX' table_name '@' index_name 'RENAME' 'TO' index_name
| 'ALTER' 'INDEX' index_name 'RENAME' 'TO' index_name
| 'ALTER' 'INDEX' 'IF' 'EXISTS' table_name '@' index_name 'RENAME' 'TO' index_name
| 'ALTER' 'INDEX' 'IF' 'EXISTS' index_name 'RENAME' 'TO' index_name
| docs/generated/sql/bnf/rename_index.bnf | 0 | https://github.com/cockroachdb/cockroach/commit/67e659223cd2a2ae4ea2e5fa6d9461fd1418c4de | [
0.0001659351255511865,
0.0001659351255511865,
0.0001659351255511865,
0.0001659351255511865,
0
] |
{
"id": 5,
"code_window": [
"\tif err != nil {\n",
"\t\treturn roachpb.BulkOpSummary{}, errors.Wrap(err, \"exhausted retries\")\n",
"\t}\n",
"\treturn res, nil\n",
"}\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn res, jobs.MarkPauseRequestError(errors.Wrap(err, \"exhausted retries\"))\n"
],
"file_path": "pkg/sql/importer/import_job.go",
"type": "replace",
"edit_start_line_idx": 1001
} | // Copyright 2022 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package changefeedccl
import (
"context"
gosql "database/sql"
"fmt"
"sync/atomic"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/cdctest"
"github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/changefeedbase"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/tests"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/ctxgroup"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/require"
)
func TestAlterChangefeedAddTarget(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`)
testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo`)
defer closeFeed(t, testFeed)
feed, ok := testFeed.(cdctest.EnterpriseTestFeed)
require.True(t, ok)
sqlDB.Exec(t, `PAUSE JOB $1`, feed.JobID())
waitForJobStatus(sqlDB, t, feed.JobID(), `paused`)
sqlDB.Exec(t, fmt.Sprintf(`ALTER CHANGEFEED %d ADD bar`, feed.JobID()))
sqlDB.Exec(t, fmt.Sprintf(`RESUME JOB %d`, feed.JobID()))
waitForJobStatus(sqlDB, t, feed.JobID(), `running`)
sqlDB.Exec(t, `INSERT INTO foo VALUES(1)`)
assertPayloads(t, testFeed, []string{
`foo: [1]->{"after": {"a": 1}}`,
})
sqlDB.Exec(t, `INSERT INTO bar VALUES(2)`)
assertPayloads(t, testFeed, []string{
`bar: [2]->{"after": {"a": 2}}`,
})
}
t.Run(`kafka`, kafkaTest(testFn))
}
func TestAlterChangefeedAddTargetFamily(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING, FAMILY onlya (a), FAMILY onlyb (b))`)
testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo FAMILY onlya`)
defer closeFeed(t, testFeed)
sqlDB.Exec(t, `INSERT INTO foo VALUES(1, 'hello')`)
assertPayloads(t, testFeed, []string{
`foo.onlya: [1]->{"after": {"a": 1}}`,
})
feed, ok := testFeed.(cdctest.EnterpriseTestFeed)
require.True(t, ok)
sqlDB.Exec(t, `PAUSE JOB $1`, feed.JobID())
waitForJobStatus(sqlDB, t, feed.JobID(), `paused`)
sqlDB.Exec(t, fmt.Sprintf(`ALTER CHANGEFEED %d ADD foo FAMILY onlyb`, feed.JobID()))
sqlDB.Exec(t, fmt.Sprintf(`RESUME JOB %d`, feed.JobID()))
waitForJobStatus(sqlDB, t, feed.JobID(), `running`)
sqlDB.Exec(t, `INSERT INTO foo VALUES(2, 'goodbye')`)
assertPayloads(t, testFeed, []string{
`foo.onlyb: [1]->{"after": {"b": "hello"}}`,
`foo.onlya: [2]->{"after": {"a": 2}}`,
`foo.onlyb: [2]->{"after": {"b": "goodbye"}}`,
})
}
t.Run(`kafka`, kafkaTest(testFn))
}
func TestAlterChangefeedSwitchFamily(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING, FAMILY onlya (a), FAMILY onlyb (b))`)
testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo FAMILY onlya`)
defer closeFeed(t, testFeed)
sqlDB.Exec(t, `INSERT INTO foo VALUES(1, 'hello')`)
assertPayloads(t, testFeed, []string{
`foo.onlya: [1]->{"after": {"a": 1}}`,
})
feed, ok := testFeed.(cdctest.EnterpriseTestFeed)
require.True(t, ok)
sqlDB.Exec(t, `PAUSE JOB $1`, feed.JobID())
waitForJobStatus(sqlDB, t, feed.JobID(), `paused`)
sqlDB.Exec(t, fmt.Sprintf(`ALTER CHANGEFEED %d ADD foo FAMILY onlyb DROP foo FAMILY onlya`, feed.JobID()))
sqlDB.Exec(t, fmt.Sprintf(`RESUME JOB %d`, feed.JobID()))
waitForJobStatus(sqlDB, t, feed.JobID(), `running`)
sqlDB.Exec(t, `INSERT INTO foo VALUES(2, 'goodbye')`)
assertPayloads(t, testFeed, []string{
`foo.onlyb: [1]->{"after": {"b": "hello"}}`,
`foo.onlyb: [2]->{"after": {"b": "goodbye"}}`,
})
}
t.Run(`kafka`, kafkaTest(testFn))
}
func TestAlterChangefeedDropTarget(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`)
testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo, bar`)
defer closeFeed(t, testFeed)
feed, ok := testFeed.(cdctest.EnterpriseTestFeed)
require.True(t, ok)
sqlDB.Exec(t, `PAUSE JOB $1`, feed.JobID())
waitForJobStatus(sqlDB, t, feed.JobID(), `paused`)
sqlDB.Exec(t, fmt.Sprintf(`ALTER CHANGEFEED %d DROP bar`, feed.JobID()))
sqlDB.Exec(t, fmt.Sprintf(`RESUME JOB %d`, feed.JobID()))
waitForJobStatus(sqlDB, t, feed.JobID(), `running`)
sqlDB.Exec(t, `INSERT INTO foo VALUES(1)`)
assertPayloads(t, testFeed, []string{
`foo: [1]->{"after": {"a": 1}}`,
})
sqlDB.Exec(t, `INSERT INTO bar VALUES(2)`)
assertPayloads(t, testFeed, nil)
}
t.Run(`kafka`, kafkaTest(testFn))
}
func TestAlterChangefeedDropTargetFamily(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING, FAMILY onlya (a), FAMILY onlyb (b))`)
testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo FAMILY onlya, foo FAMILY onlyb`)
defer closeFeed(t, testFeed)
feed, ok := testFeed.(cdctest.EnterpriseTestFeed)
require.True(t, ok)
sqlDB.Exec(t, `PAUSE JOB $1`, feed.JobID())
waitForJobStatus(sqlDB, t, feed.JobID(), `paused`)
sqlDB.Exec(t, fmt.Sprintf(`ALTER CHANGEFEED %d DROP foo FAMILY onlyb`, feed.JobID()))
sqlDB.Exec(t, fmt.Sprintf(`RESUME JOB %d`, feed.JobID()))
waitForJobStatus(sqlDB, t, feed.JobID(), `running`)
sqlDB.Exec(t, `INSERT INTO foo VALUES(1, 'hello')`)
sqlDB.Exec(t, `INSERT INTO foo VALUES(2, 'goodbye')`)
assertPayloads(t, testFeed, []string{
`foo.onlya: [1]->{"after": {"a": 1}}`,
`foo.onlya: [2]->{"after": {"a": 2}}`,
})
}
t.Run(`kafka`, kafkaTest(testFn))
}
func TestAlterChangefeedSetDiffOption(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo`)
defer closeFeed(t, testFeed)
feed, ok := testFeed.(cdctest.EnterpriseTestFeed)
require.True(t, ok)
sqlDB.Exec(t, `PAUSE JOB $1`, feed.JobID())
waitForJobStatus(sqlDB, t, feed.JobID(), `paused`)
sqlDB.Exec(t, fmt.Sprintf(`ALTER CHANGEFEED %d SET diff`, feed.JobID()))
sqlDB.Exec(t, fmt.Sprintf(`RESUME JOB %d`, feed.JobID()))
waitForJobStatus(sqlDB, t, feed.JobID(), `running`)
sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'initial')`)
assertPayloads(t, testFeed, []string{
`foo: [0]->{"after": {"a": 0, "b": "initial"}, "before": null}`,
})
}
t.Run(`kafka`, kafkaTest(testFn))
}
func TestAlterChangefeedUnsetDiffOption(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo WITH diff`)
defer closeFeed(t, testFeed)
feed, ok := testFeed.(cdctest.EnterpriseTestFeed)
require.True(t, ok)
sqlDB.Exec(t, `PAUSE JOB $1`, feed.JobID())
waitForJobStatus(sqlDB, t, feed.JobID(), `paused`)
sqlDB.Exec(t, fmt.Sprintf(`ALTER CHANGEFEED %d UNSET diff`, feed.JobID()))
sqlDB.Exec(t, fmt.Sprintf(`RESUME JOB %d`, feed.JobID()))
waitForJobStatus(sqlDB, t, feed.JobID(), `running`)
sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'initial')`)
assertPayloads(t, testFeed, []string{
`foo: [0]->{"after": {"a": 0, "b": "initial"}}`,
})
}
t.Run(`kafka`, kafkaTest(testFn))
}
func TestAlterChangefeedErrors(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`)
testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo`)
defer closeFeed(t, testFeed)
feed, ok := testFeed.(cdctest.EnterpriseTestFeed)
require.True(t, ok)
sqlDB.ExpectErr(t,
`could not load job with job id -1`,
`ALTER CHANGEFEED -1 ADD bar`,
)
sqlDB.Exec(t, `ALTER TABLE bar ADD COLUMN b INT`)
var alterTableJobID jobspb.JobID
sqlDB.QueryRow(t, `SELECT job_id FROM [SHOW JOBS] WHERE job_type = 'SCHEMA CHANGE'`).Scan(&alterTableJobID)
sqlDB.ExpectErr(t,
fmt.Sprintf(`job %d is not changefeed job`, alterTableJobID),
fmt.Sprintf(`ALTER CHANGEFEED %d ADD bar`, alterTableJobID),
)
sqlDB.ExpectErr(t,
fmt.Sprintf(`job %d is not paused`, feed.JobID()),
fmt.Sprintf(`ALTER CHANGEFEED %d ADD bar`, feed.JobID()),
)
sqlDB.Exec(t, `PAUSE JOB $1`, feed.JobID())
waitForJobStatus(sqlDB, t, feed.JobID(), `paused`)
sqlDB.ExpectErr(t,
`pq: target "TABLE baz" does not exist`,
fmt.Sprintf(`ALTER CHANGEFEED %d ADD baz`, feed.JobID()),
)
sqlDB.ExpectErr(t,
`pq: target "TABLE baz" does not exist`,
fmt.Sprintf(`ALTER CHANGEFEED %d DROP baz`, feed.JobID()),
)
sqlDB.ExpectErr(t,
`pq: target "TABLE bar" already not watched by changefeed`,
fmt.Sprintf(`ALTER CHANGEFEED %d DROP bar`, feed.JobID()),
)
sqlDB.ExpectErr(t,
`pq: invalid option "qux"`,
fmt.Sprintf(`ALTER CHANGEFEED %d SET qux`, feed.JobID()),
)
sqlDB.ExpectErr(t,
`pq: cannot alter option "initial_scan"`,
fmt.Sprintf(`ALTER CHANGEFEED %d SET initial_scan`, feed.JobID()),
)
sqlDB.ExpectErr(t,
`pq: invalid option "qux"`,
fmt.Sprintf(`ALTER CHANGEFEED %d UNSET qux`, feed.JobID()),
)
sqlDB.ExpectErr(t,
`pq: cannot alter option "initial_scan"`,
fmt.Sprintf(`ALTER CHANGEFEED %d UNSET initial_scan`, feed.JobID()),
)
sqlDB.ExpectErr(t,
`pq: cannot alter option "initial_scan_only"`,
fmt.Sprintf(`ALTER CHANGEFEED %d UNSET initial_scan_only`, feed.JobID()),
)
sqlDB.ExpectErr(t,
`pq: cannot alter option "end_time"`,
fmt.Sprintf(`ALTER CHANGEFEED %d UNSET end_time`, feed.JobID()),
)
sqlDB.ExpectErr(t,
`cannot unset option "sink"`,
fmt.Sprintf(`ALTER CHANGEFEED %d UNSET sink`, feed.JobID()),
)
sqlDB.ExpectErr(t,
`pq: invalid option "diff"`,
fmt.Sprintf(`ALTER CHANGEFEED %d ADD bar WITH diff`, feed.JobID()),
)
sqlDB.ExpectErr(t,
`pq: cannot specify both "initial_scan" and "no_initial_scan"`,
fmt.Sprintf(`ALTER CHANGEFEED %d ADD bar WITH initial_scan, no_initial_scan`, feed.JobID()),
)
}
t.Run(`kafka`, kafkaTest(testFn))
}
func TestAlterChangefeedDropAllTargetsError(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`)
testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo, bar`)
defer closeFeed(t, testFeed)
feed, ok := testFeed.(cdctest.EnterpriseTestFeed)
require.True(t, ok)
sqlDB.Exec(t, `PAUSE JOB $1`, feed.JobID())
waitForJobStatus(sqlDB, t, feed.JobID(), `paused`)
sqlDB.ExpectErr(t,
`cannot drop all targets`,
fmt.Sprintf(`ALTER CHANGEFEED %d DROP foo, bar`, feed.JobID()),
)
}
t.Run(`kafka`, kafkaTest(testFn))
}
func TestAlterChangefeedTelemetry(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO foo VALUES (1)`)
sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO bar VALUES (1)`)
sqlDB.Exec(t, `CREATE TABLE baz (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO baz VALUES (1)`)
// Reset the counts.
_ = telemetry.GetFeatureCounts(telemetry.Raw, telemetry.ResetCounts)
testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo, bar WITH diff`)
defer closeFeed(t, testFeed)
feed := testFeed.(cdctest.EnterpriseTestFeed)
require.NoError(t, feed.Pause())
sqlDB.Exec(t, fmt.Sprintf(`ALTER CHANGEFEED %d DROP bar, foo ADD baz UNSET diff SET resolved, format=json`, feed.JobID()))
counts := telemetry.GetFeatureCounts(telemetry.Raw, telemetry.ResetCounts)
require.Equal(t, int32(1), counts[`changefeed.alter`])
require.Equal(t, int32(1), counts[`changefeed.alter.dropped_targets.2`])
require.Equal(t, int32(1), counts[`changefeed.alter.added_targets.1`])
require.Equal(t, int32(1), counts[`changefeed.alter.set_options.2`])
require.Equal(t, int32(1), counts[`changefeed.alter.unset_options.1`])
}
t.Run(`kafka`, kafkaTest(testFn))
}
// The purpose of this test is to ensure that the ALTER CHANGEFEED statement
// does not accidentally redact secret keys in the changefeed details.
func TestAlterChangefeedPersistSinkURI(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
bucket, accessKey, secretKey := checkS3Credentials(t)
params, _ := tests.CreateTestServerParams()
s, rawSQLDB, _ := serverutils.StartServer(t, params)
sqlDB := sqlutils.MakeSQLRunner(rawSQLDB)
registry := s.JobRegistry().(*jobs.Registry)
ctx := context.Background()
defer s.Stopper().Stop(ctx)
query := `CREATE TABLE foo (a string)`
sqlDB.Exec(t, query)
query = `CREATE TABLE bar (b string)`
sqlDB.Exec(t, query)
query = `SET CLUSTER SETTING kv.rangefeed.enabled = true`
sqlDB.Exec(t, query)
var changefeedID jobspb.JobID
doneCh := make(chan struct{})
defer close(doneCh)
registry.TestingResumerCreationKnobs = map[jobspb.Type]func(raw jobs.Resumer) jobs.Resumer{
jobspb.TypeChangefeed: func(raw jobs.Resumer) jobs.Resumer {
r := fakeResumer{
done: doneCh,
}
return &r
},
}
query = fmt.Sprintf(`CREATE CHANGEFEED FOR TABLE foo, bar INTO
's3://%s/fake/path?AWS_ACCESS_KEY_ID=%s&AWS_SECRET_ACCESS_KEY=%s'`, bucket, accessKey, secretKey)
sqlDB.QueryRow(t, query).Scan(&changefeedID)
sqlDB.Exec(t, `PAUSE JOB $1`, changefeedID)
waitForJobStatus(sqlDB, t, changefeedID, `paused`)
sqlDB.Exec(t, fmt.Sprintf(`ALTER CHANGEFEED %d SET diff`, changefeedID))
sqlDB.Exec(t, fmt.Sprintf(`RESUME JOB %d`, changefeedID))
waitForJobStatus(sqlDB, t, changefeedID, `running`)
job, err := registry.LoadJob(ctx, changefeedID)
require.NoError(t, err)
details, ok := job.Details().(jobspb.ChangefeedDetails)
require.True(t, ok)
require.Equal(t, details.SinkURI,
fmt.Sprintf(`s3://%s/fake/path?AWS_ACCESS_KEY_ID=%s&AWS_SECRET_ACCESS_KEY=%s`, bucket, accessKey, secretKey))
}
func TestAlterChangefeedChangeSinkTypeError(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
bucket, accessKey, secretKey := checkS3Credentials(t)
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo`)
defer closeFeed(t, testFeed)
feed, ok := testFeed.(cdctest.EnterpriseTestFeed)
require.True(t, ok)
sqlDB.Exec(t, `PAUSE JOB $1`, feed.JobID())
waitForJobStatus(sqlDB, t, feed.JobID(), `paused`)
sqlDB.ExpectErr(t,
`pq: New sink type "s3" does not match original sink type "kafka". Altering the sink type of a changefeed is disallowed, consider creating a new changefeed instead.`,
fmt.Sprintf(`ALTER CHANGEFEED %d SET sink = 's3://%s/fake/path?AWS_ACCESS_KEY_ID=%s&AWS_SECRET_ACCESS_KEY=%s'`, feed.JobID(), bucket, accessKey, secretKey),
)
}
t.Run(`kafka`, kafkaTest(testFn))
}
func TestAlterChangefeedChangeSinkURI(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
registry := f.Server().JobRegistry().(*jobs.Registry)
ctx := context.Background()
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo`)
defer closeFeed(t, testFeed)
feed, ok := testFeed.(cdctest.EnterpriseTestFeed)
require.True(t, ok)
sqlDB.Exec(t, `PAUSE JOB $1`, feed.JobID())
waitForJobStatus(sqlDB, t, feed.JobID(), `paused`)
newSinkURI := `kafka://new_kafka_uri`
sqlDB.Exec(t, fmt.Sprintf(`ALTER CHANGEFEED %d SET sink = '%s'`, feed.JobID(), newSinkURI))
sqlDB.Exec(t, fmt.Sprintf(`RESUME JOB %d`, feed.JobID()))
waitForJobStatus(sqlDB, t, feed.JobID(), `running`)
job, err := registry.LoadJob(ctx, feed.JobID())
require.NoError(t, err)
details, ok := job.Details().(jobspb.ChangefeedDetails)
require.True(t, ok)
require.Equal(t, newSinkURI, details.SinkURI)
}
t.Run(`kafka`, kafkaTest(testFn))
}
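// TestAlterChangefeedAddTargetErrors verifies that adding a target fails when
// the target cannot be resolved as of the changefeed's creation time or its
// current high water mark.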
func TestAlterChangefeedAddTargetErrors(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO foo (a) SELECT * FROM generate_series(1, 1000)`)
knobs := f.Server().(*server.TestServer).Cfg.TestingKnobs.
DistSQL.(*execinfra.TestingKnobs).
Changefeed.(*TestingKnobs)
		// Ensure scan requests are always small enough that we receive multiple
		// resolved events during a backfill.
knobs.FeedKnobs.BeforeScanRequest = func(b *kv.Batch) error {
b.Header.MaxSpanRequestKeys = 10
return nil
}
// ensure that we do not emit a resolved timestamp
knobs.ShouldSkipResolved = func(r *jobspb.ResolvedSpan) bool {
return true
}
testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo WITH resolved = '100ms'`)
// Kafka feeds are not buffered, so we have to consume messages.
g := ctxgroup.WithContext(context.Background())
g.Go(func() error {
for {
_, err := testFeed.Next()
if err != nil {
return err
}
}
})
defer func() {
closeFeed(t, testFeed)
_ = g.Wait()
}()
feed, ok := testFeed.(cdctest.EnterpriseTestFeed)
require.True(t, ok)
require.NoError(t, feed.Pause())
waitForJobStatus(sqlDB, t, feed.JobID(), `paused`)
sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO bar VALUES (1), (2), (3)`)
sqlDB.ExpectErr(t,
`pq: target "bar" cannot be resolved as of the creation time of the changefeed. Please wait until the high water mark progresses past the creation time of this target in order to add it to the changefeed.`,
fmt.Sprintf(`ALTER CHANGEFEED %d ADD bar`, feed.JobID()),
)
// allow the changefeed to emit resolved events now
knobs.ShouldSkipResolved = func(r *jobspb.ResolvedSpan) bool {
return false
}
require.NoError(t, feed.Resume())
// Wait for the high water mark to be non-zero.
testutils.SucceedsSoon(t, func() error {
registry := f.Server().JobRegistry().(*jobs.Registry)
job, err := registry.LoadJob(context.Background(), feed.JobID())
require.NoError(t, err)
prog := job.Progress()
if p := prog.GetHighWater(); p != nil && !p.IsEmpty() {
return nil
}
return errors.New("waiting for highwater")
})
require.NoError(t, feed.Pause())
waitForJobStatus(sqlDB, t, feed.JobID(), `paused`)
sqlDB.Exec(t, `CREATE TABLE baz (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO baz VALUES (1), (2), (3)`)
sqlDB.ExpectErr(t,
`pq: target "baz" cannot be resolved as of the high water mark. Please wait until the high water mark progresses past the creation time of this target in order to add it to the changefeed.`,
fmt.Sprintf(`ALTER CHANGEFEED %d ADD baz`, feed.JobID()),
)
}
t.Run(`kafka`, kafkaTest(testFn, feedTestNoTenants))
}
func TestAlterChangefeedDatabaseQualifiedNames(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE DATABASE movr`)
sqlDB.Exec(t, `CREATE TABLE movr.drivers (id INT PRIMARY KEY, name STRING)`)
sqlDB.Exec(t, `CREATE TABLE movr.users (id INT PRIMARY KEY, name STRING)`)
sqlDB.Exec(t,
`INSERT INTO movr.drivers VALUES (1, 'Alice')`,
)
sqlDB.Exec(t,
`INSERT INTO movr.users VALUES (1, 'Bob')`,
)
testFeed := feed(t, f, `CREATE CHANGEFEED FOR movr.drivers WITH resolved = '100ms', diff`)
defer closeFeed(t, testFeed)
assertPayloads(t, testFeed, []string{
`drivers: [1]->{"after": {"id": 1, "name": "Alice"}, "before": null}`,
})
expectResolvedTimestamp(t, testFeed)
feed, ok := testFeed.(cdctest.EnterpriseTestFeed)
require.True(t, ok)
require.NoError(t, feed.Pause())
sqlDB.Exec(t, fmt.Sprintf(`ALTER CHANGEFEED %d ADD movr.users WITH initial_scan UNSET diff`, feed.JobID()))
require.NoError(t, feed.Resume())
assertPayloads(t, testFeed, []string{
`users: [1]->{"after": {"id": 1, "name": "Bob"}}`,
})
sqlDB.Exec(t,
`INSERT INTO movr.drivers VALUES (3, 'Carol')`,
)
assertPayloads(t, testFeed, []string{
`drivers: [3]->{"after": {"id": 3, "name": "Carol"}}`,
})
}
t.Run(`kafka`, kafkaTest(testFn))
}
func TestAlterChangefeedDatabaseScope(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE DATABASE movr`)
sqlDB.Exec(t, `CREATE DATABASE new_movr`)
sqlDB.Exec(t, `CREATE TABLE movr.drivers (id INT PRIMARY KEY, name STRING)`)
sqlDB.Exec(t, `CREATE TABLE new_movr.drivers (id INT PRIMARY KEY, name STRING)`)
sqlDB.Exec(t,
`INSERT INTO movr.drivers VALUES (1, 'Alice')`,
)
sqlDB.Exec(t,
`INSERT INTO new_movr.drivers VALUES (1, 'Bob')`,
)
testFeed := feed(t, f, `CREATE CHANGEFEED FOR movr.drivers WITH diff`)
defer closeFeed(t, testFeed)
assertPayloads(t, testFeed, []string{
`drivers: [1]->{"after": {"id": 1, "name": "Alice"}, "before": null}`,
})
feed, ok := testFeed.(cdctest.EnterpriseTestFeed)
require.True(t, ok)
require.NoError(t, feed.Pause())
sqlDB.Exec(t, `USE new_movr`)
sqlDB.Exec(t, fmt.Sprintf(`ALTER CHANGEFEED %d DROP movr.drivers ADD drivers WITH initial_scan UNSET diff`, feed.JobID()))
require.NoError(t, feed.Resume())
assertPayloads(t, testFeed, []string{
`drivers: [1]->{"after": {"id": 1, "name": "Bob"}}`,
})
}
t.Run(`kafka`, kafkaTest(testFn))
}
func TestAlterChangefeedDatabaseScopeUnqualifiedName(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE DATABASE movr`)
sqlDB.Exec(t, `CREATE DATABASE new_movr`)
sqlDB.Exec(t, `CREATE TABLE movr.drivers (id INT PRIMARY KEY, name STRING)`)
sqlDB.Exec(t, `CREATE TABLE new_movr.drivers (id INT PRIMARY KEY, name STRING)`)
sqlDB.Exec(t,
`INSERT INTO movr.drivers VALUES (1, 'Alice')`,
)
sqlDB.Exec(t, `USE movr`)
testFeed := feed(t, f, `CREATE CHANGEFEED FOR drivers WITH diff, resolved = '100ms'`)
defer closeFeed(t, testFeed)
assertPayloads(t, testFeed, []string{
`drivers: [1]->{"after": {"id": 1, "name": "Alice"}, "before": null}`,
})
expectResolvedTimestamp(t, testFeed)
feed, ok := testFeed.(cdctest.EnterpriseTestFeed)
require.True(t, ok)
require.NoError(t, feed.Pause())
sqlDB.Exec(t, `USE new_movr`)
sqlDB.Exec(t, fmt.Sprintf(`ALTER CHANGEFEED %d UNSET diff`, feed.JobID()))
require.NoError(t, feed.Resume())
sqlDB.Exec(t,
`INSERT INTO movr.drivers VALUES (2, 'Bob')`,
)
assertPayloads(t, testFeed, []string{
`drivers: [2]->{"after": {"id": 2, "name": "Bob"}}`,
})
}
t.Run(`kafka`, kafkaTest(testFn))
}
func TestAlterChangefeedColumnFamilyDatabaseScope(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE DATABASE movr`)
sqlDB.Exec(t, `CREATE TABLE movr.drivers (id INT PRIMARY KEY, name STRING, FAMILY onlyid (id), FAMILY onlyname (name))`)
sqlDB.Exec(t,
`INSERT INTO movr.drivers VALUES (1, 'Alice')`,
)
testFeed := feed(t, f, `CREATE CHANGEFEED FOR movr.drivers WITH diff, split_column_families`)
defer closeFeed(t, testFeed)
assertPayloads(t, testFeed, []string{
`drivers.onlyid: [1]->{"after": {"id": 1}, "before": null}`,
`drivers.onlyname: [1]->{"after": {"name": "Alice"}, "before": null}`,
})
feed, ok := testFeed.(cdctest.EnterpriseTestFeed)
require.True(t, ok)
require.NoError(t, feed.Pause())
sqlDB.Exec(t, `USE movr`)
sqlDB.Exec(t, fmt.Sprintf(`ALTER CHANGEFEED %d DROP movr.drivers ADD movr.drivers FAMILY onlyid ADD drivers FAMILY onlyname UNSET diff`, feed.JobID()))
require.NoError(t, feed.Resume())
sqlDB.Exec(t,
`INSERT INTO movr.drivers VALUES (2, 'Bob')`,
)
assertPayloads(t, testFeed, []string{
`drivers.onlyid: [2]->{"after": {"id": 2}}`,
`drivers.onlyname: [2]->{"after": {"name": "Bob"}}`,
})
}
t.Run(`kafka`, kafkaTest(testFn))
}
func TestAlterChangefeedAlterTableName(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE DATABASE movr`)
sqlDB.Exec(t, `CREATE TABLE movr.users (id INT PRIMARY KEY, name STRING)`)
sqlDB.Exec(t,
`INSERT INTO movr.users VALUES (1, 'Alice')`,
)
testFeed := feed(t, f, `CREATE CHANGEFEED FOR movr.users WITH diff, resolved = '100ms'`)
defer closeFeed(t, testFeed)
assertPayloads(t, testFeed, []string{
`users: [1]->{"after": {"id": 1, "name": "Alice"}, "before": null}`,
})
expectResolvedTimestamp(t, testFeed)
waitForSchemaChange(t, sqlDB, `ALTER TABLE movr.users RENAME TO movr.riders`)
var tsLogical string
sqlDB.QueryRow(t, `SELECT cluster_logical_timestamp()`).Scan(&tsLogical)
ts := parseTimeToHLC(t, tsLogical)
		// Ensure that the high watermark has progressed past the time at which
		// the schema change occurred.
testutils.SucceedsSoon(t, func() error {
resolvedTS, _ := expectResolvedTimestamp(t, testFeed)
if resolvedTS.Less(ts) {
return errors.New("waiting for resolved timestamp to progress past the schema change event")
}
return nil
})
feed, ok := testFeed.(cdctest.EnterpriseTestFeed)
require.True(t, ok)
require.NoError(t, feed.Pause())
sqlDB.Exec(t, fmt.Sprintf(`ALTER CHANGEFEED %d UNSET diff`, feed.JobID()))
require.NoError(t, feed.Resume())
sqlDB.Exec(t,
`INSERT INTO movr.riders VALUES (2, 'Bob')`,
)
assertPayloads(t, testFeed, []string{
`users: [2]->{"after": {"id": 2, "name": "Bob"}}`,
})
}
t.Run(`kafka`, kafkaTest(testFn))
}
func TestAlterChangefeedInitialScan(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO foo VALUES (1), (2), (3)`)
sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO bar VALUES (1), (2), (3)`)
testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo WITH resolved = '1s', no_initial_scan`)
defer closeFeed(t, testFeed)
expectResolvedTimestamp(t, testFeed)
feed, ok := testFeed.(cdctest.EnterpriseTestFeed)
require.True(t, ok)
sqlDB.Exec(t, `PAUSE JOB $1`, feed.JobID())
waitForJobStatus(sqlDB, t, feed.JobID(), `paused`)
sqlDB.Exec(t, fmt.Sprintf(`ALTER CHANGEFEED %d ADD bar WITH initial_scan`, feed.JobID()))
sqlDB.Exec(t, fmt.Sprintf(`RESUME JOB %d`, feed.JobID()))
waitForJobStatus(sqlDB, t, feed.JobID(), `running`)
assertPayloads(t, testFeed, []string{
`bar: [1]->{"after": {"a": 1}}`,
`bar: [2]->{"after": {"a": 2}}`,
`bar: [3]->{"after": {"a": 3}}`,
})
sqlDB.Exec(t, `INSERT INTO bar VALUES (4)`)
assertPayloads(t, testFeed, []string{
`bar: [4]->{"after": {"a": 4}}`,
})
}
t.Run(`kafka`, kafkaTest(testFn))
}
func TestAlterChangefeedNoInitialScan(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO foo VALUES (1), (2), (3)`)
sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO bar VALUES (1), (2), (3)`)
testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo WITH resolved = '1s'`)
defer closeFeed(t, testFeed)
assertPayloads(t, testFeed, []string{
`foo: [1]->{"after": {"a": 1}}`,
`foo: [2]->{"after": {"a": 2}}`,
`foo: [3]->{"after": {"a": 3}}`,
})
expectResolvedTimestamp(t, testFeed)
feed, ok := testFeed.(cdctest.EnterpriseTestFeed)
require.True(t, ok)
sqlDB.Exec(t, `PAUSE JOB $1`, feed.JobID())
waitForJobStatus(sqlDB, t, feed.JobID(), `paused`)
sqlDB.Exec(t, fmt.Sprintf(`ALTER CHANGEFEED %d ADD bar WITH no_initial_scan`, feed.JobID()))
sqlDB.Exec(t, fmt.Sprintf(`RESUME JOB %d`, feed.JobID()))
waitForJobStatus(sqlDB, t, feed.JobID(), `running`)
expectResolvedTimestamp(t, testFeed)
sqlDB.Exec(t, `INSERT INTO bar VALUES (4)`)
assertPayloads(t, testFeed, []string{
`bar: [4]->{"after": {"a": 4}}`,
})
}
t.Run(`kafka`, kafkaTest(testFn))
}
func TestAlterChangefeedAddTargetsDuringSchemaChangeError(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
rnd, _ := randutil.NewPseudoRand()
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
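		// Grab the changefeed testing knobs so the hooks below can be installed.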
knobs := f.Server().(*server.TestServer).Cfg.TestingKnobs.
DistSQL.(*execinfra.TestingKnobs).
Changefeed.(*TestingKnobs)
sqlDB.Exec(t, `CREATE TABLE foo(val INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO foo (val) SELECT * FROM generate_series(0, 999)`)
sqlDB.Exec(t, `CREATE TABLE bar(val INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO bar (val) SELECT * FROM generate_series(0, 999)`)
// Ensure Scan Requests are always small enough that we receive multiple
// resolved events during a backfill
knobs.FeedKnobs.BeforeScanRequest = func(b *kv.Batch) error {
b.Header.MaxSpanRequestKeys = 10
return nil
}
testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo WITH resolved = '1s', no_initial_scan`)
jobFeed := testFeed.(cdctest.EnterpriseTestFeed)
jobRegistry := f.Server().JobRegistry().(*jobs.Registry)
// Kafka feeds are not buffered, so we have to consume messages.
g := ctxgroup.WithContext(context.Background())
g.Go(func() error {
for {
_, err := testFeed.Next()
if err != nil {
return err
}
}
})
defer func() {
closeFeed(t, testFeed)
_ = g.Wait()
}()
// Helper to read job progress
loadProgress := func() jobspb.Progress {
jobID := jobFeed.JobID()
job, err := jobRegistry.LoadJob(context.Background(), jobID)
require.NoError(t, err)
return job.Progress()
}
// Ensure initial backfill completes
testutils.SucceedsSoon(t, func() error {
prog := loadProgress()
if p := prog.GetHighWater(); p != nil && !p.IsEmpty() {
return nil
}
return errors.New("waiting for highwater")
})
		// Pause the job and set up overrides to force a checkpoint.
require.NoError(t, jobFeed.Pause())
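		// Use a large checkpoint size limit so the span checkpoint is not truncated.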
var maxCheckpointSize int64 = 100 << 20
// Checkpoint progress frequently, and set the checkpoint size limit.
changefeedbase.FrontierCheckpointFrequency.Override(
context.Background(), &f.Server().ClusterSettings().SV, 10*time.Millisecond)
changefeedbase.FrontierCheckpointMaxBytes.Override(
context.Background(), &f.Server().ClusterSettings().SV, maxCheckpointSize)
// Note the tableSpan to avoid resolved events that leave no gaps
fooDesc := desctestutils.TestingGetPublicTableDescriptor(
f.Server().DB(), keys.SystemSQLCodec, "d", "foo")
tableSpan := fooDesc.PrimaryIndexSpan(keys.SystemSQLCodec)
		// ShouldSkipResolved ensures that once the backfill begins, the resolved events
		// belonging to that backfill (those with the timestamp immediately after the backfill
		// timestamp) are let through only some of the time, so that a checkpoint is
		// eventually created.
haveGaps := false
var backfillTimestamp hlc.Timestamp
var initialCheckpoint roachpb.SpanGroup
var foundCheckpoint int32
knobs.ShouldSkipResolved = func(r *jobspb.ResolvedSpan) bool {
			// Stop resolving anything after the checkpoint is set, to avoid eventually resolving the full span.
if initialCheckpoint.Len() > 0 {
return true
}
// A backfill begins when the backfill resolved event arrives, which has a
// timestamp such that all backfill spans have a timestamp of
// timestamp.Next()
if r.BoundaryType == jobspb.ResolvedSpan_BACKFILL {
backfillTimestamp = r.Timestamp
return false
}
// Check if we've set a checkpoint yet
progress := loadProgress()
if p := progress.GetChangefeed(); p != nil && p.Checkpoint != nil && len(p.Checkpoint.Spans) > 0 {
initialCheckpoint.Add(p.Checkpoint.Spans...)
atomic.StoreInt32(&foundCheckpoint, 1)
}
// Filter non-backfill-related spans
if !r.Timestamp.Equal(backfillTimestamp.Next()) {
// Only allow spans prior to a valid backfillTimestamp to avoid moving past the backfill
return !(backfillTimestamp.IsEmpty() || r.Timestamp.LessEq(backfillTimestamp.Next()))
}
// Only allow resolving if we definitely won't have a completely resolved table
if !r.Span.Equal(tableSpan) && haveGaps {
return rnd.Intn(10) > 7
}
haveGaps = true
return true
}
require.NoError(t, jobFeed.Resume())
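		// Adding a column with a default value triggers a schema-change backfill on foo.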
sqlDB.Exec(t, `ALTER TABLE foo ADD COLUMN b STRING DEFAULT 'd'`)
// Wait for a checkpoint to have been set
testutils.SucceedsSoon(t, func() error {
if atomic.LoadInt32(&foundCheckpoint) != 0 {
return nil
}
return errors.New("waiting for checkpoint")
})
require.NoError(t, jobFeed.Pause())
waitForJobStatus(sqlDB, t, jobFeed.JobID(), `paused`)
errMsg := fmt.Sprintf(
`pq: cannot perform initial scan on newly added targets while the checkpoint is non-empty, please unpause the changefeed and wait until the high watermark progresses past the current value %s to add these targets.`,
backfillTimestamp.AsOfSystemTime(),
)
sqlDB.ExpectErr(t, errMsg, fmt.Sprintf(`ALTER CHANGEFEED %d ADD bar WITH initial_scan`, jobFeed.JobID()))
}
t.Run(`kafka`, kafkaTest(testFn, feedTestNoTenants))
}
func TestAlterChangefeedAddTargetsDuringBackfill(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderRace(t)
rnd, _ := randutil.NewTestRand()
var maxCheckpointSize int64 = 100
testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
sqlDB := sqlutils.MakeSQLRunner(db)
sqlDB.Exec(t, `CREATE TABLE foo(val INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO foo (val) SELECT * FROM generate_series(0, 999)`)
sqlDB.Exec(t, `CREATE TABLE bar(val INT PRIMARY KEY)`)
sqlDB.Exec(t, `INSERT INTO bar (val) SELECT * FROM generate_series(0, 999)`)
fooDesc := desctestutils.TestingGetPublicTableDescriptor(
f.Server().DB(), keys.SystemSQLCodec, "d", "foo")
fooTableSpan := fooDesc.PrimaryIndexSpan(keys.SystemSQLCodec)
knobs := f.Server().(*server.TestServer).Cfg.TestingKnobs.
DistSQL.(*execinfra.TestingKnobs).
Changefeed.(*TestingKnobs)
		// Ensure Scan Requests are always small enough that we receive multiple
		// resolved events during a backfill.
knobs.FeedKnobs.BeforeScanRequest = func(b *kv.Batch) error {
b.Header.MaxSpanRequestKeys = 1 + rnd.Int63n(100)
return nil
}
		// Emit resolved events for the majority of spans. Be extra paranoid and ensure that
		// we have at least one span for which we don't emit a resolved timestamp (to force checkpointing).
haveGaps := false
knobs.ShouldSkipResolved = func(r *jobspb.ResolvedSpan) bool {
if r.Span.Equal(fooTableSpan) {
				// Do not emit resolved events for the entire table span.
				// We "simulate" a large table by splitting the single table span into many parts, so
				// we want to resolve those sub-spans instead of the entire table span.
				// However, we have to emit something -- otherwise the entire changefeed
				// machinery would not work.
r.Span.EndKey = fooTableSpan.Key.Next()
return false
}
if haveGaps {
return rnd.Intn(10) > 7
}
haveGaps = true
return true
}
// Checkpoint progress frequently, and set the checkpoint size limit.
changefeedbase.FrontierCheckpointFrequency.Override(
context.Background(), &f.Server().ClusterSettings().SV, 1)
changefeedbase.FrontierCheckpointMaxBytes.Override(
context.Background(), &f.Server().ClusterSettings().SV, maxCheckpointSize)
registry := f.Server().JobRegistry().(*jobs.Registry)
testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo WITH resolved = '100ms'`)
// Kafka feeds are not buffered, so we have to consume messages.
g := ctxgroup.WithContext(context.Background())
g.Go(func() error {
expectedValues := make([]string, 1000)
for j := 0; j <= 999; j++ {
expectedValues[j] = fmt.Sprintf(`foo: [%d]->{"after": {"val": %d}}`, j, j)
}
err := assertPayloadsBaseErr(testFeed, expectedValues, false, false)
if err != nil {
return err
}
for j := 0; j <= 999; j++ {
expectedValues[j] = fmt.Sprintf(`bar: [%d]->{"after": {"val": %d}}`, j, j)
}
err = assertPayloadsBaseErr(testFeed, expectedValues, false, false)
return err
})
defer func() {
require.NoError(t, g.Wait())
closeFeed(t, testFeed)
}()
jobFeed := testFeed.(cdctest.EnterpriseTestFeed)
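		// Helper to read the latest job progress.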
loadProgress := func() jobspb.Progress {
jobID := jobFeed.JobID()
job, err := registry.LoadJob(context.Background(), jobID)
require.NoError(t, err)
return job.Progress()
}
// Wait for non-nil checkpoint.
testutils.SucceedsSoon(t, func() error {
progress := loadProgress()
if p := progress.GetChangefeed(); p != nil && p.Checkpoint != nil && len(p.Checkpoint.Spans) > 0 {
return nil
}
return errors.New("waiting for checkpoint")
})
// Pause the job and read and verify the latest checkpoint information.
require.NoError(t, jobFeed.Pause())
progress := loadProgress()
require.NotNil(t, progress.GetChangefeed())
h := progress.GetHighWater()
noHighWater := h == nil || h.IsEmpty()
require.True(t, noHighWater)
jobCheckpoint := progress.GetChangefeed().Checkpoint
require.Less(t, 0, len(jobCheckpoint.Spans))
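		// Accumulate the checkpointed spans so we can later verify that spans resolved
		// after the resume were not already checkpointed.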
var checkpoint roachpb.SpanGroup
checkpoint.Add(jobCheckpoint.Spans...)
waitForJobStatus(sqlDB, t, jobFeed.JobID(), `paused`)
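		// Add the new target while the changefeed still has a span-level checkpoint
		// and no high-water mark.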
sqlDB.Exec(t, fmt.Sprintf(`ALTER CHANGEFEED %d ADD bar WITH initial_scan`, jobFeed.JobID()))
		// Collect the spans we attempt to resolve after we resume.
var resolvedFoo []roachpb.Span
knobs.ShouldSkipResolved = func(r *jobspb.ResolvedSpan) bool {
if !r.Span.Equal(fooTableSpan) {
resolvedFoo = append(resolvedFoo, r.Span)
}
return false
}
require.NoError(t, jobFeed.Resume())
// Wait for the high water mark to be non-zero.
testutils.SucceedsSoon(t, func() error {
prog := loadProgress()
if p := prog.GetHighWater(); p != nil && !p.IsEmpty() {
return nil
}
return errors.New("waiting for highwater")
})
		// At this point, the highwater mark should be set, and the previous checkpoint should be gone.
progress = loadProgress()
require.NotNil(t, progress.GetChangefeed())
require.Equal(t, 0, len(progress.GetChangefeed().Checkpoint.Spans))
// Verify that none of the resolvedFoo spans after resume were checkpointed.
for _, sp := range resolvedFoo {
require.Falsef(t, checkpoint.Contains(sp.Key), "span should not have been resolved: %s", sp)
}
}
t.Run(`kafka`, kafkaTest(testFn, feedTestNoTenants))
}
| pkg/ccl/changefeedccl/alter_changefeed_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/67e659223cd2a2ae4ea2e5fa6d9461fd1418c4de | [
0.00019740493735298514,
0.00017282983753830194,
0.00015982732293196023,
0.00017319148173555732,
0.00000405155014959746
] |
{
"id": 5,
"code_window": [
"\tif err != nil {\n",
"\t\treturn roachpb.BulkOpSummary{}, errors.Wrap(err, \"exhausted retries\")\n",
"\t}\n",
"\treturn res, nil\n",
"}\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\treturn res, jobs.MarkPauseRequestError(errors.Wrap(err, \"exhausted retries\"))\n"
],
"file_path": "pkg/sql/importer/import_job.go",
"type": "replace",
"edit_start_line_idx": 1001
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"context"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
)
type lookupJoinNode struct {
input planNode
table *scanNode
// joinType is either INNER, LEFT_OUTER, LEFT_SEMI, or LEFT_ANTI.
joinType descpb.JoinType
// eqCols represents the part of the join condition used to perform
// the lookup into the index. It should only be set when lookupExpr is empty.
// eqCols identifies the columns from the input which are used for the
// lookup. These correspond to a prefix of the index columns (of the index we
// are looking up into).
eqCols []int
// eqColsAreKey is true when each lookup can return at most one row.
eqColsAreKey bool
// lookupExpr represents the part of the join condition used to perform
// the lookup into the index. It should only be set when eqCols is empty.
// lookupExpr is used instead of eqCols when the lookup condition is
// more complicated than a simple equality between input columns and index
// columns. In this case, lookupExpr specifies the expression that will be
// used to construct the spans for each lookup.
lookupExpr tree.TypedExpr
// If remoteLookupExpr is set, this is a locality optimized lookup join. In
// this case, lookupExpr contains the lookup join conditions targeting ranges
// located on local nodes (relative to the gateway region), and
// remoteLookupExpr contains the lookup join conditions targeting remote
// nodes. The optimizer will only plan a locality optimized lookup join if it
// is known that each lookup returns at most one row. This fact allows the
// execution engine to use the local conditions in lookupExpr first, and if a
// match is found locally for each input row, there is no need to search
// remote nodes. If a local match is not found for all input rows, the
// execution engine uses remoteLookupExpr to search remote nodes.
remoteLookupExpr tree.TypedExpr
// columns are the produced columns, namely the input columns and (unless the
// join type is semi or anti join) the columns in the table scanNode. It
// includes an additional continuation column when IsFirstJoinInPairedJoin
// is true.
columns colinfo.ResultColumns
// onCond is any ON condition to be used in conjunction with the implicit
// equality condition on eqCols or the conditions in lookupExpr.
onCond tree.TypedExpr
// At most one of is{First,Second}JoinInPairedJoiner can be true.
isFirstJoinInPairedJoiner bool
isSecondJoinInPairedJoiner bool
reqOrdering ReqOrdering
limitHint int64
}
func (lj *lookupJoinNode) startExec(params runParams) error {
panic("lookupJoinNode cannot be run in local mode")
}
func (lj *lookupJoinNode) Next(params runParams) (bool, error) {
panic("lookupJoinNode cannot be run in local mode")
}
func (lj *lookupJoinNode) Values() tree.Datums {
panic("lookupJoinNode cannot be run in local mode")
}
func (lj *lookupJoinNode) Close(ctx context.Context) {
lj.input.Close(ctx)
lj.table.Close(ctx)
}
| pkg/sql/lookup_join.go | 0 | https://github.com/cockroachdb/cockroach/commit/67e659223cd2a2ae4ea2e5fa6d9461fd1418c4de | [
0.00020679636509157717,
0.00017282625776715577,
0.00016375392442569137,
0.0001683994778431952,
0.0000119720616567065
] |
{
"id": 0,
"code_window": [
"\n",
"func restGetNeed(m *model.Model, w http.ResponseWriter, r *http.Request) {\n",
"\tvar qs = r.URL.Query()\n",
"\tvar folder = qs.Get(\"folder\")\n",
"\n",
"\tfiles := m.NeedFolderFilesLimited(folder, 100, 2500) // max 100 files or 2500 blocks\n",
"\n",
"\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tfiles := m.NeedFolderFilesLimited(folder, 100) // max 100 files\n",
"\t// Convert the struct to a more loose structure, and inject the size.\n",
"\toutput := make([]map[string]interface{}, 0, len(files))\n",
"\tfor _, file := range files {\n",
"\t\toutput = append(output, map[string]interface{}{\n",
"\t\t\t\"Name\": file.Name,\n",
"\t\t\t\"Flags\": file.Flags,\n",
"\t\t\t\"Modified\": file.Modified,\n",
"\t\t\t\"Version\": file.Version,\n",
"\t\t\t\"LocalVersion\": file.LocalVersion,\n",
"\t\t\t\"NumBlocks\": file.NumBlocks,\n",
"\t\t\t\"Size\": protocol.BlocksToSize(file.NumBlocks),\n",
"\t\t})\n",
"\t}\n"
],
"file_path": "cmd/syncthing/gui.go",
"type": "replace",
"edit_start_line_idx": 293
} | // Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"mime"
"net"
"net/http"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"time"
"code.google.com/p/go.crypto/bcrypt"
"github.com/calmh/logger"
"github.com/syncthing/syncthing/internal/auto"
"github.com/syncthing/syncthing/internal/config"
"github.com/syncthing/syncthing/internal/discover"
"github.com/syncthing/syncthing/internal/events"
"github.com/syncthing/syncthing/internal/model"
"github.com/syncthing/syncthing/internal/osutil"
"github.com/syncthing/syncthing/internal/protocol"
"github.com/syncthing/syncthing/internal/upgrade"
"github.com/vitrun/qart/qr"
)
type guiError struct {
Time time.Time
Error string
}
var (
configInSync = true
guiErrors = []guiError{}
guiErrorsMut sync.Mutex
modt = time.Now().UTC().Format(http.TimeFormat)
eventSub *events.BufferedSubscription
)
func init() {
l.AddHandler(logger.LevelWarn, showGuiError)
sub := events.Default.Subscribe(events.AllEvents)
eventSub = events.NewBufferedSubscription(sub, 1000)
}
func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) error {
var err error
cert, err := loadCert(confDir, "https-")
if err != nil {
l.Infoln("Loading HTTPS certificate:", err)
l.Infoln("Creating new HTTPS certificate")
newCertificate(confDir, "https-")
cert, err = loadCert(confDir, "https-")
}
if err != nil {
return err
}
tlsCfg := &tls.Config{
Certificates: []tls.Certificate{cert},
ServerName: "syncthing",
}
rawListener, err := net.Listen("tcp", cfg.Address)
if err != nil {
return err
}
listener := &DowngradingListener{rawListener, tlsCfg}
// The GET handlers
getRestMux := http.NewServeMux()
getRestMux.HandleFunc("/rest/ping", restPing)
getRestMux.HandleFunc("/rest/completion", withModel(m, restGetCompletion))
getRestMux.HandleFunc("/rest/config", restGetConfig)
getRestMux.HandleFunc("/rest/config/sync", restGetConfigInSync)
getRestMux.HandleFunc("/rest/connections", withModel(m, restGetConnections))
getRestMux.HandleFunc("/rest/autocomplete/directory", restGetAutocompleteDirectory)
getRestMux.HandleFunc("/rest/discovery", restGetDiscovery)
getRestMux.HandleFunc("/rest/errors", restGetErrors)
getRestMux.HandleFunc("/rest/events", restGetEvents)
getRestMux.HandleFunc("/rest/ignores", withModel(m, restGetIgnores))
getRestMux.HandleFunc("/rest/lang", restGetLang)
getRestMux.HandleFunc("/rest/model", withModel(m, restGetModel))
getRestMux.HandleFunc("/rest/need", withModel(m, restGetNeed))
getRestMux.HandleFunc("/rest/deviceid", restGetDeviceID)
getRestMux.HandleFunc("/rest/report", withModel(m, restGetReport))
getRestMux.HandleFunc("/rest/system", restGetSystem)
getRestMux.HandleFunc("/rest/upgrade", restGetUpgrade)
getRestMux.HandleFunc("/rest/version", restGetVersion)
getRestMux.HandleFunc("/rest/stats/device", withModel(m, restGetDeviceStats))
// Debug endpoints, not for general use
getRestMux.HandleFunc("/rest/debug/peerCompletion", withModel(m, restGetPeerCompletion))
// The POST handlers
postRestMux := http.NewServeMux()
postRestMux.HandleFunc("/rest/ping", restPing)
postRestMux.HandleFunc("/rest/config", withModel(m, restPostConfig))
postRestMux.HandleFunc("/rest/discovery/hint", restPostDiscoveryHint)
postRestMux.HandleFunc("/rest/error", restPostError)
postRestMux.HandleFunc("/rest/error/clear", restClearErrors)
postRestMux.HandleFunc("/rest/ignores", withModel(m, restPostIgnores))
postRestMux.HandleFunc("/rest/model/override", withModel(m, restPostOverride))
postRestMux.HandleFunc("/rest/reset", restPostReset)
postRestMux.HandleFunc("/rest/restart", restPostRestart)
postRestMux.HandleFunc("/rest/shutdown", restPostShutdown)
postRestMux.HandleFunc("/rest/upgrade", restPostUpgrade)
postRestMux.HandleFunc("/rest/scan", withModel(m, restPostScan))
// A handler that splits requests between the two above and disables
// caching
restMux := noCacheMiddleware(getPostHandler(getRestMux, postRestMux))
// The main routing handler
mux := http.NewServeMux()
mux.Handle("/rest/", restMux)
mux.HandleFunc("/qr/", getQR)
// Serve compiled in assets unless an asset directory was set (for development)
mux.Handle("/", embeddedStatic(assetDir))
// Wrap everything in CSRF protection. The /rest prefix should be
// protected, other requests will grant cookies.
handler := csrfMiddleware("/rest", cfg.APIKey, mux)
// Add our version as a header to responses
handler = withVersionMiddleware(handler)
// Wrap everything in basic auth, if user/password is set.
if len(cfg.User) > 0 && len(cfg.Password) > 0 {
handler = basicAuthAndSessionMiddleware(cfg, handler)
}
// Redirect to HTTPS if we are supposed to
if cfg.UseTLS {
handler = redirectToHTTPSMiddleware(handler)
}
srv := http.Server{
Handler: handler,
ReadTimeout: 2 * time.Second,
}
go func() {
err := srv.Serve(listener)
if err != nil {
panic(err)
}
}()
return nil
}
func getPostHandler(get, post http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case "GET":
get.ServeHTTP(w, r)
case "POST":
post.ServeHTTP(w, r)
default:
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
}
})
}
func redirectToHTTPSMiddleware(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Add a generous access-control-allow-origin header since we may be
// redirecting REST requests over protocols
w.Header().Add("Access-Control-Allow-Origin", "*")
if r.TLS == nil {
// Redirect HTTP requests to HTTPS
r.URL.Host = r.Host
r.URL.Scheme = "https"
http.Redirect(w, r, r.URL.String(), http.StatusFound)
} else {
h.ServeHTTP(w, r)
}
})
}
func noCacheMiddleware(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Cache-Control", "no-cache")
h.ServeHTTP(w, r)
})
}
func withVersionMiddleware(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("X-Syncthing-Version", Version)
h.ServeHTTP(w, r)
})
}
func withModel(m *model.Model, h func(m *model.Model, w http.ResponseWriter, r *http.Request)) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
h(m, w, r)
}
}
func restPing(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(map[string]string{
"ping": "pong",
})
}
func restGetVersion(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(map[string]string{
"version": Version,
"longVersion": LongVersion,
"os": runtime.GOOS,
"arch": runtime.GOARCH,
})
}
func restGetCompletion(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var folder = qs.Get("folder")
var deviceStr = qs.Get("device")
device, err := protocol.DeviceIDFromString(deviceStr)
if err != nil {
http.Error(w, err.Error(), 500)
return
}
res := map[string]float64{
"completion": m.Completion(device, folder),
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restGetModel(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var folder = qs.Get("folder")
var res = make(map[string]interface{})
res["invalid"] = cfg.Folders()[folder].Invalid
globalFiles, globalDeleted, globalBytes := m.GlobalSize(folder)
res["globalFiles"], res["globalDeleted"], res["globalBytes"] = globalFiles, globalDeleted, globalBytes
localFiles, localDeleted, localBytes := m.LocalSize(folder)
res["localFiles"], res["localDeleted"], res["localBytes"] = localFiles, localDeleted, localBytes
needFiles, needBytes := m.NeedSize(folder)
res["needFiles"], res["needBytes"] = needFiles, needBytes
res["inSyncFiles"], res["inSyncBytes"] = globalFiles-needFiles, globalBytes-needBytes
res["state"], res["stateChanged"] = m.State(folder)
res["version"] = m.CurrentLocalVersion(folder) + m.RemoteLocalVersion(folder)
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restPostOverride(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var folder = qs.Get("folder")
go m.Override(folder)
}
func restGetNeed(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var folder = qs.Get("folder")
files := m.NeedFolderFilesLimited(folder, 100, 2500) // max 100 files or 2500 blocks
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(files)
}
func restGetConnections(m *model.Model, w http.ResponseWriter, r *http.Request) {
var res = m.ConnectionStats()
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restGetDeviceStats(m *model.Model, w http.ResponseWriter, r *http.Request) {
var res = m.DeviceStatistics()
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restGetConfig(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(cfg.Raw())
}
func restPostConfig(m *model.Model, w http.ResponseWriter, r *http.Request) {
var newCfg config.Configuration
err := json.NewDecoder(r.Body).Decode(&newCfg)
if err != nil {
l.Warnln("decoding posted config:", err)
http.Error(w, err.Error(), 500)
return
} else {
if newCfg.GUI.Password != cfg.GUI().Password {
if newCfg.GUI.Password != "" {
hash, err := bcrypt.GenerateFromPassword([]byte(newCfg.GUI.Password), 0)
if err != nil {
l.Warnln("bcrypting password:", err)
http.Error(w, err.Error(), 500)
return
} else {
newCfg.GUI.Password = string(hash)
}
}
}
// Start or stop usage reporting as appropriate
if curAcc := cfg.Options().URAccepted; newCfg.Options.URAccepted > curAcc {
// UR was enabled
newCfg.Options.URAccepted = usageReportVersion
err := sendUsageReport(m)
if err != nil {
l.Infoln("Usage report:", err)
}
go usageReportingLoop(m)
} else if newCfg.Options.URAccepted < curAcc {
// UR was disabled
newCfg.Options.URAccepted = -1
stopUsageReporting()
}
// Activate and save
configInSync = !config.ChangeRequiresRestart(cfg.Raw(), newCfg)
cfg.Replace(newCfg)
cfg.Save()
}
}
func restGetConfigInSync(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(map[string]bool{"configInSync": configInSync})
}
func restPostRestart(w http.ResponseWriter, r *http.Request) {
flushResponse(`{"ok": "restarting"}`, w)
go restart()
}
func restPostReset(w http.ResponseWriter, r *http.Request) {
flushResponse(`{"ok": "resetting folders"}`, w)
resetFolders()
go restart()
}
func restPostShutdown(w http.ResponseWriter, r *http.Request) {
flushResponse(`{"ok": "shutting down"}`, w)
go shutdown()
}
func flushResponse(s string, w http.ResponseWriter) {
w.Write([]byte(s + "\n"))
f := w.(http.Flusher)
f.Flush()
}
var cpuUsagePercent [10]float64 // The last ten seconds
var cpuUsageLock sync.RWMutex
func restGetSystem(w http.ResponseWriter, r *http.Request) {
var m runtime.MemStats
runtime.ReadMemStats(&m)
tilde, _ := osutil.ExpandTilde("~")
res := make(map[string]interface{})
res["myID"] = myID.String()
res["goroutines"] = runtime.NumGoroutine()
res["alloc"] = m.Alloc
res["sys"] = m.Sys - m.HeapReleased
res["tilde"] = tilde
if cfg.Options().GlobalAnnEnabled && discoverer != nil {
res["extAnnounceOK"] = discoverer.ExtAnnounceOK()
}
cpuUsageLock.RLock()
var cpusum float64
for _, p := range cpuUsagePercent {
cpusum += p
}
cpuUsageLock.RUnlock()
res["cpuPercent"] = cpusum / 10
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restGetErrors(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
guiErrorsMut.Lock()
json.NewEncoder(w).Encode(map[string][]guiError{"errors": guiErrors})
guiErrorsMut.Unlock()
}
func restPostError(w http.ResponseWriter, r *http.Request) {
bs, _ := ioutil.ReadAll(r.Body)
r.Body.Close()
showGuiError(0, string(bs))
}
func restClearErrors(w http.ResponseWriter, r *http.Request) {
guiErrorsMut.Lock()
guiErrors = []guiError{}
guiErrorsMut.Unlock()
}
func showGuiError(l logger.LogLevel, err string) {
guiErrorsMut.Lock()
guiErrors = append(guiErrors, guiError{time.Now(), err})
if len(guiErrors) > 5 {
guiErrors = guiErrors[len(guiErrors)-5:]
}
guiErrorsMut.Unlock()
}
func restPostDiscoveryHint(w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var device = qs.Get("device")
var addr = qs.Get("addr")
if len(device) != 0 && len(addr) != 0 && discoverer != nil {
discoverer.Hint(device, []string{addr})
}
}
func restGetDiscovery(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
devices := map[string][]discover.CacheEntry{}
if discoverer != nil {
// Device ids can't be marshalled as keys so we need to manually
// rebuild this map using strings. Discoverer may be nil if discovery
// has not started yet.
for device, entries := range discoverer.All() {
devices[device.String()] = entries
}
}
json.NewEncoder(w).Encode(devices)
}
func restGetReport(m *model.Model, w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(reportData(m))
}
func restGetIgnores(m *model.Model, w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
w.Header().Set("Content-Type", "application/json; charset=utf-8")
ignores, patterns, err := m.GetIgnores(qs.Get("folder"))
if err != nil {
http.Error(w, err.Error(), 500)
return
}
json.NewEncoder(w).Encode(map[string][]string{
"ignore": ignores,
"patterns": patterns,
})
}
func restPostIgnores(m *model.Model, w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
var data map[string][]string
err := json.NewDecoder(r.Body).Decode(&data)
r.Body.Close()
if err != nil {
http.Error(w, err.Error(), 500)
return
}
err = m.SetIgnores(qs.Get("folder"), data["ignore"])
if err != nil {
http.Error(w, err.Error(), 500)
return
}
restGetIgnores(m, w, r)
}
func restGetEvents(w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
sinceStr := qs.Get("since")
limitStr := qs.Get("limit")
since, _ := strconv.Atoi(sinceStr)
limit, _ := strconv.Atoi(limitStr)
w.Header().Set("Content-Type", "application/json; charset=utf-8")
// Flush before blocking, to indicate that we've received the request
// and that it should not be retried.
f := w.(http.Flusher)
f.Flush()
evs := eventSub.Since(since, nil)
if 0 < limit && limit < len(evs) {
evs = evs[len(evs)-limit:]
}
json.NewEncoder(w).Encode(evs)
}
func restGetUpgrade(w http.ResponseWriter, r *http.Request) {
rel, err := upgrade.LatestRelease(strings.Contains(Version, "-beta"))
if err != nil {
http.Error(w, err.Error(), 500)
return
}
res := make(map[string]interface{})
res["running"] = Version
res["latest"] = rel.Tag
res["newer"] = upgrade.CompareVersions(rel.Tag, Version) == 1
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restGetDeviceID(w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
idStr := qs.Get("id")
id, err := protocol.DeviceIDFromString(idStr)
w.Header().Set("Content-Type", "application/json; charset=utf-8")
if err == nil {
json.NewEncoder(w).Encode(map[string]string{
"id": id.String(),
})
} else {
json.NewEncoder(w).Encode(map[string]string{
"error": err.Error(),
})
}
}
func restGetLang(w http.ResponseWriter, r *http.Request) {
lang := r.Header.Get("Accept-Language")
var langs []string
for _, l := range strings.Split(lang, ",") {
parts := strings.SplitN(l, ";", 2)
langs = append(langs, strings.ToLower(strings.TrimSpace(parts[0])))
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(langs)
}
func restPostUpgrade(w http.ResponseWriter, r *http.Request) {
rel, err := upgrade.LatestRelease(strings.Contains(Version, "-beta"))
if err != nil {
l.Warnln("getting latest release:", err)
http.Error(w, err.Error(), 500)
return
}
if upgrade.CompareVersions(rel.Tag, Version) == 1 {
err = upgrade.UpgradeTo(rel, GoArchExtra)
if err != nil {
l.Warnln("upgrading:", err)
http.Error(w, err.Error(), 500)
return
}
flushResponse(`{"ok": "restarting"}`, w)
l.Infoln("Upgrading")
stop <- exitUpgrading
}
}
func restPostScan(m *model.Model, w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
folder := qs.Get("folder")
sub := qs.Get("sub")
err := m.ScanFolderSub(folder, sub)
if err != nil {
http.Error(w, err.Error(), 500)
}
}
func getQR(w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var text = qs.Get("text")
code, err := qr.Encode(text, qr.M)
if err != nil {
http.Error(w, "Invalid", 500)
return
}
w.Header().Set("Content-Type", "image/png")
w.Write(code.PNG())
}
func restGetPeerCompletion(m *model.Model, w http.ResponseWriter, r *http.Request) {
tot := map[string]float64{}
count := map[string]float64{}
for _, folder := range cfg.Folders() {
for _, device := range folder.DeviceIDs() {
deviceStr := device.String()
if m.ConnectedTo(device) {
tot[deviceStr] += m.Completion(device, folder.ID)
} else {
tot[deviceStr] = 0
}
count[deviceStr]++
}
}
comp := map[string]int{}
for device := range tot {
comp[device] = int(tot[device] / count[device])
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(comp)
}
func restGetAutocompleteDirectory(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
qs := r.URL.Query()
current := qs.Get("current")
search, _ := osutil.ExpandTilde(current)
pathSeparator := string(os.PathSeparator)
if strings.HasSuffix(current, pathSeparator) && !strings.HasSuffix(search, pathSeparator) {
search = search + pathSeparator
}
subdirectories, _ := filepath.Glob(search + "*")
ret := make([]string, 0, 10)
for _, subdirectory := range subdirectories {
info, err := os.Stat(subdirectory)
if err == nil && info.IsDir() {
ret = append(ret, subdirectory + pathSeparator)
if len(ret) > 9 {
break
}
}
}
json.NewEncoder(w).Encode(ret)
}
func embeddedStatic(assetDir string) http.Handler {
assets := auto.Assets()
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
file := r.URL.Path
if file[0] == '/' {
file = file[1:]
}
if len(file) == 0 {
file = "index.html"
}
if assetDir != "" {
p := filepath.Join(assetDir, filepath.FromSlash(file))
_, err := os.Stat(p)
if err == nil {
http.ServeFile(w, r, p)
return
}
}
bs, ok := assets[file]
if !ok {
http.NotFound(w, r)
return
}
mtype := mimeTypeForFile(file)
if len(mtype) != 0 {
w.Header().Set("Content-Type", mtype)
}
w.Header().Set("Content-Length", fmt.Sprintf("%d", len(bs)))
w.Header().Set("Last-Modified", modt)
w.Write(bs)
})
}
func mimeTypeForFile(file string) string {
// We use a built in table of the common types since the system
// TypeByExtension might be unreliable. But if we don't know, we delegate
// to the system.
ext := filepath.Ext(file)
switch ext {
case ".htm", ".html":
return "text/html"
case ".css":
return "text/css"
case ".js":
return "application/javascript"
case ".json":
return "application/json"
case ".png":
return "image/png"
case ".ttf":
return "application/x-font-ttf"
case ".woff":
return "application/x-font-woff"
default:
return mime.TypeByExtension(ext)
}
}
| cmd/syncthing/gui.go | 1 | https://github.com/syncthing/syncthing/commit/59a85c1d751c85e585ec93398c3ba5c50bdef91f | [
0.9981247782707214,
0.15745119750499725,
0.00016539850912522525,
0.0004044293309561908,
0.335911363363266
] |
{
"id": 0,
"code_window": [
"\n",
"func restGetNeed(m *model.Model, w http.ResponseWriter, r *http.Request) {\n",
"\tvar qs = r.URL.Query()\n",
"\tvar folder = qs.Get(\"folder\")\n",
"\n",
"\tfiles := m.NeedFolderFilesLimited(folder, 100, 2500) // max 100 files or 2500 blocks\n",
"\n",
"\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tfiles := m.NeedFolderFilesLimited(folder, 100) // max 100 files\n",
"\t// Convert the struct to a more loose structure, and inject the size.\n",
"\toutput := make([]map[string]interface{}, 0, len(files))\n",
"\tfor _, file := range files {\n",
"\t\toutput = append(output, map[string]interface{}{\n",
"\t\t\t\"Name\": file.Name,\n",
"\t\t\t\"Flags\": file.Flags,\n",
"\t\t\t\"Modified\": file.Modified,\n",
"\t\t\t\"Version\": file.Version,\n",
"\t\t\t\"LocalVersion\": file.LocalVersion,\n",
"\t\t\t\"NumBlocks\": file.NumBlocks,\n",
"\t\t\t\"Size\": protocol.BlocksToSize(file.NumBlocks),\n",
"\t\t})\n",
"\t}\n"
],
"file_path": "cmd/syncthing/gui.go",
"type": "replace",
"edit_start_line_idx": 293
} | // Copyright (c) 2014, Suryandaru Triandana <[email protected]>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"github.com/syndtr/goleveldb/leveldb/errors"
)
var (
ErrNotFound = errors.ErrNotFound
ErrSnapshotReleased = errors.New("leveldb: snapshot released")
ErrIterReleased = errors.New("leveldb: iterator released")
ErrClosed = errors.New("leveldb: closed")
)
| Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go | 0 | https://github.com/syncthing/syncthing/commit/59a85c1d751c85e585ec93398c3ba5c50bdef91f | [
0.00017611478688195348,
0.00017335425945930183,
0.00017059373203665018,
0.00017335425945930183,
0.0000027605274226516485
] |
{
"id": 0,
"code_window": [
"\n",
"func restGetNeed(m *model.Model, w http.ResponseWriter, r *http.Request) {\n",
"\tvar qs = r.URL.Query()\n",
"\tvar folder = qs.Get(\"folder\")\n",
"\n",
"\tfiles := m.NeedFolderFilesLimited(folder, 100, 2500) // max 100 files or 2500 blocks\n",
"\n",
"\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tfiles := m.NeedFolderFilesLimited(folder, 100) // max 100 files\n",
"\t// Convert the struct to a more loose structure, and inject the size.\n",
"\toutput := make([]map[string]interface{}, 0, len(files))\n",
"\tfor _, file := range files {\n",
"\t\toutput = append(output, map[string]interface{}{\n",
"\t\t\t\"Name\": file.Name,\n",
"\t\t\t\"Flags\": file.Flags,\n",
"\t\t\t\"Modified\": file.Modified,\n",
"\t\t\t\"Version\": file.Version,\n",
"\t\t\t\"LocalVersion\": file.LocalVersion,\n",
"\t\t\t\"NumBlocks\": file.NumBlocks,\n",
"\t\t\t\"Size\": protocol.BlocksToSize(file.NumBlocks),\n",
"\t\t})\n",
"\t}\n"
],
"file_path": "cmd/syncthing/gui.go",
"type": "replace",
"edit_start_line_idx": 293
} | // Copyright (c) 2012, Suryandaru Triandana <[email protected]>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// +build go1.3
package leveldb
import (
"sync/atomic"
"testing"
)
func BenchmarkDBReadConcurrent(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.gc()
defer p.close()
b.ResetTimer()
b.SetBytes(116)
b.RunParallel(func(pb *testing.PB) {
iter := p.newIter()
defer iter.Release()
for pb.Next() && iter.Next() {
}
})
}
func BenchmarkDBReadConcurrent2(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.gc()
defer p.close()
b.ResetTimer()
b.SetBytes(116)
var dir uint32
b.RunParallel(func(pb *testing.PB) {
iter := p.newIter()
defer iter.Release()
if atomic.AddUint32(&dir, 1)%2 == 0 {
for pb.Next() && iter.Next() {
}
} else {
if pb.Next() && iter.Last() {
for pb.Next() && iter.Prev() {
}
}
}
})
}
| Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/go13_bench_test.go | 0 | https://github.com/syncthing/syncthing/commit/59a85c1d751c85e585ec93398c3ba5c50bdef91f | [
0.0001765864435583353,
0.0001716554252197966,
0.00016751243674661964,
0.0001720425352687016,
0.0000031454710551770404
] |
{
"id": 0,
"code_window": [
"\n",
"func restGetNeed(m *model.Model, w http.ResponseWriter, r *http.Request) {\n",
"\tvar qs = r.URL.Query()\n",
"\tvar folder = qs.Get(\"folder\")\n",
"\n",
"\tfiles := m.NeedFolderFilesLimited(folder, 100, 2500) // max 100 files or 2500 blocks\n",
"\n",
"\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tfiles := m.NeedFolderFilesLimited(folder, 100) // max 100 files\n",
"\t// Convert the struct to a more loose structure, and inject the size.\n",
"\toutput := make([]map[string]interface{}, 0, len(files))\n",
"\tfor _, file := range files {\n",
"\t\toutput = append(output, map[string]interface{}{\n",
"\t\t\t\"Name\": file.Name,\n",
"\t\t\t\"Flags\": file.Flags,\n",
"\t\t\t\"Modified\": file.Modified,\n",
"\t\t\t\"Version\": file.Version,\n",
"\t\t\t\"LocalVersion\": file.LocalVersion,\n",
"\t\t\t\"NumBlocks\": file.NumBlocks,\n",
"\t\t\t\"Size\": protocol.BlocksToSize(file.NumBlocks),\n",
"\t\t})\n",
"\t}\n"
],
"file_path": "cmd/syncthing/gui.go",
"type": "replace",
"edit_start_line_idx": 293
} | package table
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/syndtr/goleveldb/leveldb/testutil"
)
func TestTable(t *testing.T) {
testutil.RunDefer()
RegisterFailHandler(Fail)
RunSpecs(t, "Table Suite")
}
| Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go | 0 | https://github.com/syncthing/syncthing/commit/59a85c1d751c85e585ec93398c3ba5c50bdef91f | [
0.00017230339290108532,
0.0001702530717011541,
0.00016820276505313814,
0.0001702530717011541,
0.00000205031392397359
] |
{
"id": 1,
"code_window": [
"\n",
"\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n",
"\tjson.NewEncoder(w).Encode(files)\n",
"}\n",
"\n",
"func restGetConnections(m *model.Model, w http.ResponseWriter, r *http.Request) {\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tjson.NewEncoder(w).Encode(output)\n"
],
"file_path": "cmd/syncthing/gui.go",
"type": "replace",
"edit_start_line_idx": 296
} | // Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"mime"
"net"
"net/http"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"time"
"code.google.com/p/go.crypto/bcrypt"
"github.com/calmh/logger"
"github.com/syncthing/syncthing/internal/auto"
"github.com/syncthing/syncthing/internal/config"
"github.com/syncthing/syncthing/internal/discover"
"github.com/syncthing/syncthing/internal/events"
"github.com/syncthing/syncthing/internal/model"
"github.com/syncthing/syncthing/internal/osutil"
"github.com/syncthing/syncthing/internal/protocol"
"github.com/syncthing/syncthing/internal/upgrade"
"github.com/vitrun/qart/qr"
)
type guiError struct {
Time time.Time
Error string
}
var (
configInSync = true
guiErrors = []guiError{}
guiErrorsMut sync.Mutex
modt = time.Now().UTC().Format(http.TimeFormat)
eventSub *events.BufferedSubscription
)
func init() {
l.AddHandler(logger.LevelWarn, showGuiError)
sub := events.Default.Subscribe(events.AllEvents)
eventSub = events.NewBufferedSubscription(sub, 1000)
}
func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) error {
var err error
cert, err := loadCert(confDir, "https-")
if err != nil {
l.Infoln("Loading HTTPS certificate:", err)
l.Infoln("Creating new HTTPS certificate")
newCertificate(confDir, "https-")
cert, err = loadCert(confDir, "https-")
}
if err != nil {
return err
}
tlsCfg := &tls.Config{
Certificates: []tls.Certificate{cert},
ServerName: "syncthing",
}
rawListener, err := net.Listen("tcp", cfg.Address)
if err != nil {
return err
}
listener := &DowngradingListener{rawListener, tlsCfg}
// The GET handlers
getRestMux := http.NewServeMux()
getRestMux.HandleFunc("/rest/ping", restPing)
getRestMux.HandleFunc("/rest/completion", withModel(m, restGetCompletion))
getRestMux.HandleFunc("/rest/config", restGetConfig)
getRestMux.HandleFunc("/rest/config/sync", restGetConfigInSync)
getRestMux.HandleFunc("/rest/connections", withModel(m, restGetConnections))
getRestMux.HandleFunc("/rest/autocomplete/directory", restGetAutocompleteDirectory)
getRestMux.HandleFunc("/rest/discovery", restGetDiscovery)
getRestMux.HandleFunc("/rest/errors", restGetErrors)
getRestMux.HandleFunc("/rest/events", restGetEvents)
getRestMux.HandleFunc("/rest/ignores", withModel(m, restGetIgnores))
getRestMux.HandleFunc("/rest/lang", restGetLang)
getRestMux.HandleFunc("/rest/model", withModel(m, restGetModel))
getRestMux.HandleFunc("/rest/need", withModel(m, restGetNeed))
getRestMux.HandleFunc("/rest/deviceid", restGetDeviceID)
getRestMux.HandleFunc("/rest/report", withModel(m, restGetReport))
getRestMux.HandleFunc("/rest/system", restGetSystem)
getRestMux.HandleFunc("/rest/upgrade", restGetUpgrade)
getRestMux.HandleFunc("/rest/version", restGetVersion)
getRestMux.HandleFunc("/rest/stats/device", withModel(m, restGetDeviceStats))
// Debug endpoints, not for general use
getRestMux.HandleFunc("/rest/debug/peerCompletion", withModel(m, restGetPeerCompletion))
// The POST handlers
postRestMux := http.NewServeMux()
postRestMux.HandleFunc("/rest/ping", restPing)
postRestMux.HandleFunc("/rest/config", withModel(m, restPostConfig))
postRestMux.HandleFunc("/rest/discovery/hint", restPostDiscoveryHint)
postRestMux.HandleFunc("/rest/error", restPostError)
postRestMux.HandleFunc("/rest/error/clear", restClearErrors)
postRestMux.HandleFunc("/rest/ignores", withModel(m, restPostIgnores))
postRestMux.HandleFunc("/rest/model/override", withModel(m, restPostOverride))
postRestMux.HandleFunc("/rest/reset", restPostReset)
postRestMux.HandleFunc("/rest/restart", restPostRestart)
postRestMux.HandleFunc("/rest/shutdown", restPostShutdown)
postRestMux.HandleFunc("/rest/upgrade", restPostUpgrade)
postRestMux.HandleFunc("/rest/scan", withModel(m, restPostScan))
// A handler that splits requests between the two above and disables
// caching
restMux := noCacheMiddleware(getPostHandler(getRestMux, postRestMux))
// The main routing handler
mux := http.NewServeMux()
mux.Handle("/rest/", restMux)
mux.HandleFunc("/qr/", getQR)
// Serve compiled in assets unless an asset directory was set (for development)
mux.Handle("/", embeddedStatic(assetDir))
// Wrap everything in CSRF protection. The /rest prefix should be
// protected, other requests will grant cookies.
handler := csrfMiddleware("/rest", cfg.APIKey, mux)
// Add our version as a header to responses
handler = withVersionMiddleware(handler)
// Wrap everything in basic auth, if user/password is set.
if len(cfg.User) > 0 && len(cfg.Password) > 0 {
handler = basicAuthAndSessionMiddleware(cfg, handler)
}
// Redirect to HTTPS if we are supposed to
if cfg.UseTLS {
handler = redirectToHTTPSMiddleware(handler)
}
srv := http.Server{
Handler: handler,
ReadTimeout: 2 * time.Second,
}
go func() {
err := srv.Serve(listener)
if err != nil {
panic(err)
}
}()
return nil
}
func getPostHandler(get, post http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case "GET":
get.ServeHTTP(w, r)
case "POST":
post.ServeHTTP(w, r)
default:
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
}
})
}
func redirectToHTTPSMiddleware(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Add a generous access-control-allow-origin header since we may be
// redirecting REST requests over protocols
w.Header().Add("Access-Control-Allow-Origin", "*")
if r.TLS == nil {
// Redirect HTTP requests to HTTPS
r.URL.Host = r.Host
r.URL.Scheme = "https"
http.Redirect(w, r, r.URL.String(), http.StatusFound)
} else {
h.ServeHTTP(w, r)
}
})
}
func noCacheMiddleware(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Cache-Control", "no-cache")
h.ServeHTTP(w, r)
})
}
func withVersionMiddleware(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("X-Syncthing-Version", Version)
h.ServeHTTP(w, r)
})
}
func withModel(m *model.Model, h func(m *model.Model, w http.ResponseWriter, r *http.Request)) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
h(m, w, r)
}
}
func restPing(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(map[string]string{
"ping": "pong",
})
}
func restGetVersion(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(map[string]string{
"version": Version,
"longVersion": LongVersion,
"os": runtime.GOOS,
"arch": runtime.GOARCH,
})
}
func restGetCompletion(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var folder = qs.Get("folder")
var deviceStr = qs.Get("device")
device, err := protocol.DeviceIDFromString(deviceStr)
if err != nil {
http.Error(w, err.Error(), 500)
return
}
res := map[string]float64{
"completion": m.Completion(device, folder),
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restGetModel(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var folder = qs.Get("folder")
var res = make(map[string]interface{})
res["invalid"] = cfg.Folders()[folder].Invalid
globalFiles, globalDeleted, globalBytes := m.GlobalSize(folder)
res["globalFiles"], res["globalDeleted"], res["globalBytes"] = globalFiles, globalDeleted, globalBytes
localFiles, localDeleted, localBytes := m.LocalSize(folder)
res["localFiles"], res["localDeleted"], res["localBytes"] = localFiles, localDeleted, localBytes
needFiles, needBytes := m.NeedSize(folder)
res["needFiles"], res["needBytes"] = needFiles, needBytes
res["inSyncFiles"], res["inSyncBytes"] = globalFiles-needFiles, globalBytes-needBytes
res["state"], res["stateChanged"] = m.State(folder)
res["version"] = m.CurrentLocalVersion(folder) + m.RemoteLocalVersion(folder)
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restPostOverride(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var folder = qs.Get("folder")
go m.Override(folder)
}
func restGetNeed(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var folder = qs.Get("folder")
files := m.NeedFolderFilesLimited(folder, 100, 2500) // max 100 files or 2500 blocks
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(files)
}
func restGetConnections(m *model.Model, w http.ResponseWriter, r *http.Request) {
var res = m.ConnectionStats()
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restGetDeviceStats(m *model.Model, w http.ResponseWriter, r *http.Request) {
var res = m.DeviceStatistics()
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restGetConfig(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(cfg.Raw())
}
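
// restPostConfig replaces the running configuration with the posted one. A
// changed GUI password is bcrypt-hashed before being stored, and usage
// reporting is started or stopped if its acceptance state changed.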
func restPostConfig(m *model.Model, w http.ResponseWriter, r *http.Request) {
var newCfg config.Configuration
err := json.NewDecoder(r.Body).Decode(&newCfg)
if err != nil {
l.Warnln("decoding posted config:", err)
http.Error(w, err.Error(), 500)
return
	}

	if newCfg.GUI.Password != cfg.GUI().Password && newCfg.GUI.Password != "" {
		hash, err := bcrypt.GenerateFromPassword([]byte(newCfg.GUI.Password), 0)
		if err != nil {
			l.Warnln("bcrypting password:", err)
			http.Error(w, err.Error(), 500)
			return
		}
		newCfg.GUI.Password = string(hash)
	}

	// Start or stop usage reporting as appropriate
	if curAcc := cfg.Options().URAccepted; newCfg.Options.URAccepted > curAcc {
		// UR was enabled
		newCfg.Options.URAccepted = usageReportVersion
		err := sendUsageReport(m)
		if err != nil {
			l.Infoln("Usage report:", err)
		}
		go usageReportingLoop(m)
	} else if newCfg.Options.URAccepted < curAcc {
		// UR was disabled
		newCfg.Options.URAccepted = -1
		stopUsageReporting()
	}

	// Activate and save
	configInSync = !config.ChangeRequiresRestart(cfg.Raw(), newCfg)
	cfg.Replace(newCfg)
	cfg.Save()
}
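
// restGetConfigInSync reports whether the most recently saved configuration
// took effect without requiring a restart.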
func restGetConfigInSync(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(map[string]bool{"configInSync": configInSync})
}
func restPostRestart(w http.ResponseWriter, r *http.Request) {
flushResponse(`{"ok": "restarting"}`, w)
go restart()
}
func restPostReset(w http.ResponseWriter, r *http.Request) {
flushResponse(`{"ok": "resetting folders"}`, w)
resetFolders()
go restart()
}
func restPostShutdown(w http.ResponseWriter, r *http.Request) {
flushResponse(`{"ok": "shutting down"}`, w)
go shutdown()
}
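
// flushResponse writes s plus a newline and flushes it to the client
// immediately, which matters when the process is about to restart or exit.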
func flushResponse(s string, w http.ResponseWriter) {
w.Write([]byte(s + "\n"))
	if f, ok := w.(http.Flusher); ok {
		f.Flush()
	}
}
var cpuUsagePercent [10]float64 // The last ten seconds
var cpuUsageLock sync.RWMutex
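
// restGetSystem returns process level information: device ID, goroutine and
// memory figures, a ten second CPU usage average and, when global discovery
// is enabled, whether external announcements are working.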
func restGetSystem(w http.ResponseWriter, r *http.Request) {
var m runtime.MemStats
runtime.ReadMemStats(&m)
tilde, _ := osutil.ExpandTilde("~")
res := make(map[string]interface{})
res["myID"] = myID.String()
res["goroutines"] = runtime.NumGoroutine()
res["alloc"] = m.Alloc
res["sys"] = m.Sys - m.HeapReleased
res["tilde"] = tilde
if cfg.Options().GlobalAnnEnabled && discoverer != nil {
res["extAnnounceOK"] = discoverer.ExtAnnounceOK()
}
cpuUsageLock.RLock()
var cpusum float64
for _, p := range cpuUsagePercent {
cpusum += p
}
cpuUsageLock.RUnlock()
res["cpuPercent"] = cpusum / 10
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restGetErrors(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
guiErrorsMut.Lock()
json.NewEncoder(w).Encode(map[string][]guiError{"errors": guiErrors})
guiErrorsMut.Unlock()
}
func restPostError(w http.ResponseWriter, r *http.Request) {
bs, _ := ioutil.ReadAll(r.Body)
r.Body.Close()
showGuiError(0, string(bs))
}
func restClearErrors(w http.ResponseWriter, r *http.Request) {
guiErrorsMut.Lock()
guiErrors = []guiError{}
guiErrorsMut.Unlock()
}
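
// showGuiError records an error for display in the GUI, keeping only the five
// most recent entries. The level parameter is currently unused.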
func showGuiError(level logger.LogLevel, err string) {
guiErrorsMut.Lock()
guiErrors = append(guiErrors, guiError{time.Now(), err})
if len(guiErrors) > 5 {
guiErrors = guiErrors[len(guiErrors)-5:]
}
guiErrorsMut.Unlock()
}
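
// restPostDiscoveryHint gives the discoverer a manual address hint for a
// device.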
func restPostDiscoveryHint(w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var device = qs.Get("device")
var addr = qs.Get("addr")
if len(device) != 0 && len(addr) != 0 && discoverer != nil {
discoverer.Hint(device, []string{addr})
}
}
func restGetDiscovery(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
devices := map[string][]discover.CacheEntry{}
if discoverer != nil {
		// Device IDs can't be marshalled as map keys, so we rebuild the map
		// using strings. The discoverer may be nil if discovery has not
		// started yet, hence the check above.
for device, entries := range discoverer.All() {
devices[device.String()] = entries
}
}
json.NewEncoder(w).Encode(devices)
}
func restGetReport(m *model.Model, w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(reportData(m))
}
func restGetIgnores(m *model.Model, w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
w.Header().Set("Content-Type", "application/json; charset=utf-8")
ignores, patterns, err := m.GetIgnores(qs.Get("folder"))
if err != nil {
http.Error(w, err.Error(), 500)
return
}
json.NewEncoder(w).Encode(map[string][]string{
"ignore": ignores,
"patterns": patterns,
})
}
func restPostIgnores(m *model.Model, w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
var data map[string][]string
err := json.NewDecoder(r.Body).Decode(&data)
r.Body.Close()
if err != nil {
http.Error(w, err.Error(), 500)
return
}
err = m.SetIgnores(qs.Get("folder"), data["ignore"])
if err != nil {
http.Error(w, err.Error(), 500)
return
}
restGetIgnores(m, w, r)
}
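
// restGetEvents returns buffered events newer than the "since" ID, optionally
// truncated to the last "limit" entries. The call blocks until at least one
// new event is available.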
func restGetEvents(w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
sinceStr := qs.Get("since")
limitStr := qs.Get("limit")
since, _ := strconv.Atoi(sinceStr)
limit, _ := strconv.Atoi(limitStr)
w.Header().Set("Content-Type", "application/json; charset=utf-8")
// Flush before blocking, to indicate that we've received the request
// and that it should not be retried.
	if f, ok := w.(http.Flusher); ok {
		f.Flush()
	}
evs := eventSub.Since(since, nil)
if 0 < limit && limit < len(evs) {
evs = evs[len(evs)-limit:]
}
json.NewEncoder(w).Encode(evs)
}
func restGetUpgrade(w http.ResponseWriter, r *http.Request) {
rel, err := upgrade.LatestRelease(strings.Contains(Version, "-beta"))
if err != nil {
http.Error(w, err.Error(), 500)
return
}
res := make(map[string]interface{})
res["running"] = Version
res["latest"] = rel.Tag
res["newer"] = upgrade.CompareVersions(rel.Tag, Version) == 1
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
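
// restGetDeviceID validates and canonicalizes the device ID given in the "id"
// query parameter, returning either the normalized ID or an error message.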
func restGetDeviceID(w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
idStr := qs.Get("id")
id, err := protocol.DeviceIDFromString(idStr)
w.Header().Set("Content-Type", "application/json; charset=utf-8")
if err == nil {
json.NewEncoder(w).Encode(map[string]string{
"id": id.String(),
})
} else {
json.NewEncoder(w).Encode(map[string]string{
"error": err.Error(),
})
}
}
func restGetLang(w http.ResponseWriter, r *http.Request) {
lang := r.Header.Get("Accept-Language")
var langs []string
for _, l := range strings.Split(lang, ",") {
parts := strings.SplitN(l, ";", 2)
langs = append(langs, strings.ToLower(strings.TrimSpace(parts[0])))
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(langs)
}
func restPostUpgrade(w http.ResponseWriter, r *http.Request) {
rel, err := upgrade.LatestRelease(strings.Contains(Version, "-beta"))
if err != nil {
l.Warnln("getting latest release:", err)
http.Error(w, err.Error(), 500)
return
}
if upgrade.CompareVersions(rel.Tag, Version) == 1 {
err = upgrade.UpgradeTo(rel, GoArchExtra)
if err != nil {
l.Warnln("upgrading:", err)
http.Error(w, err.Error(), 500)
return
}
flushResponse(`{"ok": "restarting"}`, w)
l.Infoln("Upgrading")
stop <- exitUpgrading
}
}
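
// restPostScan triggers a rescan of a folder, optionally limited to the
// subdirectory given in "sub".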
func restPostScan(m *model.Model, w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
folder := qs.Get("folder")
sub := qs.Get("sub")
err := m.ScanFolderSub(folder, sub)
if err != nil {
http.Error(w, err.Error(), 500)
}
}
func getQR(w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var text = qs.Get("text")
code, err := qr.Encode(text, qr.M)
if err != nil {
http.Error(w, "Invalid", 500)
return
}
w.Header().Set("Content-Type", "image/png")
w.Write(code.PNG())
}
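
// restGetPeerCompletion returns the average completion percentage per device
// across all folders shared with it; disconnected devices are reported as
// zero.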
func restGetPeerCompletion(m *model.Model, w http.ResponseWriter, r *http.Request) {
tot := map[string]float64{}
count := map[string]float64{}
for _, folder := range cfg.Folders() {
for _, device := range folder.DeviceIDs() {
deviceStr := device.String()
if m.ConnectedTo(device) {
tot[deviceStr] += m.Completion(device, folder.ID)
} else {
tot[deviceStr] = 0
}
count[deviceStr]++
}
}
comp := map[string]int{}
for device := range tot {
comp[device] = int(tot[device] / count[device])
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(comp)
}
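
// restGetAutocompleteDirectory returns up to ten directories matching the
// partial path in "current", for path autocompletion in the GUI.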
func restGetAutocompleteDirectory(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
qs := r.URL.Query()
current := qs.Get("current")
search, _ := osutil.ExpandTilde(current)
pathSeparator := string(os.PathSeparator)
if strings.HasSuffix(current, pathSeparator) && !strings.HasSuffix(search, pathSeparator) {
search = search + pathSeparator
}
subdirectories, _ := filepath.Glob(search + "*")
ret := make([]string, 0, 10)
for _, subdirectory := range subdirectories {
info, err := os.Stat(subdirectory)
if err == nil && info.IsDir() {
			ret = append(ret, subdirectory+pathSeparator)
if len(ret) > 9 {
break
}
}
}
json.NewEncoder(w).Encode(ret)
}
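
// embeddedStatic serves the GUI assets compiled into the binary, preferring
// files from assetDir on disk when one is configured.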
func embeddedStatic(assetDir string) http.Handler {
assets := auto.Assets()
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
file := r.URL.Path
if file[0] == '/' {
file = file[1:]
}
if len(file) == 0 {
file = "index.html"
}
if assetDir != "" {
p := filepath.Join(assetDir, filepath.FromSlash(file))
_, err := os.Stat(p)
if err == nil {
http.ServeFile(w, r, p)
return
}
}
bs, ok := assets[file]
if !ok {
http.NotFound(w, r)
return
}
mtype := mimeTypeForFile(file)
if len(mtype) != 0 {
w.Header().Set("Content-Type", mtype)
}
w.Header().Set("Content-Length", fmt.Sprintf("%d", len(bs)))
w.Header().Set("Last-Modified", modt)
w.Write(bs)
})
}
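
// mimeTypeForFile returns the MIME type to use for the given file name.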
func mimeTypeForFile(file string) string {
	// We use a built-in table of the common types, since the system's
	// TypeByExtension may be unreliable. For anything not in the table we
	// delegate to the system.
ext := filepath.Ext(file)
switch ext {
case ".htm", ".html":
return "text/html"
case ".css":
return "text/css"
case ".js":
return "application/javascript"
case ".json":
return "application/json"
case ".png":
return "image/png"
case ".ttf":
return "application/x-font-ttf"
case ".woff":
return "application/x-font-woff"
default:
return mime.TypeByExtension(ext)
}
}
| cmd/syncthing/gui.go | 1 | https://github.com/syncthing/syncthing/commit/59a85c1d751c85e585ec93398c3ba5c50bdef91f | [
0.9991111159324646,
0.04350699856877327,
0.00016655967920087278,
0.0004034677112940699,
0.19215571880340576
] |
{
"id": 1,
"code_window": [
"\n",
"\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n",
"\tjson.NewEncoder(w).Encode(files)\n",
"}\n",
"\n",
"func restGetConnections(m *model.Model, w http.ResponseWriter, r *http.Request) {\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tjson.NewEncoder(w).Encode(output)\n"
],
"file_path": "cmd/syncthing/gui.go",
"type": "replace",
"edit_start_line_idx": 296
} | // Copyright (c) 2014, Suryandaru Triandana <[email protected]>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package testutil
import (
"fmt"
"math/rand"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/util"
)
func KeyValueTesting(rnd *rand.Rand, kv KeyValue, p DB, setup func(KeyValue) DB, teardown func(DB)) {
if rnd == nil {
rnd = NewRand()
}
if p == nil {
BeforeEach(func() {
p = setup(kv)
})
AfterEach(func() {
teardown(p)
})
}
It("Should find all keys with Find", func() {
if db, ok := p.(Find); ok {
ShuffledIndex(nil, kv.Len(), 1, func(i int) {
key_, key, value := kv.IndexInexact(i)
// Using exact key.
rkey, rvalue, err := db.TestFind(key)
Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key)
Expect(rkey).Should(Equal(key), "Key")
Expect(rvalue).Should(Equal(value), "Value for key %q", key)
// Using inexact key.
rkey, rvalue, err = db.TestFind(key_)
Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q)", key_, key)
Expect(rkey).Should(Equal(key))
Expect(rvalue).Should(Equal(value), "Value for key %q (%q)", key_, key)
})
}
})
It("Should return error if the key is not present", func() {
if db, ok := p.(Find); ok {
var key []byte
if kv.Len() > 0 {
key_, _ := kv.Index(kv.Len() - 1)
key = BytesAfter(key_)
}
rkey, _, err := db.TestFind(key)
Expect(err).Should(HaveOccurred(), "Find for key %q yield key %q", key, rkey)
Expect(err).Should(Equal(errors.ErrNotFound))
}
})
It("Should only find exact key with Get", func() {
if db, ok := p.(Get); ok {
ShuffledIndex(nil, kv.Len(), 1, func(i int) {
key_, key, value := kv.IndexInexact(i)
// Using exact key.
rvalue, err := db.TestGet(key)
Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key)
Expect(rvalue).Should(Equal(value), "Value for key %q", key)
// Using inexact key.
if len(key_) > 0 {
_, err = db.TestGet(key_)
Expect(err).Should(HaveOccurred(), "Error for key %q", key_)
Expect(err).Should(Equal(errors.ErrNotFound))
}
})
}
})
TestIter := func(r *util.Range, _kv KeyValue) {
if db, ok := p.(NewIterator); ok {
iter := db.TestNewIterator(r)
Expect(iter.Error()).ShouldNot(HaveOccurred())
t := IteratorTesting{
KeyValue: _kv,
Iter: iter,
}
DoIteratorTesting(&t)
}
}
It("Should iterates and seeks correctly", func(done Done) {
TestIter(nil, kv.Clone())
done <- true
}, 3.0)
RandomIndex(rnd, kv.Len(), kv.Len(), func(i int) {
type slice struct {
r *util.Range
start, limit int
}
key_, _, _ := kv.IndexInexact(i)
for _, x := range []slice{
{&util.Range{Start: key_, Limit: nil}, i, kv.Len()},
{&util.Range{Start: nil, Limit: key_}, 0, i},
} {
It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", x.start, x.limit), func(done Done) {
TestIter(x.r, kv.Slice(x.start, x.limit))
done <- true
}, 3.0)
}
})
RandomRange(rnd, kv.Len(), kv.Len(), func(start, limit int) {
It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", start, limit), func(done Done) {
r := kv.Range(start, limit)
TestIter(&r, kv.Slice(start, limit))
done <- true
}, 3.0)
})
}
func AllKeyValueTesting(rnd *rand.Rand, body, setup func(KeyValue) DB, teardown func(DB)) {
Test := func(kv *KeyValue) func() {
return func() {
var p DB
if body != nil {
p = body(*kv)
}
KeyValueTesting(rnd, *kv, p, setup, teardown)
}
}
Describe("with no key/value (empty)", Test(&KeyValue{}))
Describe("with empty key", Test(KeyValue_EmptyKey()))
Describe("with empty value", Test(KeyValue_EmptyValue()))
Describe("with one key/value", Test(KeyValue_OneKeyValue()))
Describe("with big value", Test(KeyValue_BigValue()))
Describe("with special key", Test(KeyValue_SpecialKey()))
Describe("with multiple key/value", Test(KeyValue_MultipleKeyValue()))
}
| Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go | 0 | https://github.com/syncthing/syncthing/commit/59a85c1d751c85e585ec93398c3ba5c50bdef91f | [
0.00017618751735426486,
0.00017095997463911772,
0.00016550056170672178,
0.0001711010409053415,
0.0000027560706712392857
] |
{
"id": 1,
"code_window": [
"\n",
"\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n",
"\tjson.NewEncoder(w).Encode(files)\n",
"}\n",
"\n",
"func restGetConnections(m *model.Model, w http.ResponseWriter, r *http.Request) {\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tjson.NewEncoder(w).Encode(output)\n"
],
"file_path": "cmd/syncthing/gui.go",
"type": "replace",
"edit_start_line_idx": 296
} | <?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
<svg xmlns="http://www.w3.org/2000/svg">
<metadata></metadata>
<defs>
<font id="glyphicons_halflingsregular" horiz-adv-x="1200" >
<font-face units-per-em="1200" ascent="960" descent="-240" />
<missing-glyph horiz-adv-x="500" />
<glyph />
<glyph />
<glyph unicode="
" />
<glyph unicode=" " />
<glyph unicode="*" d="M100 500v200h259l-183 183l141 141l183 -183v259h200v-259l183 183l141 -141l-183 -183h259v-200h-259l183 -183l-141 -141l-183 183v-259h-200v259l-183 -183l-141 141l183 183h-259z" />
<glyph unicode="+" d="M0 400v300h400v400h300v-400h400v-300h-400v-400h-300v400h-400z" />
<glyph unicode=" " />
<glyph unicode=" " horiz-adv-x="652" />
<glyph unicode=" " horiz-adv-x="1304" />
<glyph unicode=" " horiz-adv-x="652" />
<glyph unicode=" " horiz-adv-x="1304" />
<glyph unicode=" " horiz-adv-x="434" />
<glyph unicode=" " horiz-adv-x="326" />
<glyph unicode=" " horiz-adv-x="217" />
<glyph unicode=" " horiz-adv-x="217" />
<glyph unicode=" " horiz-adv-x="163" />
<glyph unicode=" " horiz-adv-x="260" />
<glyph unicode=" " horiz-adv-x="72" />
<glyph unicode=" " horiz-adv-x="260" />
<glyph unicode=" " horiz-adv-x="326" />
<glyph unicode="€" d="M100 500l100 100h113q0 47 5 100h-218l100 100h135q37 167 112 257q117 141 297 141q242 0 354 -189q60 -103 66 -209h-181q0 55 -25.5 99t-63.5 68t-75 36.5t-67 12.5q-24 0 -52.5 -10t-62.5 -32t-65.5 -67t-50.5 -107h379l-100 -100h-300q-6 -46 -6 -100h406l-100 -100 h-300q9 -74 33 -132t52.5 -91t62 -54.5t59 -29t46.5 -7.5q29 0 66 13t75 37t63.5 67.5t25.5 96.5h174q-31 -172 -128 -278q-107 -117 -274 -117q-205 0 -324 158q-36 46 -69 131.5t-45 205.5h-217z" />
<glyph unicode="−" d="M200 400h900v300h-900v-300z" />
<glyph unicode="☁" d="M-14 494q0 -80 56.5 -137t135.5 -57h750q120 0 205 86t85 208q0 120 -85 206.5t-205 86.5q-46 0 -90 -14q-44 97 -134.5 156.5t-200.5 59.5q-152 0 -260 -107.5t-108 -260.5q0 -25 2 -37q-66 -14 -108.5 -67.5t-42.5 -122.5z" />
<glyph unicode="✉" d="M0 100l400 400l200 -200l200 200l400 -400h-1200zM0 300v600l300 -300zM0 1100l600 -603l600 603h-1200zM900 600l300 300v-600z" />
<glyph unicode="✏" d="M-13 -13l333 112l-223 223zM187 403l214 -214l614 614l-214 214zM887 1103l214 -214l99 92q13 13 13 32.5t-13 33.5l-153 153q-15 13 -33 13t-33 -13z" />
<glyph unicode="" horiz-adv-x="500" d="M0 0z" />
<glyph unicode="" d="M0 1200h1200l-500 -550v-550h300v-100h-800v100h300v550z" />
<glyph unicode="" d="M14 84q18 -55 86 -75.5t147 5.5q65 21 109 69t44 90v606l600 155v-521q-64 16 -138 -7q-79 -26 -122.5 -83t-25.5 -111q17 -55 85.5 -75.5t147.5 4.5q70 23 111.5 63.5t41.5 95.5v881q0 10 -7 15.5t-17 2.5l-752 -193q-10 -3 -17 -12.5t-7 -19.5v-689q-64 17 -138 -7 q-79 -25 -122.5 -82t-25.5 -112z" />
<glyph unicode="" d="M23 693q0 200 142 342t342 142t342 -142t142 -342q0 -142 -78 -261l300 -300q7 -8 7 -18t-7 -18l-109 -109q-8 -7 -18 -7t-18 7l-300 300q-119 -78 -261 -78q-200 0 -342 142t-142 342zM176 693q0 -136 97 -233t234 -97t233.5 96.5t96.5 233.5t-96.5 233.5t-233.5 96.5 t-234 -97t-97 -233z" />
<glyph unicode="" d="M100 784q0 64 28 123t73 100.5t104.5 64t119 20.5t120 -38.5t104.5 -104.5q48 69 109.5 105t121.5 38t118.5 -20.5t102.5 -64t71 -100.5t27 -123q0 -57 -33.5 -117.5t-94 -124.5t-126.5 -127.5t-150 -152.5t-146 -174q-62 85 -145.5 174t-149.5 152.5t-126.5 127.5 t-94 124.5t-33.5 117.5z" />
<glyph unicode="" d="M-72 800h479l146 400h2l146 -400h472l-382 -278l145 -449l-384 275l-382 -275l146 447zM168 71l2 1z" />
<glyph unicode="" d="M-72 800h479l146 400h2l146 -400h472l-382 -278l145 -449l-384 275l-382 -275l146 447zM168 71l2 1zM237 700l196 -142l-73 -226l192 140l195 -141l-74 229l193 140h-235l-77 211l-78 -211h-239z" />
<glyph unicode="" d="M0 0v143l400 257v100q-37 0 -68.5 74.5t-31.5 125.5v200q0 124 88 212t212 88t212 -88t88 -212v-200q0 -51 -31.5 -125.5t-68.5 -74.5v-100l400 -257v-143h-1200z" />
<glyph unicode="" d="M0 0v1100h1200v-1100h-1200zM100 100h100v100h-100v-100zM100 300h100v100h-100v-100zM100 500h100v100h-100v-100zM100 700h100v100h-100v-100zM100 900h100v100h-100v-100zM300 100h600v400h-600v-400zM300 600h600v400h-600v-400zM1000 100h100v100h-100v-100z M1000 300h100v100h-100v-100zM1000 500h100v100h-100v-100zM1000 700h100v100h-100v-100zM1000 900h100v100h-100v-100z" />
<glyph unicode="" d="M0 50v400q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5zM0 650v400q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400 q-21 0 -35.5 14.5t-14.5 35.5zM600 50v400q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5zM600 650v400q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5v-400 q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5z" />
<glyph unicode="" d="M0 50v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM0 450v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200 q-21 0 -35.5 14.5t-14.5 35.5zM0 850v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM400 50v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5 t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM400 450v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM400 850v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5 v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM800 50v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM800 450v200q0 21 14.5 35.5t35.5 14.5h200 q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM800 850v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5z" />
<glyph unicode="" d="M0 50v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM0 450q0 -21 14.5 -35.5t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5v200q0 21 -14.5 35.5t-35.5 14.5h-200q-21 0 -35.5 -14.5 t-14.5 -35.5v-200zM0 850v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM400 50v200q0 21 14.5 35.5t35.5 14.5h700q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5 t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5zM400 450v200q0 21 14.5 35.5t35.5 14.5h700q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5zM400 850v200q0 21 14.5 35.5t35.5 14.5h700q21 0 35.5 -14.5t14.5 -35.5 v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5z" />
<glyph unicode="" d="M29 454l419 -420l818 820l-212 212l-607 -607l-206 207z" />
<glyph unicode="" d="M106 318l282 282l-282 282l212 212l282 -282l282 282l212 -212l-282 -282l282 -282l-212 -212l-282 282l-282 -282z" />
<glyph unicode="" d="M23 693q0 200 142 342t342 142t342 -142t142 -342q0 -142 -78 -261l300 -300q7 -8 7 -18t-7 -18l-109 -109q-8 -7 -18 -7t-18 7l-300 300q-119 -78 -261 -78q-200 0 -342 142t-142 342zM176 693q0 -136 97 -233t234 -97t233.5 96.5t96.5 233.5t-96.5 233.5t-233.5 96.5 t-234 -97t-97 -233zM300 600v200h100v100h200v-100h100v-200h-100v-100h-200v100h-100z" />
<glyph unicode="" d="M23 694q0 200 142 342t342 142t342 -142t142 -342q0 -141 -78 -262l300 -299q7 -7 7 -18t-7 -18l-109 -109q-8 -8 -18 -8t-18 8l-300 299q-120 -77 -261 -77q-200 0 -342 142t-142 342zM176 694q0 -136 97 -233t234 -97t233.5 97t96.5 233t-96.5 233t-233.5 97t-234 -97 t-97 -233zM300 601h400v200h-400v-200z" />
<glyph unicode="" d="M23 600q0 183 105 331t272 210v-166q-103 -55 -165 -155t-62 -220q0 -177 125 -302t302 -125t302 125t125 302q0 120 -62 220t-165 155v166q167 -62 272 -210t105 -331q0 -118 -45.5 -224.5t-123 -184t-184 -123t-224.5 -45.5t-224.5 45.5t-184 123t-123 184t-45.5 224.5 zM500 750q0 -21 14.5 -35.5t35.5 -14.5h100q21 0 35.5 14.5t14.5 35.5v400q0 21 -14.5 35.5t-35.5 14.5h-100q-21 0 -35.5 -14.5t-14.5 -35.5v-400z" />
<glyph unicode="" d="M100 1h200v300h-200v-300zM400 1v500h200v-500h-200zM700 1v800h200v-800h-200zM1000 1v1200h200v-1200h-200z" />
<glyph unicode="" d="M26 601q0 -33 6 -74l151 -38l2 -6q14 -49 38 -93l3 -5l-80 -134q45 -59 105 -105l133 81l5 -3q45 -26 94 -39l5 -2l38 -151q40 -5 74 -5q27 0 74 5l38 151l6 2q46 13 93 39l5 3l134 -81q56 44 104 105l-80 134l3 5q24 44 39 93l1 6l152 38q5 40 5 74q0 28 -5 73l-152 38 l-1 6q-16 51 -39 93l-3 5l80 134q-44 58 -104 105l-134 -81l-5 3q-45 25 -93 39l-6 1l-38 152q-40 5 -74 5q-27 0 -74 -5l-38 -152l-5 -1q-50 -14 -94 -39l-5 -3l-133 81q-59 -47 -105 -105l80 -134l-3 -5q-25 -47 -38 -93l-2 -6l-151 -38q-6 -48 -6 -73zM385 601 q0 88 63 151t152 63t152 -63t63 -151q0 -89 -63 -152t-152 -63t-152 63t-63 152z" />
<glyph unicode="" d="M100 1025v50q0 10 7.5 17.5t17.5 7.5h275v100q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5v-100h275q10 0 17.5 -7.5t7.5 -17.5v-50q0 -11 -7 -18t-18 -7h-1050q-11 0 -18 7t-7 18zM200 100v800h900v-800q0 -41 -29.5 -71t-70.5 -30h-700q-41 0 -70.5 30 t-29.5 71zM300 100h100v700h-100v-700zM500 100h100v700h-100v-700zM500 1100h300v100h-300v-100zM700 100h100v700h-100v-700zM900 100h100v700h-100v-700z" />
<glyph unicode="" d="M1 601l656 644l644 -644h-200v-600h-300v400h-300v-400h-300v600h-200z" />
<glyph unicode="" d="M100 25v1150q0 11 7 18t18 7h475v-500h400v-675q0 -11 -7 -18t-18 -7h-850q-11 0 -18 7t-7 18zM700 800v300l300 -300h-300z" />
<glyph unicode="" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM500 500v400h100 v-300h200v-100h-300z" />
<glyph unicode="" d="M-100 0l431 1200h209l-21 -300h162l-20 300h208l431 -1200h-538l-41 400h-242l-40 -400h-539zM488 500h224l-27 300h-170z" />
<glyph unicode="" d="M0 0v400h490l-290 300h200v500h300v-500h200l-290 -300h490v-400h-1100zM813 200h175v100h-175v-100z" />
<glyph unicode="" d="M1 600q0 122 47.5 233t127.5 191t191 127.5t233 47.5t233 -47.5t191 -127.5t127.5 -191t47.5 -233t-47.5 -233t-127.5 -191t-191 -127.5t-233 -47.5t-233 47.5t-191 127.5t-127.5 191t-47.5 233zM188 600q0 -170 121 -291t291 -121t291 121t121 291t-121 291t-291 121 t-291 -121t-121 -291zM350 600h150v300h200v-300h150l-250 -300z" />
<glyph unicode="" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM350 600l250 300 l250 -300h-150v-300h-200v300h-150z" />
<glyph unicode="" d="M0 25v475l200 700h800q199 -700 200 -700v-475q0 -11 -7 -18t-18 -7h-1150q-11 0 -18 7t-7 18zM200 500h200l50 -200h300l50 200h200l-97 500h-606z" />
<glyph unicode="" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -172 121.5 -293t292.5 -121t292.5 121t121.5 293q0 171 -121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM500 397v401 l297 -200z" />
<glyph unicode="" d="M23 600q0 -118 45.5 -224.5t123 -184t184 -123t224.5 -45.5t224.5 45.5t184 123t123 184t45.5 224.5h-150q0 -177 -125 -302t-302 -125t-302 125t-125 302t125 302t302 125q136 0 246 -81l-146 -146h400v400l-145 -145q-157 122 -355 122q-118 0 -224.5 -45.5t-184 -123 t-123 -184t-45.5 -224.5z" />
<glyph unicode="" d="M23 600q0 118 45.5 224.5t123 184t184 123t224.5 45.5q198 0 355 -122l145 145v-400h-400l147 147q-112 80 -247 80q-177 0 -302 -125t-125 -302h-150zM100 0v400h400l-147 -147q112 -80 247 -80q177 0 302 125t125 302h150q0 -118 -45.5 -224.5t-123 -184t-184 -123 t-224.5 -45.5q-198 0 -355 122z" />
<glyph unicode="" d="M100 0h1100v1200h-1100v-1200zM200 100v900h900v-900h-900zM300 200v100h100v-100h-100zM300 400v100h100v-100h-100zM300 600v100h100v-100h-100zM300 800v100h100v-100h-100zM500 200h500v100h-500v-100zM500 400v100h500v-100h-500zM500 600v100h500v-100h-500z M500 800v100h500v-100h-500z" />
<glyph unicode="" d="M0 100v600q0 41 29.5 70.5t70.5 29.5h100v200q0 82 59 141t141 59h300q82 0 141 -59t59 -141v-200h100q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-900q-41 0 -70.5 29.5t-29.5 70.5zM400 800h300v150q0 21 -14.5 35.5t-35.5 14.5h-200 q-21 0 -35.5 -14.5t-14.5 -35.5v-150z" />
<glyph unicode="" d="M100 0v1100h100v-1100h-100zM300 400q60 60 127.5 84t127.5 17.5t122 -23t119 -30t110 -11t103 42t91 120.5v500q-40 -81 -101.5 -115.5t-127.5 -29.5t-138 25t-139.5 40t-125.5 25t-103 -29.5t-65 -115.5v-500z" />
<glyph unicode="" d="M0 275q0 -11 7 -18t18 -7h50q11 0 18 7t7 18v300q0 127 70.5 231.5t184.5 161.5t245 57t245 -57t184.5 -161.5t70.5 -231.5v-300q0 -11 7 -18t18 -7h50q11 0 18 7t7 18v300q0 116 -49.5 227t-131 192.5t-192.5 131t-227 49.5t-227 -49.5t-192.5 -131t-131 -192.5 t-49.5 -227v-300zM200 20v460q0 8 6 14t14 6h160q8 0 14 -6t6 -14v-460q0 -8 -6 -14t-14 -6h-160q-8 0 -14 6t-6 14zM800 20v460q0 8 6 14t14 6h160q8 0 14 -6t6 -14v-460q0 -8 -6 -14t-14 -6h-160q-8 0 -14 6t-6 14z" />
<glyph unicode="" d="M0 400h300l300 -200v800l-300 -200h-300v-400zM688 459l141 141l-141 141l71 71l141 -141l141 141l71 -71l-141 -141l141 -141l-71 -71l-141 141l-141 -141z" />
<glyph unicode="" d="M0 400h300l300 -200v800l-300 -200h-300v-400zM700 857l69 53q111 -135 111 -310q0 -169 -106 -302l-67 54q86 110 86 248q0 146 -93 257z" />
<glyph unicode="" d="M0 401v400h300l300 200v-800l-300 200h-300zM702 858l69 53q111 -135 111 -310q0 -170 -106 -303l-67 55q86 110 86 248q0 145 -93 257zM889 951l7 -8q123 -151 123 -344q0 -189 -119 -339l-7 -8l81 -66l6 8q142 178 142 405q0 230 -144 408l-6 8z" />
<glyph unicode="" d="M0 0h500v500h-200v100h-100v-100h-200v-500zM0 600h100v100h400v100h100v100h-100v300h-500v-600zM100 100v300h300v-300h-300zM100 800v300h300v-300h-300zM200 200v100h100v-100h-100zM200 900h100v100h-100v-100zM500 500v100h300v-300h200v-100h-100v-100h-200v100 h-100v100h100v200h-200zM600 0v100h100v-100h-100zM600 1000h100v-300h200v-300h300v200h-200v100h200v500h-600v-200zM800 800v300h300v-300h-300zM900 0v100h300v-100h-300zM900 900v100h100v-100h-100zM1100 200v100h100v-100h-100z" />
<glyph unicode="" d="M0 200h100v1000h-100v-1000zM100 0v100h300v-100h-300zM200 200v1000h100v-1000h-100zM500 0v91h100v-91h-100zM500 200v1000h200v-1000h-200zM700 0v91h100v-91h-100zM800 200v1000h100v-1000h-100zM900 0v91h200v-91h-200zM1000 200v1000h200v-1000h-200z" />
<glyph unicode="" d="M1 700v475q0 10 7.5 17.5t17.5 7.5h474l700 -700l-500 -500zM148 953q0 -42 29 -71q30 -30 71.5 -30t71.5 30q29 29 29 71t-29 71q-30 30 -71.5 30t-71.5 -30q-29 -29 -29 -71z" />
<glyph unicode="" d="M2 700v475q0 11 7 18t18 7h474l700 -700l-500 -500zM148 953q0 -42 30 -71q29 -30 71 -30t71 30q30 29 30 71t-30 71q-29 30 -71 30t-71 -30q-30 -29 -30 -71zM701 1200h100l700 -700l-500 -500l-50 50l450 450z" />
<glyph unicode="" d="M100 0v1025l175 175h925v-1000l-100 -100v1000h-750l-100 -100h750v-1000h-900z" />
<glyph unicode="" d="M200 0l450 444l450 -443v1150q0 20 -14.5 35t-35.5 15h-800q-21 0 -35.5 -15t-14.5 -35v-1151z" />
<glyph unicode="" d="M0 100v700h200l100 -200h600l100 200h200v-700h-200v200h-800v-200h-200zM253 829l40 -124h592l62 124l-94 346q-2 11 -10 18t-18 7h-450q-10 0 -18 -7t-10 -18zM281 24l38 152q2 10 11.5 17t19.5 7h500q10 0 19.5 -7t11.5 -17l38 -152q2 -10 -3.5 -17t-15.5 -7h-600 q-10 0 -15.5 7t-3.5 17z" />
<glyph unicode="" d="M0 200q0 -41 29.5 -70.5t70.5 -29.5h1000q41 0 70.5 29.5t29.5 70.5v600q0 41 -29.5 70.5t-70.5 29.5h-150q-4 8 -11.5 21.5t-33 48t-53 61t-69 48t-83.5 21.5h-200q-41 0 -82 -20.5t-70 -50t-52 -59t-34 -50.5l-12 -20h-150q-41 0 -70.5 -29.5t-29.5 -70.5v-600z M356 500q0 100 72 172t172 72t172 -72t72 -172t-72 -172t-172 -72t-172 72t-72 172zM494 500q0 -44 31 -75t75 -31t75 31t31 75t-31 75t-75 31t-75 -31t-31 -75zM900 700v100h100v-100h-100z" />
<glyph unicode="" d="M53 0h365v66q-41 0 -72 11t-49 38t1 71l92 234h391l82 -222q16 -45 -5.5 -88.5t-74.5 -43.5v-66h417v66q-34 1 -74 43q-18 19 -33 42t-21 37l-6 13l-385 998h-93l-399 -1006q-24 -48 -52 -75q-12 -12 -33 -25t-36 -20l-15 -7v-66zM416 521l178 457l46 -140l116 -317h-340 z" />
<glyph unicode="" d="M100 0v89q41 7 70.5 32.5t29.5 65.5v827q0 28 -1 39.5t-5.5 26t-15.5 21t-29 14t-49 14.5v70h471q120 0 213 -88t93 -228q0 -55 -11.5 -101.5t-28 -74t-33.5 -47.5t-28 -28l-12 -7q8 -3 21.5 -9t48 -31.5t60.5 -58t47.5 -91.5t21.5 -129q0 -84 -59 -156.5t-142 -111 t-162 -38.5h-500zM400 200h161q89 0 153 48.5t64 132.5q0 90 -62.5 154.5t-156.5 64.5h-159v-400zM400 700h139q76 0 130 61.5t54 138.5q0 82 -84 130.5t-239 48.5v-379z" />
<glyph unicode="" d="M200 0v57q77 7 134.5 40.5t65.5 80.5l173 849q10 56 -10 74t-91 37q-6 1 -10.5 2.5t-9.5 2.5v57h425l2 -57q-33 -8 -62 -25.5t-46 -37t-29.5 -38t-17.5 -30.5l-5 -12l-128 -825q-10 -52 14 -82t95 -36v-57h-500z" />
<glyph unicode="" d="M-75 200h75v800h-75l125 167l125 -167h-75v-800h75l-125 -167zM300 900v300h150h700h150v-300h-50q0 29 -8 48.5t-18.5 30t-33.5 15t-39.5 5.5t-50.5 1h-200v-850l100 -50v-100h-400v100l100 50v850h-200q-34 0 -50.5 -1t-40 -5.5t-33.5 -15t-18.5 -30t-8.5 -48.5h-49z " />
<glyph unicode="" d="M33 51l167 125v-75h800v75l167 -125l-167 -125v75h-800v-75zM100 901v300h150h700h150v-300h-50q0 29 -8 48.5t-18 30t-33.5 15t-40 5.5t-50.5 1h-200v-650l100 -50v-100h-400v100l100 50v650h-200q-34 0 -50.5 -1t-39.5 -5.5t-33.5 -15t-18.5 -30t-8 -48.5h-50z" />
<glyph unicode="" d="M0 50q0 -20 14.5 -35t35.5 -15h1100q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM0 350q0 -20 14.5 -35t35.5 -15h800q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-800q-21 0 -35.5 -14.5t-14.5 -35.5 v-100zM0 650q0 -20 14.5 -35t35.5 -15h1000q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-1000q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM0 950q0 -20 14.5 -35t35.5 -15h600q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-600q-21 0 -35.5 -14.5 t-14.5 -35.5v-100z" />
<glyph unicode="" d="M0 50q0 -20 14.5 -35t35.5 -15h1100q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM0 650q0 -20 14.5 -35t35.5 -15h1100q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5 v-100zM200 350q0 -20 14.5 -35t35.5 -15h700q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-700q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM200 950q0 -20 14.5 -35t35.5 -15h700q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-700q-21 0 -35.5 -14.5 t-14.5 -35.5v-100z" />
<glyph unicode="" d="M0 50v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15t-14.5 35zM100 650v100q0 21 14.5 35.5t35.5 14.5h1000q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1000q-21 0 -35.5 15 t-14.5 35zM300 350v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800q-21 0 -35.5 15t-14.5 35zM500 950v100q0 21 14.5 35.5t35.5 14.5h600q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-600 q-21 0 -35.5 15t-14.5 35z" />
<glyph unicode="" d="M0 50v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15t-14.5 35zM0 350v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15 t-14.5 35zM0 650v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15t-14.5 35zM0 950v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100 q-21 0 -35.5 15t-14.5 35z" />
<glyph unicode="" d="M0 50v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15t-14.5 35zM0 350v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15 t-14.5 35zM0 650v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15t-14.5 35zM0 950v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15 t-14.5 35zM300 50v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800q-21 0 -35.5 15t-14.5 35zM300 350v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800 q-21 0 -35.5 15t-14.5 35zM300 650v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800q-21 0 -35.5 15t-14.5 35zM300 950v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15 h-800q-21 0 -35.5 15t-14.5 35z" />
<glyph unicode="" d="M-101 500v100h201v75l166 -125l-166 -125v75h-201zM300 0h100v1100h-100v-1100zM500 50q0 -20 14.5 -35t35.5 -15h600q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-600q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM500 350q0 -20 14.5 -35t35.5 -15h300q20 0 35 15t15 35 v100q0 21 -15 35.5t-35 14.5h-300q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM500 650q0 -20 14.5 -35t35.5 -15h500q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-500q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM500 950q0 -20 14.5 -35t35.5 -15h100q20 0 35 15t15 35v100 q0 21 -15 35.5t-35 14.5h-100q-21 0 -35.5 -14.5t-14.5 -35.5v-100z" />
<glyph unicode="" d="M1 50q0 -20 14.5 -35t35.5 -15h600q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-600q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM1 350q0 -20 14.5 -35t35.5 -15h300q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-300q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM1 650 q0 -20 14.5 -35t35.5 -15h500q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-500q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM1 950q0 -20 14.5 -35t35.5 -15h100q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-100q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM801 0v1100h100v-1100 h-100zM934 550l167 -125v75h200v100h-200v75z" />
<glyph unicode="" d="M0 275v650q0 31 22 53t53 22h750q31 0 53 -22t22 -53v-650q0 -31 -22 -53t-53 -22h-750q-31 0 -53 22t-22 53zM900 600l300 300v-600z" />
<glyph unicode="" d="M0 44v1012q0 18 13 31t31 13h1112q19 0 31.5 -13t12.5 -31v-1012q0 -18 -12.5 -31t-31.5 -13h-1112q-18 0 -31 13t-13 31zM100 263l247 182l298 -131l-74 156l293 318l236 -288v500h-1000v-737zM208 750q0 56 39 95t95 39t95 -39t39 -95t-39 -95t-95 -39t-95 39t-39 95z " />
<glyph unicode="" d="M148 745q0 124 60.5 231.5t165 172t226.5 64.5q123 0 227 -63t164.5 -169.5t60.5 -229.5t-73 -272q-73 -114 -166.5 -237t-150.5 -189l-57 -66q-10 9 -27 26t-66.5 70.5t-96 109t-104 135.5t-100.5 155q-63 139 -63 262zM342 772q0 -107 75.5 -182.5t181.5 -75.5 q107 0 182.5 75.5t75.5 182.5t-75.5 182t-182.5 75t-182 -75.5t-75 -181.5z" />
<glyph unicode="" d="M1 600q0 122 47.5 233t127.5 191t191 127.5t233 47.5t233 -47.5t191 -127.5t127.5 -191t47.5 -233t-47.5 -233t-127.5 -191t-191 -127.5t-233 -47.5t-233 47.5t-191 127.5t-127.5 191t-47.5 233zM173 600q0 -177 125.5 -302t301.5 -125v854q-176 0 -301.5 -125 t-125.5 -302z" />
<glyph unicode="" d="M117 406q0 94 34 186t88.5 172.5t112 159t115 177t87.5 194.5q21 -71 57.5 -142.5t76 -130.5t83 -118.5t82 -117t70 -116t50 -125.5t18.5 -136q0 -89 -39 -165.5t-102 -126.5t-140 -79.5t-156 -33.5q-114 6 -211.5 53t-161.5 138.5t-64 210.5zM243 414q14 -82 59.5 -136 t136.5 -80l16 98q-7 6 -18 17t-34 48t-33 77q-15 73 -14 143.5t10 122.5l9 51q-92 -110 -119.5 -185t-12.5 -156z" />
<glyph unicode="" d="M0 400v300q0 165 117.5 282.5t282.5 117.5q366 -6 397 -14l-186 -186h-311q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v125l200 200v-225q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5 t-117.5 282.5zM436 341l161 50l412 412l-114 113l-405 -405zM995 1015l113 -113l113 113l-21 85l-92 28z" />
<glyph unicode="" d="M0 400v300q0 165 117.5 282.5t282.5 117.5h261l2 -80q-133 -32 -218 -120h-145q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5l200 153v-53q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5 zM423 524q30 38 81.5 64t103 35.5t99 14t77.5 3.5l29 -1v-209l360 324l-359 318v-216q-7 0 -19 -1t-48 -8t-69.5 -18.5t-76.5 -37t-76.5 -59t-62 -88t-39.5 -121.5z" />
<glyph unicode="" d="M0 400v300q0 165 117.5 282.5t282.5 117.5h300q60 0 127 -23l-178 -177h-349q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v69l200 200v-169q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5 t-117.5 282.5zM342 632l283 -284l566 567l-136 137l-430 -431l-147 147z" />
<glyph unicode="" d="M0 603l300 296v-198h200v200h-200l300 300l295 -300h-195v-200h200v198l300 -296l-300 -300v198h-200v-200h195l-295 -300l-300 300h200v200h-200v-198z" />
<glyph unicode="" d="M200 50v1000q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-437l500 487v-1100l-500 488v-438q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5z" />
<glyph unicode="" d="M0 50v1000q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-437l500 487v-487l500 487v-1100l-500 488v-488l-500 488v-438q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5z" />
<glyph unicode="" d="M136 550l564 550v-487l500 487v-1100l-500 488v-488z" />
<glyph unicode="" d="M200 0l900 550l-900 550v-1100z" />
<glyph unicode="" d="M200 150q0 -21 14.5 -35.5t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5v800q0 21 -14.5 35.5t-35.5 14.5h-200q-21 0 -35.5 -14.5t-14.5 -35.5v-800zM600 150q0 -21 14.5 -35.5t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5v800q0 21 -14.5 35.5t-35.5 14.5h-200 q-21 0 -35.5 -14.5t-14.5 -35.5v-800z" />
<glyph unicode="" d="M200 150q0 -20 14.5 -35t35.5 -15h800q21 0 35.5 15t14.5 35v800q0 21 -14.5 35.5t-35.5 14.5h-800q-21 0 -35.5 -14.5t-14.5 -35.5v-800z" />
<glyph unicode="" d="M0 0v1100l500 -487v487l564 -550l-564 -550v488z" />
<glyph unicode="" d="M0 0v1100l500 -487v487l500 -487v437q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-1000q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v438l-500 -488v488z" />
<glyph unicode="" d="M300 0v1100l500 -487v437q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-1000q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v438z" />
<glyph unicode="" d="M100 250v100q0 21 14.5 35.5t35.5 14.5h1000q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1000q-21 0 -35.5 14.5t-14.5 35.5zM100 500h1100l-550 564z" />
<glyph unicode="" d="M185 599l592 -592l240 240l-353 353l353 353l-240 240z" />
<glyph unicode="" d="M272 194l353 353l-353 353l241 240l572 -571l21 -22l-1 -1v-1l-592 -591z" />
<glyph unicode="" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -300t-217.5 -218t-299.5 -80t-299.5 80t-217.5 218t-80 300zM300 500h200v-200h200v200h200v200h-200v200h-200v-200h-200v-200z" />
<glyph unicode="" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -300t-217.5 -218t-299.5 -80t-299.5 80t-217.5 218t-80 300zM300 500h600v200h-600v-200z" />
<glyph unicode="" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -300t-217.5 -218t-299.5 -80t-299.5 80t-217.5 218t-80 300zM246 459l213 -213l141 142l141 -142l213 213l-142 141l142 141l-213 212l-141 -141l-141 142l-212 -213l141 -141z" />
<glyph unicode="" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -299.5t-217.5 -217.5t-299.5 -80t-299.5 80t-217.5 217.5t-80 299.5zM270 551l276 -277l411 411l-175 174l-236 -236l-102 102z" />
<glyph unicode="" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -300t-217.5 -218t-299.5 -80t-299.5 80t-217.5 218t-80 300zM363 700h144q4 0 11.5 -1t11 -1t6.5 3t3 9t1 11t3.5 8.5t3.5 6t5.5 4t6.5 2.5t9 1.5t9 0.5h11.5h12.5q19 0 30 -10t11 -26 q0 -22 -4 -28t-27 -22q-5 -1 -12.5 -3t-27 -13.5t-34 -27t-26.5 -46t-11 -68.5h200q5 3 14 8t31.5 25.5t39.5 45.5t31 69t14 94q0 51 -17.5 89t-42 58t-58.5 32t-58.5 15t-51.5 3q-105 0 -172 -56t-67 -183zM500 300h200v100h-200v-100z" />
<glyph unicode="" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -300t-217.5 -218t-299.5 -80t-299.5 80t-217.5 218t-80 300zM400 300h400v100h-100v300h-300v-100h100v-200h-100v-100zM500 800h200v100h-200v-100z" />
<glyph unicode="" d="M0 500v200h194q15 60 36 104.5t55.5 86t88 69t126.5 40.5v200h200v-200q54 -20 113 -60t112.5 -105.5t71.5 -134.5h203v-200h-203q-25 -102 -116.5 -186t-180.5 -117v-197h-200v197q-140 27 -208 102.5t-98 200.5h-194zM290 500q24 -73 79.5 -127.5t130.5 -78.5v206h200 v-206q149 48 201 206h-201v200h200q-25 74 -76 127.5t-124 76.5v-204h-200v203q-75 -24 -130 -77.5t-79 -125.5h209v-200h-210z" />
<glyph unicode="" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM356 465l135 135 l-135 135l109 109l135 -135l135 135l109 -109l-135 -135l135 -135l-109 -109l-135 135l-135 -135z" />
<glyph unicode="" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM322 537l141 141 l87 -87l204 205l142 -142l-346 -345z" />
<glyph unicode="" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -115 62 -215l568 567q-100 62 -216 62q-171 0 -292.5 -121.5t-121.5 -292.5zM391 245q97 -59 209 -59q171 0 292.5 121.5t121.5 292.5 q0 112 -59 209z" />
<glyph unicode="" d="M0 547l600 453v-300h600v-300h-600v-301z" />
<glyph unicode="" d="M0 400v300h600v300l600 -453l-600 -448v301h-600z" />
<glyph unicode="" d="M204 600l450 600l444 -600h-298v-600h-300v600h-296z" />
<glyph unicode="" d="M104 600h296v600h300v-600h298l-449 -600z" />
<glyph unicode="" d="M0 200q6 132 41 238.5t103.5 193t184 138t271.5 59.5v271l600 -453l-600 -448v301q-95 -2 -183 -20t-170 -52t-147 -92.5t-100 -135.5z" />
<glyph unicode="" d="M0 0v400l129 -129l294 294l142 -142l-294 -294l129 -129h-400zM635 777l142 -142l294 294l129 -129v400h-400l129 -129z" />
<glyph unicode="" d="M34 176l295 295l-129 129h400v-400l-129 130l-295 -295zM600 600v400l129 -129l295 295l142 -141l-295 -295l129 -130h-400z" />
<glyph unicode="" d="M23 600q0 118 45.5 224.5t123 184t184 123t224.5 45.5t224.5 -45.5t184 -123t123 -184t45.5 -224.5t-45.5 -224.5t-123 -184t-184 -123t-224.5 -45.5t-224.5 45.5t-184 123t-123 184t-45.5 224.5zM456 851l58 -302q4 -20 21.5 -34.5t37.5 -14.5h54q20 0 37.5 14.5 t21.5 34.5l58 302q4 20 -8 34.5t-33 14.5h-207q-20 0 -32 -14.5t-8 -34.5zM500 300h200v100h-200v-100z" />
<glyph unicode="" d="M0 800h100v-200h400v300h200v-300h400v200h100v100h-111v6t-1 15t-3 18l-34 172q-11 39 -41.5 63t-69.5 24q-32 0 -61 -17l-239 -144q-22 -13 -40 -35q-19 24 -40 36l-238 144q-33 18 -62 18q-39 0 -69.5 -23t-40.5 -61l-35 -177q-2 -8 -3 -18t-1 -15v-6h-111v-100z M100 0h400v400h-400v-400zM200 900q-3 0 14 48t35 96l18 47l214 -191h-281zM700 0v400h400v-400h-400zM731 900l202 197q5 -12 12 -32.5t23 -64t25 -72t7 -28.5h-269z" />
<glyph unicode="" d="M0 -22v143l216 193q-9 53 -13 83t-5.5 94t9 113t38.5 114t74 124q47 60 99.5 102.5t103 68t127.5 48t145.5 37.5t184.5 43.5t220 58.5q0 -189 -22 -343t-59 -258t-89 -181.5t-108.5 -120t-122 -68t-125.5 -30t-121.5 -1.5t-107.5 12.5t-87.5 17t-56.5 7.5l-99 -55z M238.5 300.5q19.5 -6.5 86.5 76.5q55 66 367 234q70 38 118.5 69.5t102 79t99 111.5t86.5 148q22 50 24 60t-6 19q-7 5 -17 5t-26.5 -14.5t-33.5 -39.5q-35 -51 -113.5 -108.5t-139.5 -89.5l-61 -32q-369 -197 -458 -401q-48 -111 -28.5 -117.5z" />
<glyph unicode="" d="M111 408q0 -33 5 -63q9 -56 44 -119.5t105 -108.5q31 -21 64 -16t62 23.5t57 49.5t48 61.5t35 60.5q32 66 39 184.5t-13 157.5q79 -80 122 -164t26 -184q-5 -33 -20.5 -69.5t-37.5 -80.5q-10 -19 -14.5 -29t-12 -26t-9 -23.5t-3 -19t2.5 -15.5t11 -9.5t19.5 -5t30.5 2.5 t42 8q57 20 91 34t87.5 44.5t87 64t65.5 88.5t47 122q38 172 -44.5 341.5t-246.5 278.5q22 -44 43 -129q39 -159 -32 -154q-15 2 -33 9q-79 33 -120.5 100t-44 175.5t48.5 257.5q-13 -8 -34 -23.5t-72.5 -66.5t-88.5 -105.5t-60 -138t-8 -166.5q2 -12 8 -41.5t8 -43t6 -39.5 t3.5 -39.5t-1 -33.5t-6 -31.5t-13.5 -24t-21 -20.5t-31 -12q-38 -10 -67 13t-40.5 61.5t-15 81.5t10.5 75q-52 -46 -83.5 -101t-39 -107t-7.5 -85z" />
<glyph unicode="" d="M-61 600l26 40q6 10 20 30t49 63.5t74.5 85.5t97 90t116.5 83.5t132.5 59t145.5 23.5t145.5 -23.5t132.5 -59t116.5 -83.5t97 -90t74.5 -85.5t49 -63.5t20 -30l26 -40l-26 -40q-6 -10 -20 -30t-49 -63.5t-74.5 -85.5t-97 -90t-116.5 -83.5t-132.5 -59t-145.5 -23.5 t-145.5 23.5t-132.5 59t-116.5 83.5t-97 90t-74.5 85.5t-49 63.5t-20 30zM120 600q7 -10 40.5 -58t56 -78.5t68 -77.5t87.5 -75t103 -49.5t125 -21.5t123.5 20t100.5 45.5t85.5 71.5t66.5 75.5t58 81.5t47 66q-1 1 -28.5 37.5t-42 55t-43.5 53t-57.5 63.5t-58.5 54 q49 -74 49 -163q0 -124 -88 -212t-212 -88t-212 88t-88 212q0 85 46 158q-102 -87 -226 -258zM377 656q49 -124 154 -191l105 105q-37 24 -75 72t-57 84l-20 36z" />
<glyph unicode="" d="M-61 600l26 40q6 10 20 30t49 63.5t74.5 85.5t97 90t116.5 83.5t132.5 59t145.5 23.5q61 0 121 -17l37 142h148l-314 -1200h-148l37 143q-82 21 -165 71.5t-140 102t-109.5 112t-72 88.5t-29.5 43zM120 600q210 -282 393 -336l37 141q-107 18 -178.5 101.5t-71.5 193.5 q0 85 46 158q-102 -87 -226 -258zM377 656q49 -124 154 -191l47 47l23 87q-30 28 -59 69t-44 68l-14 26zM780 161l38 145q22 15 44.5 34t46 44t40.5 44t41 50.5t33.5 43.5t33 44t24.5 34q-97 127 -140 175l39 146q67 -54 131.5 -125.5t87.5 -103.5t36 -52l26 -40l-26 -40 q-7 -12 -25.5 -38t-63.5 -79.5t-95.5 -102.5t-124 -100t-146.5 -79z" />
<glyph unicode="" d="M-97.5 34q13.5 -34 50.5 -34h1294q37 0 50.5 35.5t-7.5 67.5l-642 1056q-20 33 -48 36t-48 -29l-642 -1066q-21 -32 -7.5 -66zM155 200l445 723l445 -723h-345v100h-200v-100h-345zM500 600l100 -300l100 300v100h-200v-100z" />
<glyph unicode="" d="M100 262v41q0 20 11 44.5t26 38.5l363 325v339q0 62 44 106t106 44t106 -44t44 -106v-339l363 -325q15 -14 26 -38.5t11 -44.5v-41q0 -20 -12 -26.5t-29 5.5l-359 249v-263q100 -91 100 -113v-64q0 -21 -13 -29t-32 1l-94 78h-222l-94 -78q-19 -9 -32 -1t-13 29v64 q0 22 100 113v263l-359 -249q-17 -12 -29 -5.5t-12 26.5z" />
<glyph unicode="" d="M0 50q0 -20 14.5 -35t35.5 -15h1000q21 0 35.5 15t14.5 35v750h-1100v-750zM0 900h1100v150q0 21 -14.5 35.5t-35.5 14.5h-150v100h-100v-100h-500v100h-100v-100h-150q-21 0 -35.5 -14.5t-14.5 -35.5v-150zM100 100v100h100v-100h-100zM100 300v100h100v-100h-100z M100 500v100h100v-100h-100zM300 100v100h100v-100h-100zM300 300v100h100v-100h-100zM300 500v100h100v-100h-100zM500 100v100h100v-100h-100zM500 300v100h100v-100h-100zM500 500v100h100v-100h-100zM700 100v100h100v-100h-100zM700 300v100h100v-100h-100zM700 500 v100h100v-100h-100zM900 100v100h100v-100h-100zM900 300v100h100v-100h-100zM900 500v100h100v-100h-100z" />
<glyph unicode="" d="M0 200v200h259l600 600h241v198l300 -295l-300 -300v197h-159l-600 -600h-341zM0 800h259l122 -122l141 142l-181 180h-341v-200zM678 381l141 142l122 -123h159v198l300 -295l-300 -300v197h-241z" />
<glyph unicode="" d="M0 400v600q0 41 29.5 70.5t70.5 29.5h1000q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-596l-304 -300v300h-100q-41 0 -70.5 29.5t-29.5 70.5z" />
<glyph unicode="" d="M100 600v200h300v-250q0 -113 6 -145q17 -92 102 -117q39 -11 92 -11q37 0 66.5 5.5t50 15.5t36 24t24 31.5t14 37.5t7 42t2.5 45t0 47v25v250h300v-200q0 -42 -3 -83t-15 -104t-31.5 -116t-58 -109.5t-89 -96.5t-129 -65.5t-174.5 -25.5t-174.5 25.5t-129 65.5t-89 96.5 t-58 109.5t-31.5 116t-15 104t-3 83zM100 900v300h300v-300h-300zM800 900v300h300v-300h-300z" />
<glyph unicode="" d="M-30 411l227 -227l352 353l353 -353l226 227l-578 579z" />
<glyph unicode="" d="M70 797l580 -579l578 579l-226 227l-353 -353l-352 353z" />
<glyph unicode="" d="M-198 700l299 283l300 -283h-203v-400h385l215 -200h-800v600h-196zM402 1000l215 -200h381v-400h-198l299 -283l299 283h-200v600h-796z" />
<glyph unicode="" d="M18 939q-5 24 10 42q14 19 39 19h896l38 162q5 17 18.5 27.5t30.5 10.5h94q20 0 35 -14.5t15 -35.5t-15 -35.5t-35 -14.5h-54l-201 -961q-2 -4 -6 -10.5t-19 -17.5t-33 -11h-31v-50q0 -20 -14.5 -35t-35.5 -15t-35.5 15t-14.5 35v50h-300v-50q0 -20 -14.5 -35t-35.5 -15 t-35.5 15t-14.5 35v50h-50q-21 0 -35.5 15t-14.5 35q0 21 14.5 35.5t35.5 14.5h535l48 200h-633q-32 0 -54.5 21t-27.5 43z" />
<glyph unicode="" d="M0 0v800h1200v-800h-1200zM0 900v100h200q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5h500v-100h-1200z" />
<glyph unicode="" d="M1 0l300 700h1200l-300 -700h-1200zM1 400v600h200q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5h500v-200h-1000z" />
<glyph unicode="" d="M302 300h198v600h-198l298 300l298 -300h-198v-600h198l-298 -300z" />
<glyph unicode="" d="M0 600l300 298v-198h600v198l300 -298l-300 -297v197h-600v-197z" />
<glyph unicode="" d="M0 100v100q0 41 29.5 70.5t70.5 29.5h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5zM31 400l172 739q5 22 23 41.5t38 19.5h672q19 0 37.5 -22.5t23.5 -45.5l172 -732h-1138zM800 100h100v100h-100v-100z M1000 100h100v100h-100v-100z" />
<glyph unicode="" d="M-101 600v50q0 24 25 49t50 38l25 13v-250l-11 5.5t-24 14t-30 21.5t-24 27.5t-11 31.5zM99 500v250v5q0 13 0.5 18.5t2.5 13t8 10.5t15 3h200l675 250v-850l-675 200h-38l47 -276q2 -12 -3 -17.5t-11 -6t-21 -0.5h-8h-83q-20 0 -34.5 14t-18.5 35q-56 337 -56 351z M1100 200v850q0 21 14.5 35.5t35.5 14.5q20 0 35 -14.5t15 -35.5v-850q0 -20 -15 -35t-35 -15q-21 0 -35.5 15t-14.5 35z" />
<glyph unicode="" d="M74 350q0 21 13.5 35.5t33.5 14.5h17l118 173l63 327q15 77 76 140t144 83l-18 32q-6 19 3 32t29 13h94q20 0 29 -10.5t3 -29.5l-18 -37q83 -19 144 -82.5t76 -140.5l63 -327l118 -173h17q20 0 33.5 -14.5t13.5 -35.5q0 -20 -13 -40t-31 -27q-22 -9 -63 -23t-167.5 -37 t-251.5 -23t-245.5 20.5t-178.5 41.5l-58 20q-18 7 -31 27.5t-13 40.5zM497 110q12 -49 40 -79.5t63 -30.5t63 30.5t39 79.5q-48 -6 -102 -6t-103 6z" />
<glyph unicode="" d="M21 445l233 -45l-78 -224l224 78l45 -233l155 179l155 -179l45 233l224 -78l-78 224l234 45l-180 155l180 156l-234 44l78 225l-224 -78l-45 233l-155 -180l-155 180l-45 -233l-224 78l78 -225l-233 -44l179 -156z" />
<glyph unicode="" d="M0 200h200v600h-200v-600zM300 275q0 -75 100 -75h61q123 -100 139 -100h250q46 0 83 57l238 344q29 31 29 74v100q0 44 -30.5 84.5t-69.5 40.5h-328q28 118 28 125v150q0 44 -30.5 84.5t-69.5 40.5h-50q-27 0 -51 -20t-38 -48l-96 -198l-145 -196q-20 -26 -20 -63v-400z M400 300v375l150 212l100 213h50v-175l-50 -225h450v-125l-250 -375h-214l-136 100h-100z" />
<glyph unicode="" d="M0 400v600h200v-600h-200zM300 525v400q0 75 100 75h61q123 100 139 100h250q46 0 83 -57l238 -344q29 -31 29 -74v-100q0 -44 -30.5 -84.5t-69.5 -40.5h-328q28 -118 28 -125v-150q0 -44 -30.5 -84.5t-69.5 -40.5h-50q-27 0 -51 20t-38 48l-96 198l-145 196 q-20 26 -20 63zM400 525l150 -212l100 -213h50v175l-50 225h450v125l-250 375h-214l-136 -100h-100v-375z" />
<glyph unicode="" d="M8 200v600h200v-600h-200zM308 275v525q0 17 14 35.5t28 28.5l14 9l362 230q14 6 25 6q17 0 29 -12l109 -112q14 -14 14 -34q0 -18 -11 -32l-85 -121h302q85 0 138.5 -38t53.5 -110t-54.5 -111t-138.5 -39h-107l-130 -339q-7 -22 -20.5 -41.5t-28.5 -19.5h-341 q-7 0 -90 81t-83 94zM408 289l100 -89h293l131 339q6 21 19.5 41t28.5 20h203q16 0 25 15t9 36q0 20 -9 34.5t-25 14.5h-457h-6.5h-7.5t-6.5 0.5t-6 1t-5 1.5t-5.5 2.5t-4 4t-4 5.5q-5 12 -5 20q0 14 10 27l147 183l-86 83l-339 -236v-503z" />
<glyph unicode="" d="M-101 651q0 72 54 110t139 37h302l-85 121q-11 16 -11 32q0 21 14 34l109 113q13 12 29 12q11 0 25 -6l365 -230q7 -4 16.5 -10.5t26 -26t16.5 -36.5v-526q0 -13 -85.5 -93.5t-93.5 -80.5h-342q-15 0 -28.5 20t-19.5 41l-131 339h-106q-84 0 -139 39t-55 111zM-1 601h222 q15 0 28.5 -20.5t19.5 -40.5l131 -339h293l106 89v502l-342 237l-87 -83l145 -184q10 -11 10 -26q0 -11 -5 -20q-1 -3 -3.5 -5.5l-4 -4t-5 -2.5t-5.5 -1.5t-6.5 -1t-6.5 -0.5h-7.5h-6.5h-476v-100zM999 201v600h200v-600h-200z" />
<glyph unicode="" d="M97 719l230 -363q4 -6 10.5 -15.5t26 -25t36.5 -15.5h525q13 0 94 83t81 90v342q0 15 -20 28.5t-41 19.5l-339 131v106q0 84 -39 139t-111 55t-110 -53.5t-38 -138.5v-302l-121 84q-15 12 -33.5 11.5t-32.5 -13.5l-112 -110q-22 -22 -6 -53zM172 739l83 86l183 -146 q22 -18 47 -5q3 1 5.5 3.5l4 4t2.5 5t1.5 5.5t1 6.5t0.5 6v7.5v7v456q0 22 25 31t50 -0.5t25 -30.5v-202q0 -16 20 -29.5t41 -19.5l339 -130v-294l-89 -100h-503zM400 0v200h600v-200h-600z" />
<glyph unicode="" d="M1 585q-15 -31 7 -53l112 -110q13 -13 32 -13.5t34 10.5l121 85l-1 -302q0 -84 38.5 -138t110.5 -54t111 55t39 139v106l339 131q20 6 40.5 19.5t20.5 28.5v342q0 7 -81 90t-94 83h-525q-17 0 -35.5 -14t-28.5 -28l-10 -15zM76 565l237 339h503l89 -100v-294l-340 -130 q-20 -6 -40 -20t-20 -29v-202q0 -22 -25 -31t-50 0t-25 31v456v14.5t-1.5 11.5t-5 12t-9.5 7q-24 13 -46 -5l-184 -146zM305 1104v200h600v-200h-600z" />
<glyph unicode="" d="M5 597q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5q162 0 299.5 -80t217.5 -218t80 -300t-80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5zM300 500h300l-2 -194l402 294l-402 298v-197h-298v-201z" />
<glyph unicode="" d="M0 597q0 122 47.5 232.5t127.5 190.5t190.5 127.5t231.5 47.5q122 0 232.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-218 -217.5t-300 -80t-299.5 80t-217.5 217.5t-80 299.5zM200 600l400 -294v194h302v201h-300v197z" />
<glyph unicode="" d="M5 597q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5q121 0 231.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5zM300 600h200v-300h200v300h200l-300 400z" />
<glyph unicode="" d="M5 597q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5q121 0 231.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5zM300 600l300 -400l300 400h-200v300h-200v-300h-200z" />
<glyph unicode="" d="M5 597q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5q121 0 231.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5zM254 780q-8 -34 5.5 -93t7.5 -87q0 -9 17 -44t16 -60q12 0 23 -5.5 t23 -15t20 -13.5q20 -10 108 -42q22 -8 53 -31.5t59.5 -38.5t57.5 -11q8 -18 -15 -55.5t-20 -57.5q12 -21 22.5 -34.5t28 -27t36.5 -17.5q0 -6 -3 -15.5t-3.5 -14.5t4.5 -17q101 -2 221 111q31 30 47 48t34 49t21 62q-14 9 -37.5 9.5t-35.5 7.5q-14 7 -49 15t-52 19 q-9 0 -39.5 -0.5t-46.5 -1.5t-39 -6.5t-39 -16.5q-50 -35 -66 -12q-4 2 -3.5 25.5t0.5 25.5q-6 13 -26.5 17t-24.5 7q2 22 -2 41t-16.5 28t-38.5 -20q-23 -25 -42 4q-19 28 -8 58q8 16 22 22q6 -1 26 -1.5t33.5 -4.5t19.5 -13q12 -19 32 -37.5t34 -27.5l14 -8q0 3 9.5 39.5 t5.5 57.5q-4 23 14.5 44.5t22.5 31.5q5 14 10 35t8.5 31t15.5 22.5t34 21.5q-6 18 10 37q8 0 23.5 -1.5t24.5 -1.5t20.5 4.5t20.5 15.5q-10 23 -30.5 42.5t-38 30t-49 26.5t-43.5 23q11 41 1 44q31 -13 58.5 -14.5t39.5 3.5l11 4q6 36 -17 53.5t-64 28.5t-56 23 q-19 -3 -37 0q-15 -12 -36.5 -21t-34.5 -12t-44 -8t-39 -6q-15 -3 -46 0t-45 -3q-20 -6 -51.5 -25.5t-34.5 -34.5q-3 -11 6.5 -22.5t8.5 -18.5q-3 -34 -27.5 -91t-29.5 -79zM518 915q3 12 16 30.5t16 25.5q10 -10 18.5 -10t14 6t14.5 14.5t16 12.5q0 -18 8 -42.5t16.5 -44 t9.5 -23.5q-6 1 -39 5t-53.5 10t-36.5 16z" />
<glyph unicode="" d="M0 164.5q0 21.5 15 37.5l600 599q-33 101 6 201.5t135 154.5q164 92 306 -9l-259 -138l145 -232l251 126q13 -175 -151 -267q-123 -70 -253 -23l-596 -596q-15 -16 -36.5 -16t-36.5 16l-111 110q-15 15 -15 36.5z" />
<glyph unicode="" horiz-adv-x="1220" d="M0 196v100q0 41 29.5 70.5t70.5 29.5h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5zM0 596v100q0 41 29.5 70.5t70.5 29.5h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000 q-41 0 -70.5 29.5t-29.5 70.5zM0 996v100q0 41 29.5 70.5t70.5 29.5h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5zM600 596h500v100h-500v-100zM800 196h300v100h-300v-100zM900 996h200v100h-200v-100z" />
<glyph unicode="" d="M100 1100v100h1000v-100h-1000zM150 1000h900l-350 -500v-300l-200 -200v500z" />
<glyph unicode="" d="M0 200v200h1200v-200q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5zM0 500v400q0 41 29.5 70.5t70.5 29.5h300v100q0 41 29.5 70.5t70.5 29.5h200q41 0 70.5 -29.5t29.5 -70.5v-100h300q41 0 70.5 -29.5t29.5 -70.5v-400h-500v100h-200v-100h-500z M500 1000h200v100h-200v-100z" />
<glyph unicode="" d="M0 0v400l129 -129l200 200l142 -142l-200 -200l129 -129h-400zM0 800l129 129l200 -200l142 142l-200 200l129 129h-400v-400zM729 329l142 142l200 -200l129 129v-400h-400l129 129zM729 871l200 200l-129 129h400v-400l-129 129l-200 -200z" />
<glyph unicode="" d="M0 596q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM182 596q0 -172 121.5 -293t292.5 -121t292.5 121t121.5 293q0 171 -121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM291 655 q0 23 15.5 38.5t38.5 15.5t39 -16t16 -38q0 -23 -16 -39t-39 -16q-22 0 -38 16t-16 39zM400 850q0 22 16 38.5t39 16.5q22 0 38 -16t16 -39t-16 -39t-38 -16q-23 0 -39 16.5t-16 38.5zM513 609q0 32 21 56.5t52 29.5l122 126l1 1q-9 14 -9 28q0 22 16 38.5t39 16.5 q22 0 38 -16t16 -39t-16 -39t-38 -16q-16 0 -29 10l-55 -145q17 -22 17 -51q0 -36 -25.5 -61.5t-61.5 -25.5q-37 0 -62.5 25.5t-25.5 61.5zM800 655q0 22 16 38t39 16t38.5 -15.5t15.5 -38.5t-16 -39t-38 -16q-23 0 -39 16t-16 39z" />
<glyph unicode="" d="M-40 375q-13 -95 35 -173q35 -57 94 -89t129 -32q63 0 119 28q33 16 65 40.5t52.5 45.5t59.5 64q40 44 57 61l394 394q35 35 47 84t-3 96q-27 87 -117 104q-20 2 -29 2q-46 0 -79.5 -17t-67.5 -51l-388 -396l-7 -7l69 -67l377 373q20 22 39 38q23 23 50 23q38 0 53 -36 q16 -39 -20 -75l-547 -547q-52 -52 -125 -52q-55 0 -100 33t-54 96q-5 35 2.5 66t31.5 63t42 50t56 54q24 21 44 41l348 348q52 52 82.5 79.5t84 54t107.5 26.5q25 0 48 -4q95 -17 154 -94.5t51 -175.5q-7 -101 -98 -192l-252 -249l-253 -256l7 -7l69 -60l517 511 q67 67 95 157t11 183q-16 87 -67 154t-130 103q-69 33 -152 33q-107 0 -197 -55q-40 -24 -111 -95l-512 -512q-68 -68 -81 -163z" />
<glyph unicode="" d="M79 784q0 131 99 229.5t230 98.5q144 0 242 -129q103 129 245 129q130 0 227 -98.5t97 -229.5q0 -46 -17.5 -91t-61 -99t-77 -89.5t-104.5 -105.5q-197 -191 -293 -322l-17 -23l-16 23q-43 58 -100 122.5t-92 99.5t-101 100l-84.5 84.5t-68 74t-60 78t-33.5 70.5t-15 78z M250 784q0 -27 30.5 -70t61.5 -75.5t95 -94.5l22 -22q93 -90 190 -201q82 92 195 203l12 12q64 62 97.5 97t64.5 79t31 72q0 71 -48 119.5t-106 48.5q-73 0 -131 -83l-118 -171l-114 174q-51 80 -124 80q-59 0 -108.5 -49.5t-49.5 -118.5z" />
<glyph unicode="" d="M57 353q0 -94 66 -160l141 -141q66 -66 159 -66q95 0 159 66l283 283q66 66 66 159t-66 159l-141 141q-12 12 -19 17l-105 -105l212 -212l-389 -389l-247 248l95 95l-18 18q-46 45 -75 101l-55 -55q-66 -66 -66 -159zM269 706q0 -93 66 -159l141 -141l19 -17l105 105 l-212 212l389 389l247 -247l-95 -96l18 -18q46 -46 77 -99l29 29q35 35 62.5 88t27.5 96q0 93 -66 159l-141 141q-66 66 -159 66q-95 0 -159 -66l-283 -283q-66 -64 -66 -159z" />
<glyph unicode="" d="M200 100v953q0 21 30 46t81 48t129 38t163 15t162 -15t127 -38t79 -48t29 -46v-953q0 -41 -29.5 -70.5t-70.5 -29.5h-600q-41 0 -70.5 29.5t-29.5 70.5zM300 300h600v700h-600v-700zM496 150q0 -43 30.5 -73.5t73.5 -30.5t73.5 30.5t30.5 73.5t-30.5 73.5t-73.5 30.5 t-73.5 -30.5t-30.5 -73.5z" />
<glyph unicode="" d="M0 0l303 380l207 208l-210 212h300l267 279l-35 36q-15 14 -15 35t15 35q14 15 35 15t35 -15l283 -282q15 -15 15 -36t-15 -35q-14 -15 -35 -15t-35 15l-36 35l-279 -267v-300l-212 210l-208 -207z" />
<glyph unicode="" d="M295 433h139q5 -77 48.5 -126.5t117.5 -64.5v335l-27 7q-46 14 -79 26.5t-72 36t-62.5 52t-40 72.5t-16.5 99q0 92 44 159.5t109 101t144 40.5v78h100v-79q38 -4 72.5 -13.5t75.5 -31.5t71 -53.5t51.5 -84t24.5 -118.5h-159q-8 72 -35 109.5t-101 50.5v-307l64 -14 q34 -7 64 -16.5t70 -31.5t67.5 -52t47.5 -80.5t20 -112.5q0 -139 -89 -224t-244 -96v-77h-100v78q-152 17 -237 104q-40 40 -52.5 93.5t-15.5 139.5zM466 889q0 -29 8 -51t16.5 -34t29.5 -22.5t31 -13.5t38 -10q7 -2 11 -3v274q-61 -8 -97.5 -37.5t-36.5 -102.5zM700 237 q170 18 170 151q0 64 -44 99.5t-126 60.5v-311z" />
<glyph unicode="" d="M100 600v100h166q-24 49 -44 104q-10 26 -14.5 55.5t-3 72.5t25 90t68.5 87q97 88 263 88q129 0 230 -89t101 -208h-153q0 52 -34 89.5t-74 51.5t-76 14q-37 0 -79 -14.5t-62 -35.5q-41 -44 -41 -101q0 -11 2.5 -24.5t5.5 -24t9.5 -26.5t10.5 -25t14 -27.5t14 -25.5 t15.5 -27t13.5 -24h242v-100h-197q8 -50 -2.5 -115t-31.5 -94q-41 -59 -99 -113q35 11 84 18t70 7q32 1 102 -16t104 -17q76 0 136 30l50 -147q-41 -25 -80.5 -36.5t-59 -13t-61.5 -1.5q-23 0 -128 33t-155 29q-39 -4 -82 -17t-66 -25l-24 -11l-55 145l16.5 11t15.5 10 t13.5 9.5t14.5 12t14.5 14t17.5 18.5q48 55 54 126.5t-30 142.5h-221z" />
<glyph unicode="" d="M2 300l298 -300l298 300h-198v900h-200v-900h-198zM602 900l298 300l298 -300h-198v-900h-200v900h-198z" />
<glyph unicode="" d="M2 300h198v900h200v-900h198l-298 -300zM700 0v200h100v-100h200v-100h-300zM700 400v100h300v-200h-99v-100h-100v100h99v100h-200zM700 700v500h300v-500h-100v100h-100v-100h-100zM801 900h100v200h-100v-200z" />
<glyph unicode="" d="M2 300h198v900h200v-900h198l-298 -300zM700 0v500h300v-500h-100v100h-100v-100h-100zM700 700v200h100v-100h200v-100h-300zM700 1100v100h300v-200h-99v-100h-100v100h99v100h-200zM801 200h100v200h-100v-200z" />
<glyph unicode="" d="M2 300l298 -300l298 300h-198v900h-200v-900h-198zM800 100v400h300v-500h-100v100h-200zM800 1100v100h200v-500h-100v400h-100zM901 200h100v200h-100v-200z" />
<glyph unicode="" d="M2 300l298 -300l298 300h-198v900h-200v-900h-198zM800 400v100h200v-500h-100v400h-100zM800 800v400h300v-500h-100v100h-200zM901 900h100v200h-100v-200z" />
<glyph unicode="" d="M2 300l298 -300l298 300h-198v900h-200v-900h-198zM700 100v200h500v-200h-500zM700 400v200h400v-200h-400zM700 700v200h300v-200h-300zM700 1000v200h200v-200h-200z" />
<glyph unicode="" d="M2 300l298 -300l298 300h-198v900h-200v-900h-198zM700 100v200h200v-200h-200zM700 400v200h300v-200h-300zM700 700v200h400v-200h-400zM700 1000v200h500v-200h-500z" />
<glyph unicode="" d="M0 400v300q0 165 117.5 282.5t282.5 117.5h300q162 0 281 -118.5t119 -281.5v-300q0 -165 -118.5 -282.5t-281.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5zM200 300q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5 h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500z" />
<glyph unicode="" d="M0 400v300q0 163 119 281.5t281 118.5h300q165 0 282.5 -117.5t117.5 -282.5v-300q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-163 0 -281.5 117.5t-118.5 282.5zM200 300q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5 h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500zM400 300l333 250l-333 250v-500z" />
<glyph unicode="" d="M0 400v300q0 163 117.5 281.5t282.5 118.5h300q163 0 281.5 -119t118.5 -281v-300q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5zM200 300q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5 h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500zM300 700l250 -333l250 333h-500z" />
<glyph unicode="" d="M0 400v300q0 165 117.5 282.5t282.5 117.5h300q165 0 282.5 -117.5t117.5 -282.5v-300q0 -162 -118.5 -281t-281.5 -119h-300q-165 0 -282.5 118.5t-117.5 281.5zM200 300q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5 h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500zM300 400h500l-250 333z" />
<glyph unicode="" d="M0 400v300h300v200l400 -350l-400 -350v200h-300zM500 0v200h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5h-500v200h400q165 0 282.5 -117.5t117.5 -282.5v-300q0 -165 -117.5 -282.5t-282.5 -117.5h-400z" />
<glyph unicode="" d="M216 519q10 -19 32 -19h302q-155 -438 -160 -458q-5 -21 4 -32l9 -8l9 -1q13 0 26 16l538 630q15 19 6 36q-8 18 -32 16h-300q1 4 78 219.5t79 227.5q2 17 -6 27l-8 8h-9q-16 0 -25 -15q-4 -5 -98.5 -111.5t-228 -257t-209.5 -238.5q-17 -19 -7 -40z" />
<glyph unicode="" d="M0 400q0 -165 117.5 -282.5t282.5 -117.5h300q47 0 100 15v185h-500q-41 0 -70.5 29.5t-29.5 70.5v500q0 41 29.5 70.5t70.5 29.5h500v185q-14 4 -114 7.5t-193 5.5l-93 2q-165 0 -282.5 -117.5t-117.5 -282.5v-300zM600 400v300h300v200l400 -350l-400 -350v200h-300z " />
<glyph unicode="" d="M0 400q0 -165 117.5 -282.5t282.5 -117.5h300q163 0 281.5 117.5t118.5 282.5v98l-78 73l-122 -123v-148q0 -41 -29.5 -70.5t-70.5 -29.5h-500q-41 0 -70.5 29.5t-29.5 70.5v500q0 41 29.5 70.5t70.5 29.5h156l118 122l-74 78h-100q-165 0 -282.5 -117.5t-117.5 -282.5 v-300zM496 709l353 342l-149 149h500v-500l-149 149l-342 -353z" />
<glyph unicode="" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM406 600 q0 80 57 137t137 57t137 -57t57 -137t-57 -137t-137 -57t-137 57t-57 137z" />
<glyph unicode="" d="M0 0v275q0 11 7 18t18 7h1048q11 0 19 -7.5t8 -17.5v-275h-1100zM100 800l445 -500l450 500h-295v400h-300v-400h-300zM900 150h100v50h-100v-50z" />
<glyph unicode="" d="M0 0v275q0 11 7 18t18 7h1048q11 0 19 -7.5t8 -17.5v-275h-1100zM100 700h300v-300h300v300h295l-445 500zM900 150h100v50h-100v-50z" />
<glyph unicode="" d="M0 0v275q0 11 7 18t18 7h1048q11 0 19 -7.5t8 -17.5v-275h-1100zM100 705l305 -305l596 596l-154 155l-442 -442l-150 151zM900 150h100v50h-100v-50z" />
<glyph unicode="" d="M0 0v275q0 11 7 18t18 7h1048q11 0 19 -7.5t8 -17.5v-275h-1100zM100 988l97 -98l212 213l-97 97zM200 401h700v699l-250 -239l-149 149l-212 -212l149 -149zM900 150h100v50h-100v-50z" />
<glyph unicode="" d="M0 0v275q0 11 7 18t18 7h1048q11 0 19 -7.5t8 -17.5v-275h-1100zM200 612l212 -212l98 97l-213 212zM300 1200l239 -250l-149 -149l212 -212l149 148l248 -237v700h-699zM900 150h100v50h-100v-50z" />
<glyph unicode="" d="M23 415l1177 784v-1079l-475 272l-310 -393v416h-392zM494 210l672 938l-672 -712v-226z" />
<glyph unicode="" d="M0 150v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-850q0 -21 -15 -35.5t-35 -14.5h-150v400h-700v-400h-150q-21 0 -35.5 14.5t-14.5 35.5zM600 1000h100v200h-100v-200z" />
<glyph unicode="" d="M0 150v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-218l-276 -275l-120 120l-126 -127h-378v-400h-150q-21 0 -35.5 14.5t-14.5 35.5zM581 306l123 123l120 -120l353 352l123 -123l-475 -476zM600 1000h100v200h-100v-200z" />
<glyph unicode="" d="M0 150v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-269l-103 -103l-170 170l-298 -298h-329v-400h-150q-21 0 -35.5 14.5t-14.5 35.5zM600 1000h100v200h-100v-200zM700 133l170 170l-170 170l127 127l170 -170l170 170l127 -128l-170 -169l170 -170 l-127 -127l-170 170l-170 -170z" />
<glyph unicode="" d="M0 150v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-300h-400v-200h-500v-400h-150q-21 0 -35.5 14.5t-14.5 35.5zM600 300l300 -300l300 300h-200v300h-200v-300h-200zM600 1000v200h100v-200h-100z" />
<glyph unicode="" d="M0 150v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-402l-200 200l-298 -298h-402v-400h-150q-21 0 -35.5 14.5t-14.5 35.5zM600 300h200v-300h200v300h200l-300 300zM600 1000v200h100v-200h-100z" />
<glyph unicode="" d="M0 250q0 -21 14.5 -35.5t35.5 -14.5h1100q21 0 35.5 14.5t14.5 35.5v550h-1200v-550zM0 900h1200v150q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5v-150zM100 300v200h400v-200h-400z" />
<glyph unicode="" d="M0 400l300 298v-198h400v-200h-400v-198zM100 800v200h100v-200h-100zM300 800v200h100v-200h-100zM500 800v200h400v198l300 -298l-300 -298v198h-400zM800 300v200h100v-200h-100zM1000 300h100v200h-100v-200z" />
<glyph unicode="" d="M100 700v400l50 100l50 -100v-300h100v300l50 100l50 -100v-300h100v300l50 100l50 -100v-400l-100 -203v-447q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v447zM800 597q0 -29 10.5 -55.5t25 -43t29 -28.5t25.5 -18l10 -5v-397q0 -21 14.5 -35.5 t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5v1106q0 31 -18 40.5t-44 -7.5l-276 -117q-25 -16 -43.5 -50.5t-18.5 -65.5v-359z" />
<glyph unicode="" d="M100 0h400v56q-75 0 -87.5 6t-12.5 44v394h500v-394q0 -38 -12.5 -44t-87.5 -6v-56h400v56q-4 0 -11 0.5t-24 3t-30 7t-24 15t-11 24.5v888q0 22 25 34.5t50 13.5l25 2v56h-400v-56q75 0 87.5 -6t12.5 -44v-394h-500v394q0 38 12.5 44t87.5 6v56h-400v-56q4 0 11 -0.5 t24 -3t30 -7t24 -15t11 -24.5v-888q0 -22 -25 -34.5t-50 -13.5l-25 -2v-56z" />
<glyph unicode="" d="M0 300q0 -41 29.5 -70.5t70.5 -29.5h300q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5h-300q-41 0 -70.5 -29.5t-29.5 -70.5v-500zM100 100h400l200 200h105l295 98v-298h-425l-100 -100h-375zM100 300v200h300v-200h-300zM100 600v200h300v-200h-300z M100 1000h400l200 -200v-98l295 98h105v200h-425l-100 100h-375zM700 402v163l400 133v-163z" />
<glyph unicode="" d="M16.5 974.5q0.5 -21.5 16 -90t46.5 -140t104 -177.5t175 -208q103 -103 207.5 -176t180 -103.5t137 -47t92.5 -16.5l31 1l163 162q16 17 13 40.5t-22 37.5l-192 136q-19 14 -45 12t-42 -19l-119 -118q-143 103 -267 227q-126 126 -227 268l118 118q17 17 20 41.5 t-11 44.5l-139 194q-14 19 -36.5 22t-40.5 -14l-162 -162q-1 -11 -0.5 -32.5z" />
<glyph unicode="" d="M0 50v212q0 20 10.5 45.5t24.5 39.5l365 303v50q0 4 1 10.5t12 22.5t30 28.5t60 23t97 10.5t97 -10t60 -23.5t30 -27.5t12 -24l1 -10v-50l365 -303q14 -14 24.5 -39.5t10.5 -45.5v-212q0 -21 -15 -35.5t-35 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5zM0 712 q0 -21 14.5 -33.5t34.5 -8.5l202 33q20 4 34.5 21t14.5 38v146q141 24 300 24t300 -24v-146q0 -21 14.5 -38t34.5 -21l202 -33q20 -4 34.5 8.5t14.5 33.5v200q-6 8 -19 20.5t-63 45t-112 57t-171 45t-235 20.5q-92 0 -175 -10.5t-141.5 -27t-108.5 -36.5t-81.5 -40 t-53.5 -36.5t-31 -27.5l-9 -10v-200z" />
<glyph unicode="" d="M100 0v100h1100v-100h-1100zM175 200h950l-125 150v250l100 100v400h-100v-200h-100v200h-200v-200h-100v200h-200v-200h-100v200h-100v-400l100 -100v-250z" />
<glyph unicode="" d="M100 0h300v400q0 41 -29.5 70.5t-70.5 29.5h-100q-41 0 -70.5 -29.5t-29.5 -70.5v-400zM500 0v1000q0 41 29.5 70.5t70.5 29.5h100q41 0 70.5 -29.5t29.5 -70.5v-1000h-300zM900 0v700q0 41 29.5 70.5t70.5 29.5h100q41 0 70.5 -29.5t29.5 -70.5v-700h-300z" />
<glyph unicode="" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300h300v300h-200v100h200v100h-300v-300h200v-100h-200v-100zM600 300h200v100h100v300h-100v100h-200v-500 zM700 400v300h100v-300h-100z" />
<glyph unicode="" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300h100v200h100v-200h100v500h-100v-200h-100v200h-100v-500zM600 300h200v100h100v300h-100v100h-200v-500 zM700 400v300h100v-300h-100z" />
<glyph unicode="" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300h300v100h-200v300h200v100h-300v-500zM600 300h300v100h-200v300h200v100h-300v-500z" />
<glyph unicode="" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 550l300 -150v300zM600 400l300 150l-300 150v-300z" />
<glyph unicode="" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300v500h700v-500h-700zM300 400h130q41 0 68 42t27 107t-28.5 108t-66.5 43h-130v-300zM575 549 q0 -65 27 -107t68 -42h130v300h-130q-38 0 -66.5 -43t-28.5 -108z" />
<glyph unicode="" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300h300v300h-200v100h200v100h-300v-300h200v-100h-200v-100zM601 300h100v100h-100v-100zM700 700h100 v-400h100v500h-200v-100z" />
<glyph unicode="" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300h300v400h-200v100h-100v-500zM301 400v200h100v-200h-100zM601 300h100v100h-100v-100zM700 700h100 v-400h100v500h-200v-100z" />
<glyph unicode="" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 700v100h300v-300h-99v-100h-100v100h99v200h-200zM201 300v100h100v-100h-100zM601 300v100h100v-100h-100z M700 700v100h200v-500h-100v400h-100z" />
<glyph unicode="" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM400 500v200 l100 100h300v-100h-300v-200h300v-100h-300z" />
<glyph unicode="" d="M0 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM182 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM400 400v400h300 l100 -100v-100h-100v100h-200v-100h200v-100h-200v-100h-100zM700 400v100h100v-100h-100z" />
<glyph unicode="" d="M-14 494q0 -80 56.5 -137t135.5 -57h222v300h400v-300h128q120 0 205 86t85 208q0 120 -85 206.5t-205 86.5q-46 0 -90 -14q-44 97 -134.5 156.5t-200.5 59.5q-152 0 -260 -107.5t-108 -260.5q0 -25 2 -37q-66 -14 -108.5 -67.5t-42.5 -122.5zM300 200h200v300h200v-300 h200l-300 -300z" />
<glyph unicode="" d="M-14 494q0 -80 56.5 -137t135.5 -57h8l414 414l403 -403q94 26 154.5 104t60.5 178q0 121 -85 207.5t-205 86.5q-46 0 -90 -14q-44 97 -134.5 156.5t-200.5 59.5q-152 0 -260 -107.5t-108 -260.5q0 -25 2 -37q-66 -14 -108.5 -67.5t-42.5 -122.5zM300 200l300 300 l300 -300h-200v-300h-200v300h-200z" />
<glyph unicode="" d="M100 200h400v-155l-75 -45h350l-75 45v155h400l-270 300h170l-270 300h170l-300 333l-300 -333h170l-270 -300h170z" />
<glyph unicode="" d="M121 700q0 -53 28.5 -97t75.5 -65q-4 -16 -4 -38q0 -74 52.5 -126.5t126.5 -52.5q56 0 100 30v-306l-75 -45h350l-75 45v306q46 -30 100 -30q74 0 126.5 52.5t52.5 126.5q0 24 -9 55q50 32 79.5 83t29.5 112q0 90 -61.5 155.5t-150.5 71.5q-26 89 -99.5 145.5 t-167.5 56.5q-116 0 -197.5 -81.5t-81.5 -197.5q0 -4 1 -12t1 -11q-14 2 -23 2q-74 0 -126.5 -52.5t-52.5 -126.5z" />
</font>
</defs></svg> | gui/bootstrap/fonts/glyphicons-halflings-regular.svg | 0 | https://github.com/syncthing/syncthing/commit/59a85c1d751c85e585ec93398c3ba5c50bdef91f | [
0.004581725224852562,
0.0011477461084723473,
0.00016918797336984426,
0.0008375534671358764,
0.0010856103617697954
] |
{
"id": 1,
"code_window": [
"\n",
"\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n",
"\tjson.NewEncoder(w).Encode(files)\n",
"}\n",
"\n",
"func restGetConnections(m *model.Model, w http.ResponseWriter, r *http.Request) {\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tjson.NewEncoder(w).Encode(output)\n"
],
"file_path": "cmd/syncthing/gui.go",
"type": "replace",
"edit_start_line_idx": 296
} | dir2/dfile
#include further-excludes
| internal/scanner/testdata/excludes | 0 | https://github.com/syncthing/syncthing/commit/59a85c1d751c85e585ec93398c3ba5c50bdef91f | [
0.00017512832710053772,
0.00017512832710053772,
0.00017512832710053772,
0.00017512832710053772,
0
] |
{
"id": 2,
"code_window": [
"\tret := make([]string, 0, 10)\n",
"\tfor _, subdirectory := range subdirectories {\n",
"\t\tinfo, err := os.Stat(subdirectory)\n",
"\t\tif err == nil && info.IsDir() {\n",
"\t\t\tret = append(ret, subdirectory + pathSeparator)\n",
"\t\t\tif len(ret) > 9 {\n",
"\t\t\t\tbreak\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tret = append(ret, subdirectory+pathSeparator)\n"
],
"file_path": "cmd/syncthing/gui.go",
"type": "replace",
"edit_start_line_idx": 660
} | // Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
package model
import (
"bufio"
"crypto/tls"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/syncthing/syncthing/internal/config"
"github.com/syncthing/syncthing/internal/events"
"github.com/syncthing/syncthing/internal/files"
"github.com/syncthing/syncthing/internal/ignore"
"github.com/syncthing/syncthing/internal/lamport"
"github.com/syncthing/syncthing/internal/osutil"
"github.com/syncthing/syncthing/internal/protocol"
"github.com/syncthing/syncthing/internal/scanner"
"github.com/syncthing/syncthing/internal/stats"
"github.com/syncthing/syncthing/internal/symlinks"
"github.com/syncthing/syncthing/internal/versioner"
"github.com/syndtr/goleveldb/leveldb"
)
type folderState int
const (
FolderIdle folderState = iota
FolderScanning
FolderSyncing
FolderCleaning
)
func (s folderState) String() string {
switch s {
case FolderIdle:
return "idle"
case FolderScanning:
return "scanning"
case FolderCleaning:
return "cleaning"
case FolderSyncing:
return "syncing"
default:
return "unknown"
}
}
// How many files to send in each Index/IndexUpdate message.
const (
indexTargetSize = 250 * 1024 // Aim for making index messages no larger than 250 KiB (uncompressed)
indexPerFileSize = 250 // Each FileInfo is approximately this big, in bytes, excluding BlockInfos
IndexPerBlockSize = 40 // Each BlockInfo is approximately this big
indexBatchSize = 1000 // Either way, don't include more files than this
)
type service interface {
Serve()
Stop()
}
type Model struct {
cfg *config.ConfigWrapper
db *leveldb.DB
finder *files.BlockFinder
deviceName string
clientName string
clientVersion string
folderCfgs map[string]config.FolderConfiguration // folder -> cfg
folderFiles map[string]*files.Set // folder -> files
folderDevices map[string][]protocol.DeviceID // folder -> deviceIDs
deviceFolders map[protocol.DeviceID][]string // deviceID -> folders
deviceStatRefs map[protocol.DeviceID]*stats.DeviceStatisticsReference // deviceID -> statsRef
folderIgnores map[string]*ignore.Matcher // folder -> matcher object
folderRunners map[string]service // folder -> puller or scanner
fmut sync.RWMutex // protects the above
folderState map[string]folderState // folder -> state
folderStateChanged map[string]time.Time // folder -> time when state changed
smut sync.RWMutex
protoConn map[protocol.DeviceID]protocol.Connection
rawConn map[protocol.DeviceID]io.Closer
deviceVer map[protocol.DeviceID]string
pmut sync.RWMutex // protects protoConn and rawConn
addedFolder bool
started bool
}
var (
ErrNoSuchFile = errors.New("no such file")
ErrInvalid = errors.New("file is invalid")
SymlinkWarning = sync.Once{}
)
// NewModel creates and starts a new model. The model starts in read-only mode,
// where it sends index information to connected peers and responds to requests
// for file data without altering the local folder in any way.
func NewModel(cfg *config.ConfigWrapper, deviceName, clientName, clientVersion string, db *leveldb.DB) *Model {
m := &Model{
cfg: cfg,
db: db,
deviceName: deviceName,
clientName: clientName,
clientVersion: clientVersion,
folderCfgs: make(map[string]config.FolderConfiguration),
folderFiles: make(map[string]*files.Set),
folderDevices: make(map[string][]protocol.DeviceID),
deviceFolders: make(map[protocol.DeviceID][]string),
deviceStatRefs: make(map[protocol.DeviceID]*stats.DeviceStatisticsReference),
folderIgnores: make(map[string]*ignore.Matcher),
folderRunners: make(map[string]service),
folderState: make(map[string]folderState),
folderStateChanged: make(map[string]time.Time),
protoConn: make(map[protocol.DeviceID]protocol.Connection),
rawConn: make(map[protocol.DeviceID]io.Closer),
deviceVer: make(map[protocol.DeviceID]string),
finder: files.NewBlockFinder(db, cfg),
}
var timeout = 20 * 60 // seconds
if t := os.Getenv("STDEADLOCKTIMEOUT"); len(t) > 0 {
it, err := strconv.Atoi(t)
if err == nil {
timeout = it
}
}
deadlockDetect(&m.fmut, time.Duration(timeout)*time.Second)
deadlockDetect(&m.smut, time.Duration(timeout)*time.Second)
deadlockDetect(&m.pmut, time.Duration(timeout)*time.Second)
return m
}
// StartFolderRW starts read/write processing on the given folder. When in
// read/write mode the model will attempt to keep in sync with the cluster by
// pulling needed files from peer devices.
func (m *Model) StartFolderRW(folder string) {
m.fmut.Lock()
cfg, ok := m.folderCfgs[folder]
if !ok {
panic("cannot start nonexistent folder " + folder)
}
_, ok = m.folderRunners[folder]
if ok {
panic("cannot start already running folder " + folder)
}
p := &Puller{
folder: folder,
dir: cfg.Path,
scanIntv: time.Duration(cfg.RescanIntervalS) * time.Second,
model: m,
ignorePerms: cfg.IgnorePerms,
lenientMtimes: cfg.LenientMtimes,
}
m.folderRunners[folder] = p
m.fmut.Unlock()
if len(cfg.Versioning.Type) > 0 {
factory, ok := versioner.Factories[cfg.Versioning.Type]
if !ok {
l.Fatalf("Requested versioning type %q that does not exist", cfg.Versioning.Type)
}
p.versioner = factory(folder, cfg.Path, cfg.Versioning.Params)
}
if cfg.LenientMtimes {
l.Infof("Folder %q is running with LenientMtimes workaround. Syncing may not work properly.", folder)
}
go p.Serve()
}
// StartFolderRO starts read-only processing on the given folder. When in
// read-only mode the model will announce files to the cluster but not
// pull in any external changes.
func (m *Model) StartFolderRO(folder string) {
m.fmut.Lock()
cfg, ok := m.folderCfgs[folder]
if !ok {
panic("cannot start nonexistent folder " + folder)
}
_, ok = m.folderRunners[folder]
if ok {
panic("cannot start already running folder " + folder)
}
s := &Scanner{
folder: folder,
intv: time.Duration(cfg.RescanIntervalS) * time.Second,
model: m,
}
m.folderRunners[folder] = s
m.fmut.Unlock()
go s.Serve()
}
type ConnectionInfo struct {
protocol.Statistics
Address string
ClientVersion string
}
// ConnectionStats returns a map with connection statistics for each connected device.
func (m *Model) ConnectionStats() map[string]ConnectionInfo {
type remoteAddrer interface {
RemoteAddr() net.Addr
}
m.pmut.RLock()
m.fmut.RLock()
var res = make(map[string]ConnectionInfo)
for device, conn := range m.protoConn {
ci := ConnectionInfo{
Statistics: conn.Statistics(),
ClientVersion: m.deviceVer[device],
}
if nc, ok := m.rawConn[device].(remoteAddrer); ok {
ci.Address = nc.RemoteAddr().String()
}
res[device.String()] = ci
}
m.fmut.RUnlock()
m.pmut.RUnlock()
in, out := protocol.TotalInOut()
res["total"] = ConnectionInfo{
Statistics: protocol.Statistics{
At: time.Now(),
InBytesTotal: in,
OutBytesTotal: out,
},
}
return res
}
// Returns statistics about each device
func (m *Model) DeviceStatistics() map[string]stats.DeviceStatistics {
var res = make(map[string]stats.DeviceStatistics)
for id := range m.cfg.Devices() {
res[id.String()] = m.deviceStatRef(id).GetStatistics()
}
return res
}
// Returns the completion status, in percent, for the given device and folder.
func (m *Model) Completion(device protocol.DeviceID, folder string) float64 {
defer m.leveldbPanicWorkaround()
var tot int64
m.fmut.RLock()
rf, ok := m.folderFiles[folder]
m.fmut.RUnlock()
if !ok {
return 0 // Folder doesn't exist, so we hardly have any of it
}
rf.WithGlobalTruncated(func(f protocol.FileIntf) bool {
if !f.IsDeleted() {
tot += f.Size()
}
return true
})
if tot == 0 {
return 100 // Folder is empty, so we have all of it
}
var need int64
rf.WithNeedTruncated(device, func(f protocol.FileIntf) bool {
if !f.IsDeleted() {
need += f.Size()
}
return true
})
res := 100 * (1 - float64(need)/float64(tot))
if debug {
l.Debugf("%v Completion(%s, %q): %f (%d / %d)", m, device, folder, res, need, tot)
}
return res
}
func sizeOf(fs []protocol.FileInfo) (files, deleted int, bytes int64) {
for _, f := range fs {
fs, de, by := sizeOfFile(f)
files += fs
deleted += de
bytes += by
}
return
}
func sizeOfFile(f protocol.FileIntf) (files, deleted int, bytes int64) {
if !f.IsDeleted() {
files++
} else {
deleted++
}
bytes += f.Size()
return
}
// GlobalSize returns the number of files, deleted files and total bytes for all
// files in the global model.
func (m *Model) GlobalSize(folder string) (files, deleted int, bytes int64) {
defer m.leveldbPanicWorkaround()
m.fmut.RLock()
defer m.fmut.RUnlock()
if rf, ok := m.folderFiles[folder]; ok {
rf.WithGlobalTruncated(func(f protocol.FileIntf) bool {
fs, de, by := sizeOfFile(f)
files += fs
deleted += de
bytes += by
return true
})
}
return
}
// LocalSize returns the number of files, deleted files and total bytes for all
// files in the local folder.
func (m *Model) LocalSize(folder string) (files, deleted int, bytes int64) {
defer m.leveldbPanicWorkaround()
m.fmut.RLock()
defer m.fmut.RUnlock()
if rf, ok := m.folderFiles[folder]; ok {
rf.WithHaveTruncated(protocol.LocalDeviceID, func(f protocol.FileIntf) bool {
if f.IsInvalid() {
return true
}
fs, de, by := sizeOfFile(f)
files += fs
deleted += de
bytes += by
return true
})
}
return
}
// NeedSize returns the number and total size of currently needed files.
func (m *Model) NeedSize(folder string) (files int, bytes int64) {
defer m.leveldbPanicWorkaround()
m.fmut.RLock()
defer m.fmut.RUnlock()
if rf, ok := m.folderFiles[folder]; ok {
rf.WithNeedTruncated(protocol.LocalDeviceID, func(f protocol.FileIntf) bool {
fs, de, by := sizeOfFile(f)
files += fs + de
bytes += by
return true
})
}
if debug {
l.Debugf("%v NeedSize(%q): %d %d", m, folder, files, bytes)
}
return
}
// NeedFolderFilesLimited returns the list of currently needed files, stopping at maxFiles
// files or maxBlocks blocks. Limits <= 0 are ignored.
func (m *Model) NeedFolderFilesLimited(folder string, maxFiles, maxBlocks int) []protocol.FileInfo {
defer m.leveldbPanicWorkaround()
m.fmut.RLock()
defer m.fmut.RUnlock()
nblocks := 0
if rf, ok := m.folderFiles[folder]; ok {
fs := make([]protocol.FileInfo, 0, maxFiles)
rf.WithNeed(protocol.LocalDeviceID, func(f protocol.FileIntf) bool {
fi := f.(protocol.FileInfo)
fs = append(fs, fi)
nblocks += len(fi.Blocks)
return (maxFiles <= 0 || len(fs) < maxFiles) && (maxBlocks <= 0 || nblocks < maxBlocks)
})
return fs
}
return nil
}
// Index is called when a new device is connected and we receive their full index.
// Implements the protocol.Model interface.
func (m *Model) Index(deviceID protocol.DeviceID, folder string, fs []protocol.FileInfo) {
if debug {
l.Debugf("IDX(in): %s %q: %d files", deviceID, folder, len(fs))
}
if !m.folderSharedWith(folder, deviceID) {
events.Default.Log(events.FolderRejected, map[string]string{
"folder": folder,
"device": deviceID.String(),
})
l.Warnf("Unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", folder, deviceID)
return
}
m.fmut.RLock()
files, ok := m.folderFiles[folder]
ignores, _ := m.folderIgnores[folder]
m.fmut.RUnlock()
if !ok {
l.Fatalf("Index for nonexistant folder %q", folder)
}
for i := 0; i < len(fs); {
lamport.Default.Tick(fs[i].Version)
if (ignores != nil && ignores.Match(fs[i].Name)) || symlinkInvalid(fs[i].IsSymlink()) {
if debug {
l.Debugln("dropping update for ignored/unsupported symlink", fs[i])
}
fs[i] = fs[len(fs)-1]
fs = fs[:len(fs)-1]
} else {
i++
}
}
files.Replace(deviceID, fs)
events.Default.Log(events.RemoteIndexUpdated, map[string]interface{}{
"device": deviceID.String(),
"folder": folder,
"items": len(fs),
"version": files.LocalVersion(deviceID),
})
}
// IndexUpdate is called for incremental updates to connected devices' indexes.
// Implements the protocol.Model interface.
func (m *Model) IndexUpdate(deviceID protocol.DeviceID, folder string, fs []protocol.FileInfo) {
if debug {
l.Debugf("%v IDXUP(in): %s / %q: %d files", m, deviceID, folder, len(fs))
}
if !m.folderSharedWith(folder, deviceID) {
l.Infof("Update for unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", folder, deviceID)
return
}
m.fmut.RLock()
files, ok := m.folderFiles[folder]
ignores, _ := m.folderIgnores[folder]
m.fmut.RUnlock()
if !ok {
l.Fatalf("IndexUpdate for nonexistant folder %q", folder)
}
for i := 0; i < len(fs); {
lamport.Default.Tick(fs[i].Version)
if (ignores != nil && ignores.Match(fs[i].Name)) || symlinkInvalid(fs[i].IsSymlink()) {
if debug {
l.Debugln("dropping update for ignored/unsupported symlink", fs[i])
}
fs[i] = fs[len(fs)-1]
fs = fs[:len(fs)-1]
} else {
i++
}
}
files.Update(deviceID, fs)
events.Default.Log(events.RemoteIndexUpdated, map[string]interface{}{
"device": deviceID.String(),
"folder": folder,
"items": len(fs),
"version": files.LocalVersion(deviceID),
})
}
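// folderSharedWith returns true if the given folder is configured to be
// shared with the given device.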
func (m *Model) folderSharedWith(folder string, deviceID protocol.DeviceID) bool {
m.fmut.RLock()
defer m.fmut.RUnlock()
for _, nfolder := range m.deviceFolders[deviceID] {
if nfolder == folder {
return true
}
}
return false
}
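// ClusterConfig is called when a ClusterConfigMessage is received from a
// connected device. It records the remote client name and version and, if the
// device is an introducer, adds any announced devices and folder shares to the
// local configuration. Implements the protocol.Model interface.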
func (m *Model) ClusterConfig(deviceID protocol.DeviceID, cm protocol.ClusterConfigMessage) {
m.pmut.Lock()
if cm.ClientName == "syncthing" {
m.deviceVer[deviceID] = cm.ClientVersion
} else {
m.deviceVer[deviceID] = cm.ClientName + " " + cm.ClientVersion
}
m.pmut.Unlock()
l.Infof(`Device %s client is "%s %s"`, deviceID, cm.ClientName, cm.ClientVersion)
var changed bool
if name := cm.GetOption("name"); name != "" {
l.Infof("Device %s name is %q", deviceID, name)
device, ok := m.cfg.Devices()[deviceID]
if ok && device.Name == "" {
device.Name = name
m.cfg.SetDevice(device)
changed = true
}
}
if m.cfg.Devices()[deviceID].Introducer {
// This device is an introducer. Go through the announced lists of folders
// and devices and add what we are missing.
for _, folder := range cm.Folders {
// If we don't have this folder yet, skip it. Ideally, we'd
// offer up something in the GUI to create the folder, but for the
// moment we only handle folders that we already have.
if _, ok := m.folderDevices[folder.ID]; !ok {
continue
}
nextDevice:
for _, device := range folder.Devices {
var id protocol.DeviceID
copy(id[:], device.ID)
if _, ok := m.cfg.Devices()[id]; !ok {
// The device is currently unknown. Add it to the config.
l.Infof("Adding device %v to config (vouched for by introducer %v)", id, deviceID)
newDeviceCfg := config.DeviceConfiguration{
DeviceID: id,
Compression: true,
Addresses: []string{"dynamic"},
}
// The introducers' introducers are also our introducers.
if device.Flags&protocol.FlagIntroducer != 0 {
l.Infof("Device %v is now also an introducer", id)
newDeviceCfg.Introducer = true
}
m.cfg.SetDevice(newDeviceCfg)
changed = true
}
for _, er := range m.deviceFolders[id] {
if er == folder.ID {
// We already share the folder with this device, so
// nothing to do.
continue nextDevice
}
}
// We don't yet share this folder with this device. Add the device
// to the sharing list of the folder.
l.Infof("Adding device %v to share %q (vouched for by introducer %v)", id, folder.ID, deviceID)
m.deviceFolders[id] = append(m.deviceFolders[id], folder.ID)
m.folderDevices[folder.ID] = append(m.folderDevices[folder.ID], id)
folderCfg := m.cfg.Folders()[folder.ID]
folderCfg.Devices = append(folderCfg.Devices, config.FolderDeviceConfiguration{
DeviceID: id,
})
m.cfg.SetFolder(folderCfg)
changed = true
}
}
}
if changed {
m.cfg.Save()
}
}
// Close removes the peer from the model and closes the underlying connection if possible.
// Implements the protocol.Model interface.
func (m *Model) Close(device protocol.DeviceID, err error) {
l.Infof("Connection to %s closed: %v", device, err)
events.Default.Log(events.DeviceDisconnected, map[string]string{
"id": device.String(),
"error": err.Error(),
})
m.pmut.Lock()
m.fmut.RLock()
for _, folder := range m.deviceFolders[device] {
m.folderFiles[folder].Replace(device, nil)
}
m.fmut.RUnlock()
conn, ok := m.rawConn[device]
if ok {
if conn, ok := conn.(*tls.Conn); ok {
// If the underlying connection is a *tls.Conn, Close() does more
// than it says on the tin. Specifically, it sends a TLS alert
// message, which might block forever if the connection is dead
// and we don't have a deadline set.
conn.SetWriteDeadline(time.Now().Add(250 * time.Millisecond))
}
conn.Close()
}
delete(m.protoConn, device)
delete(m.rawConn, device)
delete(m.deviceVer, device)
m.pmut.Unlock()
}
// Request returns the specified data segment by reading it from local disk.
// Implements the protocol.Model interface.
func (m *Model) Request(deviceID protocol.DeviceID, folder, name string, offset int64, size int) ([]byte, error) {
// Verify that the requested file exists in the local model.
m.fmut.RLock()
r, ok := m.folderFiles[folder]
m.fmut.RUnlock()
if !ok {
l.Warnf("Request from %s for file %s in nonexistent folder %q", deviceID, name, folder)
return nil, ErrNoSuchFile
}
lf := r.Get(protocol.LocalDeviceID, name)
if lf.IsInvalid() || lf.IsDeleted() {
if debug {
l.Debugf("%v REQ(in): %s: %q / %q o=%d s=%d; invalid: %v", m, deviceID, folder, name, offset, size, lf)
}
return nil, ErrInvalid
}
if offset > lf.Size() {
if debug {
l.Debugf("%v REQ(in; nonexistent): %s: %q o=%d s=%d", m, deviceID, name, offset, size)
}
return nil, ErrNoSuchFile
}
if debug && deviceID != protocol.LocalDeviceID {
l.Debugf("%v REQ(in): %s: %q / %q o=%d s=%d", m, deviceID, folder, name, offset, size)
}
m.fmut.RLock()
fn := filepath.Join(m.folderCfgs[folder].Path, name)
m.fmut.RUnlock()
var reader io.ReaderAt
var err error
if lf.IsSymlink() {
target, _, err := symlinks.Read(fn)
if err != nil {
return nil, err
}
reader = strings.NewReader(target)
} else {
reader, err = os.Open(fn) // XXX: Inefficient, should cache fd?
if err != nil {
return nil, err
}
defer reader.(*os.File).Close()
}
buf := make([]byte, size)
_, err = reader.ReadAt(buf, offset)
if err != nil {
return nil, err
}
return buf, nil
}
// ReplaceLocal replaces the local folder index with the given list of files.
func (m *Model) ReplaceLocal(folder string, fs []protocol.FileInfo) {
m.fmut.RLock()
m.folderFiles[folder].ReplaceWithDelete(protocol.LocalDeviceID, fs)
m.fmut.RUnlock()
}
func (m *Model) CurrentFolderFile(folder string, file string) protocol.FileInfo {
m.fmut.RLock()
f := m.folderFiles[folder].Get(protocol.LocalDeviceID, file)
m.fmut.RUnlock()
return f
}
func (m *Model) CurrentGlobalFile(folder string, file string) protocol.FileInfo {
m.fmut.RLock()
f := m.folderFiles[folder].GetGlobal(file)
m.fmut.RUnlock()
return f
}
type cFiler struct {
m *Model
r string
}
// Implements scanner.CurrentFiler
func (cf cFiler) CurrentFile(file string) protocol.FileInfo {
return cf.m.CurrentFolderFile(cf.r, file)
}
// ConnectedTo returns true if we are connected to the named device.
func (m *Model) ConnectedTo(deviceID protocol.DeviceID) bool {
m.pmut.RLock()
_, ok := m.protoConn[deviceID]
m.pmut.RUnlock()
if ok {
m.deviceWasSeen(deviceID)
}
return ok
}
func (m *Model) GetIgnores(folder string) ([]string, []string, error) {
var lines []string
m.fmut.RLock()
cfg, ok := m.folderCfgs[folder]
m.fmut.RUnlock()
if !ok {
return lines, nil, fmt.Errorf("Folder %s does not exist", folder)
}
fd, err := os.Open(filepath.Join(cfg.Path, ".stignore"))
if err != nil {
if os.IsNotExist(err) {
return lines, nil, nil
}
l.Warnln("Loading .stignore:", err)
return lines, nil, err
}
defer fd.Close()
scanner := bufio.NewScanner(fd)
for scanner.Scan() {
lines = append(lines, strings.TrimSpace(scanner.Text()))
}
var patterns []string
if matcher := m.folderIgnores[folder]; matcher != nil {
patterns = matcher.Patterns()
}
return lines, patterns, nil
}
func (m *Model) SetIgnores(folder string, content []string) error {
cfg, ok := m.folderCfgs[folder]
if !ok {
return fmt.Errorf("Folder %s does not exist", folder)
}
fd, err := ioutil.TempFile(cfg.Path, ".syncthing.stignore-"+folder)
if err != nil {
l.Warnln("Saving .stignore:", err)
return err
}
defer os.Remove(fd.Name())
for _, line := range content {
_, err = fmt.Fprintln(fd, line)
if err != nil {
l.Warnln("Saving .stignore:", err)
return err
}
}
err = fd.Close()
if err != nil {
l.Warnln("Saving .stignore:", err)
return err
}
file := filepath.Join(cfg.Path, ".stignore")
err = osutil.Rename(fd.Name(), file)
if err != nil {
l.Warnln("Saving .stignore:", err)
return err
}
return m.ScanFolder(folder)
}
// AddConnection adds a new peer connection to the model. An initial index will
// be sent to the connected peer, and thereafter index updates whenever the local
// folder changes.
func (m *Model) AddConnection(rawConn io.Closer, protoConn protocol.Connection) {
deviceID := protoConn.ID()
m.pmut.Lock()
if _, ok := m.protoConn[deviceID]; ok {
panic("add existing device")
}
m.protoConn[deviceID] = protoConn
if _, ok := m.rawConn[deviceID]; ok {
panic("add existing device")
}
m.rawConn[deviceID] = rawConn
cm := m.clusterConfig(deviceID)
protoConn.ClusterConfig(cm)
m.fmut.RLock()
for _, folder := range m.deviceFolders[deviceID] {
fs := m.folderFiles[folder]
go sendIndexes(protoConn, folder, fs, m.folderIgnores[folder])
}
m.fmut.RUnlock()
m.pmut.Unlock()
m.deviceWasSeen(deviceID)
}
func (m *Model) deviceStatRef(deviceID protocol.DeviceID) *stats.DeviceStatisticsReference {
m.fmut.Lock()
defer m.fmut.Unlock()
if sr, ok := m.deviceStatRefs[deviceID]; ok {
return sr
} else {
sr = stats.NewDeviceStatisticsReference(m.db, deviceID)
m.deviceStatRefs[deviceID] = sr
return sr
}
}
func (m *Model) deviceWasSeen(deviceID protocol.DeviceID) {
m.deviceStatRef(deviceID).WasSeen()
}
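// sendIndexes sends the full index for the given folder to the remote device,
// then periodically sends incremental updates whenever the local version has
// advanced. It runs until sending fails with an error.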
func sendIndexes(conn protocol.Connection, folder string, fs *files.Set, ignores *ignore.Matcher) {
deviceID := conn.ID()
name := conn.Name()
var err error
if debug {
l.Debugf("sendIndexes for %s-%s/%q starting", deviceID, name, folder)
}
minLocalVer, err := sendIndexTo(true, 0, conn, folder, fs, ignores)
for err == nil {
time.Sleep(5 * time.Second)
if fs.LocalVersion(protocol.LocalDeviceID) <= minLocalVer {
continue
}
minLocalVer, err = sendIndexTo(false, minLocalVer, conn, folder, fs, ignores)
}
if debug {
l.Debugf("sendIndexes for %s-%s/%q exiting: %v", deviceID, name, folder, err)
}
}
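// sendIndexTo makes a single pass over the local index, batching files with a
// local version above minLocalVer and sending them as Index or IndexUpdate
// messages. It returns the highest local version seen and any send error.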
func sendIndexTo(initial bool, minLocalVer uint64, conn protocol.Connection, folder string, fs *files.Set, ignores *ignore.Matcher) (uint64, error) {
deviceID := conn.ID()
name := conn.Name()
batch := make([]protocol.FileInfo, 0, indexBatchSize)
currentBatchSize := 0
maxLocalVer := uint64(0)
var err error
fs.WithHave(protocol.LocalDeviceID, func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfo)
if f.LocalVersion <= minLocalVer {
return true
}
if f.LocalVersion > maxLocalVer {
maxLocalVer = f.LocalVersion
}
if (ignores != nil && ignores.Match(f.Name)) || symlinkInvalid(f.IsSymlink()) {
if debug {
l.Debugln("not sending update for ignored/unsupported symlink", f)
}
return true
}
if len(batch) == indexBatchSize || currentBatchSize > indexTargetSize {
if initial {
if err = conn.Index(folder, batch); err != nil {
return false
}
if debug {
l.Debugf("sendIndexes for %s-%s/%q: %d files (<%d bytes) (initial index)", deviceID, name, folder, len(batch), currentBatchSize)
}
initial = false
} else {
if err = conn.IndexUpdate(folder, batch); err != nil {
return false
}
if debug {
l.Debugf("sendIndexes for %s-%s/%q: %d files (<%d bytes) (batched update)", deviceID, name, folder, len(batch), currentBatchSize)
}
}
batch = make([]protocol.FileInfo, 0, indexBatchSize)
currentBatchSize = 0
}
batch = append(batch, f)
currentBatchSize += indexPerFileSize + len(f.Blocks)*IndexPerBlockSize
return true
})
if initial && err == nil {
err = conn.Index(folder, batch)
if debug && err == nil {
l.Debugf("sendIndexes for %s-%s/%q: %d files (small initial index)", deviceID, name, folder, len(batch))
}
} else if len(batch) > 0 && err == nil {
err = conn.IndexUpdate(folder, batch)
if debug && err == nil {
l.Debugf("sendIndexes for %s-%s/%q: %d files (last batch)", deviceID, name, folder, len(batch))
}
}
return maxLocalVer, err
}
func (m *Model) updateLocal(folder string, f protocol.FileInfo) {
f.LocalVersion = 0
m.fmut.RLock()
m.folderFiles[folder].Update(protocol.LocalDeviceID, []protocol.FileInfo{f})
m.fmut.RUnlock()
events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
"folder": folder,
"name": f.Name,
"modified": time.Unix(f.Modified, 0),
"flags": fmt.Sprintf("0%o", f.Flags),
"size": f.Size(),
})
}
func (m *Model) requestGlobal(deviceID protocol.DeviceID, folder, name string, offset int64, size int, hash []byte) ([]byte, error) {
m.pmut.RLock()
nc, ok := m.protoConn[deviceID]
m.pmut.RUnlock()
if !ok {
return nil, fmt.Errorf("requestGlobal: no such device: %s", deviceID)
}
if debug {
l.Debugf("%v REQ(out): %s: %q / %q o=%d s=%d h=%x", m, deviceID, folder, name, offset, size, hash)
}
return nc.Request(folder, name, offset, size)
}
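// AddFolder registers a folder with the model, setting up its file set and
// device mappings. It must be called before the model is started.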
func (m *Model) AddFolder(cfg config.FolderConfiguration) {
if m.started {
panic("cannot add folder to started model")
}
if len(cfg.ID) == 0 {
panic("cannot add empty folder id")
}
m.fmut.Lock()
m.folderCfgs[cfg.ID] = cfg
m.folderFiles[cfg.ID] = files.NewSet(cfg.ID, m.db)
m.folderDevices[cfg.ID] = make([]protocol.DeviceID, len(cfg.Devices))
for i, device := range cfg.Devices {
m.folderDevices[cfg.ID][i] = device.DeviceID
m.deviceFolders[device.DeviceID] = append(m.deviceFolders[device.DeviceID], cfg.ID)
}
m.addedFolder = true
m.fmut.Unlock()
}
func (m *Model) ScanFolders() {
m.fmut.RLock()
var folders = make([]string, 0, len(m.folderCfgs))
for folder := range m.folderCfgs {
folders = append(folders, folder)
}
m.fmut.RUnlock()
var wg sync.WaitGroup
wg.Add(len(folders))
for _, folder := range folders {
folder := folder
go func() {
err := m.ScanFolder(folder)
if err != nil {
m.cfg.InvalidateFolder(folder, err.Error())
}
wg.Done()
}()
}
wg.Wait()
}
func (m *Model) ScanFolder(folder string) error {
return m.ScanFolderSub(folder, "")
}
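// ScanFolderSub rescans the given subdirectory of a folder, updating the local
// index with new, changed and deleted files and marking ignored or unsupported
// entries as invalid.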
func (m *Model) ScanFolderSub(folder, sub string) error {
if p := filepath.Clean(filepath.Join(folder, sub)); !strings.HasPrefix(p, folder) {
return errors.New("invalid subpath")
}
m.fmut.RLock()
fs, ok := m.folderFiles[folder]
dir := m.folderCfgs[folder].Path
ignores, _ := ignore.Load(filepath.Join(dir, ".stignore"), m.cfg.Options().CacheIgnoredFiles)
m.folderIgnores[folder] = ignores
w := &scanner.Walker{
Dir: dir,
Sub: sub,
Matcher: ignores,
BlockSize: protocol.BlockSize,
TempNamer: defTempNamer,
CurrentFiler: cFiler{m, folder},
IgnorePerms: m.folderCfgs[folder].IgnorePerms,
}
m.fmut.RUnlock()
if !ok {
return errors.New("no such folder")
}
m.setState(folder, FolderScanning)
fchan, err := w.Walk()
if err != nil {
return err
}
batchSize := 100
batch := make([]protocol.FileInfo, 0, batchSize)
for f := range fchan {
events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
"folder": folder,
"name": f.Name,
"modified": time.Unix(f.Modified, 0),
"flags": fmt.Sprintf("0%o", f.Flags),
"size": f.Size(),
})
if len(batch) == batchSize {
fs.Update(protocol.LocalDeviceID, batch)
batch = batch[:0]
}
batch = append(batch, f)
}
if len(batch) > 0 {
fs.Update(protocol.LocalDeviceID, batch)
}
batch = batch[:0]
// TODO: We should limit the Have scanning to start at sub
seenPrefix := false
fs.WithHaveTruncated(protocol.LocalDeviceID, func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfoTruncated)
if !strings.HasPrefix(f.Name, sub) {
// Return true so that we keep iterating, until we get to the part
// of the tree we are interested in. Then return false so we stop
// iterating when we've passed the end of the subtree.
return !seenPrefix
}
seenPrefix = true
if !f.IsDeleted() {
if f.IsInvalid() {
return true
}
if len(batch) == batchSize {
fs.Update(protocol.LocalDeviceID, batch)
batch = batch[:0]
}
if (ignores != nil && ignores.Match(f.Name)) || symlinkInvalid(f.IsSymlink()) {
// File has been ignored or is an unsupported symlink. Set the invalid bit.
l.Debugln("setting invalid bit on ignored", f)
nf := protocol.FileInfo{
Name: f.Name,
Flags: f.Flags | protocol.FlagInvalid,
Modified: f.Modified,
Version: f.Version, // The file is still the same, so don't bump version
}
events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
"folder": folder,
"name": f.Name,
"modified": time.Unix(f.Modified, 0),
"flags": fmt.Sprintf("0%o", f.Flags),
"size": f.Size(),
})
batch = append(batch, nf)
} else if _, err := os.Lstat(filepath.Join(dir, f.Name)); err != nil && os.IsNotExist(err) {
// File has been deleted
nf := protocol.FileInfo{
Name: f.Name,
Flags: f.Flags | protocol.FlagDeleted,
Modified: f.Modified,
Version: lamport.Default.Tick(f.Version),
}
events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
"folder": folder,
"name": f.Name,
"modified": time.Unix(f.Modified, 0),
"flags": fmt.Sprintf("0%o", f.Flags),
"size": f.Size(),
})
batch = append(batch, nf)
}
}
return true
})
if len(batch) > 0 {
fs.Update(protocol.LocalDeviceID, batch)
}
m.setState(folder, FolderIdle)
return nil
}
// clusterConfig returns a ClusterConfigMessage that is correct for the given peer device
func (m *Model) clusterConfig(device protocol.DeviceID) protocol.ClusterConfigMessage {
cm := protocol.ClusterConfigMessage{
ClientName: m.clientName,
ClientVersion: m.clientVersion,
Options: []protocol.Option{
{
Key: "name",
Value: m.deviceName,
},
},
}
m.fmut.RLock()
for _, folder := range m.deviceFolders[device] {
cr := protocol.Folder{
ID: folder,
}
for _, device := range m.folderDevices[folder] {
// DeviceID is a value type, but with an underlying array. Copy it
// so we don't grab aliases to the same array later on in device[:]
device := device
// TODO: Set read only bit when relevant
cn := protocol.Device{
ID: device[:],
Flags: protocol.FlagShareTrusted,
}
if deviceCfg := m.cfg.Devices()[device]; deviceCfg.Introducer {
cn.Flags |= protocol.FlagIntroducer
}
cr.Devices = append(cr.Devices, cn)
}
cm.Folders = append(cm.Folders, cr)
}
m.fmut.RUnlock()
return cm
}
func (m *Model) setState(folder string, state folderState) {
m.smut.Lock()
oldState := m.folderState[folder]
changed, ok := m.folderStateChanged[folder]
if state != oldState {
m.folderState[folder] = state
m.folderStateChanged[folder] = time.Now()
eventData := map[string]interface{}{
"folder": folder,
"to": state.String(),
}
if ok {
eventData["duration"] = time.Since(changed).Seconds()
eventData["from"] = oldState.String()
}
events.Default.Log(events.StateChanged, eventData)
}
m.smut.Unlock()
}
func (m *Model) State(folder string) (string, time.Time) {
m.smut.RLock()
state := m.folderState[folder]
changed := m.folderStateChanged[folder]
m.smut.RUnlock()
return state.String(), changed
}
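// Override reverts the cluster to the local state of the folder: files missing
// locally are marked as deleted and files that differ are replaced with the
// local version, overriding remote changes.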
func (m *Model) Override(folder string) {
m.fmut.RLock()
fs := m.folderFiles[folder]
m.fmut.RUnlock()
m.setState(folder, FolderScanning)
batch := make([]protocol.FileInfo, 0, indexBatchSize)
fs.WithNeed(protocol.LocalDeviceID, func(fi protocol.FileIntf) bool {
need := fi.(protocol.FileInfo)
if len(batch) == indexBatchSize {
fs.Update(protocol.LocalDeviceID, batch)
batch = batch[:0]
}
have := fs.Get(protocol.LocalDeviceID, need.Name)
if have.Name != need.Name {
// We are missing the file
need.Flags |= protocol.FlagDeleted
need.Blocks = nil
} else {
// We have the file, replace with our version
need = have
}
need.Version = lamport.Default.Tick(need.Version)
need.LocalVersion = 0
batch = append(batch, need)
return true
})
if len(batch) > 0 {
fs.Update(protocol.LocalDeviceID, batch)
}
m.setState(folder, FolderIdle)
}
// CurrentLocalVersion returns the change version for the given folder.
// This is guaranteed to increment if the contents of the local folder have
// changed.
func (m *Model) CurrentLocalVersion(folder string) uint64 {
m.fmut.RLock()
fs, ok := m.folderFiles[folder]
m.fmut.RUnlock()
if !ok {
// The folder might not exist, since this can be called with a user
// specified folder name from the REST interface.
return 0
}
return fs.LocalVersion(protocol.LocalDeviceID)
}
// RemoteLocalVersion returns the change version for the given folder, as
// sent by remote peers. This is guaranteed to increment if the contents of
// the remote or global folder have changed.
func (m *Model) RemoteLocalVersion(folder string) uint64 {
m.fmut.RLock()
defer m.fmut.RUnlock()
fs, ok := m.folderFiles[folder]
if !ok {
// The folder might not exist, since this can be called with a user
// specified folder name from the REST interface.
return 0
}
var ver uint64
for _, n := range m.folderDevices[folder] {
ver += fs.LocalVersion(n)
}
return ver
}
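// availability returns the list of currently connected devices that advertise
// having the given file in the given folder.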
func (m *Model) availability(folder string, file string) []protocol.DeviceID {
// Acquire this lock first, as the value returned from folderFiles can
// get heavily modified on Close().
m.pmut.RLock()
defer m.pmut.RUnlock()
m.fmut.RLock()
fs, ok := m.folderFiles[folder]
m.fmut.RUnlock()
if !ok {
return nil
}
availableDevices := []protocol.DeviceID{}
for _, device := range fs.Availability(file) {
_, ok := m.protoConn[device]
if ok {
availableDevices = append(availableDevices, device)
}
}
return availableDevices
}
func (m *Model) String() string {
return fmt.Sprintf("model@%p", m)
}
func (m *Model) leveldbPanicWorkaround() {
// When an inconsistency is detected in leveldb we panic(). This is
// appropriate because it should never happen, but currently it does for
// some reason. However it only seems to trigger in the asynchronous full-
// database scans that happen due to REST and usage-reporting calls. In
// those places we defer to this workaround to catch the panic instead of
// taking down syncthing.
// This is just a band-aid and should be removed as soon as we have found
// a real root cause.
if pnc := recover(); pnc != nil {
if err, ok := pnc.(error); ok && strings.Contains(err.Error(), "leveldb") {
l.Infoln("recovered:", err)
} else {
// Any non-leveldb panic is genuine and should continue panicking with
// the original value, which may not be an error at all.
panic(pnc)
}
}
}
func symlinkInvalid(isLink bool) bool {
if !symlinks.Supported && isLink {
SymlinkWarning.Do(func() {
l.Warnln("Symlinks are unsupported as they require Administrator priviledges. This might cause your folder to appear out of sync.")
})
return true
}
return false
}
| internal/model/model.go | 1 | https://github.com/syncthing/syncthing/commit/59a85c1d751c85e585ec93398c3ba5c50bdef91f | [
0.0012859274866059422,
0.0001963478425750509,
0.00016244989819824696,
0.00017184807802550495,
0.00010716900578700006
] |
{
"id": 2,
"code_window": [
"\tret := make([]string, 0, 10)\n",
"\tfor _, subdirectory := range subdirectories {\n",
"\t\tinfo, err := os.Stat(subdirectory)\n",
"\t\tif err == nil && info.IsDir() {\n",
"\t\t\tret = append(ret, subdirectory + pathSeparator)\n",
"\t\t\tif len(ret) > 9 {\n",
"\t\t\t\tbreak\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tret = append(ret, subdirectory+pathSeparator)\n"
],
"file_path": "cmd/syncthing/gui.go",
"type": "replace",
"edit_start_line_idx": 660
} | // Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
// Package xdr implements an XDR (RFC 4506) encoder/decoder.
package xdr
| Godeps/_workspace/src/github.com/calmh/xdr/doc.go | 0 | https://github.com/syncthing/syncthing/commit/59a85c1d751c85e585ec93398c3ba5c50bdef91f | [
0.00017764975200407207,
0.00017764975200407207,
0.00017764975200407207,
0.00017764975200407207,
0
] |
{
"id": 2,
"code_window": [
"\tret := make([]string, 0, 10)\n",
"\tfor _, subdirectory := range subdirectories {\n",
"\t\tinfo, err := os.Stat(subdirectory)\n",
"\t\tif err == nil && info.IsDir() {\n",
"\t\t\tret = append(ret, subdirectory + pathSeparator)\n",
"\t\t\tif len(ret) > 9 {\n",
"\t\t\t\tbreak\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tret = append(ret, subdirectory+pathSeparator)\n"
],
"file_path": "cmd/syncthing/gui.go",
"type": "replace",
"edit_start_line_idx": 660
} | // Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
package files
import (
"bytes"
"testing"
)
func TestDeviceKey(t *testing.T) {
fld := []byte("folder6789012345678901234567890123456789012345678901234567890123")
dev := []byte("device67890123456789012345678901")
name := []byte("name")
key := deviceKey(fld, dev, name)
fld2 := deviceKeyFolder(key)
if bytes.Compare(fld2, fld) != 0 {
t.Errorf("wrong folder %q != %q", fld2, fld)
}
dev2 := deviceKeyDevice(key)
if bytes.Compare(dev2, dev) != 0 {
t.Errorf("wrong device %q != %q", dev2, dev)
}
name2 := deviceKeyName(key)
if bytes.Compare(name2, name) != 0 {
t.Errorf("wrong name %q != %q", name2, name)
}
}
func TestGlobalKey(t *testing.T) {
fld := []byte("folder6789012345678901234567890123456789012345678901234567890123")
name := []byte("name")
key := globalKey(fld, name)
fld2 := globalKeyFolder(key)
if bytes.Compare(fld2, fld) != 0 {
t.Errorf("wrong folder %q != %q", fld2, fld)
}
name2 := globalKeyName(key)
if bytes.Compare(name2, name) != 0 {
t.Errorf("wrong name %q != %q", name2, name)
}
}
| internal/files/leveldb_test.go | 0 | https://github.com/syncthing/syncthing/commit/59a85c1d751c85e585ec93398c3ba5c50bdef91f | [
0.00017810208373703063,
0.00017379061318933964,
0.00017063799896277487,
0.00017369716078974307,
0.0000023252885057445383
] |
{
"id": 2,
"code_window": [
"\tret := make([]string, 0, 10)\n",
"\tfor _, subdirectory := range subdirectories {\n",
"\t\tinfo, err := os.Stat(subdirectory)\n",
"\t\tif err == nil && info.IsDir() {\n",
"\t\t\tret = append(ret, subdirectory + pathSeparator)\n",
"\t\t\tif len(ret) > 9 {\n",
"\t\t\t\tbreak\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tret = append(ret, subdirectory+pathSeparator)\n"
],
"file_path": "cmd/syncthing/gui.go",
"type": "replace",
"edit_start_line_idx": 660
} | // Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bytes"
"encoding/base64"
"math/rand"
"net/http"
"strings"
"sync"
"time"
"code.google.com/p/go.crypto/bcrypt"
"github.com/syncthing/syncthing/internal/config"
)
var (
sessions = make(map[string]bool)
sessionsMut sync.Mutex
)
func basicAuthAndSessionMiddleware(cfg config.GUIConfiguration, next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if cfg.APIKey != "" && r.Header.Get("X-API-Key") == cfg.APIKey {
next.ServeHTTP(w, r)
return
}
cookie, err := r.Cookie("sessionid")
if err == nil && cookie != nil {
sessionsMut.Lock()
_, ok := sessions[cookie.Value]
sessionsMut.Unlock()
if ok {
next.ServeHTTP(w, r)
return
}
}
error := func() {
time.Sleep(time.Duration(rand.Intn(100)+100) * time.Millisecond)
w.Header().Set("WWW-Authenticate", "Basic realm=\"Authorization Required\"")
http.Error(w, "Not Authorized", http.StatusUnauthorized)
}
hdr := r.Header.Get("Authorization")
if !strings.HasPrefix(hdr, "Basic ") {
error()
return
}
hdr = hdr[6:]
bs, err := base64.StdEncoding.DecodeString(hdr)
if err != nil {
error()
return
}
fields := bytes.SplitN(bs, []byte(":"), 2)
if len(fields) != 2 {
error()
return
}
if string(fields[0]) != cfg.User {
error()
return
}
if err := bcrypt.CompareHashAndPassword([]byte(cfg.Password), fields[1]); err != nil {
error()
return
}
sessionid := randomString(32)
sessionsMut.Lock()
sessions[sessionid] = true
sessionsMut.Unlock()
http.SetCookie(w, &http.Cookie{
Name: "sessionid",
Value: sessionid,
MaxAge: 0,
})
next.ServeHTTP(w, r)
})
}
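// Illustrative usage sketch (not from the original source): the middleware
// wraps an existing handler and is only applied when credentials are
// configured, mirroring its use in gui.go:
//
//	if len(cfg.User) > 0 && len(cfg.Password) > 0 {
//		handler = basicAuthAndSessionMiddleware(cfg, handler)
//	}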
| cmd/syncthing/gui_auth.go | 0 | https://github.com/syncthing/syncthing/commit/59a85c1d751c85e585ec93398c3ba5c50bdef91f | [
0.00017810208373703063,
0.00017077186203096062,
0.00016567630518693477,
0.00017035560449585319,
0.000004094692030776059
] |
{
"id": 3,
"code_window": [
"\t}\n",
"\treturn\n",
"}\n",
"\n",
"// NeedFiles returns the list of currently needed files, stopping at maxFiles\n",
"// files or maxBlocks blocks. Limits <= 0 are ignored.\n",
"func (m *Model) NeedFolderFilesLimited(folder string, maxFiles, maxBlocks int) []protocol.FileInfo {\n",
"\tdefer m.leveldbPanicWorkaround()\n",
"\n",
"\tm.fmut.RLock()\n",
"\tdefer m.fmut.RUnlock()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// files. Limit <= 0 is ignored.\n",
"func (m *Model) NeedFolderFilesLimited(folder string, maxFiles int) []protocol.FileInfoTruncated {\n"
],
"file_path": "internal/model/model.go",
"type": "replace",
"edit_start_line_idx": 398
} | // Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"mime"
"net"
"net/http"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"time"
"code.google.com/p/go.crypto/bcrypt"
"github.com/calmh/logger"
"github.com/syncthing/syncthing/internal/auto"
"github.com/syncthing/syncthing/internal/config"
"github.com/syncthing/syncthing/internal/discover"
"github.com/syncthing/syncthing/internal/events"
"github.com/syncthing/syncthing/internal/model"
"github.com/syncthing/syncthing/internal/osutil"
"github.com/syncthing/syncthing/internal/protocol"
"github.com/syncthing/syncthing/internal/upgrade"
"github.com/vitrun/qart/qr"
)
type guiError struct {
Time time.Time
Error string
}
var (
configInSync = true
guiErrors = []guiError{}
guiErrorsMut sync.Mutex
modt = time.Now().UTC().Format(http.TimeFormat)
eventSub *events.BufferedSubscription
)
func init() {
l.AddHandler(logger.LevelWarn, showGuiError)
sub := events.Default.Subscribe(events.AllEvents)
eventSub = events.NewBufferedSubscription(sub, 1000)
}
func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) error {
var err error
cert, err := loadCert(confDir, "https-")
if err != nil {
l.Infoln("Loading HTTPS certificate:", err)
l.Infoln("Creating new HTTPS certificate")
newCertificate(confDir, "https-")
cert, err = loadCert(confDir, "https-")
}
if err != nil {
return err
}
tlsCfg := &tls.Config{
Certificates: []tls.Certificate{cert},
ServerName: "syncthing",
}
rawListener, err := net.Listen("tcp", cfg.Address)
if err != nil {
return err
}
listener := &DowngradingListener{rawListener, tlsCfg}
// The GET handlers
getRestMux := http.NewServeMux()
getRestMux.HandleFunc("/rest/ping", restPing)
getRestMux.HandleFunc("/rest/completion", withModel(m, restGetCompletion))
getRestMux.HandleFunc("/rest/config", restGetConfig)
getRestMux.HandleFunc("/rest/config/sync", restGetConfigInSync)
getRestMux.HandleFunc("/rest/connections", withModel(m, restGetConnections))
getRestMux.HandleFunc("/rest/autocomplete/directory", restGetAutocompleteDirectory)
getRestMux.HandleFunc("/rest/discovery", restGetDiscovery)
getRestMux.HandleFunc("/rest/errors", restGetErrors)
getRestMux.HandleFunc("/rest/events", restGetEvents)
getRestMux.HandleFunc("/rest/ignores", withModel(m, restGetIgnores))
getRestMux.HandleFunc("/rest/lang", restGetLang)
getRestMux.HandleFunc("/rest/model", withModel(m, restGetModel))
getRestMux.HandleFunc("/rest/need", withModel(m, restGetNeed))
getRestMux.HandleFunc("/rest/deviceid", restGetDeviceID)
getRestMux.HandleFunc("/rest/report", withModel(m, restGetReport))
getRestMux.HandleFunc("/rest/system", restGetSystem)
getRestMux.HandleFunc("/rest/upgrade", restGetUpgrade)
getRestMux.HandleFunc("/rest/version", restGetVersion)
getRestMux.HandleFunc("/rest/stats/device", withModel(m, restGetDeviceStats))
// Debug endpoints, not for general use
getRestMux.HandleFunc("/rest/debug/peerCompletion", withModel(m, restGetPeerCompletion))
// The POST handlers
postRestMux := http.NewServeMux()
postRestMux.HandleFunc("/rest/ping", restPing)
postRestMux.HandleFunc("/rest/config", withModel(m, restPostConfig))
postRestMux.HandleFunc("/rest/discovery/hint", restPostDiscoveryHint)
postRestMux.HandleFunc("/rest/error", restPostError)
postRestMux.HandleFunc("/rest/error/clear", restClearErrors)
postRestMux.HandleFunc("/rest/ignores", withModel(m, restPostIgnores))
postRestMux.HandleFunc("/rest/model/override", withModel(m, restPostOverride))
postRestMux.HandleFunc("/rest/reset", restPostReset)
postRestMux.HandleFunc("/rest/restart", restPostRestart)
postRestMux.HandleFunc("/rest/shutdown", restPostShutdown)
postRestMux.HandleFunc("/rest/upgrade", restPostUpgrade)
postRestMux.HandleFunc("/rest/scan", withModel(m, restPostScan))
// A handler that splits requests between the two above and disables
// caching
restMux := noCacheMiddleware(getPostHandler(getRestMux, postRestMux))
// The main routing handler
mux := http.NewServeMux()
mux.Handle("/rest/", restMux)
mux.HandleFunc("/qr/", getQR)
// Serve compiled in assets unless an asset directory was set (for development)
mux.Handle("/", embeddedStatic(assetDir))
// Wrap everything in CSRF protection. The /rest prefix should be
// protected, other requests will grant cookies.
handler := csrfMiddleware("/rest", cfg.APIKey, mux)
// Add our version as a header to responses
handler = withVersionMiddleware(handler)
// Wrap everything in basic auth, if user/password is set.
if len(cfg.User) > 0 && len(cfg.Password) > 0 {
handler = basicAuthAndSessionMiddleware(cfg, handler)
}
// Redirect to HTTPS if we are supposed to
if cfg.UseTLS {
handler = redirectToHTTPSMiddleware(handler)
}
srv := http.Server{
Handler: handler,
ReadTimeout: 2 * time.Second,
}
go func() {
err := srv.Serve(listener)
if err != nil {
panic(err)
}
}()
return nil
}
func getPostHandler(get, post http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case "GET":
get.ServeHTTP(w, r)
case "POST":
post.ServeHTTP(w, r)
default:
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
}
})
}
func redirectToHTTPSMiddleware(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Add a generous access-control-allow-origin header since we may be
// redirecting REST requests over protocols
w.Header().Add("Access-Control-Allow-Origin", "*")
if r.TLS == nil {
// Redirect HTTP requests to HTTPS
r.URL.Host = r.Host
r.URL.Scheme = "https"
http.Redirect(w, r, r.URL.String(), http.StatusFound)
} else {
h.ServeHTTP(w, r)
}
})
}
func noCacheMiddleware(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Cache-Control", "no-cache")
h.ServeHTTP(w, r)
})
}
func withVersionMiddleware(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("X-Syncthing-Version", Version)
h.ServeHTTP(w, r)
})
}
func withModel(m *model.Model, h func(m *model.Model, w http.ResponseWriter, r *http.Request)) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
h(m, w, r)
}
}
func restPing(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(map[string]string{
"ping": "pong",
})
}
func restGetVersion(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(map[string]string{
"version": Version,
"longVersion": LongVersion,
"os": runtime.GOOS,
"arch": runtime.GOARCH,
})
}
func restGetCompletion(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var folder = qs.Get("folder")
var deviceStr = qs.Get("device")
device, err := protocol.DeviceIDFromString(deviceStr)
if err != nil {
http.Error(w, err.Error(), 500)
return
}
res := map[string]float64{
"completion": m.Completion(device, folder),
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restGetModel(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var folder = qs.Get("folder")
var res = make(map[string]interface{})
res["invalid"] = cfg.Folders()[folder].Invalid
globalFiles, globalDeleted, globalBytes := m.GlobalSize(folder)
res["globalFiles"], res["globalDeleted"], res["globalBytes"] = globalFiles, globalDeleted, globalBytes
localFiles, localDeleted, localBytes := m.LocalSize(folder)
res["localFiles"], res["localDeleted"], res["localBytes"] = localFiles, localDeleted, localBytes
needFiles, needBytes := m.NeedSize(folder)
res["needFiles"], res["needBytes"] = needFiles, needBytes
res["inSyncFiles"], res["inSyncBytes"] = globalFiles-needFiles, globalBytes-needBytes
res["state"], res["stateChanged"] = m.State(folder)
res["version"] = m.CurrentLocalVersion(folder) + m.RemoteLocalVersion(folder)
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restPostOverride(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var folder = qs.Get("folder")
go m.Override(folder)
}
func restGetNeed(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var folder = qs.Get("folder")
files := m.NeedFolderFilesLimited(folder, 100, 2500) // max 100 files or 2500 blocks
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(files)
}
func restGetConnections(m *model.Model, w http.ResponseWriter, r *http.Request) {
var res = m.ConnectionStats()
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restGetDeviceStats(m *model.Model, w http.ResponseWriter, r *http.Request) {
var res = m.DeviceStatistics()
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restGetConfig(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(cfg.Raw())
}
func restPostConfig(m *model.Model, w http.ResponseWriter, r *http.Request) {
var newCfg config.Configuration
err := json.NewDecoder(r.Body).Decode(&newCfg)
if err != nil {
l.Warnln("decoding posted config:", err)
http.Error(w, err.Error(), 500)
return
} else {
if newCfg.GUI.Password != cfg.GUI().Password {
if newCfg.GUI.Password != "" {
hash, err := bcrypt.GenerateFromPassword([]byte(newCfg.GUI.Password), 0)
if err != nil {
l.Warnln("bcrypting password:", err)
http.Error(w, err.Error(), 500)
return
} else {
newCfg.GUI.Password = string(hash)
}
}
}
// Start or stop usage reporting as appropriate
if curAcc := cfg.Options().URAccepted; newCfg.Options.URAccepted > curAcc {
// UR was enabled
newCfg.Options.URAccepted = usageReportVersion
err := sendUsageReport(m)
if err != nil {
l.Infoln("Usage report:", err)
}
go usageReportingLoop(m)
} else if newCfg.Options.URAccepted < curAcc {
// UR was disabled
newCfg.Options.URAccepted = -1
stopUsageReporting()
}
// Activate and save
configInSync = !config.ChangeRequiresRestart(cfg.Raw(), newCfg)
cfg.Replace(newCfg)
cfg.Save()
}
}
func restGetConfigInSync(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(map[string]bool{"configInSync": configInSync})
}
func restPostRestart(w http.ResponseWriter, r *http.Request) {
flushResponse(`{"ok": "restarting"}`, w)
go restart()
}
func restPostReset(w http.ResponseWriter, r *http.Request) {
flushResponse(`{"ok": "resetting folders"}`, w)
resetFolders()
go restart()
}
func restPostShutdown(w http.ResponseWriter, r *http.Request) {
flushResponse(`{"ok": "shutting down"}`, w)
go shutdown()
}
func flushResponse(s string, w http.ResponseWriter) {
w.Write([]byte(s + "\n"))
f := w.(http.Flusher)
f.Flush()
}
var cpuUsagePercent [10]float64 // The last ten seconds
var cpuUsageLock sync.RWMutex
func restGetSystem(w http.ResponseWriter, r *http.Request) {
var m runtime.MemStats
runtime.ReadMemStats(&m)
tilde, _ := osutil.ExpandTilde("~")
res := make(map[string]interface{})
res["myID"] = myID.String()
res["goroutines"] = runtime.NumGoroutine()
res["alloc"] = m.Alloc
res["sys"] = m.Sys - m.HeapReleased
res["tilde"] = tilde
if cfg.Options().GlobalAnnEnabled && discoverer != nil {
res["extAnnounceOK"] = discoverer.ExtAnnounceOK()
}
cpuUsageLock.RLock()
var cpusum float64
for _, p := range cpuUsagePercent {
cpusum += p
}
cpuUsageLock.RUnlock()
res["cpuPercent"] = cpusum / 10
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restGetErrors(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
guiErrorsMut.Lock()
json.NewEncoder(w).Encode(map[string][]guiError{"errors": guiErrors})
guiErrorsMut.Unlock()
}
func restPostError(w http.ResponseWriter, r *http.Request) {
bs, _ := ioutil.ReadAll(r.Body)
r.Body.Close()
showGuiError(0, string(bs))
}
func restClearErrors(w http.ResponseWriter, r *http.Request) {
guiErrorsMut.Lock()
guiErrors = []guiError{}
guiErrorsMut.Unlock()
}
func showGuiError(l logger.LogLevel, err string) {
guiErrorsMut.Lock()
guiErrors = append(guiErrors, guiError{time.Now(), err})
if len(guiErrors) > 5 {
guiErrors = guiErrors[len(guiErrors)-5:]
}
guiErrorsMut.Unlock()
}
func restPostDiscoveryHint(w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var device = qs.Get("device")
var addr = qs.Get("addr")
if len(device) != 0 && len(addr) != 0 && discoverer != nil {
discoverer.Hint(device, []string{addr})
}
}
func restGetDiscovery(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
devices := map[string][]discover.CacheEntry{}
if discoverer != nil {
// Device ids can't be marshalled as keys so we need to manually
// rebuild this map using strings. Discoverer may be nil if discovery
// has not started yet.
for device, entries := range discoverer.All() {
devices[device.String()] = entries
}
}
json.NewEncoder(w).Encode(devices)
}
func restGetReport(m *model.Model, w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(reportData(m))
}
func restGetIgnores(m *model.Model, w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
w.Header().Set("Content-Type", "application/json; charset=utf-8")
ignores, patterns, err := m.GetIgnores(qs.Get("folder"))
if err != nil {
http.Error(w, err.Error(), 500)
return
}
json.NewEncoder(w).Encode(map[string][]string{
"ignore": ignores,
"patterns": patterns,
})
}
func restPostIgnores(m *model.Model, w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
var data map[string][]string
err := json.NewDecoder(r.Body).Decode(&data)
r.Body.Close()
if err != nil {
http.Error(w, err.Error(), 500)
return
}
err = m.SetIgnores(qs.Get("folder"), data["ignore"])
if err != nil {
http.Error(w, err.Error(), 500)
return
}
restGetIgnores(m, w, r)
}
func restGetEvents(w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
sinceStr := qs.Get("since")
limitStr := qs.Get("limit")
since, _ := strconv.Atoi(sinceStr)
limit, _ := strconv.Atoi(limitStr)
w.Header().Set("Content-Type", "application/json; charset=utf-8")
// Flush before blocking, to indicate that we've received the request
// and that it should not be retried.
f := w.(http.Flusher)
f.Flush()
evs := eventSub.Since(since, nil)
if 0 < limit && limit < len(evs) {
evs = evs[len(evs)-limit:]
}
json.NewEncoder(w).Encode(evs)
}
func restGetUpgrade(w http.ResponseWriter, r *http.Request) {
rel, err := upgrade.LatestRelease(strings.Contains(Version, "-beta"))
if err != nil {
http.Error(w, err.Error(), 500)
return
}
res := make(map[string]interface{})
res["running"] = Version
res["latest"] = rel.Tag
res["newer"] = upgrade.CompareVersions(rel.Tag, Version) == 1
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restGetDeviceID(w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
idStr := qs.Get("id")
id, err := protocol.DeviceIDFromString(idStr)
w.Header().Set("Content-Type", "application/json; charset=utf-8")
if err == nil {
json.NewEncoder(w).Encode(map[string]string{
"id": id.String(),
})
} else {
json.NewEncoder(w).Encode(map[string]string{
"error": err.Error(),
})
}
}
func restGetLang(w http.ResponseWriter, r *http.Request) {
lang := r.Header.Get("Accept-Language")
var langs []string
for _, l := range strings.Split(lang, ",") {
parts := strings.SplitN(l, ";", 2)
langs = append(langs, strings.ToLower(strings.TrimSpace(parts[0])))
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(langs)
}
func restPostUpgrade(w http.ResponseWriter, r *http.Request) {
rel, err := upgrade.LatestRelease(strings.Contains(Version, "-beta"))
if err != nil {
l.Warnln("getting latest release:", err)
http.Error(w, err.Error(), 500)
return
}
if upgrade.CompareVersions(rel.Tag, Version) == 1 {
err = upgrade.UpgradeTo(rel, GoArchExtra)
if err != nil {
l.Warnln("upgrading:", err)
http.Error(w, err.Error(), 500)
return
}
flushResponse(`{"ok": "restarting"}`, w)
l.Infoln("Upgrading")
stop <- exitUpgrading
}
}
func restPostScan(m *model.Model, w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
folder := qs.Get("folder")
sub := qs.Get("sub")
err := m.ScanFolderSub(folder, sub)
if err != nil {
http.Error(w, err.Error(), 500)
}
}
func getQR(w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var text = qs.Get("text")
code, err := qr.Encode(text, qr.M)
if err != nil {
http.Error(w, "Invalid", 500)
return
}
w.Header().Set("Content-Type", "image/png")
w.Write(code.PNG())
}
func restGetPeerCompletion(m *model.Model, w http.ResponseWriter, r *http.Request) {
tot := map[string]float64{}
count := map[string]float64{}
for _, folder := range cfg.Folders() {
for _, device := range folder.DeviceIDs() {
deviceStr := device.String()
if m.ConnectedTo(device) {
tot[deviceStr] += m.Completion(device, folder.ID)
} else {
tot[deviceStr] = 0
}
count[deviceStr]++
}
}
comp := map[string]int{}
for device := range tot {
comp[device] = int(tot[device] / count[device])
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(comp)
}
func restGetAutocompleteDirectory(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
qs := r.URL.Query()
current := qs.Get("current")
search, _ := osutil.ExpandTilde(current)
pathSeparator := string(os.PathSeparator)
if strings.HasSuffix(current, pathSeparator) && !strings.HasSuffix(search, pathSeparator) {
search = search + pathSeparator
}
subdirectories, _ := filepath.Glob(search + "*")
ret := make([]string, 0, 10)
for _, subdirectory := range subdirectories {
info, err := os.Stat(subdirectory)
if err == nil && info.IsDir() {
ret = append(ret, subdirectory + pathSeparator)
if len(ret) > 9 {
break
}
}
}
json.NewEncoder(w).Encode(ret)
}
func embeddedStatic(assetDir string) http.Handler {
assets := auto.Assets()
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
file := r.URL.Path
if file[0] == '/' {
file = file[1:]
}
if len(file) == 0 {
file = "index.html"
}
if assetDir != "" {
p := filepath.Join(assetDir, filepath.FromSlash(file))
_, err := os.Stat(p)
if err == nil {
http.ServeFile(w, r, p)
return
}
}
bs, ok := assets[file]
if !ok {
http.NotFound(w, r)
return
}
mtype := mimeTypeForFile(file)
if len(mtype) != 0 {
w.Header().Set("Content-Type", mtype)
}
w.Header().Set("Content-Length", fmt.Sprintf("%d", len(bs)))
w.Header().Set("Last-Modified", modt)
w.Write(bs)
})
}
func mimeTypeForFile(file string) string {
	// We use a built-in table of the common types since the system
// TypeByExtension might be unreliable. But if we don't know, we delegate
// to the system.
ext := filepath.Ext(file)
switch ext {
case ".htm", ".html":
return "text/html"
case ".css":
return "text/css"
case ".js":
return "application/javascript"
case ".json":
return "application/json"
case ".png":
return "image/png"
case ".ttf":
return "application/x-font-ttf"
case ".woff":
return "application/x-font-woff"
default:
return mime.TypeByExtension(ext)
}
}
| cmd/syncthing/gui.go | 1 | https://github.com/syncthing/syncthing/commit/59a85c1d751c85e585ec93398c3ba5c50bdef91f | [
0.9992536902427673,
0.017434101551771164,
0.0001653136860113591,
0.00017623984604142606,
0.1186683177947998
] |
{
"id": 3,
"code_window": [
"\t}\n",
"\treturn\n",
"}\n",
"\n",
"// NeedFiles returns the list of currently needed files, stopping at maxFiles\n",
"// files or maxBlocks blocks. Limits <= 0 are ignored.\n",
"func (m *Model) NeedFolderFilesLimited(folder string, maxFiles, maxBlocks int) []protocol.FileInfo {\n",
"\tdefer m.leveldbPanicWorkaround()\n",
"\n",
"\tm.fmut.RLock()\n",
"\tdefer m.fmut.RUnlock()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// files. Limit <= 0 is ignored.\n",
"func (m *Model) NeedFolderFilesLimited(folder string, maxFiles int) []protocol.FileInfoTruncated {\n"
],
"file_path": "internal/model/model.go",
"type": "replace",
"edit_start_line_idx": 398
} | // Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
// Package config implements reading and writing of the syncthing configuration file.
package config
import (
"encoding/xml"
"fmt"
"io"
"os"
"path/filepath"
"reflect"
"sort"
"strconv"
"code.google.com/p/go.crypto/bcrypt"
"github.com/calmh/logger"
"github.com/syncthing/syncthing/internal/osutil"
"github.com/syncthing/syncthing/internal/protocol"
)
var l = logger.DefaultLogger
const CurrentVersion = 6
type Configuration struct {
Version int `xml:"version,attr"`
Folders []FolderConfiguration `xml:"folder"`
Devices []DeviceConfiguration `xml:"device"`
GUI GUIConfiguration `xml:"gui"`
Options OptionsConfiguration `xml:"options"`
XMLName xml.Name `xml:"configuration" json:"-"`
OriginalVersion int `xml:"-" json:"-"` // The version we read from disk, before any conversion
Deprecated_Repositories []FolderConfiguration `xml:"repository" json:"-"`
Deprecated_Nodes []DeviceConfiguration `xml:"node" json:"-"`
}
type FolderConfiguration struct {
ID string `xml:"id,attr"`
Path string `xml:"path,attr"`
Devices []FolderDeviceConfiguration `xml:"device"`
ReadOnly bool `xml:"ro,attr"`
RescanIntervalS int `xml:"rescanIntervalS,attr" default:"60"`
IgnorePerms bool `xml:"ignorePerms,attr"`
Versioning VersioningConfiguration `xml:"versioning"`
LenientMtimes bool `xml:"lenientMtimes"`
Invalid string `xml:"-"` // Set at runtime when there is an error, not saved
deviceIDs []protocol.DeviceID
Deprecated_Directory string `xml:"directory,omitempty,attr" json:"-"`
Deprecated_Nodes []FolderDeviceConfiguration `xml:"node" json:"-"`
}
func (f *FolderConfiguration) CreateMarker() error {
if !f.HasMarker() {
marker := filepath.Join(f.Path, ".stfolder")
fd, err := os.Create(marker)
if err != nil {
return err
}
fd.Close()
osutil.HideFile(marker)
}
return nil
}
func (f *FolderConfiguration) HasMarker() bool {
_, err := os.Stat(filepath.Join(f.Path, ".stfolder"))
if err != nil {
return false
}
return true
}
func (r *FolderConfiguration) DeviceIDs() []protocol.DeviceID {
if r.deviceIDs == nil {
for _, n := range r.Devices {
r.deviceIDs = append(r.deviceIDs, n.DeviceID)
}
}
return r.deviceIDs
}
type VersioningConfiguration struct {
Type string `xml:"type,attr"`
Params map[string]string
}
type InternalVersioningConfiguration struct {
Type string `xml:"type,attr,omitempty"`
Params []InternalParam `xml:"param"`
}
type InternalParam struct {
Key string `xml:"key,attr"`
Val string `xml:"val,attr"`
}
func (c *VersioningConfiguration) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
var tmp InternalVersioningConfiguration
tmp.Type = c.Type
for k, v := range c.Params {
tmp.Params = append(tmp.Params, InternalParam{k, v})
}
return e.EncodeElement(tmp, start)
}
func (c *VersioningConfiguration) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
var tmp InternalVersioningConfiguration
err := d.DecodeElement(&tmp, &start)
if err != nil {
return err
}
c.Type = tmp.Type
c.Params = make(map[string]string, len(tmp.Params))
for _, p := range tmp.Params {
c.Params[p.Key] = p.Val
}
return nil
}
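// Illustrative sketch (assumption, not part of the original source): with the
// custom (un)marshalling above, a value such as
//
//	VersioningConfiguration{Type: "simple", Params: map[string]string{"keep": "5"}}
//
// is encoded roughly as
//
//	<versioning type="simple"><param key="keep" val="5"></param></versioning>
//
// and decoded back into the same map form.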
type DeviceConfiguration struct {
DeviceID protocol.DeviceID `xml:"id,attr"`
Name string `xml:"name,attr,omitempty"`
Addresses []string `xml:"address,omitempty"`
Compression bool `xml:"compression,attr"`
CertName string `xml:"certName,attr,omitempty"`
Introducer bool `xml:"introducer,attr"`
}
type FolderDeviceConfiguration struct {
DeviceID protocol.DeviceID `xml:"id,attr"`
Deprecated_Name string `xml:"name,attr,omitempty" json:"-"`
Deprecated_Addresses []string `xml:"address,omitempty" json:"-"`
}
type OptionsConfiguration struct {
ListenAddress []string `xml:"listenAddress" default:"0.0.0.0:22000"`
GlobalAnnServer string `xml:"globalAnnounceServer" default:"announce.syncthing.net:22026"`
GlobalAnnEnabled bool `xml:"globalAnnounceEnabled" default:"true"`
LocalAnnEnabled bool `xml:"localAnnounceEnabled" default:"true"`
LocalAnnPort int `xml:"localAnnouncePort" default:"21025"`
LocalAnnMCAddr string `xml:"localAnnounceMCAddr" default:"[ff32::5222]:21026"`
MaxSendKbps int `xml:"maxSendKbps"`
MaxRecvKbps int `xml:"maxRecvKbps"`
ReconnectIntervalS int `xml:"reconnectionIntervalS" default:"60"`
StartBrowser bool `xml:"startBrowser" default:"true"`
UPnPEnabled bool `xml:"upnpEnabled" default:"true"`
UPnPLease int `xml:"upnpLeaseMinutes" default:"0"`
UPnPRenewal int `xml:"upnpRenewalMinutes" default:"30"`
URAccepted int `xml:"urAccepted"` // Accepted usage reporting version; 0 for off (undecided), -1 for off (permanently)
RestartOnWakeup bool `xml:"restartOnWakeup" default:"true"`
AutoUpgradeIntervalH int `xml:"autoUpgradeIntervalH" default:"12"` // 0 for off
KeepTemporariesH int `xml:"keepTemporariesH" default:"24"` // 0 for off
CacheIgnoredFiles bool `xml:"cacheIgnoredFiles" default:"true"`
Deprecated_RescanIntervalS int `xml:"rescanIntervalS,omitempty" json:"-"`
Deprecated_UREnabled bool `xml:"urEnabled,omitempty" json:"-"`
Deprecated_URDeclined bool `xml:"urDeclined,omitempty" json:"-"`
Deprecated_ReadOnly bool `xml:"readOnly,omitempty" json:"-"`
Deprecated_GUIEnabled bool `xml:"guiEnabled,omitempty" json:"-"`
Deprecated_GUIAddress string `xml:"guiAddress,omitempty" json:"-"`
}
type GUIConfiguration struct {
Enabled bool `xml:"enabled,attr" default:"true"`
Address string `xml:"address" default:"127.0.0.1:8080"`
User string `xml:"user,omitempty"`
Password string `xml:"password,omitempty"`
UseTLS bool `xml:"tls,attr"`
APIKey string `xml:"apikey,omitempty"`
}
func New(myID protocol.DeviceID) Configuration {
var cfg Configuration
cfg.Version = CurrentVersion
cfg.OriginalVersion = CurrentVersion
setDefaults(&cfg)
setDefaults(&cfg.Options)
setDefaults(&cfg.GUI)
cfg.prepare(myID)
return cfg
}
func ReadXML(r io.Reader, myID protocol.DeviceID) (Configuration, error) {
var cfg Configuration
setDefaults(&cfg)
setDefaults(&cfg.Options)
setDefaults(&cfg.GUI)
err := xml.NewDecoder(r).Decode(&cfg)
cfg.OriginalVersion = cfg.Version
cfg.prepare(myID)
return cfg, err
}
func (cfg *Configuration) WriteXML(w io.Writer) error {
e := xml.NewEncoder(w)
e.Indent("", " ")
err := e.Encode(cfg)
if err != nil {
return err
}
_, err = w.Write([]byte("\n"))
return err
}
func (cfg *Configuration) prepare(myID protocol.DeviceID) {
fillNilSlices(&cfg.Options)
cfg.Options.ListenAddress = uniqueStrings(cfg.Options.ListenAddress)
// Initialize an empty slice for folders if the config has none
if cfg.Folders == nil {
cfg.Folders = []FolderConfiguration{}
}
// Check for missing, bad or duplicate folder ID:s
var seenFolders = map[string]*FolderConfiguration{}
var uniqueCounter int
for i := range cfg.Folders {
folder := &cfg.Folders[i]
if len(folder.Path) == 0 {
folder.Invalid = "no directory configured"
continue
}
if folder.ID == "" {
folder.ID = "default"
}
if seen, ok := seenFolders[folder.ID]; ok {
l.Warnf("Multiple folders with ID %q; disabling", folder.ID)
seen.Invalid = "duplicate folder ID"
if seen.ID == folder.ID {
uniqueCounter++
seen.ID = fmt.Sprintf("%s~%d", folder.ID, uniqueCounter)
}
folder.Invalid = "duplicate folder ID"
uniqueCounter++
folder.ID = fmt.Sprintf("%s~%d", folder.ID, uniqueCounter)
} else {
seenFolders[folder.ID] = folder
}
}
if cfg.Options.Deprecated_URDeclined {
cfg.Options.URAccepted = -1
}
cfg.Options.Deprecated_URDeclined = false
cfg.Options.Deprecated_UREnabled = false
// Upgrade to v1 configuration if appropriate
if cfg.Version == 1 {
convertV1V2(cfg)
}
// Upgrade to v3 configuration if appropriate
if cfg.Version == 2 {
convertV2V3(cfg)
}
// Upgrade to v4 configuration if appropriate
if cfg.Version == 3 {
convertV3V4(cfg)
}
// Upgrade to v5 configuration if appropriate
if cfg.Version == 4 {
convertV4V5(cfg)
}
// Upgrade to v6 configuration if appropriate
if cfg.Version == 5 {
convertV5V6(cfg)
}
// Hash old cleartext passwords
if len(cfg.GUI.Password) > 0 && cfg.GUI.Password[0] != '$' {
hash, err := bcrypt.GenerateFromPassword([]byte(cfg.GUI.Password), 0)
if err != nil {
l.Warnln("bcrypting password:", err)
} else {
cfg.GUI.Password = string(hash)
}
}
// Build a list of available devices
existingDevices := make(map[protocol.DeviceID]bool)
for _, device := range cfg.Devices {
existingDevices[device.DeviceID] = true
}
// Ensure this device is present in the config
if !existingDevices[myID] {
myName, _ := os.Hostname()
cfg.Devices = append(cfg.Devices, DeviceConfiguration{
DeviceID: myID,
Name: myName,
})
existingDevices[myID] = true
}
sort.Sort(DeviceConfigurationList(cfg.Devices))
// Ensure that any loose devices are not present in the wrong places
// Ensure that there are no duplicate devices
for i := range cfg.Folders {
cfg.Folders[i].Devices = ensureDevicePresent(cfg.Folders[i].Devices, myID)
cfg.Folders[i].Devices = ensureExistingDevices(cfg.Folders[i].Devices, existingDevices)
cfg.Folders[i].Devices = ensureNoDuplicates(cfg.Folders[i].Devices)
sort.Sort(FolderDeviceConfigurationList(cfg.Folders[i].Devices))
}
// An empty address list is equivalent to a single "dynamic" entry
for i := range cfg.Devices {
n := &cfg.Devices[i]
if len(n.Addresses) == 0 || len(n.Addresses) == 1 && n.Addresses[0] == "" {
n.Addresses = []string{"dynamic"}
}
}
}
// ChangeRequiresRestart returns true if updating the configuration requires a
// complete restart.
func ChangeRequiresRestart(from, to Configuration) bool {
// Adding, removing or changing folders requires restart
if !reflect.DeepEqual(from.Folders, to.Folders) {
return true
}
	// Removing a device requires restart
toDevs := make(map[protocol.DeviceID]bool, len(from.Devices))
for _, dev := range to.Devices {
toDevs[dev.DeviceID] = true
}
for _, dev := range from.Devices {
if _, ok := toDevs[dev.DeviceID]; !ok {
return true
}
}
// All of the generic options require restart
if !reflect.DeepEqual(from.Options, to.Options) || !reflect.DeepEqual(from.GUI, to.GUI) {
return true
}
return false
}
func convertV5V6(cfg *Configuration) {
// Added ".stfolder" file at folder roots to identify mount issues
// Doesn't affect the config itself, but uses config migrations to identify
// the migration point.
for _, folder := range Wrap("", *cfg).Folders() {
		// Best attempt; if it fails, it fails, and the user will have to fix
// it up manually, as the repo will not get started.
folder.CreateMarker()
}
cfg.Version = 6
}
func convertV4V5(cfg *Configuration) {
// Renamed a bunch of fields in the structs.
if cfg.Deprecated_Nodes == nil {
cfg.Deprecated_Nodes = []DeviceConfiguration{}
}
if cfg.Deprecated_Repositories == nil {
cfg.Deprecated_Repositories = []FolderConfiguration{}
}
cfg.Devices = cfg.Deprecated_Nodes
cfg.Folders = cfg.Deprecated_Repositories
for i := range cfg.Folders {
cfg.Folders[i].Path = cfg.Folders[i].Deprecated_Directory
cfg.Folders[i].Deprecated_Directory = ""
cfg.Folders[i].Devices = cfg.Folders[i].Deprecated_Nodes
cfg.Folders[i].Deprecated_Nodes = nil
}
cfg.Deprecated_Nodes = nil
cfg.Deprecated_Repositories = nil
cfg.Version = 5
}
func convertV3V4(cfg *Configuration) {
	// In previous versions, the rescan interval was common to all folders.
	// From now on it can be set independently. We have to make sure that after
	// the upgrade an individual rescan interval is defined for every existing folder.
for i := range cfg.Deprecated_Repositories {
cfg.Deprecated_Repositories[i].RescanIntervalS = cfg.Options.Deprecated_RescanIntervalS
}
cfg.Options.Deprecated_RescanIntervalS = 0
// In previous versions, folders held full device configurations.
// Since that's the only place where device configs were in V1, we still have
// to define the deprecated fields to be able to upgrade from V1 to V4.
for i, folder := range cfg.Deprecated_Repositories {
for j := range folder.Deprecated_Nodes {
rncfg := cfg.Deprecated_Repositories[i].Deprecated_Nodes[j]
rncfg.Deprecated_Name = ""
rncfg.Deprecated_Addresses = nil
}
}
cfg.Version = 4
}
func convertV2V3(cfg *Configuration) {
// In previous versions, compression was always on. When upgrading, enable
	// compression on all existing devices. New devices will get compression on by
// default by the GUI.
for i := range cfg.Deprecated_Nodes {
cfg.Deprecated_Nodes[i].Compression = true
}
// The global discovery format and port number changed in v0.9. Having the
// default announce server but old port number is guaranteed to be legacy.
if cfg.Options.GlobalAnnServer == "announce.syncthing.net:22025" {
cfg.Options.GlobalAnnServer = "announce.syncthing.net:22026"
}
cfg.Version = 3
}
func convertV1V2(cfg *Configuration) {
// Collect the list of devices.
// Replace device configs inside folders with only a reference to the
// device ID. Set all folders to read only if the global read only flag is
// set.
var devices = map[string]FolderDeviceConfiguration{}
for i, folder := range cfg.Deprecated_Repositories {
cfg.Deprecated_Repositories[i].ReadOnly = cfg.Options.Deprecated_ReadOnly
for j, device := range folder.Deprecated_Nodes {
id := device.DeviceID.String()
if _, ok := devices[id]; !ok {
devices[id] = device
}
cfg.Deprecated_Repositories[i].Deprecated_Nodes[j] = FolderDeviceConfiguration{DeviceID: device.DeviceID}
}
}
cfg.Options.Deprecated_ReadOnly = false
// Set and sort the list of devices.
for _, device := range devices {
cfg.Deprecated_Nodes = append(cfg.Deprecated_Nodes, DeviceConfiguration{
DeviceID: device.DeviceID,
Name: device.Deprecated_Name,
Addresses: device.Deprecated_Addresses,
})
}
sort.Sort(DeviceConfigurationList(cfg.Deprecated_Nodes))
// GUI
cfg.GUI.Address = cfg.Options.Deprecated_GUIAddress
cfg.GUI.Enabled = cfg.Options.Deprecated_GUIEnabled
cfg.Options.Deprecated_GUIEnabled = false
cfg.Options.Deprecated_GUIAddress = ""
cfg.Version = 2
}
func setDefaults(data interface{}) error {
s := reflect.ValueOf(data).Elem()
t := s.Type()
for i := 0; i < s.NumField(); i++ {
f := s.Field(i)
tag := t.Field(i).Tag
v := tag.Get("default")
if len(v) > 0 {
switch f.Interface().(type) {
case string:
f.SetString(v)
case int:
i, err := strconv.ParseInt(v, 10, 64)
if err != nil {
return err
}
f.SetInt(i)
case bool:
f.SetBool(v == "true")
case []string:
// We don't do anything with string slices here. Any default
// we set will be appended to by the XML decoder, so we fill
// those after decoding.
default:
panic(f.Type())
}
}
}
return nil
}
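// Illustrative sketch (assumption, not part of the original source): the
// `default` struct tags above are applied by setDefaults, so
//
//	var opts OptionsConfiguration
//	setDefaults(&opts)
//	// opts.GlobalAnnServer == "announce.syncthing.net:22026"
//	// opts.StartBrowser == true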
// fillNilSlices sets default value on slices that are still nil.
func fillNilSlices(data interface{}) error {
s := reflect.ValueOf(data).Elem()
t := s.Type()
for i := 0; i < s.NumField(); i++ {
f := s.Field(i)
tag := t.Field(i).Tag
v := tag.Get("default")
if len(v) > 0 {
switch f.Interface().(type) {
case []string:
if f.IsNil() {
rv := reflect.MakeSlice(reflect.TypeOf([]string{}), 1, 1)
rv.Index(0).SetString(v)
f.Set(rv)
}
}
}
}
return nil
}
func uniqueStrings(ss []string) []string {
var m = make(map[string]bool, len(ss))
for _, s := range ss {
m[s] = true
}
var us = make([]string, 0, len(m))
for k := range m {
us = append(us, k)
}
return us
}
func ensureDevicePresent(devices []FolderDeviceConfiguration, myID protocol.DeviceID) []FolderDeviceConfiguration {
for _, device := range devices {
if device.DeviceID.Equals(myID) {
return devices
}
}
devices = append(devices, FolderDeviceConfiguration{
DeviceID: myID,
})
return devices
}
func ensureExistingDevices(devices []FolderDeviceConfiguration, existingDevices map[protocol.DeviceID]bool) []FolderDeviceConfiguration {
count := len(devices)
i := 0
loop:
for i < count {
if _, ok := existingDevices[devices[i].DeviceID]; !ok {
devices[i] = devices[count-1]
count--
continue loop
}
i++
}
return devices[0:count]
}
func ensureNoDuplicates(devices []FolderDeviceConfiguration) []FolderDeviceConfiguration {
count := len(devices)
i := 0
seenDevices := make(map[protocol.DeviceID]bool)
loop:
for i < count {
id := devices[i].DeviceID
if _, ok := seenDevices[id]; ok {
devices[i] = devices[count-1]
count--
continue loop
}
seenDevices[id] = true
i++
}
return devices[0:count]
}
type DeviceConfigurationList []DeviceConfiguration
func (l DeviceConfigurationList) Less(a, b int) bool {
return l[a].DeviceID.Compare(l[b].DeviceID) == -1
}
func (l DeviceConfigurationList) Swap(a, b int) {
l[a], l[b] = l[b], l[a]
}
func (l DeviceConfigurationList) Len() int {
return len(l)
}
type FolderDeviceConfigurationList []FolderDeviceConfiguration
func (l FolderDeviceConfigurationList) Less(a, b int) bool {
return l[a].DeviceID.Compare(l[b].DeviceID) == -1
}
func (l FolderDeviceConfigurationList) Swap(a, b int) {
l[a], l[b] = l[b], l[a]
}
func (l FolderDeviceConfigurationList) Len() int {
return len(l)
}
| internal/config/config.go | 0 | https://github.com/syncthing/syncthing/commit/59a85c1d751c85e585ec93398c3ba5c50bdef91f | [
0.5864081978797913,
0.009827544912695885,
0.0001659596455283463,
0.0002069711481453851,
0.07208753377199173
] |
{
"id": 3,
"code_window": [
"\t}\n",
"\treturn\n",
"}\n",
"\n",
"// NeedFiles returns the list of currently needed files, stopping at maxFiles\n",
"// files or maxBlocks blocks. Limits <= 0 are ignored.\n",
"func (m *Model) NeedFolderFilesLimited(folder string, maxFiles, maxBlocks int) []protocol.FileInfo {\n",
"\tdefer m.leveldbPanicWorkaround()\n",
"\n",
"\tm.fmut.RLock()\n",
"\tdefer m.fmut.RUnlock()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// files. Limit <= 0 is ignored.\n",
"func (m *Model) NeedFolderFilesLimited(folder string, maxFiles int) []protocol.FileInfoTruncated {\n"
],
"file_path": "internal/model/model.go",
"type": "replace",
"edit_start_line_idx": 398
} | // Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
package config
import (
"io/ioutil"
"os"
"path/filepath"
"sync"
"github.com/syncthing/syncthing/internal/events"
"github.com/syncthing/syncthing/internal/osutil"
"github.com/syncthing/syncthing/internal/protocol"
)
// An interface to handle configuration changes, and a wrapper type á la
// http.Handler
type Handler interface {
Changed(Configuration) error
}
type HandlerFunc func(Configuration) error
func (fn HandlerFunc) Changed(cfg Configuration) error {
return fn(cfg)
}
// A wrapper around a Configuration that manages loads, saves and published
// notifications of changes to registered Handlers
type ConfigWrapper struct {
cfg Configuration
path string
deviceMap map[protocol.DeviceID]DeviceConfiguration
folderMap map[string]FolderConfiguration
replaces chan Configuration
mut sync.Mutex
subs []Handler
sMut sync.Mutex
}
// Wrap wraps an existing Configuration structure and ties it to a file on
// disk.
func Wrap(path string, cfg Configuration) *ConfigWrapper {
w := &ConfigWrapper{cfg: cfg, path: path}
w.replaces = make(chan Configuration)
go w.Serve()
return w
}
// Load loads an existing file on disk and returns a new configuration
// wrapper.
func Load(path string, myID protocol.DeviceID) (*ConfigWrapper, error) {
fd, err := os.Open(path)
if err != nil {
return nil, err
}
defer fd.Close()
cfg, err := ReadXML(fd, myID)
if err != nil {
return nil, err
}
return Wrap(path, cfg), nil
}
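// Illustrative usage sketch (assumption, not part of the original source):
//
//	w, err := config.Load(path, myID)
//	if err != nil {
//		// handle the error
//	}
//	w.Subscribe(config.HandlerFunc(func(cfg config.Configuration) error {
//		// react to configuration changes published via Replace/Set* calls
//		return nil
//	}))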
// Serve handles configuration replace events and calls any interested
// handlers. It is started automatically by Wrap() and Load() and should not
// be run manually.
func (w *ConfigWrapper) Serve() {
for cfg := range w.replaces {
w.sMut.Lock()
subs := w.subs
w.sMut.Unlock()
for _, h := range subs {
h.Changed(cfg)
}
}
}
// Stop stops the Serve() loop. Set and Replace operations will panic after a
// Stop.
func (w *ConfigWrapper) Stop() {
close(w.replaces)
}
// Subscribe registers the given handler to be called on any future
// configuration changes.
func (w *ConfigWrapper) Subscribe(h Handler) {
w.sMut.Lock()
w.subs = append(w.subs, h)
w.sMut.Unlock()
}
// Raw returns the currently wrapped Configuration object.
func (w *ConfigWrapper) Raw() Configuration {
return w.cfg
}
// Replace swaps the current configuration object for the given one.
func (w *ConfigWrapper) Replace(cfg Configuration) {
w.mut.Lock()
defer w.mut.Unlock()
w.cfg = cfg
w.deviceMap = nil
w.folderMap = nil
w.replaces <- cfg
}
// Devices returns a map of devices. Device structures should not be changed,
// other than for the purpose of updating via SetDevice().
func (w *ConfigWrapper) Devices() map[protocol.DeviceID]DeviceConfiguration {
w.mut.Lock()
defer w.mut.Unlock()
if w.deviceMap == nil {
w.deviceMap = make(map[protocol.DeviceID]DeviceConfiguration, len(w.cfg.Devices))
for _, dev := range w.cfg.Devices {
w.deviceMap[dev.DeviceID] = dev
}
}
return w.deviceMap
}
// SetDevice adds a new device to the configuration, or overwrites an existing
// device with the same ID.
func (w *ConfigWrapper) SetDevice(dev DeviceConfiguration) {
w.mut.Lock()
defer w.mut.Unlock()
w.deviceMap = nil
for i := range w.cfg.Devices {
if w.cfg.Devices[i].DeviceID == dev.DeviceID {
w.cfg.Devices[i] = dev
w.replaces <- w.cfg
return
}
}
w.cfg.Devices = append(w.cfg.Devices, dev)
w.replaces <- w.cfg
}
// Folders returns a map of folders. Folder structures should not be changed,
// other than for the purpose of updating via SetFolder().
func (w *ConfigWrapper) Folders() map[string]FolderConfiguration {
w.mut.Lock()
defer w.mut.Unlock()
if w.folderMap == nil {
w.folderMap = make(map[string]FolderConfiguration, len(w.cfg.Folders))
for _, fld := range w.cfg.Folders {
path, err := osutil.ExpandTilde(fld.Path)
if err != nil {
l.Warnln("home:", err)
continue
}
fld.Path = path
w.folderMap[fld.ID] = fld
}
}
return w.folderMap
}
// SetFolder adds a new folder to the configuration, or overwrites an existing
// folder with the same ID.
func (w *ConfigWrapper) SetFolder(fld FolderConfiguration) {
w.mut.Lock()
defer w.mut.Unlock()
w.folderMap = nil
for i := range w.cfg.Folders {
if w.cfg.Folders[i].ID == fld.ID {
w.cfg.Folders[i] = fld
w.replaces <- w.cfg
return
}
}
w.cfg.Folders = append(w.cfg.Folders, fld)
w.replaces <- w.cfg
}
// Options returns the current options configuration object.
func (w *ConfigWrapper) Options() OptionsConfiguration {
w.mut.Lock()
defer w.mut.Unlock()
return w.cfg.Options
}
// SetOptions replaces the current options configuration object.
func (w *ConfigWrapper) SetOptions(opts OptionsConfiguration) {
w.mut.Lock()
defer w.mut.Unlock()
w.cfg.Options = opts
w.replaces <- w.cfg
}
// GUI returns the current GUI configuration object.
func (w *ConfigWrapper) GUI() GUIConfiguration {
w.mut.Lock()
defer w.mut.Unlock()
return w.cfg.GUI
}
// SetGUI replaces the current GUI configuration object.
func (w *ConfigWrapper) SetGUI(gui GUIConfiguration) {
w.mut.Lock()
defer w.mut.Unlock()
w.cfg.GUI = gui
w.replaces <- w.cfg
}
// InvalidateFolder sets the invalid marker on the given folder.
func (w *ConfigWrapper) InvalidateFolder(id string, err string) {
w.mut.Lock()
defer w.mut.Unlock()
w.folderMap = nil
for i := range w.cfg.Folders {
if w.cfg.Folders[i].ID == id {
w.cfg.Folders[i].Invalid = err
w.replaces <- w.cfg
return
}
}
}
// Save writes the configuration to disk, and generates a ConfigSaved event.
func (w *ConfigWrapper) Save() error {
fd, err := ioutil.TempFile(filepath.Dir(w.path), "cfg")
if err != nil {
return err
}
err = w.cfg.WriteXML(fd)
if err != nil {
fd.Close()
return err
}
err = fd.Close()
if err != nil {
return err
}
events.Default.Log(events.ConfigSaved, w.cfg)
return osutil.Rename(fd.Name(), w.path)
}
| internal/config/wrapper.go | 0 | https://github.com/syncthing/syncthing/commit/59a85c1d751c85e585ec93398c3ba5c50bdef91f | [
0.012395945377647877,
0.0010546990670263767,
0.00016679379041306674,
0.00022173051547724754,
0.002440999960526824
] |
{
"id": 3,
"code_window": [
"\t}\n",
"\treturn\n",
"}\n",
"\n",
"// NeedFiles returns the list of currently needed files, stopping at maxFiles\n",
"// files or maxBlocks blocks. Limits <= 0 are ignored.\n",
"func (m *Model) NeedFolderFilesLimited(folder string, maxFiles, maxBlocks int) []protocol.FileInfo {\n",
"\tdefer m.leveldbPanicWorkaround()\n",
"\n",
"\tm.fmut.RLock()\n",
"\tdefer m.fmut.RUnlock()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// files. Limit <= 0 is ignored.\n",
"func (m *Model) NeedFolderFilesLimited(folder string, maxFiles int) []protocol.FileInfoTruncated {\n"
],
"file_path": "internal/model/model.go",
"type": "replace",
"edit_start_line_idx": 398
} | // Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
package protocol
import (
"bytes"
"crypto/sha256"
"encoding/base32"
"errors"
"fmt"
"regexp"
"strings"
"github.com/syncthing/syncthing/internal/luhn"
)
type DeviceID [32]byte
var LocalDeviceID = DeviceID{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
// NewDeviceID generates a new device ID from the raw bytes of a certificate
func NewDeviceID(rawCert []byte) DeviceID {
var n DeviceID
hf := sha256.New()
hf.Write(rawCert)
hf.Sum(n[:0])
return n
}
func DeviceIDFromString(s string) (DeviceID, error) {
var n DeviceID
err := n.UnmarshalText([]byte(s))
return n, err
}
func DeviceIDFromBytes(bs []byte) DeviceID {
var n DeviceID
if len(bs) != len(n) {
panic("incorrect length of byte slice representing device ID")
}
copy(n[:], bs)
return n
}
// String returns the canonical string representation of the device ID
func (n DeviceID) String() string {
id := base32.StdEncoding.EncodeToString(n[:])
id = strings.Trim(id, "=")
id, err := luhnify(id)
if err != nil {
// Should never happen
panic(err)
}
id = chunkify(id)
return id
}
func (n DeviceID) GoString() string {
return n.String()
}
func (n DeviceID) Compare(other DeviceID) int {
return bytes.Compare(n[:], other[:])
}
func (n DeviceID) Equals(other DeviceID) bool {
return bytes.Compare(n[:], other[:]) == 0
}
func (n *DeviceID) MarshalText() ([]byte, error) {
return []byte(n.String()), nil
}
func (n *DeviceID) UnmarshalText(bs []byte) error {
id := string(bs)
id = strings.Trim(id, "=")
id = strings.ToUpper(id)
id = untypeoify(id)
id = unchunkify(id)
var err error
switch len(id) {
case 56:
// New style, with check digits
id, err = unluhnify(id)
if err != nil {
return err
}
fallthrough
case 52:
// Old style, no check digits
dec, err := base32.StdEncoding.DecodeString(id + "====")
if err != nil {
return err
}
copy(n[:], dec)
return nil
default:
return errors.New("device ID invalid: incorrect length")
}
}
func luhnify(s string) (string, error) {
if len(s) != 52 {
panic("unsupported string length")
}
res := make([]string, 0, 4)
for i := 0; i < 4; i++ {
p := s[i*13 : (i+1)*13]
l, err := luhn.Base32.Generate(p)
if err != nil {
return "", err
}
res = append(res, fmt.Sprintf("%s%c", p, l))
}
return res[0] + res[1] + res[2] + res[3], nil
}
func unluhnify(s string) (string, error) {
if len(s) != 56 {
return "", fmt.Errorf("unsupported string length %d", len(s))
}
res := make([]string, 0, 4)
for i := 0; i < 4; i++ {
p := s[i*14 : (i+1)*14-1]
l, err := luhn.Base32.Generate(p)
if err != nil {
return "", err
}
if g := fmt.Sprintf("%s%c", p, l); g != s[i*14:(i+1)*14] {
return "", errors.New("check digit incorrect")
}
res = append(res, p)
}
return res[0] + res[1] + res[2] + res[3], nil
}
func chunkify(s string) string {
s = regexp.MustCompile("(.{7})").ReplaceAllString(s, "$1-")
s = strings.Trim(s, "-")
return s
}
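// Added illustrative comment (not part of the original file), for example:
// chunkify("ABCDEFGHIJKLMN") == "ABCDEFG-HIJKLMN".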
func unchunkify(s string) string {
s = strings.Replace(s, "-", "", -1)
s = strings.Replace(s, " ", "", -1)
return s
}
func untypeoify(s string) string {
s = strings.Replace(s, "0", "O", -1)
s = strings.Replace(s, "1", "I", -1)
s = strings.Replace(s, "8", "B", -1)
return s
}
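// Added illustrative sketch (not part of the original file): a hypothetical
// helper showing how the exported API above composes; only functions defined
// in this file are assumed.
func exampleDeviceIDRoundTrip(rawCert []byte) bool {
id := NewDeviceID(rawCert) // SHA-256 of the certificate bytes
s := id.String() // base32 without padding, luhn check digits, 7-char chunks
parsed, err := DeviceIDFromString(s) // reverses chunking, 0/1/8 typo fixes and check digits
return err == nil && parsed.Equals(id)
}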
| internal/protocol/deviceid.go | 0 | https://github.com/syncthing/syncthing/commit/59a85c1d751c85e585ec93398c3ba5c50bdef91f | [
0.00045464737922884524,
0.0002004569541895762,
0.00016650330508127809,
0.00017183664022013545,
0.00006792362546548247
] |
{
"id": 4,
"code_window": [
"\tdefer m.leveldbPanicWorkaround()\n",
"\n",
"\tm.fmut.RLock()\n",
"\tdefer m.fmut.RUnlock()\n",
"\tnblocks := 0\n",
"\tif rf, ok := m.folderFiles[folder]; ok {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [],
"file_path": "internal/model/model.go",
"type": "replace",
"edit_start_line_idx": 404
} | // Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
package model
import (
"bufio"
"crypto/tls"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/syncthing/syncthing/internal/config"
"github.com/syncthing/syncthing/internal/events"
"github.com/syncthing/syncthing/internal/files"
"github.com/syncthing/syncthing/internal/ignore"
"github.com/syncthing/syncthing/internal/lamport"
"github.com/syncthing/syncthing/internal/osutil"
"github.com/syncthing/syncthing/internal/protocol"
"github.com/syncthing/syncthing/internal/scanner"
"github.com/syncthing/syncthing/internal/stats"
"github.com/syncthing/syncthing/internal/symlinks"
"github.com/syncthing/syncthing/internal/versioner"
"github.com/syndtr/goleveldb/leveldb"
)
type folderState int
const (
FolderIdle folderState = iota
FolderScanning
FolderSyncing
FolderCleaning
)
func (s folderState) String() string {
switch s {
case FolderIdle:
return "idle"
case FolderScanning:
return "scanning"
case FolderCleaning:
return "cleaning"
case FolderSyncing:
return "syncing"
default:
return "unknown"
}
}
// How many files to send in each Index/IndexUpdate message.
const (
indexTargetSize = 250 * 1024 // Aim for making index messages no larger than 250 KiB (uncompressed)
indexPerFileSize = 250 // Each FileInfo is approximately this big, in bytes, excluding BlockInfos
IndexPerBlockSize = 40 // Each BlockInfo is approximately this big
indexBatchSize = 1000 // Either way, don't include more files than this
)
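// Added illustrative note (not part of the original file): with the values
// above, a file with 100 blocks costs roughly indexPerFileSize +
// 100*IndexPerBlockSize = 250 + 4000 = 4250 bytes in a batch, so a batch is
// flushed after roughly 60 such files (~250 KiB) or at indexBatchSize files,
// whichever comes first.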
type service interface {
Serve()
Stop()
}
type Model struct {
cfg *config.ConfigWrapper
db *leveldb.DB
finder *files.BlockFinder
deviceName string
clientName string
clientVersion string
folderCfgs map[string]config.FolderConfiguration // folder -> cfg
folderFiles map[string]*files.Set // folder -> files
folderDevices map[string][]protocol.DeviceID // folder -> deviceIDs
deviceFolders map[protocol.DeviceID][]string // deviceID -> folders
deviceStatRefs map[protocol.DeviceID]*stats.DeviceStatisticsReference // deviceID -> statsRef
folderIgnores map[string]*ignore.Matcher // folder -> matcher object
folderRunners map[string]service // folder -> puller or scanner
fmut sync.RWMutex // protects the above
folderState map[string]folderState // folder -> state
folderStateChanged map[string]time.Time // folder -> time when state changed
smut sync.RWMutex
protoConn map[protocol.DeviceID]protocol.Connection
rawConn map[protocol.DeviceID]io.Closer
deviceVer map[protocol.DeviceID]string
pmut sync.RWMutex // protects protoConn and rawConn
addedFolder bool
started bool
}
var (
ErrNoSuchFile = errors.New("no such file")
ErrInvalid = errors.New("file is invalid")
SymlinkWarning = sync.Once{}
)
// NewModel creates and starts a new model. The model starts in read-only mode,
// where it sends index information to connected peers and responds to requests
// for file data without altering the local folder in any way.
func NewModel(cfg *config.ConfigWrapper, deviceName, clientName, clientVersion string, db *leveldb.DB) *Model {
m := &Model{
cfg: cfg,
db: db,
deviceName: deviceName,
clientName: clientName,
clientVersion: clientVersion,
folderCfgs: make(map[string]config.FolderConfiguration),
folderFiles: make(map[string]*files.Set),
folderDevices: make(map[string][]protocol.DeviceID),
deviceFolders: make(map[protocol.DeviceID][]string),
deviceStatRefs: make(map[protocol.DeviceID]*stats.DeviceStatisticsReference),
folderIgnores: make(map[string]*ignore.Matcher),
folderRunners: make(map[string]service),
folderState: make(map[string]folderState),
folderStateChanged: make(map[string]time.Time),
protoConn: make(map[protocol.DeviceID]protocol.Connection),
rawConn: make(map[protocol.DeviceID]io.Closer),
deviceVer: make(map[protocol.DeviceID]string),
finder: files.NewBlockFinder(db, cfg),
}
var timeout = 20 * 60 // seconds
if t := os.Getenv("STDEADLOCKTIMEOUT"); len(t) > 0 {
it, err := strconv.Atoi(t)
if err == nil {
timeout = it
}
}
deadlockDetect(&m.fmut, time.Duration(timeout)*time.Second)
deadlockDetect(&m.smut, time.Duration(timeout)*time.Second)
deadlockDetect(&m.pmut, time.Duration(timeout)*time.Second)
return m
}
// StartFolderRW starts read/write processing on the named folder. When in
// read/write mode the model will attempt to keep in sync with the cluster by
// pulling needed files from peer devices.
func (m *Model) StartFolderRW(folder string) {
m.fmut.Lock()
cfg, ok := m.folderCfgs[folder]
if !ok {
panic("cannot start nonexistent folder " + folder)
}
_, ok = m.folderRunners[folder]
if ok {
panic("cannot start already running folder " + folder)
}
p := &Puller{
folder: folder,
dir: cfg.Path,
scanIntv: time.Duration(cfg.RescanIntervalS) * time.Second,
model: m,
ignorePerms: cfg.IgnorePerms,
lenientMtimes: cfg.LenientMtimes,
}
m.folderRunners[folder] = p
m.fmut.Unlock()
if len(cfg.Versioning.Type) > 0 {
factory, ok := versioner.Factories[cfg.Versioning.Type]
if !ok {
l.Fatalf("Requested versioning type %q that does not exist", cfg.Versioning.Type)
}
p.versioner = factory(folder, cfg.Path, cfg.Versioning.Params)
}
if cfg.LenientMtimes {
l.Infof("Folder %q is running with LenientMtimes workaround. Syncing may not work properly.", folder)
}
go p.Serve()
}
// StartFolderRO starts read only processing on the named folder. When in
// read only mode the model will announce files to the cluster but not
// pull in any external changes.
func (m *Model) StartFolderRO(folder string) {
m.fmut.Lock()
cfg, ok := m.folderCfgs[folder]
if !ok {
panic("cannot start nonexistent folder " + folder)
}
_, ok = m.folderRunners[folder]
if ok {
panic("cannot start already running folder " + folder)
}
s := &Scanner{
folder: folder,
intv: time.Duration(cfg.RescanIntervalS) * time.Second,
model: m,
}
m.folderRunners[folder] = s
m.fmut.Unlock()
go s.Serve()
}
type ConnectionInfo struct {
protocol.Statistics
Address string
ClientVersion string
}
// ConnectionStats returns a map with connection statistics for each connected device.
func (m *Model) ConnectionStats() map[string]ConnectionInfo {
type remoteAddrer interface {
RemoteAddr() net.Addr
}
m.pmut.RLock()
m.fmut.RLock()
var res = make(map[string]ConnectionInfo)
for device, conn := range m.protoConn {
ci := ConnectionInfo{
Statistics: conn.Statistics(),
ClientVersion: m.deviceVer[device],
}
if nc, ok := m.rawConn[device].(remoteAddrer); ok {
ci.Address = nc.RemoteAddr().String()
}
res[device.String()] = ci
}
m.fmut.RUnlock()
m.pmut.RUnlock()
in, out := protocol.TotalInOut()
res["total"] = ConnectionInfo{
Statistics: protocol.Statistics{
At: time.Now(),
InBytesTotal: in,
OutBytesTotal: out,
},
}
return res
}
// Returns statistics about each device
func (m *Model) DeviceStatistics() map[string]stats.DeviceStatistics {
var res = make(map[string]stats.DeviceStatistics)
for id := range m.cfg.Devices() {
res[id.String()] = m.deviceStatRef(id).GetStatistics()
}
return res
}
// Returns the completion status, in percent, for the given device and folder.
func (m *Model) Completion(device protocol.DeviceID, folder string) float64 {
defer m.leveldbPanicWorkaround()
var tot int64
m.fmut.RLock()
rf, ok := m.folderFiles[folder]
m.fmut.RUnlock()
if !ok {
return 0 // Folder doesn't exist, so we hardly have any of it
}
rf.WithGlobalTruncated(func(f protocol.FileIntf) bool {
if !f.IsDeleted() {
tot += f.Size()
}
return true
})
if tot == 0 {
return 100 // Folder is empty, so we have all of it
}
var need int64
rf.WithNeedTruncated(device, func(f protocol.FileIntf) bool {
if !f.IsDeleted() {
need += f.Size()
}
return true
})
res := 100 * (1 - float64(need)/float64(tot))
if debug {
l.Debugf("%v Completion(%s, %q): %f (%d / %d)", m, device, folder, res, need, tot)
}
return res
}
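// Added illustrative note (not part of the original file): the completion
// figure is byte-based; e.g. if the non-deleted global total is 1000 bytes
// and the device still needs 250 bytes, Completion returns
// 100 * (1 - 250/1000) = 75 (percent).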
func sizeOf(fs []protocol.FileInfo) (files, deleted int, bytes int64) {
for _, f := range fs {
fs, de, by := sizeOfFile(f)
files += fs
deleted += de
bytes += by
}
return
}
func sizeOfFile(f protocol.FileIntf) (files, deleted int, bytes int64) {
if !f.IsDeleted() {
files++
} else {
deleted++
}
bytes += f.Size()
return
}
// GlobalSize returns the number of files, deleted files and total bytes for all
// files in the global model.
func (m *Model) GlobalSize(folder string) (files, deleted int, bytes int64) {
defer m.leveldbPanicWorkaround()
m.fmut.RLock()
defer m.fmut.RUnlock()
if rf, ok := m.folderFiles[folder]; ok {
rf.WithGlobalTruncated(func(f protocol.FileIntf) bool {
fs, de, by := sizeOfFile(f)
files += fs
deleted += de
bytes += by
return true
})
}
return
}
// LocalSize returns the number of files, deleted files and total bytes for all
// files in the local folder.
func (m *Model) LocalSize(folder string) (files, deleted int, bytes int64) {
defer m.leveldbPanicWorkaround()
m.fmut.RLock()
defer m.fmut.RUnlock()
if rf, ok := m.folderFiles[folder]; ok {
rf.WithHaveTruncated(protocol.LocalDeviceID, func(f protocol.FileIntf) bool {
if f.IsInvalid() {
return true
}
fs, de, by := sizeOfFile(f)
files += fs
deleted += de
bytes += by
return true
})
}
return
}
// NeedSize returns the number and total size of currently needed files.
func (m *Model) NeedSize(folder string) (files int, bytes int64) {
defer m.leveldbPanicWorkaround()
m.fmut.RLock()
defer m.fmut.RUnlock()
if rf, ok := m.folderFiles[folder]; ok {
rf.WithNeedTruncated(protocol.LocalDeviceID, func(f protocol.FileIntf) bool {
fs, de, by := sizeOfFile(f)
files += fs + de
bytes += by
return true
})
}
if debug {
l.Debugf("%v NeedSize(%q): %d %d", m, folder, files, bytes)
}
return
}
// NeedFolderFilesLimited returns the list of currently needed files, stopping at maxFiles
// files or maxBlocks blocks. Limits <= 0 are ignored.
func (m *Model) NeedFolderFilesLimited(folder string, maxFiles, maxBlocks int) []protocol.FileInfo {
defer m.leveldbPanicWorkaround()
m.fmut.RLock()
defer m.fmut.RUnlock()
nblocks := 0
if rf, ok := m.folderFiles[folder]; ok {
fs := make([]protocol.FileInfo, 0, maxFiles)
rf.WithNeed(protocol.LocalDeviceID, func(f protocol.FileIntf) bool {
fi := f.(protocol.FileInfo)
fs = append(fs, fi)
nblocks += len(fi.Blocks)
return (maxFiles <= 0 || len(fs) < maxFiles) && (maxBlocks <= 0 || nblocks < maxBlocks)
})
return fs
}
return nil
}
// Index is called when a new device is connected and we receive their full index.
// Implements the protocol.Model interface.
func (m *Model) Index(deviceID protocol.DeviceID, folder string, fs []protocol.FileInfo) {
if debug {
l.Debugf("IDX(in): %s %q: %d files", deviceID, folder, len(fs))
}
if !m.folderSharedWith(folder, deviceID) {
events.Default.Log(events.FolderRejected, map[string]string{
"folder": folder,
"device": deviceID.String(),
})
l.Warnf("Unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", folder, deviceID)
return
}
m.fmut.RLock()
files, ok := m.folderFiles[folder]
ignores, _ := m.folderIgnores[folder]
m.fmut.RUnlock()
if !ok {
l.Fatalf("Index for nonexistant folder %q", folder)
}
for i := 0; i < len(fs); {
lamport.Default.Tick(fs[i].Version)
if (ignores != nil && ignores.Match(fs[i].Name)) || symlinkInvalid(fs[i].IsSymlink()) {
if debug {
l.Debugln("dropping update for ignored/unsupported symlink", fs[i])
}
fs[i] = fs[len(fs)-1]
fs = fs[:len(fs)-1]
} else {
i++
}
}
files.Replace(deviceID, fs)
events.Default.Log(events.RemoteIndexUpdated, map[string]interface{}{
"device": deviceID.String(),
"folder": folder,
"items": len(fs),
"version": files.LocalVersion(deviceID),
})
}
// IndexUpdate is called for incremental updates to connected devices' indexes.
// Implements the protocol.Model interface.
func (m *Model) IndexUpdate(deviceID protocol.DeviceID, folder string, fs []protocol.FileInfo) {
if debug {
l.Debugf("%v IDXUP(in): %s / %q: %d files", m, deviceID, folder, len(fs))
}
if !m.folderSharedWith(folder, deviceID) {
l.Infof("Update for unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", folder, deviceID)
return
}
m.fmut.RLock()
files, ok := m.folderFiles[folder]
ignores, _ := m.folderIgnores[folder]
m.fmut.RUnlock()
if !ok {
l.Fatalf("IndexUpdate for nonexistant folder %q", folder)
}
for i := 0; i < len(fs); {
lamport.Default.Tick(fs[i].Version)
if (ignores != nil && ignores.Match(fs[i].Name)) || symlinkInvalid(fs[i].IsSymlink()) {
if debug {
l.Debugln("dropping update for ignored/unsupported symlink", fs[i])
}
fs[i] = fs[len(fs)-1]
fs = fs[:len(fs)-1]
} else {
i++
}
}
files.Update(deviceID, fs)
events.Default.Log(events.RemoteIndexUpdated, map[string]interface{}{
"device": deviceID.String(),
"folder": folder,
"items": len(fs),
"version": files.LocalVersion(deviceID),
})
}
func (m *Model) folderSharedWith(folder string, deviceID protocol.DeviceID) bool {
m.fmut.RLock()
defer m.fmut.RUnlock()
for _, nfolder := range m.deviceFolders[deviceID] {
if nfolder == folder {
return true
}
}
return false
}
func (m *Model) ClusterConfig(deviceID protocol.DeviceID, cm protocol.ClusterConfigMessage) {
m.pmut.Lock()
if cm.ClientName == "syncthing" {
m.deviceVer[deviceID] = cm.ClientVersion
} else {
m.deviceVer[deviceID] = cm.ClientName + " " + cm.ClientVersion
}
m.pmut.Unlock()
l.Infof(`Device %s client is "%s %s"`, deviceID, cm.ClientName, cm.ClientVersion)
var changed bool
if name := cm.GetOption("name"); name != "" {
l.Infof("Device %s name is %q", deviceID, name)
device, ok := m.cfg.Devices()[deviceID]
if ok && device.Name == "" {
device.Name = name
m.cfg.SetDevice(device)
changed = true
}
}
if m.cfg.Devices()[deviceID].Introducer {
// This device is an introducer. Go through the announced lists of folders
// and devices and add what we are missing.
for _, folder := range cm.Folders {
// If we don't have this folder yet, skip it. Ideally, we'd
// offer up something in the GUI to create the folder, but for the
// moment we only handle folders that we already have.
if _, ok := m.folderDevices[folder.ID]; !ok {
continue
}
nextDevice:
for _, device := range folder.Devices {
var id protocol.DeviceID
copy(id[:], device.ID)
if _, ok := m.cfg.Devices()[id]; !ok {
// The device is currently unknown. Add it to the config.
l.Infof("Adding device %v to config (vouched for by introducer %v)", id, deviceID)
newDeviceCfg := config.DeviceConfiguration{
DeviceID: id,
Compression: true,
Addresses: []string{"dynamic"},
}
// The introducers' introducers are also our introducers.
if device.Flags&protocol.FlagIntroducer != 0 {
l.Infof("Device %v is now also an introducer", id)
newDeviceCfg.Introducer = true
}
m.cfg.SetDevice(newDeviceCfg)
changed = true
}
for _, er := range m.deviceFolders[id] {
if er == folder.ID {
// We already share the folder with this device, so
// nothing to do.
continue nextDevice
}
}
// We don't yet share this folder with this device. Add the device
// to the sharing list of the folder.
l.Infof("Adding device %v to share %q (vouched for by introducer %v)", id, folder.ID, deviceID)
m.deviceFolders[id] = append(m.deviceFolders[id], folder.ID)
m.folderDevices[folder.ID] = append(m.folderDevices[folder.ID], id)
folderCfg := m.cfg.Folders()[folder.ID]
folderCfg.Devices = append(folderCfg.Devices, config.FolderDeviceConfiguration{
DeviceID: id,
})
m.cfg.SetFolder(folderCfg)
changed = true
}
}
}
if changed {
m.cfg.Save()
}
}
// Close removes the peer from the model and closes the underlying connection if possible.
// Implements the protocol.Model interface.
func (m *Model) Close(device protocol.DeviceID, err error) {
l.Infof("Connection to %s closed: %v", device, err)
events.Default.Log(events.DeviceDisconnected, map[string]string{
"id": device.String(),
"error": err.Error(),
})
m.pmut.Lock()
m.fmut.RLock()
for _, folder := range m.deviceFolders[device] {
m.folderFiles[folder].Replace(device, nil)
}
m.fmut.RUnlock()
conn, ok := m.rawConn[device]
if ok {
if conn, ok := conn.(*tls.Conn); ok {
// If the underlying connection is a *tls.Conn, Close() does more
// than it says on the tin. Specifically, it sends a TLS alert
// message, which might block forever if the connection is dead
// and we don't have a deadline set.
conn.SetWriteDeadline(time.Now().Add(250 * time.Millisecond))
}
conn.Close()
}
delete(m.protoConn, device)
delete(m.rawConn, device)
delete(m.deviceVer, device)
m.pmut.Unlock()
}
// Request returns the specified data segment by reading it from local disk.
// Implements the protocol.Model interface.
func (m *Model) Request(deviceID protocol.DeviceID, folder, name string, offset int64, size int) ([]byte, error) {
// Verify that the requested file exists in the local model.
m.fmut.RLock()
r, ok := m.folderFiles[folder]
m.fmut.RUnlock()
if !ok {
l.Warnf("Request from %s for file %s in nonexistent folder %q", deviceID, name, folder)
return nil, ErrNoSuchFile
}
lf := r.Get(protocol.LocalDeviceID, name)
if lf.IsInvalid() || lf.IsDeleted() {
if debug {
l.Debugf("%v REQ(in): %s: %q / %q o=%d s=%d; invalid: %v", m, deviceID, folder, name, offset, size, lf)
}
return nil, ErrInvalid
}
if offset > lf.Size() {
if debug {
l.Debugf("%v REQ(in; nonexistent): %s: %q o=%d s=%d", m, deviceID, name, offset, size)
}
return nil, ErrNoSuchFile
}
if debug && deviceID != protocol.LocalDeviceID {
l.Debugf("%v REQ(in): %s: %q / %q o=%d s=%d", m, deviceID, folder, name, offset, size)
}
m.fmut.RLock()
fn := filepath.Join(m.folderCfgs[folder].Path, name)
m.fmut.RUnlock()
var reader io.ReaderAt
var err error
if lf.IsSymlink() {
target, _, err := symlinks.Read(fn)
if err != nil {
return nil, err
}
reader = strings.NewReader(target)
} else {
reader, err = os.Open(fn) // XXX: Inefficient, should cache fd?
if err != nil {
return nil, err
}
defer reader.(*os.File).Close()
}
buf := make([]byte, size)
_, err = reader.ReadAt(buf, offset)
if err != nil {
return nil, err
}
return buf, nil
}
// ReplaceLocal replaces the local folder index with the given list of files.
func (m *Model) ReplaceLocal(folder string, fs []protocol.FileInfo) {
m.fmut.RLock()
m.folderFiles[folder].ReplaceWithDelete(protocol.LocalDeviceID, fs)
m.fmut.RUnlock()
}
func (m *Model) CurrentFolderFile(folder string, file string) protocol.FileInfo {
m.fmut.RLock()
f := m.folderFiles[folder].Get(protocol.LocalDeviceID, file)
m.fmut.RUnlock()
return f
}
func (m *Model) CurrentGlobalFile(folder string, file string) protocol.FileInfo {
m.fmut.RLock()
f := m.folderFiles[folder].GetGlobal(file)
m.fmut.RUnlock()
return f
}
type cFiler struct {
m *Model
r string
}
// Implements scanner.CurrentFiler
func (cf cFiler) CurrentFile(file string) protocol.FileInfo {
return cf.m.CurrentFolderFile(cf.r, file)
}
// ConnectedTo returns true if we are connected to the named device.
func (m *Model) ConnectedTo(deviceID protocol.DeviceID) bool {
m.pmut.RLock()
_, ok := m.protoConn[deviceID]
m.pmut.RUnlock()
if ok {
m.deviceWasSeen(deviceID)
}
return ok
}
func (m *Model) GetIgnores(folder string) ([]string, []string, error) {
var lines []string
m.fmut.RLock()
cfg, ok := m.folderCfgs[folder]
m.fmut.RUnlock()
if !ok {
return lines, nil, fmt.Errorf("Folder %s does not exist", folder)
}
fd, err := os.Open(filepath.Join(cfg.Path, ".stignore"))
if err != nil {
if os.IsNotExist(err) {
return lines, nil, nil
}
l.Warnln("Loading .stignore:", err)
return lines, nil, err
}
defer fd.Close()
scanner := bufio.NewScanner(fd)
for scanner.Scan() {
lines = append(lines, strings.TrimSpace(scanner.Text()))
}
var patterns []string
if matcher := m.folderIgnores[folder]; matcher != nil {
patterns = matcher.Patterns()
}
return lines, patterns, nil
}
func (m *Model) SetIgnores(folder string, content []string) error {
cfg, ok := m.folderCfgs[folder]
if !ok {
return fmt.Errorf("Folder %s does not exist", folder)
}
fd, err := ioutil.TempFile(cfg.Path, ".syncthing.stignore-"+folder)
if err != nil {
l.Warnln("Saving .stignore:", err)
return err
}
defer os.Remove(fd.Name())
for _, line := range content {
_, err = fmt.Fprintln(fd, line)
if err != nil {
l.Warnln("Saving .stignore:", err)
return err
}
}
err = fd.Close()
if err != nil {
l.Warnln("Saving .stignore:", err)
return err
}
file := filepath.Join(cfg.Path, ".stignore")
err = osutil.Rename(fd.Name(), file)
if err != nil {
l.Warnln("Saving .stignore:", err)
return err
}
return m.ScanFolder(folder)
}
// AddConnection adds a new peer connection to the model. An initial index will
// be sent to the connected peer, thereafter index updates whenever the local
// folder changes.
func (m *Model) AddConnection(rawConn io.Closer, protoConn protocol.Connection) {
deviceID := protoConn.ID()
m.pmut.Lock()
if _, ok := m.protoConn[deviceID]; ok {
panic("add existing device")
}
m.protoConn[deviceID] = protoConn
if _, ok := m.rawConn[deviceID]; ok {
panic("add existing device")
}
m.rawConn[deviceID] = rawConn
cm := m.clusterConfig(deviceID)
protoConn.ClusterConfig(cm)
m.fmut.RLock()
for _, folder := range m.deviceFolders[deviceID] {
fs := m.folderFiles[folder]
go sendIndexes(protoConn, folder, fs, m.folderIgnores[folder])
}
m.fmut.RUnlock()
m.pmut.Unlock()
m.deviceWasSeen(deviceID)
}
func (m *Model) deviceStatRef(deviceID protocol.DeviceID) *stats.DeviceStatisticsReference {
m.fmut.Lock()
defer m.fmut.Unlock()
if sr, ok := m.deviceStatRefs[deviceID]; ok {
return sr
} else {
sr = stats.NewDeviceStatisticsReference(m.db, deviceID)
m.deviceStatRefs[deviceID] = sr
return sr
}
}
func (m *Model) deviceWasSeen(deviceID protocol.DeviceID) {
m.deviceStatRef(deviceID).WasSeen()
}
func sendIndexes(conn protocol.Connection, folder string, fs *files.Set, ignores *ignore.Matcher) {
deviceID := conn.ID()
name := conn.Name()
var err error
if debug {
l.Debugf("sendIndexes for %s-%s/%q starting", deviceID, name, folder)
}
minLocalVer, err := sendIndexTo(true, 0, conn, folder, fs, ignores)
for err == nil {
time.Sleep(5 * time.Second)
if fs.LocalVersion(protocol.LocalDeviceID) <= minLocalVer {
continue
}
minLocalVer, err = sendIndexTo(false, minLocalVer, conn, folder, fs, ignores)
}
if debug {
l.Debugf("sendIndexes for %s-%s/%q exiting: %v", deviceID, name, folder, err)
}
}
func sendIndexTo(initial bool, minLocalVer uint64, conn protocol.Connection, folder string, fs *files.Set, ignores *ignore.Matcher) (uint64, error) {
deviceID := conn.ID()
name := conn.Name()
batch := make([]protocol.FileInfo, 0, indexBatchSize)
currentBatchSize := 0
maxLocalVer := uint64(0)
var err error
fs.WithHave(protocol.LocalDeviceID, func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfo)
if f.LocalVersion <= minLocalVer {
return true
}
if f.LocalVersion > maxLocalVer {
maxLocalVer = f.LocalVersion
}
if (ignores != nil && ignores.Match(f.Name)) || symlinkInvalid(f.IsSymlink()) {
if debug {
l.Debugln("not sending update for ignored/unsupported symlink", f)
}
return true
}
if len(batch) == indexBatchSize || currentBatchSize > indexTargetSize {
if initial {
if err = conn.Index(folder, batch); err != nil {
return false
}
if debug {
l.Debugf("sendIndexes for %s-%s/%q: %d files (<%d bytes) (initial index)", deviceID, name, folder, len(batch), currentBatchSize)
}
initial = false
} else {
if err = conn.IndexUpdate(folder, batch); err != nil {
return false
}
if debug {
l.Debugf("sendIndexes for %s-%s/%q: %d files (<%d bytes) (batched update)", deviceID, name, folder, len(batch), currentBatchSize)
}
}
batch = make([]protocol.FileInfo, 0, indexBatchSize)
currentBatchSize = 0
}
batch = append(batch, f)
currentBatchSize += indexPerFileSize + len(f.Blocks)*IndexPerBlockSize
return true
})
if initial && err == nil {
err = conn.Index(folder, batch)
if debug && err == nil {
l.Debugf("sendIndexes for %s-%s/%q: %d files (small initial index)", deviceID, name, folder, len(batch))
}
} else if len(batch) > 0 && err == nil {
err = conn.IndexUpdate(folder, batch)
if debug && err == nil {
l.Debugf("sendIndexes for %s-%s/%q: %d files (last batch)", deviceID, name, folder, len(batch))
}
}
return maxLocalVer, err
}
func (m *Model) updateLocal(folder string, f protocol.FileInfo) {
f.LocalVersion = 0
m.fmut.RLock()
m.folderFiles[folder].Update(protocol.LocalDeviceID, []protocol.FileInfo{f})
m.fmut.RUnlock()
events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
"folder": folder,
"name": f.Name,
"modified": time.Unix(f.Modified, 0),
"flags": fmt.Sprintf("0%o", f.Flags),
"size": f.Size(),
})
}
func (m *Model) requestGlobal(deviceID protocol.DeviceID, folder, name string, offset int64, size int, hash []byte) ([]byte, error) {
m.pmut.RLock()
nc, ok := m.protoConn[deviceID]
m.pmut.RUnlock()
if !ok {
return nil, fmt.Errorf("requestGlobal: no such device: %s", deviceID)
}
if debug {
l.Debugf("%v REQ(out): %s: %q / %q o=%d s=%d h=%x", m, deviceID, folder, name, offset, size, hash)
}
return nc.Request(folder, name, offset, size)
}
func (m *Model) AddFolder(cfg config.FolderConfiguration) {
if m.started {
panic("cannot add folder to started model")
}
if len(cfg.ID) == 0 {
panic("cannot add empty folder id")
}
m.fmut.Lock()
m.folderCfgs[cfg.ID] = cfg
m.folderFiles[cfg.ID] = files.NewSet(cfg.ID, m.db)
m.folderDevices[cfg.ID] = make([]protocol.DeviceID, len(cfg.Devices))
for i, device := range cfg.Devices {
m.folderDevices[cfg.ID][i] = device.DeviceID
m.deviceFolders[device.DeviceID] = append(m.deviceFolders[device.DeviceID], cfg.ID)
}
m.addedFolder = true
m.fmut.Unlock()
}
func (m *Model) ScanFolders() {
m.fmut.RLock()
var folders = make([]string, 0, len(m.folderCfgs))
for folder := range m.folderCfgs {
folders = append(folders, folder)
}
m.fmut.RUnlock()
var wg sync.WaitGroup
wg.Add(len(folders))
for _, folder := range folders {
folder := folder
go func() {
err := m.ScanFolder(folder)
if err != nil {
m.cfg.InvalidateFolder(folder, err.Error())
}
wg.Done()
}()
}
wg.Wait()
}
func (m *Model) ScanFolder(folder string) error {
return m.ScanFolderSub(folder, "")
}
func (m *Model) ScanFolderSub(folder, sub string) error {
if p := filepath.Clean(filepath.Join(folder, sub)); !strings.HasPrefix(p, folder) {
return errors.New("invalid subpath")
}
m.fmut.RLock()
fs, ok := m.folderFiles[folder]
dir := m.folderCfgs[folder].Path
ignores, _ := ignore.Load(filepath.Join(dir, ".stignore"), m.cfg.Options().CacheIgnoredFiles)
m.folderIgnores[folder] = ignores
w := &scanner.Walker{
Dir: dir,
Sub: sub,
Matcher: ignores,
BlockSize: protocol.BlockSize,
TempNamer: defTempNamer,
CurrentFiler: cFiler{m, folder},
IgnorePerms: m.folderCfgs[folder].IgnorePerms,
}
m.fmut.RUnlock()
if !ok {
return errors.New("no such folder")
}
m.setState(folder, FolderScanning)
fchan, err := w.Walk()
if err != nil {
return err
}
batchSize := 100
batch := make([]protocol.FileInfo, 0, batchSize)
for f := range fchan {
events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
"folder": folder,
"name": f.Name,
"modified": time.Unix(f.Modified, 0),
"flags": fmt.Sprintf("0%o", f.Flags),
"size": f.Size(),
})
if len(batch) == batchSize {
fs.Update(protocol.LocalDeviceID, batch)
batch = batch[:0]
}
batch = append(batch, f)
}
if len(batch) > 0 {
fs.Update(protocol.LocalDeviceID, batch)
}
batch = batch[:0]
// TODO: We should limit the Have scanning to start at sub
seenPrefix := false
fs.WithHaveTruncated(protocol.LocalDeviceID, func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfoTruncated)
if !strings.HasPrefix(f.Name, sub) {
// Return true so that we keep iterating, until we get to the part
// of the tree we are interested in. Then return false so we stop
// iterating when we've passed the end of the subtree.
return !seenPrefix
}
seenPrefix = true
if !f.IsDeleted() {
if f.IsInvalid() {
return true
}
if len(batch) == batchSize {
fs.Update(protocol.LocalDeviceID, batch)
batch = batch[:0]
}
if (ignores != nil && ignores.Match(f.Name)) || symlinkInvalid(f.IsSymlink()) {
// File has been ignored or an unsupported symlink. Set invalid bit.
l.Debugln("setting invalid bit on ignored", f)
nf := protocol.FileInfo{
Name: f.Name,
Flags: f.Flags | protocol.FlagInvalid,
Modified: f.Modified,
Version: f.Version, // The file is still the same, so don't bump version
}
events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
"folder": folder,
"name": f.Name,
"modified": time.Unix(f.Modified, 0),
"flags": fmt.Sprintf("0%o", f.Flags),
"size": f.Size(),
})
batch = append(batch, nf)
} else if _, err := os.Lstat(filepath.Join(dir, f.Name)); err != nil && os.IsNotExist(err) {
// File has been deleted
nf := protocol.FileInfo{
Name: f.Name,
Flags: f.Flags | protocol.FlagDeleted,
Modified: f.Modified,
Version: lamport.Default.Tick(f.Version),
}
events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
"folder": folder,
"name": f.Name,
"modified": time.Unix(f.Modified, 0),
"flags": fmt.Sprintf("0%o", f.Flags),
"size": f.Size(),
})
batch = append(batch, nf)
}
}
return true
})
if len(batch) > 0 {
fs.Update(protocol.LocalDeviceID, batch)
}
m.setState(folder, FolderIdle)
return nil
}
// clusterConfig returns a ClusterConfigMessage that is correct for the given peer device
func (m *Model) clusterConfig(device protocol.DeviceID) protocol.ClusterConfigMessage {
cm := protocol.ClusterConfigMessage{
ClientName: m.clientName,
ClientVersion: m.clientVersion,
Options: []protocol.Option{
{
Key: "name",
Value: m.deviceName,
},
},
}
m.fmut.RLock()
for _, folder := range m.deviceFolders[device] {
cr := protocol.Folder{
ID: folder,
}
for _, device := range m.folderDevices[folder] {
// DeviceID is a value type, but with an underlying array. Copy it
// so we don't grab aliases to the same array later on in device[:]
device := device
// TODO: Set read only bit when relevant
cn := protocol.Device{
ID: device[:],
Flags: protocol.FlagShareTrusted,
}
if deviceCfg := m.cfg.Devices()[device]; deviceCfg.Introducer {
cn.Flags |= protocol.FlagIntroducer
}
cr.Devices = append(cr.Devices, cn)
}
cm.Folders = append(cm.Folders, cr)
}
m.fmut.RUnlock()
return cm
}
func (m *Model) setState(folder string, state folderState) {
m.smut.Lock()
oldState := m.folderState[folder]
changed, ok := m.folderStateChanged[folder]
if state != oldState {
m.folderState[folder] = state
m.folderStateChanged[folder] = time.Now()
eventData := map[string]interface{}{
"folder": folder,
"to": state.String(),
}
if ok {
eventData["duration"] = time.Since(changed).Seconds()
eventData["from"] = oldState.String()
}
events.Default.Log(events.StateChanged, eventData)
}
m.smut.Unlock()
}
func (m *Model) State(folder string) (string, time.Time) {
m.smut.RLock()
state := m.folderState[folder]
changed := m.folderStateChanged[folder]
m.smut.RUnlock()
return state.String(), changed
}
func (m *Model) Override(folder string) {
m.fmut.RLock()
fs := m.folderFiles[folder]
m.fmut.RUnlock()
m.setState(folder, FolderScanning)
batch := make([]protocol.FileInfo, 0, indexBatchSize)
fs.WithNeed(protocol.LocalDeviceID, func(fi protocol.FileIntf) bool {
need := fi.(protocol.FileInfo)
if len(batch) == indexBatchSize {
fs.Update(protocol.LocalDeviceID, batch)
batch = batch[:0]
}
have := fs.Get(protocol.LocalDeviceID, need.Name)
if have.Name != need.Name {
// We are missing the file
need.Flags |= protocol.FlagDeleted
need.Blocks = nil
} else {
// We have the file, replace with our version
need = have
}
need.Version = lamport.Default.Tick(need.Version)
need.LocalVersion = 0
batch = append(batch, need)
return true
})
if len(batch) > 0 {
fs.Update(protocol.LocalDeviceID, batch)
}
m.setState(folder, FolderIdle)
}
// CurrentLocalVersion returns the change version for the given folder.
// This is guaranteed to increment if the contents of the local folder have
// changed.
func (m *Model) CurrentLocalVersion(folder string) uint64 {
m.fmut.RLock()
fs, ok := m.folderFiles[folder]
m.fmut.RUnlock()
if !ok {
// The folder might not exist, since this can be called with a user
// specified folder name from the REST interface.
return 0
}
return fs.LocalVersion(protocol.LocalDeviceID)
}
// RemoteLocalVersion returns the change version for the given folder, as
// sent by remote peers. This is guaranteed to increment if the contents of
// the remote or global folder have changed.
func (m *Model) RemoteLocalVersion(folder string) uint64 {
m.fmut.RLock()
defer m.fmut.RUnlock()
fs, ok := m.folderFiles[folder]
if !ok {
// The folder might not exist, since this can be called with a user
// specified folder name from the REST interface.
return 0
}
var ver uint64
for _, n := range m.folderDevices[folder] {
ver += fs.LocalVersion(n)
}
return ver
}
func (m *Model) availability(folder string, file string) []protocol.DeviceID {
// Acquire this lock first, as the value returned from folderFiles can
// get heavily modified on Close()
m.pmut.RLock()
defer m.pmut.RUnlock()
m.fmut.RLock()
fs, ok := m.folderFiles[folder]
m.fmut.RUnlock()
if !ok {
return nil
}
availableDevices := []protocol.DeviceID{}
for _, device := range fs.Availability(file) {
_, ok := m.protoConn[device]
if ok {
availableDevices = append(availableDevices, device)
}
}
return availableDevices
}
func (m *Model) String() string {
return fmt.Sprintf("model@%p", m)
}
func (m *Model) leveldbPanicWorkaround() {
// When an inconsistency is detected in leveldb we panic(). This is
// appropriate because it should never happen, but currently it does for
// some reason. However it only seems to trigger in the asynchronous full-
// database scans that happen due to REST and usage-reporting calls. In
// those places we defer to this workaround to catch the panic instead of
// taking down syncthing.
// This is just a band-aid and should be removed as soon as we have found
// a real root cause.
if pnc := recover(); pnc != nil {
if err, ok := pnc.(error); ok && strings.Contains(err.Error(), "leveldb") {
l.Infoln("recovered:", err)
} else {
// Any non-leveldb error is genuine and should continue panicking.
panic(pnc)
}
}
}
func symlinkInvalid(isLink bool) bool {
if !symlinks.Supported && isLink {
SymlinkWarning.Do(func() {
l.Warnln("Symlinks are unsupported as they require Administrator priviledges. This might cause your folder to appear out of sync.")
})
return true
}
return false
}
| internal/model/model.go | 1 | https://github.com/syncthing/syncthing/commit/59a85c1d751c85e585ec93398c3ba5c50bdef91f | [
0.997638463973999,
0.11467102915048599,
0.00016212323680520058,
0.0005421487148851156,
0.30326390266418457
] |
{
"id": 4,
"code_window": [
"\tdefer m.leveldbPanicWorkaround()\n",
"\n",
"\tm.fmut.RLock()\n",
"\tdefer m.fmut.RUnlock()\n",
"\tnblocks := 0\n",
"\tif rf, ok := m.folderFiles[folder]; ok {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [],
"file_path": "internal/model/model.go",
"type": "replace",
"edit_start_line_idx": 404
} | // Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
package protocol
import (
"os"
"strings"
"github.com/calmh/logger"
)
var (
debug = strings.Contains(os.Getenv("STTRACE"), "protocol") || os.Getenv("STTRACE") == "all"
l = logger.DefaultLogger
)
| internal/protocol/debug.go | 0 | https://github.com/syncthing/syncthing/commit/59a85c1d751c85e585ec93398c3ba5c50bdef91f | [
0.00017848220886662602,
0.00017295505676884204,
0.00016447596135549247,
0.00017590702918823808,
0.000006087107067287434
] |
{
"id": 4,
"code_window": [
"\tdefer m.leveldbPanicWorkaround()\n",
"\n",
"\tm.fmut.RLock()\n",
"\tdefer m.fmut.RUnlock()\n",
"\tnblocks := 0\n",
"\tif rf, ok := m.folderFiles[folder]; ok {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [],
"file_path": "internal/model/model.go",
"type": "replace",
"edit_start_line_idx": 404
} | // Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import (
"bytes"
"flag"
"fmt"
"io"
"log"
"strings"
"testing"
"unicode/utf8"
)
var (
testn = flag.Int("testn", -1, "specific test number to run or -1 for all")
)
// pc replaces any rune r that is repeated n times, for n > 1, with r{n}.
func pc(s string) []byte {
b := bytes.NewBuffer(make([]byte, 0, len(s)))
for i := 0; i < len(s); {
r, sz := utf8.DecodeRuneInString(s[i:])
n := 0
if sz == 1 {
// Special-case one-byte case to handle repetition for invalid UTF-8.
for c := s[i]; i+n < len(s) && s[i+n] == c; n++ {
}
} else {
for _, r2 := range s[i:] {
if r2 != r {
break
}
n++
}
}
b.WriteString(s[i : i+sz])
if n > 1 {
fmt.Fprintf(b, "{%d}", n)
}
i += sz * n
}
return b.Bytes()
}
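// Added illustrative note (not part of the original file): pc compresses
// repeated runes for readable failure output, e.g.
// pc("aaab\u0300\u0300") == []byte("a{3}b\u0300{2}").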
// pidx finds the index from which two strings start to differ, plus context.
// It returns the index and ellipsis if the index is greater than 0.
func pidx(a, b string) (i int, prefix string) {
for ; i < len(a) && i < len(b) && a[i] == b[i]; i++ {
}
if i < 8 {
return 0, ""
}
i -= 3 // ensure taking at least one full rune before the difference.
for k := i - 7; i > k && !utf8.RuneStart(a[i]); i-- {
}
return i, "..."
}
type PositionTest struct {
input string
pos int
buffer string // expected contents of reorderBuffer, if applicable
}
type positionFunc func(rb *reorderBuffer, s string) (int, []byte)
func runPosTests(t *testing.T, name string, f Form, fn positionFunc, tests []PositionTest) {
rb := reorderBuffer{}
rb.init(f, nil)
for i, test := range tests {
rb.reset()
rb.src = inputString(test.input)
rb.nsrc = len(test.input)
pos, out := fn(&rb, test.input)
if pos != test.pos {
t.Errorf("%s:%d: position is %d; want %d", name, i, pos, test.pos)
}
if outs := string(out); outs != test.buffer {
k, pfx := pidx(outs, test.buffer)
t.Errorf("%s:%d: buffer \nwas %s%+q; \nwant %s%+q", name, i, pfx, pc(outs[k:]), pfx, pc(test.buffer[k:]))
}
}
}
func grave(n int) string {
return rep(0x0300, n)
}
func rep(r rune, n int) string {
return strings.Repeat(string(r), n)
}
const segSize = maxByteBufferSize
var cgj = GraphemeJoiner
var decomposeSegmentTests = []PositionTest{
// illegal runes
{"\xC0", 0, ""},
{"\u00E0\x80", 2, "\u0061\u0300"},
// starter
{"a", 1, "a"},
{"ab", 1, "a"},
// starter + composing
{"a\u0300", 3, "a\u0300"},
{"a\u0300b", 3, "a\u0300"},
// with decomposition
{"\u00C0", 2, "A\u0300"},
{"\u00C0b", 2, "A\u0300"},
// long
{grave(31), 60, grave(30) + cgj},
{grave(30), 60, grave(30)},
{grave(30) + "\uff9e", 60, grave(30) + cgj},
// ends with incomplete UTF-8 encoding
{"\xCC", 0, ""},
{"\u0300\xCC", 2, "\u0300"},
}
func decomposeSegmentF(rb *reorderBuffer, s string) (int, []byte) {
rb.initString(NFD, s)
rb.setFlusher(nil, appendFlush)
p := decomposeSegment(rb, 0, true)
return p, rb.out
}
func TestDecomposeSegment(t *testing.T) {
runPosTests(t, "TestDecomposeSegment", NFC, decomposeSegmentF, decomposeSegmentTests)
}
var firstBoundaryTests = []PositionTest{
// no boundary
{"", -1, ""},
{"\u0300", -1, ""},
{"\x80\x80", -1, ""},
// illegal runes
{"\xff", 0, ""},
{"\u0300\xff", 2, ""},
{"\u0300\xc0\x80\x80", 2, ""},
// boundaries
{"a", 0, ""},
{"\u0300a", 2, ""},
// Hangul
{"\u1103\u1161", 0, ""},
{"\u110B\u1173\u11B7", 0, ""},
{"\u1161\u110B\u1173\u11B7", 3, ""},
{"\u1173\u11B7\u1103\u1161", 6, ""},
// too many combining characters.
{grave(maxNonStarters - 1), -1, ""},
{grave(maxNonStarters), 60, ""},
{grave(maxNonStarters + 1), 60, ""},
}
func firstBoundaryF(rb *reorderBuffer, s string) (int, []byte) {
return rb.f.form.FirstBoundary([]byte(s)), nil
}
func firstBoundaryStringF(rb *reorderBuffer, s string) (int, []byte) {
return rb.f.form.FirstBoundaryInString(s), nil
}
func TestFirstBoundary(t *testing.T) {
runPosTests(t, "TestFirstBoundary", NFC, firstBoundaryF, firstBoundaryTests)
runPosTests(t, "TestFirstBoundaryInString", NFC, firstBoundaryStringF, firstBoundaryTests)
}
var decomposeToLastTests = []PositionTest{
// ends with inert character
{"Hello!", 6, ""},
{"\u0632", 2, ""},
{"a\u0301\u0635", 5, ""},
// ends with non-inert starter
{"a", 0, "a"},
{"a\u0301a", 3, "a"},
{"a\u0301\u03B9", 3, "\u03B9"},
{"a\u0327", 0, "a\u0327"},
// illegal runes
{"\xFF", 1, ""},
{"aa\xFF", 3, ""},
{"\xC0\x80\x80", 3, ""},
{"\xCC\x80\x80", 3, ""},
// ends with incomplete UTF-8 encoding
{"a\xCC", 2, ""},
// ends with combining characters
{"\u0300\u0301", 0, "\u0300\u0301"},
{"a\u0300\u0301", 0, "a\u0300\u0301"},
{"a\u0301\u0308", 0, "a\u0301\u0308"},
{"a\u0308\u0301", 0, "a\u0308\u0301"},
{"aaaa\u0300\u0301", 3, "a\u0300\u0301"},
{"\u0300a\u0300\u0301", 2, "a\u0300\u0301"},
{"\u00C0", 0, "A\u0300"},
{"a\u00C0", 1, "A\u0300"},
// decomposing
{"a\u0300\u00E0", 3, "a\u0300"},
// multisegment decompositions (flushes leading segments)
{"a\u0300\uFDC0", 7, "\u064A"},
{"\uFDC0" + grave(29), 4, "\u064A" + grave(29)},
{"\uFDC0" + grave(30), 4, "\u064A" + grave(30)},
{"\uFDC0" + grave(31), 5, grave(30)},
{"\uFDFA" + grave(14), 31, "\u0645" + grave(14)},
// Overflow
{"\u00E0" + grave(29), 0, "a" + grave(30)},
{"\u00E0" + grave(30), 2, grave(30)},
// Hangul
{"a\u1103", 1, "\u1103"},
{"a\u110B", 1, "\u110B"},
{"a\u110B\u1173", 1, "\u110B\u1173"},
// See comment in composition.go:compBoundaryAfter.
{"a\u110B\u1173\u11B7", 1, "\u110B\u1173\u11B7"},
{"a\uC73C", 1, "\u110B\u1173"},
{"다음", 3, "\u110B\u1173\u11B7"},
{"다", 0, "\u1103\u1161"},
{"\u1103\u1161\u110B\u1173\u11B7", 6, "\u110B\u1173\u11B7"},
{"\u110B\u1173\u11B7\u1103\u1161", 9, "\u1103\u1161"},
{"다음음", 6, "\u110B\u1173\u11B7"},
{"음다다", 6, "\u1103\u1161"},
// maximized buffer
{"a" + grave(30), 0, "a" + grave(30)},
// Buffer overflow
{"a" + grave(31), 3, grave(30)},
// weird UTF-8
{"a\u0300\u11B7", 0, "a\u0300\u11B7"},
}
func decomposeToLast(rb *reorderBuffer, s string) (int, []byte) {
rb.setFlusher([]byte(s), appendFlush)
decomposeToLastBoundary(rb)
buf := rb.flush(nil)
return len(rb.out), buf
}
func TestDecomposeToLastBoundary(t *testing.T) {
runPosTests(t, "TestDecomposeToLastBoundary", NFKC, decomposeToLast, decomposeToLastTests)
}
var lastBoundaryTests = []PositionTest{
// ends with inert character
{"Hello!", 6, ""},
{"\u0632", 2, ""},
// ends with non-inert starter
{"a", 0, ""},
// illegal runes
{"\xff", 1, ""},
{"aa\xff", 3, ""},
{"a\xff\u0300", 1, ""},
{"\xc0\x80\x80", 3, ""},
{"\xc0\x80\x80\u0300", 3, ""},
// ends with incomplete UTF-8 encoding
{"\xCC", -1, ""},
{"\xE0\x80", -1, ""},
{"\xF0\x80\x80", -1, ""},
{"a\xCC", 0, ""},
{"\x80\xCC", 1, ""},
{"\xCC\xCC", 1, ""},
// ends with combining characters
{"a\u0300\u0301", 0, ""},
{"aaaa\u0300\u0301", 3, ""},
{"\u0300a\u0300\u0301", 2, ""},
{"\u00C0", 0, ""},
{"a\u00C0", 1, ""},
// decomposition may recombine
{"\u0226", 0, ""},
// no boundary
{"", -1, ""},
{"\u0300\u0301", -1, ""},
{"\u0300", -1, ""},
{"\x80\x80", -1, ""},
{"\x80\x80\u0301", -1, ""},
// Hangul
{"다음", 3, ""},
{"다", 0, ""},
{"\u1103\u1161\u110B\u1173\u11B7", 6, ""},
{"\u110B\u1173\u11B7\u1103\u1161", 9, ""},
// too many combining characters.
{grave(maxNonStarters - 1), -1, ""},
// May still be preceded by a non-starter.
{grave(maxNonStarters), -1, ""},
// May still need to insert a cgj after the last combiner.
{grave(maxNonStarters + 1), 2, ""},
{grave(maxNonStarters + 2), 4, ""},
{"a" + grave(maxNonStarters-1), 0, ""},
{"a" + grave(maxNonStarters), 0, ""},
// May still need to insert a cgj after the last combiner.
{"a" + grave(maxNonStarters+1), 3, ""},
{"a" + grave(maxNonStarters+2), 5, ""},
}
func lastBoundaryF(rb *reorderBuffer, s string) (int, []byte) {
return rb.f.form.LastBoundary([]byte(s)), nil
}
func TestLastBoundary(t *testing.T) {
runPosTests(t, "TestLastBoundary", NFC, lastBoundaryF, lastBoundaryTests)
}
var quickSpanTests = []PositionTest{
{"", 0, ""},
// starters
{"a", 1, ""},
{"abc", 3, ""},
{"\u043Eb", 3, ""},
// incomplete last rune.
{"\xCC", 1, ""},
{"a\xCC", 2, ""},
// incorrectly ordered combining characters
{"\u0300\u0316", 0, ""},
{"\u0300\u0316cd", 0, ""},
// have a maximum number of combining characters.
{rep(0x035D, 30) + "\u035B", 0, ""},
{"a" + rep(0x035D, 30) + "\u035B", 0, ""},
{"Ɵ" + rep(0x035D, 30) + "\u035B", 0, ""},
{"aa" + rep(0x035D, 30) + "\u035B", 1, ""},
{rep(0x035D, 30) + cgj + "\u035B", 64, ""},
{"a" + rep(0x035D, 30) + cgj + "\u035B", 65, ""},
{"Ɵ" + rep(0x035D, 30) + cgj + "\u035B", 66, ""},
{"aa" + rep(0x035D, 30) + cgj + "\u035B", 66, ""},
}
var quickSpanNFDTests = []PositionTest{
// needs decomposing
{"\u00C0", 0, ""},
{"abc\u00C0", 3, ""},
// correctly ordered combining characters
{"\u0300", 2, ""},
{"ab\u0300", 4, ""},
{"ab\u0300cd", 6, ""},
{"\u0300cd", 4, ""},
{"\u0316\u0300", 4, ""},
{"ab\u0316\u0300", 6, ""},
{"ab\u0316\u0300cd", 8, ""},
{"ab\u0316\u0300\u00C0", 6, ""},
{"\u0316\u0300cd", 6, ""},
{"\u043E\u0308b", 5, ""},
// incorrectly ordered combining characters
{"ab\u0300\u0316", 1, ""}, // TODO: we could skip 'b' as well.
{"ab\u0300\u0316cd", 1, ""},
// Hangul
{"같은", 0, ""},
}
var quickSpanNFCTests = []PositionTest{
// okay composed
{"\u00C0", 2, ""},
{"abc\u00C0", 5, ""},
// correctly ordered combining characters
{"ab\u0300", 1, ""},
{"ab\u0300cd", 1, ""},
{"ab\u0316\u0300", 1, ""},
{"ab\u0316\u0300cd", 1, ""},
{"\u00C0\u035D", 4, ""},
// we do not special case leading combining characters
{"\u0300cd", 0, ""},
{"\u0300", 0, ""},
{"\u0316\u0300", 0, ""},
{"\u0316\u0300cd", 0, ""},
// incorrectly ordered combining characters
{"ab\u0300\u0316", 1, ""},
{"ab\u0300\u0316cd", 1, ""},
// Hangul
{"같은", 6, ""},
// We return the start of the violating segment in case of overflow.
{grave(30) + "\uff9e", 0, ""},
{grave(30), 0, ""},
}
func doQuickSpan(rb *reorderBuffer, s string) (int, []byte) {
return rb.f.form.QuickSpan([]byte(s)), nil
}
func doQuickSpanString(rb *reorderBuffer, s string) (int, []byte) {
return rb.f.form.QuickSpanString(s), nil
}
func TestQuickSpan(t *testing.T) {
runPosTests(t, "TestQuickSpanNFD1", NFD, doQuickSpan, quickSpanTests)
runPosTests(t, "TestQuickSpanNFD2", NFD, doQuickSpan, quickSpanNFDTests)
runPosTests(t, "TestQuickSpanNFC1", NFC, doQuickSpan, quickSpanTests)
runPosTests(t, "TestQuickSpanNFC2", NFC, doQuickSpan, quickSpanNFCTests)
runPosTests(t, "TestQuickSpanStringNFD1", NFD, doQuickSpanString, quickSpanTests)
runPosTests(t, "TestQuickSpanStringNFD2", NFD, doQuickSpanString, quickSpanNFDTests)
runPosTests(t, "TestQuickSpanStringNFC1", NFC, doQuickSpanString, quickSpanTests)
runPosTests(t, "TestQuickSpanStringNFC2", NFC, doQuickSpanString, quickSpanNFCTests)
}
var isNormalTests = []PositionTest{
{"", 1, ""},
// illegal runes
{"\xff", 1, ""},
// starters
{"a", 1, ""},
{"abc", 1, ""},
{"\u043Eb", 1, ""},
// incorrectly ordered combining characters
{"\u0300\u0316", 0, ""},
{"ab\u0300\u0316", 0, ""},
{"ab\u0300\u0316cd", 0, ""},
{"\u0300\u0316cd", 0, ""},
}
var isNormalNFDTests = []PositionTest{
// needs decomposing
{"\u00C0", 0, ""},
{"abc\u00C0", 0, ""},
// correctly ordered combining characters
{"\u0300", 1, ""},
{"ab\u0300", 1, ""},
{"ab\u0300cd", 1, ""},
{"\u0300cd", 1, ""},
{"\u0316\u0300", 1, ""},
{"ab\u0316\u0300", 1, ""},
{"ab\u0316\u0300cd", 1, ""},
{"\u0316\u0300cd", 1, ""},
{"\u043E\u0308b", 1, ""},
// Hangul
{"같은", 0, ""},
}
var isNormalNFCTests = []PositionTest{
// okay composed
{"\u00C0", 1, ""},
{"abc\u00C0", 1, ""},
// need reordering
{"a\u0300", 0, ""},
{"a\u0300cd", 0, ""},
{"a\u0316\u0300", 0, ""},
{"a\u0316\u0300cd", 0, ""},
// correctly ordered combining characters
{"ab\u0300", 1, ""},
{"ab\u0300cd", 1, ""},
{"ab\u0316\u0300", 1, ""},
{"ab\u0316\u0300cd", 1, ""},
{"\u00C0\u035D", 1, ""},
{"\u0300", 1, ""},
{"\u0316\u0300cd", 1, ""},
// Hangul
{"같은", 1, ""},
}
var isNormalNFKXTests = []PositionTest{
// Special case.
{"\u00BC", 0, ""},
}
func isNormalF(rb *reorderBuffer, s string) (int, []byte) {
if rb.f.form.IsNormal([]byte(s)) {
return 1, nil
}
return 0, nil
}
func isNormalStringF(rb *reorderBuffer, s string) (int, []byte) {
if rb.f.form.IsNormalString(s) {
return 1, nil
}
return 0, nil
}
func TestIsNormal(t *testing.T) {
runPosTests(t, "TestIsNormalNFD1", NFD, isNormalF, isNormalTests)
runPosTests(t, "TestIsNormalNFD2", NFD, isNormalF, isNormalNFDTests)
runPosTests(t, "TestIsNormalNFC1", NFC, isNormalF, isNormalTests)
runPosTests(t, "TestIsNormalNFC2", NFC, isNormalF, isNormalNFCTests)
runPosTests(t, "TestIsNormalNFKD1", NFKD, isNormalF, isNormalTests)
runPosTests(t, "TestIsNormalNFKD2", NFKD, isNormalF, isNormalNFDTests)
runPosTests(t, "TestIsNormalNFKD3", NFKD, isNormalF, isNormalNFKXTests)
runPosTests(t, "TestIsNormalNFKC1", NFKC, isNormalF, isNormalTests)
runPosTests(t, "TestIsNormalNFKC2", NFKC, isNormalF, isNormalNFCTests)
runPosTests(t, "TestIsNormalNFKC3", NFKC, isNormalF, isNormalNFKXTests)
}
func TestIsNormalString(t *testing.T) {
runPosTests(t, "TestIsNormalNFD1", NFD, isNormalStringF, isNormalTests)
runPosTests(t, "TestIsNormalNFD2", NFD, isNormalStringF, isNormalNFDTests)
runPosTests(t, "TestIsNormalNFC1", NFC, isNormalStringF, isNormalTests)
runPosTests(t, "TestIsNormalNFC2", NFC, isNormalStringF, isNormalNFCTests)
}
type AppendTest struct {
left string
right string
out string
}
type appendFunc func(f Form, out []byte, s string) []byte
var fstr = []string{"NFC", "NFD", "NFKC", "NFKD"}
func runNormTests(t *testing.T, name string, fn appendFunc) {
for f := NFC; f <= NFKD; f++ {
runAppendTests(t, name, f, fn, normTests[f])
}
}
func runAppendTests(t *testing.T, name string, f Form, fn appendFunc, tests []AppendTest) {
for i, test := range tests {
if *testn >= 0 && i != *testn {
continue
}
out := []byte(test.left)
have := string(fn(f, out, test.right))
if len(have) != len(test.out) {
t.Errorf("%s.%s:%d: length is %d; want %d (%+q vs %+q)", fstr[f], name, i, len(have), len(test.out), pc(have), pc(test.out))
}
if have != test.out {
k, pf := pidx(have, test.out)
t.Errorf("%s.%s:%d: \nwas %s%+q; \nwant %s%+q", fstr[f], name, i, pf, pc(have[k:]), pf, pc(test.out[k:]))
}
// Bootstrap by normalizing input. Ensures that the various variants
// behave the same.
for g := NFC; g <= NFKD; g++ {
if f == g {
continue
}
want := g.String(test.left + test.right)
have := string(fn(g, g.AppendString(nil, test.left), test.right))
if len(have) != len(want) {
t.Errorf("%s(%s.%s):%d: length is %d; want %d (%+q vs %+q)", fstr[g], fstr[f], name, i, len(have), len(want), pc(have), pc(want))
}
if have != want {
k, pf := pidx(have, want)
t.Errorf("%s(%s.%s):%d: \nwas %s%+q; \nwant %s%+q", fstr[g], fstr[f], name, i, pf, pc(have[k:]), pf, pc(want[k:]))
}
}
}
}
var normTests = [][]AppendTest{
appendTestsNFC,
appendTestsNFD,
appendTestsNFKC,
appendTestsNFKD,
}
var appendTestsNFC = []AppendTest{
{"", ascii, ascii},
{"", txt_all, txt_all},
{"\uff9e", grave(30), "\uff9e" + grave(29) + cgj + grave(1)},
{grave(30), "\uff9e", grave(30) + cgj + "\uff9e"},
// Tests designed for Iter.
{ // ordering of non-composing combining characters
"",
"\u0305\u0316",
"\u0316\u0305",
},
{ // segment overflow
"",
"a" + rep(0x0305, maxNonStarters+4) + "\u0316",
"a" + rep(0x0305, maxNonStarters) + cgj + "\u0316" + rep(0x305, 4),
},
}
var appendTestsNFD = []AppendTest{
// TODO: Move some of the tests here.
}
var appendTestsNFKC = []AppendTest{
// empty buffers
{"", "", ""},
{"a", "", "a"},
{"", "a", "a"},
{"", "\u0041\u0307\u0304", "\u01E0"},
// segment split across buffers
{"", "a\u0300b", "\u00E0b"},
{"a", "\u0300b", "\u00E0b"},
{"a", "\u0300\u0316", "\u00E0\u0316"},
{"a", "\u0316\u0300", "\u00E0\u0316"},
{"a", "\u0300a\u0300", "\u00E0\u00E0"},
{"a", "\u0300a\u0300a\u0300", "\u00E0\u00E0\u00E0"},
{"a", "\u0300aaa\u0300aaa\u0300", "\u00E0aa\u00E0aa\u00E0"},
{"a\u0300", "\u0327", "\u00E0\u0327"},
{"a\u0327", "\u0300", "\u00E0\u0327"},
{"a\u0316", "\u0300", "\u00E0\u0316"},
{"\u0041\u0307", "\u0304", "\u01E0"},
// Hangul
{"", "\u110B\u1173", "\uC73C"},
{"", "\u1103\u1161", "\uB2E4"},
{"", "\u110B\u1173\u11B7", "\uC74C"},
{"", "\u320E", "\x28\uAC00\x29"},
{"", "\x28\u1100\u1161\x29", "\x28\uAC00\x29"},
{"\u1103", "\u1161", "\uB2E4"},
{"\u110B", "\u1173\u11B7", "\uC74C"},
{"\u110B\u1173", "\u11B7", "\uC74C"},
{"\uC73C", "\u11B7", "\uC74C"},
// UTF-8 encoding split across buffers
{"a\xCC", "\x80", "\u00E0"},
{"a\xCC", "\x80b", "\u00E0b"},
{"a\xCC", "\x80a\u0300", "\u00E0\u00E0"},
{"a\xCC", "\x80\x80", "\u00E0\x80"},
{"a\xCC", "\x80\xCC", "\u00E0\xCC"},
{"a\u0316\xCC", "\x80a\u0316\u0300", "\u00E0\u0316\u00E0\u0316"},
// ending in incomplete UTF-8 encoding
{"", "\xCC", "\xCC"},
{"a", "\xCC", "a\xCC"},
{"a", "b\xCC", "ab\xCC"},
{"\u0226", "\xCC", "\u0226\xCC"},
// illegal runes
{"", "\x80", "\x80"},
{"", "\x80\x80\x80", "\x80\x80\x80"},
{"", "\xCC\x80\x80\x80", "\xCC\x80\x80\x80"},
{"", "a\x80", "a\x80"},
{"", "a\x80\x80\x80", "a\x80\x80\x80"},
{"", "a\x80\x80\x80\x80\x80\x80", "a\x80\x80\x80\x80\x80\x80"},
{"a", "\x80\x80\x80", "a\x80\x80\x80"},
// overflow
{"", strings.Repeat("\x80", 33), strings.Repeat("\x80", 33)},
{strings.Repeat("\x80", 33), "", strings.Repeat("\x80", 33)},
{strings.Repeat("\x80", 33), strings.Repeat("\x80", 33), strings.Repeat("\x80", 66)},
// overflow of combining characters
{"", grave(34), grave(30) + cgj + grave(4)},
{"", grave(36), grave(30) + cgj + grave(6)},
{grave(29), grave(5), grave(30) + cgj + grave(4)},
{grave(30), grave(4), grave(30) + cgj + grave(4)},
{grave(30), grave(3), grave(30) + cgj + grave(3)},
{grave(30) + "\xCC", "\x80", grave(30) + cgj + grave(1)},
{"", "\uFDFA" + grave(14), "\u0635\u0644\u0649 \u0627\u0644\u0644\u0647 \u0639\u0644\u064a\u0647 \u0648\u0633\u0644\u0645" + grave(14)},
{"", "\uFDFA" + grave(28) + "\u0316", "\u0635\u0644\u0649 \u0627\u0644\u0644\u0647 \u0639\u0644\u064a\u0647 \u0648\u0633\u0644\u0645\u0316" + grave(28)},
// - First rune has a trailing non-starter.
{"\u00d5", grave(30), "\u00d5" + grave(29) + cgj + grave(1)},
// - U+FF9E decomposes into a non-starter in compatibility mode. A CGJ must be
// inserted even when FF9E starts a new segment.
{"\uff9e", grave(30), "\u3099" + grave(29) + cgj + grave(1)},
{grave(30), "\uff9e", grave(30) + cgj + "\u3099"},
// - Many non-starter decompositions in a row causing overflow.
{"", rep(0x340, 31), rep(0x300, 30) + cgj + "\u0300"},
{"", rep(0xFF9E, 31), rep(0x3099, 30) + cgj + "\u3099"},
// weird UTF-8
{"\u00E0\xE1", "\x86", "\u00E0\xE1\x86"},
{"a\u0300\u11B7", "\u0300", "\u00E0\u11B7\u0300"},
{"a\u0300\u11B7\u0300", "\u0300", "\u00E0\u11B7\u0300\u0300"},
{"\u0300", "\xF8\x80\x80\x80\x80\u0300", "\u0300\xF8\x80\x80\x80\x80\u0300"},
{"\u0300", "\xFC\x80\x80\x80\x80\x80\u0300", "\u0300\xFC\x80\x80\x80\x80\x80\u0300"},
{"\xF8\x80\x80\x80\x80\u0300", "\u0300", "\xF8\x80\x80\x80\x80\u0300\u0300"},
{"\xFC\x80\x80\x80\x80\x80\u0300", "\u0300", "\xFC\x80\x80\x80\x80\x80\u0300\u0300"},
{"\xF8\x80\x80\x80", "\x80\u0300\u0300", "\xF8\x80\x80\x80\x80\u0300\u0300"},
{"", strings.Repeat("a\u0316\u0300", 6), strings.Repeat("\u00E0\u0316", 6)},
// large input.
{"", strings.Repeat("a\u0300\u0316", 4000), strings.Repeat("\u00E0\u0316", 4000)},
{"", strings.Repeat("\x80\x80", 4000), strings.Repeat("\x80\x80", 4000)},
{"", "\u0041\u0307\u0304", "\u01E0"},
}
var appendTestsNFKD = []AppendTest{
{"", "a" + grave(64), "a" + grave(30) + cgj + grave(30) + cgj + grave(4)},
{ // segment overflow on unchanged character
"",
"a" + grave(64) + "\u0316",
"a" + grave(30) + cgj + grave(30) + cgj + "\u0316" + grave(4),
},
{ // segment overflow on unchanged character + start value
"",
"a" + grave(98) + "\u0316",
"a" + grave(30) + cgj + grave(30) + cgj + grave(30) + cgj + "\u0316" + grave(8),
},
{ // segment overflow on decomposition. (U+0340 decomposes to U+0300.)
"",
"a" + grave(59) + "\u0340",
"a" + grave(30) + cgj + grave(30),
},
{ // segment overflow on non-starter decomposition
"",
"a" + grave(33) + "\u0340" + grave(30) + "\u0320",
"a" + grave(30) + cgj + grave(30) + cgj + "\u0320" + grave(4),
},
{ // start value after ASCII overflow
"",
rep('a', segSize) + grave(32) + "\u0320",
rep('a', segSize) + grave(30) + cgj + "\u0320" + grave(2),
},
{ // Jamo overflow
"",
"\u1100\u1161" + grave(30) + "\u0320" + grave(2),
"\u1100\u1161" + grave(29) + cgj + "\u0320" + grave(3),
},
{ // Hangul
"",
"\uac00",
"\u1100\u1161",
},
{ // Hangul overflow
"",
"\uac00" + grave(32) + "\u0320",
"\u1100\u1161" + grave(29) + cgj + "\u0320" + grave(3),
},
{ // Hangul overflow in Hangul mode.
"",
"\uac00\uac00" + grave(32) + "\u0320",
"\u1100\u1161\u1100\u1161" + grave(29) + cgj + "\u0320" + grave(3),
},
{ // Hangul overflow in Hangul mode.
"",
strings.Repeat("\uac00", 3) + grave(32) + "\u0320",
strings.Repeat("\u1100\u1161", 3) + grave(29) + cgj + "\u0320" + grave(3),
},
{ // start value after cc=0
"",
"您您" + grave(34) + "\u0320",
"您您" + grave(30) + cgj + "\u0320" + grave(4),
},
{ // start value after normalization
"",
"\u0300\u0320a" + grave(34) + "\u0320",
"\u0320\u0300a" + grave(30) + cgj + "\u0320" + grave(4),
},
}
func TestAppend(t *testing.T) {
runNormTests(t, "Append", func(f Form, out []byte, s string) []byte {
return f.Append(out, []byte(s)...)
})
}
func TestAppendString(t *testing.T) {
runNormTests(t, "AppendString", func(f Form, out []byte, s string) []byte {
return f.AppendString(out, s)
})
}
func TestBytes(t *testing.T) {
runNormTests(t, "Bytes", func(f Form, out []byte, s string) []byte {
buf := []byte{}
buf = append(buf, out...)
buf = append(buf, s...)
return f.Bytes(buf)
})
}
func TestString(t *testing.T) {
runNormTests(t, "String", func(f Form, out []byte, s string) []byte {
outs := string(out) + s
return []byte(f.String(outs))
})
}
func appendBench(f Form, in []byte) func() {
buf := make([]byte, 0, 4*len(in))
return func() {
f.Append(buf, in...)
}
}
func bytesBench(f Form, in []byte) func() {
return func() {
f.Bytes(in)
}
}
func iterBench(f Form, in []byte) func() {
iter := Iter{}
return func() {
iter.Init(f, in)
for !iter.Done() {
iter.Next()
}
}
}
func transformBench(f Form, in []byte) func() {
buf := make([]byte, 4*len(in))
return func() {
if _, n, err := f.Transform(buf, in, true); err != nil || len(in) != n {
log.Panic(n, len(in), err)
}
}
}
func readerBench(f Form, in []byte) func() {
buf := make([]byte, 4*len(in))
return func() {
r := f.Reader(bytes.NewReader(in))
var err error
for err == nil {
_, err = r.Read(buf)
}
if err != io.EOF {
panic("")
}
}
}
func writerBench(f Form, in []byte) func() {
buf := make([]byte, 0, 4*len(in))
return func() {
r := f.Writer(bytes.NewBuffer(buf))
if _, err := r.Write(in); err != nil {
panic("")
}
}
}
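// appendBenchmarks collects one benchmark closure per code path (Append,
// Iter, Transform, Reader and Writer) for the given form and input.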
func appendBenchmarks(bm []func(), f Form, in []byte) []func() {
bm = append(bm, appendBench(f, in))
bm = append(bm, iterBench(f, in))
bm = append(bm, transformBench(f, in))
bm = append(bm, readerBench(f, in))
bm = append(bm, writerBench(f, in))
return bm
}
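// doFormBenchmark first normalizes s into form inf, then measures converting
// that input to form f across all of the closures from appendBenchmarks.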
func doFormBenchmark(b *testing.B, inf, f Form, s string) {
b.StopTimer()
in := inf.Bytes([]byte(s))
bm := appendBenchmarks(nil, f, in)
b.SetBytes(int64(len(in) * len(bm)))
b.StartTimer()
for i := 0; i < b.N; i++ {
for _, fn := range bm {
fn()
}
}
}
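// doSingle benchmarks a single code path in isolation, always using NFC.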
func doSingle(b *testing.B, f func(Form, []byte) func(), s []byte) {
b.StopTimer()
fn := f(NFC, s)
b.SetBytes(int64(len(s)))
b.StartTimer()
for i := 0; i < b.N; i++ {
fn()
}
}
var (
smallNoChange = []byte("nörmalization")
smallChange = []byte("No\u0308rmalization")
ascii = strings.Repeat("There is nothing to change here! ", 500)
)
func lowerBench(f Form, in []byte) func() {
// Use package strings instead of bytes as it doesn't allocate memory
// if there aren't any changes.
s := string(in)
return func() {
strings.ToLower(s)
}
}
func BenchmarkLowerCaseNoChange(b *testing.B) {
doSingle(b, lowerBench, smallNoChange)
}
func BenchmarkLowerCaseChange(b *testing.B) {
doSingle(b, lowerBench, smallChange)
}
func quickSpanBench(f Form, in []byte) func() {
return func() {
f.QuickSpan(in)
}
}
func BenchmarkQuickSpanChangeNFC(b *testing.B) {
doSingle(b, quickSpanBench, smallNoChange)
}
func BenchmarkBytesNoChangeNFC(b *testing.B) {
doSingle(b, bytesBench, smallNoChange)
}
func BenchmarkBytesChangeNFC(b *testing.B) {
doSingle(b, bytesBench, smallChange)
}
func BenchmarkAppendNoChangeNFC(b *testing.B) {
doSingle(b, appendBench, smallNoChange)
}
func BenchmarkAppendChangeNFC(b *testing.B) {
doSingle(b, appendBench, smallChange)
}
func BenchmarkAppendLargeNFC(b *testing.B) {
doSingle(b, appendBench, txt_all_bytes)
}
func BenchmarkIterNoChangeNFC(b *testing.B) {
doSingle(b, iterBench, smallNoChange)
}
func BenchmarkIterChangeNFC(b *testing.B) {
doSingle(b, iterBench, smallChange)
}
func BenchmarkIterLargeNFC(b *testing.B) {
doSingle(b, iterBench, txt_all_bytes)
}
func BenchmarkTransformNoChangeNFC(b *testing.B) {
doSingle(b, transformBench, smallNoChange)
}
func BenchmarkTransformChangeNFC(b *testing.B) {
doSingle(b, transformBench, smallChange)
}
func BenchmarkTransformLargeNFC(b *testing.B) {
doSingle(b, transformBench, txt_all_bytes)
}
func BenchmarkNormalizeAsciiNFC(b *testing.B) {
doFormBenchmark(b, NFC, NFC, ascii)
}
func BenchmarkNormalizeAsciiNFD(b *testing.B) {
doFormBenchmark(b, NFC, NFD, ascii)
}
func BenchmarkNormalizeAsciiNFKC(b *testing.B) {
doFormBenchmark(b, NFC, NFKC, ascii)
}
func BenchmarkNormalizeAsciiNFKD(b *testing.B) {
doFormBenchmark(b, NFC, NFKD, ascii)
}
func BenchmarkNormalizeNFC2NFC(b *testing.B) {
doFormBenchmark(b, NFC, NFC, txt_all)
}
func BenchmarkNormalizeNFC2NFD(b *testing.B) {
doFormBenchmark(b, NFC, NFD, txt_all)
}
func BenchmarkNormalizeNFD2NFC(b *testing.B) {
doFormBenchmark(b, NFD, NFC, txt_all)
}
func BenchmarkNormalizeNFD2NFD(b *testing.B) {
doFormBenchmark(b, NFD, NFD, txt_all)
}
// Hangul is often special-cased, so we test it separately.
func BenchmarkNormalizeHangulNFC2NFC(b *testing.B) {
doFormBenchmark(b, NFC, NFC, txt_kr)
}
func BenchmarkNormalizeHangulNFC2NFD(b *testing.B) {
doFormBenchmark(b, NFC, NFD, txt_kr)
}
func BenchmarkNormalizeHangulNFD2NFC(b *testing.B) {
doFormBenchmark(b, NFD, NFC, txt_kr)
}
func BenchmarkNormalizeHangulNFD2NFD(b *testing.B) {
doFormBenchmark(b, NFD, NFD, txt_kr)
}
var forms = []Form{NFC, NFD, NFKC, NFKD}
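// doTextBenchmark exercises every normalization form over s and reports the
// combined throughput.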
func doTextBenchmark(b *testing.B, s string) {
b.StopTimer()
in := []byte(s)
bm := []func(){}
for _, f := range forms {
bm = appendBenchmarks(bm, f, in)
}
b.SetBytes(int64(len(s) * len(bm)))
b.StartTimer()
for i := 0; i < b.N; i++ {
for _, f := range bm {
f()
}
}
}
func BenchmarkCanonicalOrdering(b *testing.B) {
doTextBenchmark(b, txt_canon)
}
func BenchmarkExtendedLatin(b *testing.B) {
doTextBenchmark(b, txt_vn)
}
func BenchmarkMiscTwoByteUtf8(b *testing.B) {
doTextBenchmark(b, twoByteUtf8)
}
func BenchmarkMiscThreeByteUtf8(b *testing.B) {
doTextBenchmark(b, threeByteUtf8)
}
func BenchmarkHangul(b *testing.B) {
doTextBenchmark(b, txt_kr)
}
func BenchmarkJapanese(b *testing.B) {
doTextBenchmark(b, txt_jp)
}
func BenchmarkChinese(b *testing.B) {
doTextBenchmark(b, txt_cn)
}
func BenchmarkOverflow(b *testing.B) {
doTextBenchmark(b, overflow)
}
var overflow = string(bytes.Repeat([]byte("\u035D"), 4096)) + "\u035B"
// Tests sampled from the Canonical ordering tests (Part 2) of
// http://unicode.org/Public/UNIDATA/NormalizationTest.txt
const txt_canon = `\u0061\u0315\u0300\u05AE\u0300\u0062 \u0061\u0300\u0315\u0300\u05AE\u0062
\u0061\u0302\u0315\u0300\u05AE\u0062 \u0061\u0307\u0315\u0300\u05AE\u0062
\u0061\u0315\u0300\u05AE\u030A\u0062 \u0061\u059A\u0316\u302A\u031C\u0062
\u0061\u032E\u059A\u0316\u302A\u0062 \u0061\u0338\u093C\u0334\u0062
\u0061\u059A\u0316\u302A\u0339 \u0061\u0341\u0315\u0300\u05AE\u0062
\u0061\u0348\u059A\u0316\u302A\u0062 \u0061\u0361\u0345\u035D\u035C\u0062
\u0061\u0366\u0315\u0300\u05AE\u0062 \u0061\u0315\u0300\u05AE\u0486\u0062
\u0061\u05A4\u059A\u0316\u302A\u0062 \u0061\u0315\u0300\u05AE\u0613\u0062
\u0061\u0315\u0300\u05AE\u0615\u0062 \u0061\u0617\u0315\u0300\u05AE\u0062
\u0061\u0619\u0618\u064D\u064E\u0062 \u0061\u0315\u0300\u05AE\u0654\u0062
\u0061\u0315\u0300\u05AE\u06DC\u0062 \u0061\u0733\u0315\u0300\u05AE\u0062
\u0061\u0744\u059A\u0316\u302A\u0062 \u0061\u0315\u0300\u05AE\u0745\u0062
\u0061\u09CD\u05B0\u094D\u3099\u0062 \u0061\u0E38\u0E48\u0E38\u0C56\u0062
\u0061\u0EB8\u0E48\u0E38\u0E49\u0062 \u0061\u0F72\u0F71\u0EC8\u0F71\u0062
\u0061\u1039\u05B0\u094D\u3099\u0062 \u0061\u05B0\u094D\u3099\u1A60\u0062
\u0061\u3099\u093C\u0334\u1BE6\u0062 \u0061\u3099\u093C\u0334\u1C37\u0062
\u0061\u1CD9\u059A\u0316\u302A\u0062 \u0061\u2DED\u0315\u0300\u05AE\u0062
\u0061\u2DEF\u0315\u0300\u05AE\u0062 \u0061\u302D\u302E\u059A\u0316\u0062`
// Taken from http://creativecommons.org/licenses/by-sa/3.0/vn/
const txt_vn = `Với các điều kiện sau: Ghi nhận công của tác giả.
Nếu bạn sử dụng, chuyển đổi, hoặc xây dựng dự án từ
nội dung được chia sẻ này, bạn phải áp dụng giấy phép này hoặc
một giấy phép khác có các điều khoản tương tự như giấy phép này
cho dự án của bạn. Hiểu rằng: Miễn — Bất kỳ các điều kiện nào
trên đây cũng có thể được miễn bỏ nếu bạn được sự cho phép của
người sở hữu bản quyền. Phạm vi công chúng — Khi tác phẩm hoặc
bất kỳ chương nào của tác phẩm đã trong vùng dành cho công
chúng theo quy định của pháp luật thì tình trạng của nó không
bị ảnh hưởng bởi giấy phép trong bất kỳ trường hợp nào.`
// Taken from http://creativecommons.org/licenses/by-sa/1.0/deed.ru
const txt_ru = `При обязательном соблюдении следующих условий:
Attribution — Вы должны атрибутировать произведение (указывать
автора и источник) в порядке, предусмотренном автором или
лицензиаром (но только так, чтобы никоим образом не подразумевалось,
что они поддерживают вас или использование вами данного произведения).
Υπό τις ακόλουθες προϋποθέσεις:`
// Taken from http://creativecommons.org/licenses/by-sa/3.0/gr/
const txt_gr = `Αναφορά Δημιουργού — Θα πρέπει να κάνετε την αναφορά στο έργο με τον
τρόπο που έχει οριστεί από το δημιουργό ή το χορηγούντο την άδεια
(χωρίς όμως να εννοείται με οποιονδήποτε τρόπο ότι εγκρίνουν εσάς ή
τη χρήση του έργου από εσάς). Παρόμοια Διανομή — Εάν αλλοιώσετε,
τροποποιήσετε ή δημιουργήσετε περαιτέρω βασισμένοι στο έργο θα
μπορείτε να διανέμετε το έργο που θα προκύψει μόνο με την ίδια ή
παρόμοια άδεια.`
// Taken from http://creativecommons.org/licenses/by-sa/3.0/deed.ar
const txt_ar = `بموجب الشروط التالية نسب المصنف — يجب عليك أن
تنسب العمل بالطريقة التي تحددها المؤلف أو المرخص (ولكن ليس بأي حال من
الأحوال أن توحي وتقترح بتحول أو استخدامك للعمل).
المشاركة على قدم المساواة — إذا كنت يعدل ، والتغيير ، أو الاستفادة
من هذا العمل ، قد ينتج عن توزيع العمل إلا في ظل تشابه او تطابق فى واحد
لهذا الترخيص.`
// Taken from http://creativecommons.org/licenses/by-sa/1.0/il/
const txt_il = `בכפוף לתנאים הבאים: ייחוס — עליך לייחס את היצירה (לתת קרדיט) באופן
המצויין על-ידי היוצר או מעניק הרישיון (אך לא בשום אופן המרמז על כך
שהם תומכים בך או בשימוש שלך ביצירה). שיתוף זהה — אם תחליט/י לשנות,
לעבד או ליצור יצירה נגזרת בהסתמך על יצירה זו, תוכל/י להפיץ את יצירתך
החדשה רק תחת אותו הרישיון או רישיון דומה לרישיון זה.`
const twoByteUtf8 = txt_ru + txt_gr + txt_ar + txt_il
// Taken from http://creativecommons.org/licenses/by-sa/2.0/kr/
const txt_kr = `다음과 같은 조건을 따라야 합니다: 저작자표시
(Attribution) — 저작자나 이용허락자가 정한 방법으로 저작물의
원저작자를 표시하여야 합니다(그러나 원저작자가 이용자나 이용자의
이용을 보증하거나 추천한다는 의미로 표시해서는 안됩니다).
동일조건변경허락 — 이 저작물을 이용하여 만든 이차적 저작물에는 본
라이선스와 동일한 라이선스를 적용해야 합니다.`
// Taken from http://creativecommons.org/licenses/by-sa/3.0/th/
const txt_th = `ภายใต้เงื่อนไข ดังต่อไปนี้ : แสดงที่มา — คุณต้องแสดงที่
มาของงานดังกล่าว ตามรูปแบบที่ผู้สร้างสรรค์หรือผู้อนุญาตกำหนด (แต่
ไม่ใช่ในลักษณะที่ว่า พวกเขาสนับสนุนคุณหรือสนับสนุนการที่
คุณนำงานไปใช้) อนุญาตแบบเดียวกัน — หากคุณดัดแปลง เปลี่ยนรูป หรื
อต่อเติมงานนี้ คุณต้องใช้สัญญาอนุญาตแบบเดียวกันหรือแบบที่เหมื
อนกับสัญญาอนุญาตที่ใช้กับงานนี้เท่านั้น`
const threeByteUtf8 = txt_th
// Taken from http://creativecommons.org/licenses/by-sa/2.0/jp/
const txt_jp = `あなたの従うべき条件は以下の通りです。
表示 — あなたは原著作者のクレジットを表示しなければなりません。
継承 — もしあなたがこの作品を改変、変形または加工した場合、
あなたはその結果生じた作品をこの作品と同一の許諾条件の下でのみ
頒布することができます。`
// http://creativecommons.org/licenses/by-sa/2.5/cn/
const txt_cn = `您可以自由: 复制、发行、展览、表演、放映、
广播或通过信息网络传播本作品 创作演绎作品
对本作品进行商业性使用 惟须遵守下列条件:
署名 — 您必须按照作者或者许可人指定的方式对作品进行署名。
相同方式共享 — 如果您改变、转换本作品或者以本作品为基础进行创作,
您只能采用与本协议相同的许可协议发布基于本作品的演绎作品。`
const txt_cjk = txt_cn + txt_jp + txt_kr
const txt_all = txt_vn + twoByteUtf8 + threeByteUtf8 + txt_cjk
var txt_all_bytes = []byte(txt_all)
| Godeps/_workspace/src/code.google.com/p/go.text/unicode/norm/normalize_test.go | 0 | https://github.com/syncthing/syncthing/commit/59a85c1d751c85e585ec93398c3ba5c50bdef91f | [
0.02808442898094654,
0.00045191479148343205,
0.00016352738020941615,
0.00017154618399217725,
0.0026616074610501528
] |
{
"id": 4,
"code_window": [
"\tdefer m.leveldbPanicWorkaround()\n",
"\n",
"\tm.fmut.RLock()\n",
"\tdefer m.fmut.RUnlock()\n",
"\tnblocks := 0\n",
"\tif rf, ok := m.folderFiles[folder]; ok {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [],
"file_path": "internal/model/model.go",
"type": "replace",
"edit_start_line_idx": 404
} | -----BEGIN CERTIFICATE-----
MIID5TCCAk+gAwIBAgIIBYqoKiSgB+owCwYJKoZIhvcNAQELMBQxEjAQBgNVBAMT
CXN5bmN0aGluZzAeFw0xNDA5MTQyMjIzMzVaFw00OTEyMzEyMzU5NTlaMBQxEjAQ
BgNVBAMTCXN5bmN0aGluZzCCAaIwDQYJKoZIhvcNAQEBBQADggGPADCCAYoCggGB
AKZK/sjb6ZuVVHPvo77Cp5E8LfiznfoIWJRoX/MczE99iDyFZm1Wf9GFT8WhXICM
C2kgGbr/gAxhkeEcZ500vhA2C+aois1DGcb+vNY53I0qp3vSUl4ow55R0xJ4UjpJ
nJWF8p9iPDMwMP6WQ/E/ekKRKCOt0TFj4xqtiSt0pxPLeHfKVpWXxqIVDhnsoGQ+
NWuUjM3FkmEmhp5DdRtwskiZZYz1zCgoHkFzKt/+IxjCuzbO0+Ti8R3b/d0A+WLN
LHr0SjatajLbHebA+9c3ts6t3V5YzcMqDJ4MyxFtRoXFJjEbcM9IqKQE8t8TIhv8
a302yRikJ2uPx+fXJGospnmWCbaK2rViPbvICSgvSBA3As0f3yPzXsEt+aW5NmDV
fLBX1DU7Ow6oBqZTlI+STrzZR1qfvIuweIWoPqnPNd4sxuoxAK50ViUKdOtSYL/a
F0eM3bqbp2ozhct+Bfmqu2oI/RHXe+RUfAXrlFQ8p6jcISW2ip+oiBtR4GZkncI9
YQIDAQABoz8wPTAOBgNVHQ8BAf8EBAMCAKAwHQYDVR0lBBYwFAYIKwYBBQUHAwEG
CCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwCwYJKoZIhvcNAQELA4IBgQBsYc5XVQy5
aJVdwx+mAKiuCs5ZCvV4H4VWY9XUwEJuUUD3yXw2xyzuQl5+lOxfiQcaudhVwARC
Dao75MUctXmx1YU+J5G31cGdC9kbxWuo1xypkK+2Zl+Kwh65aod3OkHVz9oNkKpf
JnXbdph4UiFJzijSruXDDaerrQdABUvlusPozZn8vMwZ21Ls/eNIOJvA0S2d2jep
fvmu7yQPejDp7zcgPdmneuZqmUyXLxxFopYqHqFQVM8f+Y8iZ8HnMiAJgLKQcmro
pp1z/NY0Xr0pLyBY5d/sO+tZmQkyUEWegHtEtQQOO+x8BWinDEAurej/YvZTWTmN
+YoUvGdKyV6XfC6WPFcUDFHY4KPSqS3xoLmoVV4xNjJU3aG/xL4uDencNZR/UFNw
wKsdvm9SX4TpSLlQa0wu1iNv7QyeR4ZKgaBNSwp2rxpatOi7TTs9KRPfjLFLpYAg
bIons/a890SIxpuneuhQZkH63t930EXIZ+9GkU0aUs7MFg5cCmwmlvE=
-----END CERTIFICATE-----
| test/h1/https-cert.pem | 0 | https://github.com/syncthing/syncthing/commit/59a85c1d751c85e585ec93398c3ba5c50bdef91f | [
0.003643218195065856,
0.0014156039105728269,
0.00016648219025228173,
0.0004371112154331058,
0.001579031115397811
] |
{
"id": 5,
"code_window": [
"\tif rf, ok := m.folderFiles[folder]; ok {\n",
"\t\tfs := make([]protocol.FileInfo, 0, maxFiles)\n",
"\t\trf.WithNeed(protocol.LocalDeviceID, func(f protocol.FileIntf) bool {\n",
"\t\t\tfi := f.(protocol.FileInfo)\n",
"\t\t\tfs = append(fs, fi)\n",
"\t\t\tnblocks += len(fi.Blocks)\n",
"\t\t\treturn (maxFiles <= 0 || len(fs) < maxFiles) && (maxBlocks <= 0 || nblocks < maxBlocks)\n",
"\t\t})\n",
"\t\treturn fs\n",
"\t}\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tfs := make([]protocol.FileInfoTruncated, 0, maxFiles)\n",
"\t\trf.WithNeedTruncated(protocol.LocalDeviceID, func(f protocol.FileIntf) bool {\n",
"\t\t\tfs = append(fs, f.(protocol.FileInfoTruncated))\n",
"\t\t\treturn maxFiles <= 0 || len(fs) < maxFiles\n"
],
"file_path": "internal/model/model.go",
"type": "replace",
"edit_start_line_idx": 406
} | // Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"mime"
"net"
"net/http"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"time"
"code.google.com/p/go.crypto/bcrypt"
"github.com/calmh/logger"
"github.com/syncthing/syncthing/internal/auto"
"github.com/syncthing/syncthing/internal/config"
"github.com/syncthing/syncthing/internal/discover"
"github.com/syncthing/syncthing/internal/events"
"github.com/syncthing/syncthing/internal/model"
"github.com/syncthing/syncthing/internal/osutil"
"github.com/syncthing/syncthing/internal/protocol"
"github.com/syncthing/syncthing/internal/upgrade"
"github.com/vitrun/qart/qr"
)
type guiError struct {
Time time.Time
Error string
}
var (
configInSync = true
guiErrors = []guiError{}
guiErrorsMut sync.Mutex
modt = time.Now().UTC().Format(http.TimeFormat)
eventSub *events.BufferedSubscription
)
func init() {
l.AddHandler(logger.LevelWarn, showGuiError)
sub := events.Default.Subscribe(events.AllEvents)
eventSub = events.NewBufferedSubscription(sub, 1000)
}
func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) error {
var err error
cert, err := loadCert(confDir, "https-")
if err != nil {
l.Infoln("Loading HTTPS certificate:", err)
l.Infoln("Creating new HTTPS certificate")
newCertificate(confDir, "https-")
cert, err = loadCert(confDir, "https-")
}
if err != nil {
return err
}
tlsCfg := &tls.Config{
Certificates: []tls.Certificate{cert},
ServerName: "syncthing",
}
rawListener, err := net.Listen("tcp", cfg.Address)
if err != nil {
return err
}
listener := &DowngradingListener{rawListener, tlsCfg}
// The GET handlers
getRestMux := http.NewServeMux()
getRestMux.HandleFunc("/rest/ping", restPing)
getRestMux.HandleFunc("/rest/completion", withModel(m, restGetCompletion))
getRestMux.HandleFunc("/rest/config", restGetConfig)
getRestMux.HandleFunc("/rest/config/sync", restGetConfigInSync)
getRestMux.HandleFunc("/rest/connections", withModel(m, restGetConnections))
getRestMux.HandleFunc("/rest/autocomplete/directory", restGetAutocompleteDirectory)
getRestMux.HandleFunc("/rest/discovery", restGetDiscovery)
getRestMux.HandleFunc("/rest/errors", restGetErrors)
getRestMux.HandleFunc("/rest/events", restGetEvents)
getRestMux.HandleFunc("/rest/ignores", withModel(m, restGetIgnores))
getRestMux.HandleFunc("/rest/lang", restGetLang)
getRestMux.HandleFunc("/rest/model", withModel(m, restGetModel))
getRestMux.HandleFunc("/rest/need", withModel(m, restGetNeed))
getRestMux.HandleFunc("/rest/deviceid", restGetDeviceID)
getRestMux.HandleFunc("/rest/report", withModel(m, restGetReport))
getRestMux.HandleFunc("/rest/system", restGetSystem)
getRestMux.HandleFunc("/rest/upgrade", restGetUpgrade)
getRestMux.HandleFunc("/rest/version", restGetVersion)
getRestMux.HandleFunc("/rest/stats/device", withModel(m, restGetDeviceStats))
// Debug endpoints, not for general use
getRestMux.HandleFunc("/rest/debug/peerCompletion", withModel(m, restGetPeerCompletion))
// The POST handlers
postRestMux := http.NewServeMux()
postRestMux.HandleFunc("/rest/ping", restPing)
postRestMux.HandleFunc("/rest/config", withModel(m, restPostConfig))
postRestMux.HandleFunc("/rest/discovery/hint", restPostDiscoveryHint)
postRestMux.HandleFunc("/rest/error", restPostError)
postRestMux.HandleFunc("/rest/error/clear", restClearErrors)
postRestMux.HandleFunc("/rest/ignores", withModel(m, restPostIgnores))
postRestMux.HandleFunc("/rest/model/override", withModel(m, restPostOverride))
postRestMux.HandleFunc("/rest/reset", restPostReset)
postRestMux.HandleFunc("/rest/restart", restPostRestart)
postRestMux.HandleFunc("/rest/shutdown", restPostShutdown)
postRestMux.HandleFunc("/rest/upgrade", restPostUpgrade)
postRestMux.HandleFunc("/rest/scan", withModel(m, restPostScan))
// A handler that splits requests between the two above and disables
// caching
restMux := noCacheMiddleware(getPostHandler(getRestMux, postRestMux))
// The main routing handler
mux := http.NewServeMux()
mux.Handle("/rest/", restMux)
mux.HandleFunc("/qr/", getQR)
// Serve compiled in assets unless an asset directory was set (for development)
mux.Handle("/", embeddedStatic(assetDir))
// Wrap everything in CSRF protection. The /rest prefix should be
// protected, other requests will grant cookies.
handler := csrfMiddleware("/rest", cfg.APIKey, mux)
// Add our version as a header to responses
handler = withVersionMiddleware(handler)
// Wrap everything in basic auth, if user/password is set.
if len(cfg.User) > 0 && len(cfg.Password) > 0 {
handler = basicAuthAndSessionMiddleware(cfg, handler)
}
// Redirect to HTTPS if we are supposed to
if cfg.UseTLS {
handler = redirectToHTTPSMiddleware(handler)
}
srv := http.Server{
Handler: handler,
ReadTimeout: 2 * time.Second,
}
go func() {
err := srv.Serve(listener)
if err != nil {
panic(err)
}
}()
return nil
}
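// getPostHandler routes GET and POST requests to their respective handlers
// and rejects any other method with 405 Method Not Allowed.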
func getPostHandler(get, post http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case "GET":
get.ServeHTTP(w, r)
case "POST":
post.ServeHTTP(w, r)
default:
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
}
})
}
func redirectToHTTPSMiddleware(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Add a generous Access-Control-Allow-Origin header since we may be
		// redirecting REST requests across protocols.
w.Header().Add("Access-Control-Allow-Origin", "*")
if r.TLS == nil {
// Redirect HTTP requests to HTTPS
r.URL.Host = r.Host
r.URL.Scheme = "https"
http.Redirect(w, r, r.URL.String(), http.StatusFound)
} else {
h.ServeHTTP(w, r)
}
})
}
func noCacheMiddleware(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Cache-Control", "no-cache")
h.ServeHTTP(w, r)
})
}
func withVersionMiddleware(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("X-Syncthing-Version", Version)
h.ServeHTTP(w, r)
})
}
func withModel(m *model.Model, h func(m *model.Model, w http.ResponseWriter, r *http.Request)) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
h(m, w, r)
}
}
func restPing(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(map[string]string{
"ping": "pong",
})
}
func restGetVersion(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(map[string]string{
"version": Version,
"longVersion": LongVersion,
"os": runtime.GOOS,
"arch": runtime.GOARCH,
})
}
func restGetCompletion(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var folder = qs.Get("folder")
var deviceStr = qs.Get("device")
device, err := protocol.DeviceIDFromString(deviceStr)
if err != nil {
http.Error(w, err.Error(), 500)
return
}
res := map[string]float64{
"completion": m.Completion(device, folder),
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restGetModel(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var folder = qs.Get("folder")
var res = make(map[string]interface{})
res["invalid"] = cfg.Folders()[folder].Invalid
globalFiles, globalDeleted, globalBytes := m.GlobalSize(folder)
res["globalFiles"], res["globalDeleted"], res["globalBytes"] = globalFiles, globalDeleted, globalBytes
localFiles, localDeleted, localBytes := m.LocalSize(folder)
res["localFiles"], res["localDeleted"], res["localBytes"] = localFiles, localDeleted, localBytes
needFiles, needBytes := m.NeedSize(folder)
res["needFiles"], res["needBytes"] = needFiles, needBytes
res["inSyncFiles"], res["inSyncBytes"] = globalFiles-needFiles, globalBytes-needBytes
res["state"], res["stateChanged"] = m.State(folder)
res["version"] = m.CurrentLocalVersion(folder) + m.RemoteLocalVersion(folder)
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restPostOverride(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var folder = qs.Get("folder")
go m.Override(folder)
}
func restGetNeed(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var folder = qs.Get("folder")
files := m.NeedFolderFilesLimited(folder, 100, 2500) // max 100 files or 2500 blocks
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(files)
}
func restGetConnections(m *model.Model, w http.ResponseWriter, r *http.Request) {
var res = m.ConnectionStats()
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restGetDeviceStats(m *model.Model, w http.ResponseWriter, r *http.Request) {
var res = m.DeviceStatistics()
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restGetConfig(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(cfg.Raw())
}
func restPostConfig(m *model.Model, w http.ResponseWriter, r *http.Request) {
var newCfg config.Configuration
err := json.NewDecoder(r.Body).Decode(&newCfg)
if err != nil {
l.Warnln("decoding posted config:", err)
http.Error(w, err.Error(), 500)
return
} else {
if newCfg.GUI.Password != cfg.GUI().Password {
if newCfg.GUI.Password != "" {
hash, err := bcrypt.GenerateFromPassword([]byte(newCfg.GUI.Password), 0)
if err != nil {
l.Warnln("bcrypting password:", err)
http.Error(w, err.Error(), 500)
return
} else {
newCfg.GUI.Password = string(hash)
}
}
}
// Start or stop usage reporting as appropriate
if curAcc := cfg.Options().URAccepted; newCfg.Options.URAccepted > curAcc {
// UR was enabled
newCfg.Options.URAccepted = usageReportVersion
err := sendUsageReport(m)
if err != nil {
l.Infoln("Usage report:", err)
}
go usageReportingLoop(m)
} else if newCfg.Options.URAccepted < curAcc {
// UR was disabled
newCfg.Options.URAccepted = -1
stopUsageReporting()
}
// Activate and save
configInSync = !config.ChangeRequiresRestart(cfg.Raw(), newCfg)
cfg.Replace(newCfg)
cfg.Save()
}
}
func restGetConfigInSync(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(map[string]bool{"configInSync": configInSync})
}
func restPostRestart(w http.ResponseWriter, r *http.Request) {
flushResponse(`{"ok": "restarting"}`, w)
go restart()
}
func restPostReset(w http.ResponseWriter, r *http.Request) {
flushResponse(`{"ok": "resetting folders"}`, w)
resetFolders()
go restart()
}
func restPostShutdown(w http.ResponseWriter, r *http.Request) {
flushResponse(`{"ok": "shutting down"}`, w)
go shutdown()
}
func flushResponse(s string, w http.ResponseWriter) {
w.Write([]byte(s + "\n"))
f := w.(http.Flusher)
f.Flush()
}
var cpuUsagePercent [10]float64 // The last ten seconds
var cpuUsageLock sync.RWMutex
func restGetSystem(w http.ResponseWriter, r *http.Request) {
var m runtime.MemStats
runtime.ReadMemStats(&m)
tilde, _ := osutil.ExpandTilde("~")
res := make(map[string]interface{})
res["myID"] = myID.String()
res["goroutines"] = runtime.NumGoroutine()
res["alloc"] = m.Alloc
res["sys"] = m.Sys - m.HeapReleased
res["tilde"] = tilde
if cfg.Options().GlobalAnnEnabled && discoverer != nil {
res["extAnnounceOK"] = discoverer.ExtAnnounceOK()
}
cpuUsageLock.RLock()
var cpusum float64
for _, p := range cpuUsagePercent {
cpusum += p
}
cpuUsageLock.RUnlock()
res["cpuPercent"] = cpusum / 10
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restGetErrors(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
guiErrorsMut.Lock()
json.NewEncoder(w).Encode(map[string][]guiError{"errors": guiErrors})
guiErrorsMut.Unlock()
}
func restPostError(w http.ResponseWriter, r *http.Request) {
bs, _ := ioutil.ReadAll(r.Body)
r.Body.Close()
showGuiError(0, string(bs))
}
func restClearErrors(w http.ResponseWriter, r *http.Request) {
guiErrorsMut.Lock()
guiErrors = []guiError{}
guiErrorsMut.Unlock()
}
func showGuiError(l logger.LogLevel, err string) {
guiErrorsMut.Lock()
guiErrors = append(guiErrors, guiError{time.Now(), err})
if len(guiErrors) > 5 {
guiErrors = guiErrors[len(guiErrors)-5:]
}
guiErrorsMut.Unlock()
}
func restPostDiscoveryHint(w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var device = qs.Get("device")
var addr = qs.Get("addr")
if len(device) != 0 && len(addr) != 0 && discoverer != nil {
discoverer.Hint(device, []string{addr})
}
}
func restGetDiscovery(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
devices := map[string][]discover.CacheEntry{}
if discoverer != nil {
		// Device IDs can't be marshalled as map keys, so we manually rebuild
		// this map using strings. The discoverer may be nil if discovery has
		// not started yet.
for device, entries := range discoverer.All() {
devices[device.String()] = entries
}
}
json.NewEncoder(w).Encode(devices)
}
func restGetReport(m *model.Model, w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(reportData(m))
}
func restGetIgnores(m *model.Model, w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
w.Header().Set("Content-Type", "application/json; charset=utf-8")
ignores, patterns, err := m.GetIgnores(qs.Get("folder"))
if err != nil {
http.Error(w, err.Error(), 500)
return
}
json.NewEncoder(w).Encode(map[string][]string{
"ignore": ignores,
"patterns": patterns,
})
}
func restPostIgnores(m *model.Model, w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
var data map[string][]string
err := json.NewDecoder(r.Body).Decode(&data)
r.Body.Close()
if err != nil {
http.Error(w, err.Error(), 500)
return
}
err = m.SetIgnores(qs.Get("folder"), data["ignore"])
if err != nil {
http.Error(w, err.Error(), 500)
return
}
restGetIgnores(m, w, r)
}
func restGetEvents(w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
sinceStr := qs.Get("since")
limitStr := qs.Get("limit")
since, _ := strconv.Atoi(sinceStr)
limit, _ := strconv.Atoi(limitStr)
w.Header().Set("Content-Type", "application/json; charset=utf-8")
// Flush before blocking, to indicate that we've received the request
// and that it should not be retried.
f := w.(http.Flusher)
f.Flush()
evs := eventSub.Since(since, nil)
if 0 < limit && limit < len(evs) {
evs = evs[len(evs)-limit:]
}
json.NewEncoder(w).Encode(evs)
}
func restGetUpgrade(w http.ResponseWriter, r *http.Request) {
rel, err := upgrade.LatestRelease(strings.Contains(Version, "-beta"))
if err != nil {
http.Error(w, err.Error(), 500)
return
}
res := make(map[string]interface{})
res["running"] = Version
res["latest"] = rel.Tag
res["newer"] = upgrade.CompareVersions(rel.Tag, Version) == 1
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restGetDeviceID(w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
idStr := qs.Get("id")
id, err := protocol.DeviceIDFromString(idStr)
w.Header().Set("Content-Type", "application/json; charset=utf-8")
if err == nil {
json.NewEncoder(w).Encode(map[string]string{
"id": id.String(),
})
} else {
json.NewEncoder(w).Encode(map[string]string{
"error": err.Error(),
})
}
}
func restGetLang(w http.ResponseWriter, r *http.Request) {
lang := r.Header.Get("Accept-Language")
var langs []string
for _, l := range strings.Split(lang, ",") {
parts := strings.SplitN(l, ";", 2)
langs = append(langs, strings.ToLower(strings.TrimSpace(parts[0])))
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(langs)
}
func restPostUpgrade(w http.ResponseWriter, r *http.Request) {
rel, err := upgrade.LatestRelease(strings.Contains(Version, "-beta"))
if err != nil {
l.Warnln("getting latest release:", err)
http.Error(w, err.Error(), 500)
return
}
if upgrade.CompareVersions(rel.Tag, Version) == 1 {
err = upgrade.UpgradeTo(rel, GoArchExtra)
if err != nil {
l.Warnln("upgrading:", err)
http.Error(w, err.Error(), 500)
return
}
flushResponse(`{"ok": "restarting"}`, w)
l.Infoln("Upgrading")
stop <- exitUpgrading
}
}
func restPostScan(m *model.Model, w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
folder := qs.Get("folder")
sub := qs.Get("sub")
err := m.ScanFolderSub(folder, sub)
if err != nil {
http.Error(w, err.Error(), 500)
}
}
func getQR(w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var text = qs.Get("text")
code, err := qr.Encode(text, qr.M)
if err != nil {
http.Error(w, "Invalid", 500)
return
}
w.Header().Set("Content-Type", "image/png")
w.Write(code.PNG())
}
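// restGetPeerCompletion returns the average sync completion percentage per
// device over the folders shared with it, counting disconnected devices as 0.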
func restGetPeerCompletion(m *model.Model, w http.ResponseWriter, r *http.Request) {
tot := map[string]float64{}
count := map[string]float64{}
for _, folder := range cfg.Folders() {
for _, device := range folder.DeviceIDs() {
deviceStr := device.String()
if m.ConnectedTo(device) {
tot[deviceStr] += m.Completion(device, folder.ID)
} else {
tot[deviceStr] = 0
}
count[deviceStr]++
}
}
comp := map[string]int{}
for device := range tot {
comp[device] = int(tot[device] / count[device])
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(comp)
}
func restGetAutocompleteDirectory(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
qs := r.URL.Query()
current := qs.Get("current")
search, _ := osutil.ExpandTilde(current)
pathSeparator := string(os.PathSeparator)
if strings.HasSuffix(current, pathSeparator) && !strings.HasSuffix(search, pathSeparator) {
search = search + pathSeparator
}
subdirectories, _ := filepath.Glob(search + "*")
ret := make([]string, 0, 10)
for _, subdirectory := range subdirectories {
info, err := os.Stat(subdirectory)
if err == nil && info.IsDir() {
			ret = append(ret, subdirectory+pathSeparator)
if len(ret) > 9 {
break
}
}
}
json.NewEncoder(w).Encode(ret)
}
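// embeddedStatic serves the compiled-in GUI assets, preferring files found
// under assetDir (when set, for development) over the embedded copies.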
func embeddedStatic(assetDir string) http.Handler {
assets := auto.Assets()
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
file := r.URL.Path
if file[0] == '/' {
file = file[1:]
}
if len(file) == 0 {
file = "index.html"
}
if assetDir != "" {
p := filepath.Join(assetDir, filepath.FromSlash(file))
_, err := os.Stat(p)
if err == nil {
http.ServeFile(w, r, p)
return
}
}
bs, ok := assets[file]
if !ok {
http.NotFound(w, r)
return
}
mtype := mimeTypeForFile(file)
if len(mtype) != 0 {
w.Header().Set("Content-Type", mtype)
}
w.Header().Set("Content-Length", fmt.Sprintf("%d", len(bs)))
w.Header().Set("Last-Modified", modt)
w.Write(bs)
})
}
func mimeTypeForFile(file string) string {
	// We use a built-in table of the common types since the system
	// TypeByExtension might be unreliable. For anything we don't recognize,
	// we delegate to the system.
ext := filepath.Ext(file)
switch ext {
case ".htm", ".html":
return "text/html"
case ".css":
return "text/css"
case ".js":
return "application/javascript"
case ".json":
return "application/json"
case ".png":
return "image/png"
case ".ttf":
return "application/x-font-ttf"
case ".woff":
return "application/x-font-woff"
default:
return mime.TypeByExtension(ext)
}
}
| cmd/syncthing/gui.go | 1 | https://github.com/syncthing/syncthing/commit/59a85c1d751c85e585ec93398c3ba5c50bdef91f | [
0.004120266530662775,
0.00038275073166005313,
0.00015624451043549925,
0.00017248583026230335,
0.0006816686945967376
] |
{
"id": 5,
"code_window": [
"\tif rf, ok := m.folderFiles[folder]; ok {\n",
"\t\tfs := make([]protocol.FileInfo, 0, maxFiles)\n",
"\t\trf.WithNeed(protocol.LocalDeviceID, func(f protocol.FileIntf) bool {\n",
"\t\t\tfi := f.(protocol.FileInfo)\n",
"\t\t\tfs = append(fs, fi)\n",
"\t\t\tnblocks += len(fi.Blocks)\n",
"\t\t\treturn (maxFiles <= 0 || len(fs) < maxFiles) && (maxBlocks <= 0 || nblocks < maxBlocks)\n",
"\t\t})\n",
"\t\treturn fs\n",
"\t}\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tfs := make([]protocol.FileInfoTruncated, 0, maxFiles)\n",
"\t\trf.WithNeedTruncated(protocol.LocalDeviceID, func(f protocol.FileIntf) bool {\n",
"\t\t\tfs = append(fs, f.(protocol.FileInfoTruncated))\n",
"\t\t\treturn maxFiles <= 0 || len(fs) < maxFiles\n"
],
"file_path": "internal/model/model.go",
"type": "replace",
"edit_start_line_idx": 406
} | var validLangs = ["be","bg","cs","de","en","fr","hu","it","lt","nb","nn","pl","pt-PT","sv","zh-CN","zh-TW"]
| gui/lang/valid-langs.js | 0 | https://github.com/syncthing/syncthing/commit/59a85c1d751c85e585ec93398c3ba5c50bdef91f | [
0.00017077634402085096,
0.00017077634402085096,
0.00017077634402085096,
0.00017077634402085096,
0
] |
{
"id": 5,
"code_window": [
"\tif rf, ok := m.folderFiles[folder]; ok {\n",
"\t\tfs := make([]protocol.FileInfo, 0, maxFiles)\n",
"\t\trf.WithNeed(protocol.LocalDeviceID, func(f protocol.FileIntf) bool {\n",
"\t\t\tfi := f.(protocol.FileInfo)\n",
"\t\t\tfs = append(fs, fi)\n",
"\t\t\tnblocks += len(fi.Blocks)\n",
"\t\t\treturn (maxFiles <= 0 || len(fs) < maxFiles) && (maxBlocks <= 0 || nblocks < maxBlocks)\n",
"\t\t})\n",
"\t\treturn fs\n",
"\t}\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tfs := make([]protocol.FileInfoTruncated, 0, maxFiles)\n",
"\t\trf.WithNeedTruncated(protocol.LocalDeviceID, func(f protocol.FileIntf) bool {\n",
"\t\t\tfs = append(fs, f.(protocol.FileInfoTruncated))\n",
"\t\t\treturn maxFiles <= 0 || len(fs) < maxFiles\n"
],
"file_path": "internal/model/model.go",
"type": "replace",
"edit_start_line_idx": 406
} | // Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package transform
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"strconv"
"strings"
"testing"
"time"
"unicode/utf8"
)
type lowerCaseASCII struct{}
func (lowerCaseASCII) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
n := len(src)
if n > len(dst) {
n, err = len(dst), ErrShortDst
}
for i, c := range src[:n] {
if 'A' <= c && c <= 'Z' {
c += 'a' - 'A'
}
dst[i] = c
}
return n, n, err
}
var errYouMentionedX = errors.New("you mentioned X")
type dontMentionX struct{}
func (dontMentionX) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
n := len(src)
if n > len(dst) {
n, err = len(dst), ErrShortDst
}
for i, c := range src[:n] {
if c == 'X' {
return i, i, errYouMentionedX
}
dst[i] = c
}
return n, n, err
}
// doublerAtEOF is a strange Transformer that transforms "this" to "tthhiiss",
// but only if atEOF is true.
type doublerAtEOF struct{}
func (doublerAtEOF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
if !atEOF {
return 0, 0, ErrShortSrc
}
for i, c := range src {
if 2*i+2 >= len(dst) {
return 2 * i, i, ErrShortDst
}
dst[2*i+0] = c
dst[2*i+1] = c
}
return 2 * len(src), len(src), nil
}
// rleDecode and rleEncode implement a toy run-length encoding: "aabbbbbbbbbb"
// is encoded as "2a10b". The decoded data is assumed not to contain any digits.
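// For example, encoding "aaab" yields "3a1b" and decoding "3a1b" yields
// "aaab"; runs that do not fit in the destination buffer are reported via
// ErrShortDst so the caller can retry with more room.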
type rleDecode struct{}
func (rleDecode) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
loop:
for len(src) > 0 {
n := 0
for i, c := range src {
if '0' <= c && c <= '9' {
n = 10*n + int(c-'0')
continue
}
if i == 0 {
return nDst, nSrc, errors.New("rleDecode: bad input")
}
if n > len(dst) {
return nDst, nSrc, ErrShortDst
}
for j := 0; j < n; j++ {
dst[j] = c
}
dst, src = dst[n:], src[i+1:]
nDst, nSrc = nDst+n, nSrc+i+1
continue loop
}
if atEOF {
return nDst, nSrc, errors.New("rleDecode: bad input")
}
return nDst, nSrc, ErrShortSrc
}
return nDst, nSrc, nil
}
type rleEncode struct {
// allowStutter means that "xxxxxxxx" can be encoded as "5x3x"
// instead of always as "8x".
allowStutter bool
}
func (e rleEncode) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
for len(src) > 0 {
n, c0 := len(src), src[0]
for i, c := range src[1:] {
if c != c0 {
n = i + 1
break
}
}
if n == len(src) && !atEOF && !e.allowStutter {
return nDst, nSrc, ErrShortSrc
}
s := strconv.Itoa(n)
if len(s) >= len(dst) {
return nDst, nSrc, ErrShortDst
}
copy(dst, s)
dst[len(s)] = c0
dst, src = dst[len(s)+1:], src[n:]
nDst, nSrc = nDst+len(s)+1, nSrc+n
}
return nDst, nSrc, nil
}
// trickler consumes all input bytes, but writes a single byte at a time to dst.
type trickler []byte
func (t *trickler) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
*t = append(*t, src...)
if len(*t) == 0 {
return 0, 0, nil
}
if len(dst) == 0 {
return 0, len(src), ErrShortDst
}
dst[0] = (*t)[0]
*t = (*t)[1:]
if len(*t) > 0 {
err = ErrShortDst
}
return 1, len(src), err
}
// delayedTrickler is like trickler, but delays writing output to dst. This is
// highly unlikely to be relevant in practice, but it seems like a good idea
// to have some tolerance as long as progress can be detected.
type delayedTrickler []byte
func (t *delayedTrickler) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
if len(*t) > 0 && len(dst) > 0 {
dst[0] = (*t)[0]
*t = (*t)[1:]
nDst = 1
}
*t = append(*t, src...)
if len(*t) > 0 {
err = ErrShortDst
}
return nDst, len(src), err
}
type testCase struct {
desc string
t Transformer
src string
dstSize int
srcSize int
ioSize int
wantStr string
wantErr error
wantIter int // number of iterations taken; 0 means we don't care.
}
func (t testCase) String() string {
return tstr(t.t) + "; " + t.desc
}
func tstr(t Transformer) string {
if stringer, ok := t.(fmt.Stringer); ok {
return stringer.String()
}
s := fmt.Sprintf("%T", t)
return s[1+strings.Index(s, "."):]
}
func (c chain) String() string {
buf := &bytes.Buffer{}
buf.WriteString("Chain(")
for i, l := range c.link[:len(c.link)-1] {
if i != 0 {
fmt.Fprint(buf, ", ")
}
buf.WriteString(tstr(l.t))
}
buf.WriteString(")")
return buf.String()
}
var testCases = []testCase{
{
desc: "empty",
t: lowerCaseASCII{},
src: "",
dstSize: 100,
srcSize: 100,
wantStr: "",
},
{
desc: "basic",
t: lowerCaseASCII{},
src: "Hello WORLD.",
dstSize: 100,
srcSize: 100,
wantStr: "hello world.",
},
{
desc: "small dst",
t: lowerCaseASCII{},
src: "Hello WORLD.",
dstSize: 3,
srcSize: 100,
wantStr: "hello world.",
},
{
desc: "small src",
t: lowerCaseASCII{},
src: "Hello WORLD.",
dstSize: 100,
srcSize: 4,
wantStr: "hello world.",
},
{
desc: "small buffers",
t: lowerCaseASCII{},
src: "Hello WORLD.",
dstSize: 3,
srcSize: 4,
wantStr: "hello world.",
},
{
desc: "very small buffers",
t: lowerCaseASCII{},
src: "Hello WORLD.",
dstSize: 1,
srcSize: 1,
wantStr: "hello world.",
},
{
desc: "basic",
t: dontMentionX{},
src: "The First Rule of Transform Club: don't mention Mister X, ever.",
dstSize: 100,
srcSize: 100,
wantStr: "The First Rule of Transform Club: don't mention Mister ",
wantErr: errYouMentionedX,
},
{
desc: "small buffers",
t: dontMentionX{},
src: "The First Rule of Transform Club: don't mention Mister X, ever.",
dstSize: 10,
srcSize: 10,
wantStr: "The First Rule of Transform Club: don't mention Mister ",
wantErr: errYouMentionedX,
},
{
desc: "very small buffers",
t: dontMentionX{},
src: "The First Rule of Transform Club: don't mention Mister X, ever.",
dstSize: 1,
srcSize: 1,
wantStr: "The First Rule of Transform Club: don't mention Mister ",
wantErr: errYouMentionedX,
},
{
desc: "only transform at EOF",
t: doublerAtEOF{},
src: "this",
dstSize: 100,
srcSize: 100,
wantStr: "tthhiiss",
},
{
desc: "basic",
t: rleDecode{},
src: "1a2b3c10d11e0f1g",
dstSize: 100,
srcSize: 100,
wantStr: "abbcccddddddddddeeeeeeeeeeeg",
},
{
desc: "long",
t: rleDecode{},
src: "12a23b34c45d56e99z",
dstSize: 100,
srcSize: 100,
wantStr: strings.Repeat("a", 12) +
strings.Repeat("b", 23) +
strings.Repeat("c", 34) +
strings.Repeat("d", 45) +
strings.Repeat("e", 56) +
strings.Repeat("z", 99),
},
{
desc: "tight buffers",
t: rleDecode{},
src: "1a2b3c10d11e0f1g",
dstSize: 11,
srcSize: 3,
wantStr: "abbcccddddddddddeeeeeeeeeeeg",
},
{
desc: "short dst",
t: rleDecode{},
src: "1a2b3c10d11e0f1g",
dstSize: 10,
srcSize: 3,
wantStr: "abbcccdddddddddd",
wantErr: ErrShortDst,
},
{
desc: "short src",
t: rleDecode{},
src: "1a2b3c10d11e0f1g",
dstSize: 11,
srcSize: 2,
ioSize: 2,
wantStr: "abbccc",
wantErr: ErrShortSrc,
},
{
desc: "basic",
t: rleEncode{},
src: "abbcccddddddddddeeeeeeeeeeeg",
dstSize: 100,
srcSize: 100,
wantStr: "1a2b3c10d11e1g",
},
{
desc: "long",
t: rleEncode{},
src: strings.Repeat("a", 12) +
strings.Repeat("b", 23) +
strings.Repeat("c", 34) +
strings.Repeat("d", 45) +
strings.Repeat("e", 56) +
strings.Repeat("z", 99),
dstSize: 100,
srcSize: 100,
wantStr: "12a23b34c45d56e99z",
},
{
desc: "tight buffers",
t: rleEncode{},
src: "abbcccddddddddddeeeeeeeeeeeg",
dstSize: 3,
srcSize: 12,
wantStr: "1a2b3c10d11e1g",
},
{
desc: "short dst",
t: rleEncode{},
src: "abbcccddddddddddeeeeeeeeeeeg",
dstSize: 2,
srcSize: 12,
wantStr: "1a2b3c",
wantErr: ErrShortDst,
},
{
desc: "short src",
t: rleEncode{},
src: "abbcccddddddddddeeeeeeeeeeeg",
dstSize: 3,
srcSize: 11,
ioSize: 11,
wantStr: "1a2b3c10d",
wantErr: ErrShortSrc,
},
{
desc: "allowStutter = false",
t: rleEncode{allowStutter: false},
src: "aaaabbbbbbbbccccddddd",
dstSize: 10,
srcSize: 10,
wantStr: "4a8b4c5d",
},
{
desc: "allowStutter = true",
t: rleEncode{allowStutter: true},
src: "aaaabbbbbbbbccccddddd",
dstSize: 10,
srcSize: 10,
ioSize: 10,
wantStr: "4a6b2b4c4d1d",
},
{
desc: "trickler",
t: &trickler{},
src: "abcdefghijklm",
dstSize: 3,
srcSize: 15,
wantStr: "abcdefghijklm",
},
{
desc: "delayedTrickler",
t: &delayedTrickler{},
src: "abcdefghijklm",
dstSize: 3,
srcSize: 15,
wantStr: "abcdefghijklm",
},
}
func TestReader(t *testing.T) {
for _, tc := range testCases {
reset(tc.t)
r := NewReader(strings.NewReader(tc.src), tc.t)
// Differently sized dst and src buffers are not part of the
// exported API. We override them manually.
r.dst = make([]byte, tc.dstSize)
r.src = make([]byte, tc.srcSize)
got, err := ioutil.ReadAll(r)
str := string(got)
if str != tc.wantStr || err != tc.wantErr {
t.Errorf("%s:\ngot %q, %v\nwant %q, %v", tc, str, err, tc.wantStr, tc.wantErr)
}
}
}
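// reset flushes any state buffered inside t by transforming empty input at
// EOF until Transform stops reporting an error.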
func reset(t Transformer) {
var dst [128]byte
for err := ErrShortDst; err != nil; {
_, _, err = t.Transform(dst[:], nil, true)
}
}
func TestWriter(t *testing.T) {
tests := append(testCases, chainTests()...)
for _, tc := range tests {
sizes := []int{1, 2, 3, 4, 5, 10, 100, 1000}
if tc.ioSize > 0 {
sizes = []int{tc.ioSize}
}
for _, sz := range sizes {
bb := &bytes.Buffer{}
reset(tc.t)
w := NewWriter(bb, tc.t)
// Differently sized dst and src buffers are not part of the
// exported API. We override them manually.
w.dst = make([]byte, tc.dstSize)
w.src = make([]byte, tc.srcSize)
src := make([]byte, sz)
var err error
for b := tc.src; len(b) > 0 && err == nil; {
n := copy(src, b)
b = b[n:]
m := 0
m, err = w.Write(src[:n])
if m != n && err == nil {
t.Errorf("%s:%d: did not consume all bytes %d < %d", tc, sz, m, n)
}
}
if err == nil {
err = w.Close()
}
str := bb.String()
if str != tc.wantStr || err != tc.wantErr {
t.Errorf("%s:%d:\ngot %q, %v\nwant %q, %v", tc, sz, str, err, tc.wantStr, tc.wantErr)
}
}
}
}
func TestNop(t *testing.T) {
testCases := []struct {
str string
dstSize int
err error
}{
{"", 0, nil},
{"", 10, nil},
{"a", 0, ErrShortDst},
{"a", 1, nil},
{"a", 10, nil},
}
for i, tc := range testCases {
dst := make([]byte, tc.dstSize)
nDst, nSrc, err := Nop.Transform(dst, []byte(tc.str), true)
want := tc.str
if tc.dstSize < len(want) {
want = want[:tc.dstSize]
}
if got := string(dst[:nDst]); got != want || err != tc.err || nSrc != nDst {
t.Errorf("%d:\ngot %q, %d, %v\nwant %q, %d, %v", i, got, nSrc, err, want, nDst, tc.err)
}
}
}
func TestDiscard(t *testing.T) {
testCases := []struct {
str string
dstSize int
}{
{"", 0},
{"", 10},
{"a", 0},
{"ab", 10},
}
for i, tc := range testCases {
nDst, nSrc, err := Discard.Transform(make([]byte, tc.dstSize), []byte(tc.str), true)
if nDst != 0 || nSrc != len(tc.str) || err != nil {
t.Errorf("%d:\ngot %q, %d, %v\nwant 0, %d, nil", i, nDst, nSrc, err, len(tc.str))
}
}
}
// mkChain creates a Chain transformer. x must alternate between Transformers
// and buffer sizes, in the pattern T, (sz, T)*.
func mkChain(x ...interface{}) *chain {
t := []Transformer{}
for i := 0; i < len(x); i += 2 {
t = append(t, x[i].(Transformer))
}
c := Chain(t...).(*chain)
for i, j := 1, 1; i < len(x); i, j = i+2, j+1 {
c.link[j].b = make([]byte, x[i].(int))
}
return c
}
func chainTests() []testCase {
return []testCase{
{
desc: "nil error",
t: mkChain(rleEncode{}, 100, lowerCaseASCII{}),
src: "ABB",
dstSize: 100,
srcSize: 100,
wantStr: "1a2b",
wantErr: nil,
wantIter: 1,
},
{
desc: "short dst buffer",
t: mkChain(lowerCaseASCII{}, 3, rleDecode{}),
src: "1a2b3c10d11e0f1g",
dstSize: 10,
srcSize: 3,
wantStr: "abbcccdddddddddd",
wantErr: ErrShortDst,
},
{
desc: "short internal dst buffer",
t: mkChain(lowerCaseASCII{}, 3, rleDecode{}, 10, Nop),
src: "1a2b3c10d11e0f1g",
dstSize: 100,
srcSize: 3,
wantStr: "abbcccdddddddddd",
wantErr: errShortInternal,
},
{
desc: "short internal dst buffer from input",
t: mkChain(rleDecode{}, 10, Nop),
src: "1a2b3c10d11e0f1g",
dstSize: 100,
srcSize: 3,
wantStr: "abbcccdddddddddd",
wantErr: errShortInternal,
},
{
desc: "empty short internal dst buffer",
t: mkChain(lowerCaseASCII{}, 3, rleDecode{}, 10, Nop),
src: "4a7b11e0f1g",
dstSize: 100,
srcSize: 3,
wantStr: "aaaabbbbbbb",
wantErr: errShortInternal,
},
{
desc: "empty short internal dst buffer from input",
t: mkChain(rleDecode{}, 10, Nop),
src: "4a7b11e0f1g",
dstSize: 100,
srcSize: 3,
wantStr: "aaaabbbbbbb",
wantErr: errShortInternal,
},
{
desc: "short internal src buffer after full dst buffer",
t: mkChain(Nop, 5, rleEncode{}, 10, Nop),
src: "cccccddddd",
dstSize: 100,
srcSize: 100,
wantStr: "",
wantErr: errShortInternal,
wantIter: 1,
},
{
desc: "short internal src buffer after short dst buffer; test lastFull",
t: mkChain(rleDecode{}, 5, rleEncode{}, 4, Nop),
src: "2a1b4c6d",
dstSize: 100,
srcSize: 100,
wantStr: "2a1b",
wantErr: errShortInternal,
},
{
desc: "short internal src buffer after successful complete fill",
t: mkChain(Nop, 3, rleDecode{}),
src: "123a4b",
dstSize: 4,
srcSize: 3,
wantStr: "",
wantErr: errShortInternal,
wantIter: 1,
},
{
desc: "short internal src buffer after short dst buffer; test lastFull",
t: mkChain(rleDecode{}, 5, rleEncode{}),
src: "2a1b4c6d",
dstSize: 4,
srcSize: 100,
wantStr: "2a1b",
wantErr: errShortInternal,
},
{
desc: "short src buffer",
t: mkChain(rleEncode{}, 5, Nop),
src: "abbcccddddeeeee",
dstSize: 4,
srcSize: 4,
ioSize: 4,
wantStr: "1a2b3c",
wantErr: ErrShortSrc,
},
{
desc: "process all in one go",
t: mkChain(rleEncode{}, 5, Nop),
src: "abbcccddddeeeeeffffff",
dstSize: 100,
srcSize: 100,
wantStr: "1a2b3c4d5e6f",
wantErr: nil,
wantIter: 1,
},
{
desc: "complete processing downstream after error",
t: mkChain(dontMentionX{}, 2, rleDecode{}, 5, Nop),
src: "3a4b5eX",
dstSize: 100,
srcSize: 100,
ioSize: 100,
wantStr: "aaabbbbeeeee",
wantErr: errYouMentionedX,
},
{
desc: "return downstream fatal errors first (followed by short dst)",
t: mkChain(dontMentionX{}, 8, rleDecode{}, 4, Nop),
src: "3a4b5eX",
dstSize: 100,
srcSize: 100,
ioSize: 100,
wantStr: "aaabbbb",
wantErr: errShortInternal,
},
{
desc: "return downstream fatal errors first (followed by short src)",
t: mkChain(dontMentionX{}, 5, Nop, 1, rleDecode{}),
src: "1a5bX",
dstSize: 100,
srcSize: 100,
ioSize: 100,
wantStr: "",
wantErr: errShortInternal,
},
{
desc: "short internal",
t: mkChain(Nop, 11, rleEncode{}, 3, Nop),
src: "abbcccddddddddddeeeeeeeeeeeg",
dstSize: 3,
srcSize: 100,
wantStr: "1a2b3c10d",
wantErr: errShortInternal,
},
}
}
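// doTransform drives tc.t by hand, feeding at most tc.srcSize bytes per call
// into a tc.dstSize-byte destination, and accumulates the output until the
// input is fully consumed or the Transformer stops making progress (or fails).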
func doTransform(tc testCase) (res string, iter int, err error) {
reset(tc.t)
dst := make([]byte, tc.dstSize)
out, in := make([]byte, 0, 2*len(tc.src)), []byte(tc.src)
for {
iter++
src, atEOF := in, true
if len(src) > tc.srcSize {
src, atEOF = src[:tc.srcSize], false
}
nDst, nSrc, err := tc.t.Transform(dst, src, atEOF)
out = append(out, dst[:nDst]...)
in = in[nSrc:]
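		// Continue only while progress is being made: a nil error with input
		// remaining, an ErrShortSrc that consumed input, or an ErrShortDst
		// that consumed or produced bytes; anything else ends the loop.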
switch {
case err == nil && len(in) != 0:
case err == ErrShortSrc && nSrc > 0:
case err == ErrShortDst && (nDst > 0 || nSrc > 0):
default:
return string(out), iter, err
}
}
}
func TestChain(t *testing.T) {
if c, ok := Chain().(nop); !ok {
t.Errorf("empty chain: %v; want Nop", c)
}
// Test Chain for a single Transformer.
for _, tc := range testCases {
tc.t = Chain(tc.t)
str, _, err := doTransform(tc)
if str != tc.wantStr || err != tc.wantErr {
t.Errorf("%s:\ngot %q, %v\nwant %q, %v", tc, str, err, tc.wantStr, tc.wantErr)
}
}
tests := chainTests()
sizes := []int{1, 2, 3, 4, 5, 7, 10, 100, 1000}
addTest := func(tc testCase, t *chain) {
if t.link[0].t != tc.t && tc.wantErr == ErrShortSrc {
tc.wantErr = errShortInternal
}
if t.link[len(t.link)-2].t != tc.t && tc.wantErr == ErrShortDst {
tc.wantErr = errShortInternal
}
tc.t = t
tests = append(tests, tc)
}
for _, tc := range testCases {
for _, sz := range sizes {
tt := tc
tt.dstSize = sz
addTest(tt, mkChain(tc.t, tc.dstSize, Nop))
addTest(tt, mkChain(tc.t, tc.dstSize, Nop, 2, Nop))
addTest(tt, mkChain(Nop, tc.srcSize, tc.t, tc.dstSize, Nop))
if sz >= tc.dstSize && (tc.wantErr != ErrShortDst || sz == tc.dstSize) {
addTest(tt, mkChain(Nop, tc.srcSize, tc.t))
addTest(tt, mkChain(Nop, 100, Nop, tc.srcSize, tc.t))
}
}
}
for _, tc := range testCases {
tt := tc
tt.dstSize = 1
tt.wantStr = ""
addTest(tt, mkChain(tc.t, tc.dstSize, Discard))
addTest(tt, mkChain(Nop, tc.srcSize, tc.t, tc.dstSize, Discard))
addTest(tt, mkChain(Nop, tc.srcSize, tc.t, tc.dstSize, Nop, tc.dstSize, Discard))
}
for _, tc := range testCases {
tt := tc
tt.dstSize = 100
tt.wantStr = strings.Replace(tc.src, "0f", "", -1)
// Chain encoders and decoders.
if _, ok := tc.t.(rleEncode); ok && tc.wantErr == nil {
addTest(tt, mkChain(tc.t, tc.dstSize, Nop, 1000, rleDecode{}))
addTest(tt, mkChain(tc.t, tc.dstSize, Nop, tc.dstSize, rleDecode{}))
addTest(tt, mkChain(Nop, tc.srcSize, tc.t, tc.dstSize, Nop, 100, rleDecode{}))
// decoding needs larger destinations
addTest(tt, mkChain(Nop, tc.srcSize, tc.t, tc.dstSize, rleDecode{}, 100, Nop))
addTest(tt, mkChain(Nop, tc.srcSize, tc.t, tc.dstSize, Nop, 100, rleDecode{}, 100, Nop))
} else if _, ok := tc.t.(rleDecode); ok && tc.wantErr == nil {
// The internal buffer size may need to be the sum of the maximum segment
// size of the two encoders!
addTest(tt, mkChain(tc.t, 2*tc.dstSize, rleEncode{}))
addTest(tt, mkChain(tc.t, tc.dstSize, Nop, 101, rleEncode{}))
addTest(tt, mkChain(Nop, tc.srcSize, tc.t, tc.dstSize, Nop, 100, rleEncode{}))
addTest(tt, mkChain(Nop, tc.srcSize, tc.t, tc.dstSize, Nop, 200, rleEncode{}, 100, Nop))
}
}
for _, tc := range tests {
str, iter, err := doTransform(tc)
mi := tc.wantIter != 0 && tc.wantIter != iter
if str != tc.wantStr || err != tc.wantErr || mi {
t.Errorf("%s:\ngot iter:%d, %q, %v\nwant iter:%d, %q, %v", tc, iter, str, err, tc.wantIter, tc.wantStr, tc.wantErr)
}
}
}
func TestRemoveFunc(t *testing.T) {
filter := RemoveFunc(func(r rune) bool {
return strings.IndexRune("ab\u0300\u1234,", r) != -1
})
tests := []testCase{
{
src: ",",
wantStr: "",
},
{
src: "c",
wantStr: "c",
},
{
src: "\u2345",
wantStr: "\u2345",
},
{
src: "tschüß",
wantStr: "tschüß",
},
{
src: ",до,свидания,",
wantStr: "досвидания",
},
{
src: "a\xbd\xb2=\xbc ⌘",
wantStr: "\uFFFD\uFFFD=\uFFFD ⌘",
},
{
// If we didn't replace illegal bytes with RuneError, the result
// would be \u0300 or the code would need to be more complex.
src: "\xcc\u0300\x80",
wantStr: "\uFFFD\uFFFD",
},
{
src: "\xcc\u0300\x80",
dstSize: 3,
wantStr: "\uFFFD\uFFFD",
wantIter: 2,
},
{
src: "\u2345",
dstSize: 2,
wantStr: "",
wantErr: ErrShortDst,
},
{
src: "\xcc",
dstSize: 2,
wantStr: "",
wantErr: ErrShortDst,
},
{
src: "\u0300",
dstSize: 2,
srcSize: 1,
wantStr: "",
wantErr: ErrShortSrc,
},
{
t: RemoveFunc(func(r rune) bool {
return r == utf8.RuneError
}),
src: "\xcc\u0300\x80",
wantStr: "\u0300",
},
}
for _, tc := range tests {
tc.desc = tc.src
if tc.t == nil {
tc.t = filter
}
if tc.dstSize == 0 {
tc.dstSize = 100
}
if tc.srcSize == 0 {
tc.srcSize = 100
}
str, iter, err := doTransform(tc)
mi := tc.wantIter != 0 && tc.wantIter != iter
if str != tc.wantStr || err != tc.wantErr || mi {
t.Errorf("%+q:\ngot iter:%d, %+q, %v\nwant iter:%d, %+q, %v", tc.src, iter, str, err, tc.wantIter, tc.wantStr, tc.wantErr)
}
tc.src = str
idem, _, _ := doTransform(tc)
if str != idem {
t.Errorf("%+q: found %+q; want %+q", tc.src, idem, str)
}
}
}
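// testString runs the shared test cases through f, an adapter around a
// whole-string API such as String or Bytes, skipping cases that depend on a
// specific buffer size.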
func testString(t *testing.T, f func(Transformer, string) (string, int, error)) {
for _, tt := range append(testCases, chainTests()...) {
if tt.desc == "allowStutter = true" {
// We don't have control over the buffer size, so we eliminate tests
// that depend on a specific buffer size being set.
continue
}
reset(tt.t)
if tt.wantErr == ErrShortDst || tt.wantErr == ErrShortSrc {
// The result string will be different.
continue
}
got, n, err := f(tt.t, tt.src)
if tt.wantErr != err {
t.Errorf("%s:error: got %v; want %v", tt.desc, err, tt.wantErr)
}
if got, want := err == nil, n == len(tt.src); got != want {
t.Errorf("%s:n: got %v; want %v", tt.desc, got, want)
}
if got != tt.wantStr {
t.Errorf("%s:string: got %q; want %q", tt.desc, got, tt.wantStr)
}
}
}
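// TestBytes exercises the Bytes convenience function via the shared string tests.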
func TestBytes(t *testing.T) {
testString(t, func(z Transformer, s string) (string, int, error) {
b, n, err := Bytes(z, []byte(s))
return string(b), n, err
})
}
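// TestString exercises the String convenience function, including overruns of
// the internal destination and source buffers and the allocation count for
// unchanged input.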
func TestString(t *testing.T) {
testString(t, String)
// Overrun the internal destination buffer.
for i, s := range []string{
strings.Repeat("a", initialBufSize-1),
strings.Repeat("a", initialBufSize+0),
strings.Repeat("a", initialBufSize+1),
strings.Repeat("A", initialBufSize-1),
strings.Repeat("A", initialBufSize+0),
strings.Repeat("A", initialBufSize+1),
strings.Repeat("A", 2*initialBufSize-1),
strings.Repeat("A", 2*initialBufSize+0),
strings.Repeat("A", 2*initialBufSize+1),
strings.Repeat("a", initialBufSize-2) + "A",
strings.Repeat("a", initialBufSize-1) + "A",
strings.Repeat("a", initialBufSize+0) + "A",
strings.Repeat("a", initialBufSize+1) + "A",
} {
got, _, _ := String(lowerCaseASCII{}, s)
if want := strings.ToLower(s); got != want {
t.Errorf("%d:dst buffer test: got %s (%d); want %s (%d)", i, got, len(got), want, len(want))
}
}
// Overrun the internal source buffer.
for i, s := range []string{
strings.Repeat("a", initialBufSize-1),
strings.Repeat("a", initialBufSize+0),
strings.Repeat("a", initialBufSize+1),
strings.Repeat("a", 2*initialBufSize+1),
strings.Repeat("a", 2*initialBufSize+0),
strings.Repeat("a", 2*initialBufSize+1),
} {
got, _, _ := String(rleEncode{}, s)
if want := fmt.Sprintf("%da", len(s)); got != want {
t.Errorf("%d:src buffer test: got %s (%d); want %s (%d)", i, got, len(got), want, len(want))
}
}
// Test allocations for non-changing strings.
// Note we still need to allocate a single buffer.
for i, s := range []string{
"",
"123",
"123456789",
strings.Repeat("a", initialBufSize),
strings.Repeat("a", 10*initialBufSize),
} {
if n := testing.AllocsPerRun(5, func() { String(&lowerCaseASCII{}, s) }); n > 1 {
t.Errorf("%d: #allocs was %f; want 1", i, n)
}
}
}
// TestBytesAllocation tests that buffer growth stays limited with the trickler
// transformer, which behaves oddly but within spec. In case buffer growth is
// not correctly handled, the test will either panic with a failed allocation or
// thrash. To ensure the tests terminate under the last condition, we time out
// after some sufficiently long period of time.
func TestBytesAllocation(t *testing.T) {
done := make(chan bool)
go func() {
in := bytes.Repeat([]byte{'a'}, 1000)
tr := trickler(make([]byte, 1))
Bytes(&tr, in)
done <- true
}()
select {
case <-done:
case <-time.After(3 * time.Second):
t.Error("time out, likely due to excessive allocation")
}
}
// TestStringAllocation tests that buffer growth stays limited with the trickler
// transformer, which behaves oddly but within spec. In case buffer growth is
// not correctly handled, the test will either panic with a failed allocation or
// thrash. To ensure the tests terminate under the last condition, we time out
// after some sufficiently long period of time.
func TestStringAllocation(t *testing.T) {
done := make(chan bool)
go func() {
in := strings.Repeat("a", 1000)
tr := trickler(make([]byte, 1))
String(&tr, in)
done <- true
}()
select {
case <-done:
case <-time.After(3 * time.Second):
t.Error("time out, likely due to excessive allocation")
}
}
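// BenchmarkStringLower measures String with an ASCII lower-casing transformer
// on a 4 KiB input.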
func BenchmarkStringLower(b *testing.B) {
in := strings.Repeat("a", 4096)
for i := 0; i < b.N; i++ {
String(&lowerCaseASCII{}, in)
}
}
| Godeps/_workspace/src/code.google.com/p/go.text/transform/transform_test.go | 0 | https://github.com/syncthing/syncthing/commit/59a85c1d751c85e585ec93398c3ba5c50bdef91f | [
0.0008704822976142168,
0.00017964963626582175,
0.00015985002391971648,
0.00017442970420233905,
0.00006689505971735343
] |
{
"id": 5,
"code_window": [
"\tif rf, ok := m.folderFiles[folder]; ok {\n",
"\t\tfs := make([]protocol.FileInfo, 0, maxFiles)\n",
"\t\trf.WithNeed(protocol.LocalDeviceID, func(f protocol.FileIntf) bool {\n",
"\t\t\tfi := f.(protocol.FileInfo)\n",
"\t\t\tfs = append(fs, fi)\n",
"\t\t\tnblocks += len(fi.Blocks)\n",
"\t\t\treturn (maxFiles <= 0 || len(fs) < maxFiles) && (maxBlocks <= 0 || nblocks < maxBlocks)\n",
"\t\t})\n",
"\t\treturn fs\n",
"\t}\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tfs := make([]protocol.FileInfoTruncated, 0, maxFiles)\n",
"\t\trf.WithNeedTruncated(protocol.LocalDeviceID, func(f protocol.FileIntf) bool {\n",
"\t\t\tfs = append(fs, f.(protocol.FileInfoTruncated))\n",
"\t\t\treturn maxFiles <= 0 || len(fs) < maxFiles\n"
],
"file_path": "internal/model/model.go",
"type": "replace",
"edit_start_line_idx": 406
} | // Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
package versioner
import (
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/syncthing/syncthing/internal/osutil"
)
func init() {
// Register the constructor for this type of versioner with the name "staggered"
Factories["staggered"] = NewStaggered
}
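// Interval describes the minimum spacing between versions (step) for files up
// to a given age (end), both in seconds.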
type Interval struct {
step int64
end int64
}
// Staggered holds the staggered versioner configuration.
type Staggered struct {
versionsPath string
cleanInterval int64
folderPath string
interval [4]Interval
mutex *sync.Mutex
}
// isFile returns whether the path exists and refers to a regular file.
func isFile(path string) bool {
fileInfo, err := os.Stat(path)
if err != nil {
l.Infoln("versioner isFile:", err)
return false
}
return fileInfo.Mode().IsRegular()
}
const TimeLayout = "20060102-150405"
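// versionExt returns the version timestamp suffix of a file name (the part
// after the last "~"), or an empty string if there is none.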
func versionExt(path string) string {
pathSplit := strings.Split(path, "~")
if len(pathSplit) > 1 {
return pathSplit[len(pathSplit)-1]
} else {
return ""
}
}
// Rename versions with old version format
func (v Staggered) renameOld() {
err := filepath.Walk(v.versionsPath, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
if f.Mode().IsRegular() {
versionUnix, err := strconv.ParseInt(strings.Replace(filepath.Ext(path), ".v", "", 1), 10, 0)
if err == nil {
l.Infoln("Renaming file", path, "from old to new version format")
versiondate := time.Unix(versionUnix, 0)
name := path[:len(path)-len(filepath.Ext(path))]
err = osutil.Rename(path, name+"~"+versiondate.Format(TimeLayout))
if err != nil {
l.Infoln("Error renaming to new format", err)
}
}
}
return nil
})
if err != nil {
l.Infoln("Versioner: error scanning versions dir", err)
return
}
}
// NewStaggered constructs a staggered versioner for the given folder from the
// parameter map (maxAge, cleanInterval, versionsPath).
func NewStaggered(folderID, folderPath string, params map[string]string) Versioner {
maxAge, err := strconv.ParseInt(params["maxAge"], 10, 0)
if err != nil {
maxAge = 31536000 // Default: ~1 year
}
cleanInterval, err := strconv.ParseInt(params["cleanInterval"], 10, 0)
if err != nil {
cleanInterval = 3600 // Default: clean once per hour
}
// Use custom path if set, otherwise .stversions in folderPath
var versionsDir string
if params["versionsPath"] == "" {
if debug {
l.Debugln("using default dir .stversions")
}
versionsDir = filepath.Join(folderPath, ".stversions")
} else {
if debug {
l.Debugln("using dir", params["versionsPath"])
}
versionsDir = params["versionsPath"]
}
var mutex sync.Mutex
s := Staggered{
versionsPath: versionsDir,
cleanInterval: cleanInterval,
folderPath: folderPath,
interval: [4]Interval{
{30, 3600}, // first hour -> 30 sec between versions
{3600, 86400}, // next day -> 1 h between versions
			{86400, 2592000}, // next 30 days -> 1 day between versions
{604800, maxAge}, // next year -> 1 week between versions
},
mutex: &mutex,
}
if debug {
l.Debugf("instantiated %#v", s)
}
// Rename versions that use the old version format
s.renameOld()
go func() {
s.clean()
for _ = range time.Tick(time.Duration(cleanInterval) * time.Second) {
s.clean()
}
}()
return s
}
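// clean creates the versions directory if needed, expires superfluous versions
// for each archived file and removes directories that have become empty.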
func (v Staggered) clean() {
if debug {
l.Debugln("Versioner clean: Waiting for lock on", v.versionsPath)
}
v.mutex.Lock()
defer v.mutex.Unlock()
if debug {
l.Debugln("Versioner clean: Cleaning", v.versionsPath)
}
_, err := os.Stat(v.versionsPath)
if err != nil {
if os.IsNotExist(err) {
if debug {
l.Debugln("creating versions dir", v.versionsPath)
}
os.MkdirAll(v.versionsPath, 0755)
osutil.HideFile(v.versionsPath)
} else {
l.Warnln("Versioner: can't create versions dir", err)
}
}
versionsPerFile := make(map[string][]string)
filesPerDir := make(map[string]int)
err = filepath.Walk(v.versionsPath, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
switch mode := f.Mode(); {
case mode.IsDir():
filesPerDir[path] = 0
if path != v.versionsPath {
dir := filepath.Dir(path)
filesPerDir[dir]++
}
case mode.IsRegular():
extension := versionExt(path)
dir := filepath.Dir(path)
name := path[:len(path)-len(extension)-1]
filesPerDir[dir]++
versionsPerFile[name] = append(versionsPerFile[name], path)
}
return nil
})
if err != nil {
l.Warnln("Versioner: error scanning versions dir", err)
return
}
for _, versionList := range versionsPerFile {
// List from filepath.Walk is sorted
v.expire(versionList)
}
for path, numFiles := range filesPerDir {
if numFiles > 0 {
continue
}
if path == v.versionsPath {
if debug {
l.Debugln("Cleaner: versions dir is empty, don't delete", path)
}
continue
}
if debug {
l.Debugln("Cleaner: deleting empty directory", path)
}
err = os.Remove(path)
if err != nil {
l.Warnln("Versioner: can't remove directory", path, err)
}
}
if debug {
l.Debugln("Cleaner: Finished cleaning", v.versionsPath)
}
}
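// expire removes versions from the given list (sorted oldest first) that
// exceed the maximum age or that are spaced more closely than the matching
// interval allows.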
func (v Staggered) expire(versions []string) {
if debug {
l.Debugln("Versioner: Expiring versions", versions)
}
var prevAge int64
firstFile := true
for _, file := range versions {
if isFile(file) {
versionTime, err := time.Parse(TimeLayout, versionExt(file))
if err != nil {
l.Infof("Versioner: file name %q is invalid: %v", file, err)
continue
}
age := int64(time.Since(versionTime).Seconds())
// If the file is older than the max age of the last interval, remove it
if lastIntv := v.interval[len(v.interval)-1]; lastIntv.end > 0 && age > lastIntv.end {
if debug {
l.Debugln("Versioner: File over maximum age -> delete ", file)
}
err = os.Remove(file)
if err != nil {
l.Warnf("Versioner: can't remove %q: %v", file, err)
}
continue
}
// If it's the first (oldest) file in the list we can skip the interval checks
if firstFile {
prevAge = age
firstFile = false
continue
}
// Find the interval the file fits in
var usedInterval Interval
for _, usedInterval = range v.interval {
if age < usedInterval.end {
break
}
}
if prevAge-age < usedInterval.step {
if debug {
l.Debugln("too many files in step -> delete", file)
}
err = os.Remove(file)
if err != nil {
l.Warnf("Versioner: can't remove %q: %v", file, err)
}
continue
}
prevAge = age
} else {
l.Infof("non-file %q is named like a file version", file)
}
}
}
// Move away the named file to a version archive. If this function returns
// nil, the named file does not exist any more (has been archived).
func (v Staggered) Archive(filePath string) error {
if debug {
l.Debugln("Waiting for lock on ", v.versionsPath)
}
v.mutex.Lock()
defer v.mutex.Unlock()
fileInfo, err := os.Stat(filePath)
if err != nil {
if os.IsNotExist(err) {
if debug {
l.Debugln("not archiving nonexistent file", filePath)
}
return nil
} else {
return err
}
}
_, err = os.Stat(v.versionsPath)
if err != nil {
if os.IsNotExist(err) {
if debug {
l.Debugln("creating versions dir", v.versionsPath)
}
os.MkdirAll(v.versionsPath, 0755)
osutil.HideFile(v.versionsPath)
} else {
return err
}
}
if debug {
l.Debugln("archiving", filePath)
}
file := filepath.Base(filePath)
inFolderPath, err := filepath.Rel(v.folderPath, filepath.Dir(filePath))
if err != nil {
return err
}
dir := filepath.Join(v.versionsPath, inFolderPath)
err = os.MkdirAll(dir, 0755)
if err != nil && !os.IsExist(err) {
return err
}
ver := file + "~" + fileInfo.ModTime().Format(TimeLayout)
dst := filepath.Join(dir, ver)
if debug {
l.Debugln("moving to", dst)
}
err = osutil.Rename(filePath, dst)
if err != nil {
return err
}
versions, err := filepath.Glob(filepath.Join(dir, file+"~[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9]"))
if err != nil {
l.Warnln("Versioner: error finding versions for", file, err)
return nil
}
sort.Strings(versions)
v.expire(versions)
return nil
}
| internal/versioner/staggered.go | 0 | https://github.com/syncthing/syncthing/commit/59a85c1d751c85e585ec93398c3ba5c50bdef91f | [
0.0008552504586987197,
0.0001915993052534759,
0.00016416948346886784,
0.0001710256765363738,
0.00011138842819491401
] |